xref: /linux/drivers/net/wireless/realtek/rtw89/phy.c (revision bf4afc53b77aeaa48b5409da5c8da6bb4eff7f43)
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright(c) 2019-2020  Realtek Corporation
3  */
4 
5 #include "acpi.h"
6 #include "chan.h"
7 #include "coex.h"
8 #include "debug.h"
9 #include "fw.h"
10 #include "mac.h"
11 #include "phy.h"
12 #include "ps.h"
13 #include "reg.h"
14 #include "sar.h"
15 #include "txrx.h"
16 #include "util.h"
17 
/* Resolve the PHY1 register offset relative to PHY0 for @addr by
 * delegating to the chip-generation specific implementation.
 */
static u32 rtw89_phy0_phy1_offset(struct rtw89_dev *rtwdev, u32 addr)
{
	const struct rtw89_phy_gen_def *phy_def = rtwdev->chip->phy_def;

	return phy_def->phy0_phy1_offset(rtwdev, addr);
}
24 
get_max_amsdu_len(struct rtw89_dev * rtwdev,const struct rtw89_ra_report * report)25 static u16 get_max_amsdu_len(struct rtw89_dev *rtwdev,
26 			     const struct rtw89_ra_report *report)
27 {
28 	u32 bit_rate = report->bit_rate;
29 
30 	/* lower than ofdm, do not aggregate */
31 	if (bit_rate < 550)
32 		return 1;
33 
34 	/* avoid AMSDU for legacy rate */
35 	if (report->might_fallback_legacy)
36 		return 1;
37 
38 	/* lower than 20M vht 2ss mcs8, make it small */
39 	if (bit_rate < 1800)
40 		return 1200;
41 
42 	/* lower than 40M vht 2ss mcs9, make it medium */
43 	if (bit_rate < 4000)
44 		return 2600;
45 
46 	/* not yet 80M vht 2ss mcs8/9, make it twice regular packet size */
47 	if (bit_rate < 7000)
48 		return 3500;
49 
50 	return rtwdev->chip->max_amsdu_limit;
51 }
52 
get_mcs_ra_mask(u16 mcs_map,u8 highest_mcs,u8 gap)53 static u64 get_mcs_ra_mask(u16 mcs_map, u8 highest_mcs, u8 gap)
54 {
55 	u64 ra_mask = 0;
56 	u8 mcs_cap;
57 	int i, nss;
58 
59 	for (i = 0, nss = 12; i < 4; i++, mcs_map >>= 2, nss += 12) {
60 		mcs_cap = mcs_map & 0x3;
61 		switch (mcs_cap) {
62 		case 2:
63 			ra_mask |= GENMASK_ULL(highest_mcs, 0) << nss;
64 			break;
65 		case 1:
66 			ra_mask |= GENMASK_ULL(highest_mcs - gap, 0) << nss;
67 			break;
68 		case 0:
69 			ra_mask |= GENMASK_ULL(highest_mcs - gap * 2, 0) << nss;
70 			break;
71 		default:
72 			break;
73 		}
74 	}
75 
76 	return ra_mask;
77 }
78 
get_he_ra_mask(struct ieee80211_link_sta * link_sta)79 static u64 get_he_ra_mask(struct ieee80211_link_sta *link_sta)
80 {
81 	struct ieee80211_sta_he_cap cap = link_sta->he_cap;
82 	u16 mcs_map;
83 
84 	switch (link_sta->bandwidth) {
85 	case IEEE80211_STA_RX_BW_160:
86 		if (cap.he_cap_elem.phy_cap_info[0] &
87 		    IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
88 			mcs_map = le16_to_cpu(cap.he_mcs_nss_supp.rx_mcs_80p80);
89 		else
90 			mcs_map = le16_to_cpu(cap.he_mcs_nss_supp.rx_mcs_160);
91 		break;
92 	default:
93 		mcs_map = le16_to_cpu(cap.he_mcs_nss_supp.rx_mcs_80);
94 	}
95 
96 	/* MCS11, MCS9, MCS7 */
97 	return get_mcs_ra_mask(mcs_map, 11, 2);
98 }
99 
get_eht_mcs_ra_mask(u8 * max_nss,u8 start_mcs,u8 n_nss)100 static u64 get_eht_mcs_ra_mask(u8 *max_nss, u8 start_mcs, u8 n_nss)
101 {
102 	u64 nss_mcs_shift;
103 	u64 nss_mcs_val;
104 	u64 mask = 0;
105 	int i, j;
106 	u8 nss;
107 
108 	for (i = 0; i < n_nss; i++) {
109 		nss = u8_get_bits(max_nss[i], IEEE80211_EHT_MCS_NSS_RX);
110 		if (!nss)
111 			continue;
112 
113 		nss_mcs_val = GENMASK_ULL(start_mcs + i * 2, 0);
114 
115 		for (j = 0, nss_mcs_shift = 12; j < nss; j++, nss_mcs_shift += 16)
116 			mask |= nss_mcs_val << nss_mcs_shift;
117 	}
118 
119 	return mask;
120 }
121 
get_eht_ra_mask(struct rtw89_vif_link * rtwvif_link,struct ieee80211_link_sta * link_sta)122 static u64 get_eht_ra_mask(struct rtw89_vif_link *rtwvif_link,
123 			   struct ieee80211_link_sta *link_sta)
124 {
125 	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
126 	struct ieee80211_eht_mcs_nss_supp_20mhz_only *mcs_nss_20mhz;
127 	struct ieee80211_sta_eht_cap *eht_cap = &link_sta->eht_cap;
128 	struct ieee80211_eht_mcs_nss_supp_bw *mcs_nss;
129 	u8 *he_phy_cap = link_sta->he_cap.he_cap_elem.phy_cap_info;
130 
131 	switch (link_sta->bandwidth) {
132 	case IEEE80211_STA_RX_BW_320:
133 		mcs_nss = &eht_cap->eht_mcs_nss_supp.bw._320;
134 		/* MCS 9, 11, 13 */
135 		return get_eht_mcs_ra_mask(mcs_nss->rx_tx_max_nss, 9, 3);
136 	case IEEE80211_STA_RX_BW_160:
137 		mcs_nss = &eht_cap->eht_mcs_nss_supp.bw._160;
138 		/* MCS 9, 11, 13 */
139 		return get_eht_mcs_ra_mask(mcs_nss->rx_tx_max_nss, 9, 3);
140 	case IEEE80211_STA_RX_BW_20:
141 		if (vif->type == NL80211_IFTYPE_AP &&
142 		    !(he_phy_cap[0] & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL)) {
143 			mcs_nss_20mhz = &eht_cap->eht_mcs_nss_supp.only_20mhz;
144 			/* MCS 7, 9, 11, 13 */
145 			return get_eht_mcs_ra_mask(mcs_nss_20mhz->rx_tx_max_nss, 7, 4);
146 		}
147 		fallthrough;
148 	case IEEE80211_STA_RX_BW_80:
149 	default:
150 		mcs_nss = &eht_cap->eht_mcs_nss_supp.bw._80;
151 		/* MCS 9, 11, 13 */
152 		return get_eht_mcs_ra_mask(mcs_nss->rx_tx_max_nss, 9, 3);
153 	}
154 }
155 
156 #define RA_FLOOR_TABLE_SIZE	7
157 #define RA_FLOOR_UP_GAP		3
rtw89_phy_ra_mask_rssi(struct rtw89_dev * rtwdev,u8 rssi,u8 ratr_state)158 static u64 rtw89_phy_ra_mask_rssi(struct rtw89_dev *rtwdev, u8 rssi,
159 				  u8 ratr_state)
160 {
161 	u8 rssi_lv_t[RA_FLOOR_TABLE_SIZE] = {30, 44, 48, 52, 56, 60, 100};
162 	u8 rssi_lv = 0;
163 	u8 i;
164 
165 	rssi >>= 1;
166 	for (i = 0; i < RA_FLOOR_TABLE_SIZE; i++) {
167 		if (i >= ratr_state)
168 			rssi_lv_t[i] += RA_FLOOR_UP_GAP;
169 		if (rssi < rssi_lv_t[i]) {
170 			rssi_lv = i;
171 			break;
172 		}
173 	}
174 	if (rssi_lv == 0)
175 		return 0xffffffffffffffffULL;
176 	else if (rssi_lv == 1)
177 		return 0xfffffffffffffff0ULL;
178 	else if (rssi_lv == 2)
179 		return 0xffffffffffffefe0ULL;
180 	else if (rssi_lv == 3)
181 		return 0xffffffffffffcfc0ULL;
182 	else if (rssi_lv == 4)
183 		return 0xffffffffffff8f80ULL;
184 	else if (rssi_lv >= 5)
185 		return 0xffffffffffff0f00ULL;
186 
187 	return 0xffffffffffffffffULL;
188 }
189 
/* If filtering emptied the mask, restore rates from the backup: first the
 * HT-and-above portion, and if the mask is still empty, the legacy
 * (CCK/OFDM) portion too, so at least something remains usable.
 */
static u64 rtw89_phy_ra_mask_recover(u64 ra_mask, u64 ra_mask_bak)
{
	const u64 legacy = RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES;

	if (!(ra_mask & ~legacy))
		ra_mask |= ra_mask_bak & ~legacy;

	if (!ra_mask)
		ra_mask |= ra_mask_bak & legacy;

	return ra_mask;
}
200 
rtw89_phy_ra_mask_cfg(struct rtw89_dev * rtwdev,struct rtw89_sta_link * rtwsta_link,struct ieee80211_link_sta * link_sta,const struct rtw89_chan * chan)201 static u64 rtw89_phy_ra_mask_cfg(struct rtw89_dev *rtwdev,
202 				 struct rtw89_sta_link *rtwsta_link,
203 				 struct ieee80211_link_sta *link_sta,
204 				 const struct rtw89_chan *chan)
205 {
206 	struct cfg80211_bitrate_mask *mask = &rtwsta_link->mask;
207 	enum nl80211_band band;
208 	u64 cfg_mask;
209 
210 	if (!rtwsta_link->use_cfg_mask)
211 		return -1;
212 
213 	switch (chan->band_type) {
214 	case RTW89_BAND_2G:
215 		band = NL80211_BAND_2GHZ;
216 		cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_2GHZ].legacy,
217 					   RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES);
218 		break;
219 	case RTW89_BAND_5G:
220 		band = NL80211_BAND_5GHZ;
221 		cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_5GHZ].legacy,
222 					   RA_MASK_OFDM_RATES);
223 		break;
224 	case RTW89_BAND_6G:
225 		band = NL80211_BAND_6GHZ;
226 		cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_6GHZ].legacy,
227 					   RA_MASK_OFDM_RATES);
228 		break;
229 	default:
230 		rtw89_warn(rtwdev, "unhandled band type %d\n", chan->band_type);
231 		return -1;
232 	}
233 
234 	if (link_sta->eht_cap.has_eht) {
235 		cfg_mask |= u64_encode_bits(mask->control[band].eht_mcs[0],
236 					    RA_MASK_EHT_1SS_RATES);
237 		cfg_mask |= u64_encode_bits(mask->control[band].eht_mcs[1],
238 					    RA_MASK_EHT_2SS_RATES);
239 	} else if (link_sta->he_cap.has_he) {
240 		cfg_mask |= u64_encode_bits(mask->control[band].he_mcs[0],
241 					    RA_MASK_HE_1SS_RATES);
242 		cfg_mask |= u64_encode_bits(mask->control[band].he_mcs[1],
243 					    RA_MASK_HE_2SS_RATES);
244 	} else if (link_sta->vht_cap.vht_supported) {
245 		cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[0],
246 					    RA_MASK_VHT_1SS_RATES);
247 		cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[1],
248 					    RA_MASK_VHT_2SS_RATES);
249 	} else if (link_sta->ht_cap.ht_supported) {
250 		cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[0],
251 					    RA_MASK_HT_1SS_RATES);
252 		cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[1],
253 					    RA_MASK_HT_2SS_RATES);
254 	}
255 
256 	return cfg_mask;
257 }
258 
/* Per-mode "high rate" mask tables, indexed by spatial-stream count - 1.
 * rtw89_phy_ra_sta_update() ORs together the first hal.tx_nss entries to
 * bound the rate mask by the number of TX streams. The eht_mcs0_11 table
 * is the EHT variant used when hal.no_mcs_12_13 is set.
 */
static const u64
rtw89_ra_mask_ht_rates[4] = {RA_MASK_HT_1SS_RATES, RA_MASK_HT_2SS_RATES,
			     RA_MASK_HT_3SS_RATES, RA_MASK_HT_4SS_RATES};
static const u64
rtw89_ra_mask_vht_rates[4] = {RA_MASK_VHT_1SS_RATES, RA_MASK_VHT_2SS_RATES,
			      RA_MASK_VHT_3SS_RATES, RA_MASK_VHT_4SS_RATES};
static const u64
rtw89_ra_mask_he_rates[4] = {RA_MASK_HE_1SS_RATES, RA_MASK_HE_2SS_RATES,
			     RA_MASK_HE_3SS_RATES, RA_MASK_HE_4SS_RATES};
static const u64
rtw89_ra_mask_eht_rates[4] = {RA_MASK_EHT_1SS_RATES, RA_MASK_EHT_2SS_RATES,
			      RA_MASK_EHT_3SS_RATES, RA_MASK_EHT_4SS_RATES};
static const u64
rtw89_ra_mask_eht_mcs0_11[4] = {RA_MASK_EHT_1SS_MCS0_11, RA_MASK_EHT_2SS_MCS0_11,
				RA_MASK_EHT_3SS_MCS0_11, RA_MASK_EHT_4SS_MCS0_11};
274 
/* Determine the fixed GI/LTF setting for rate adaptation. A hardware
 * default is chosen first; if the user configured an (LTF, GI) pair via
 * the bitrate mask it overrides the default. Unknown combinations keep
 * the default. *fix_giltf_en is always set to true.
 */
static void rtw89_phy_ra_gi_ltf(struct rtw89_dev *rtwdev,
				struct rtw89_sta_link *rtwsta_link,
				struct ieee80211_link_sta *link_sta,
				const struct rtw89_chan *chan,
				bool *fix_giltf_en, u8 *fix_giltf)
{
	struct cfg80211_bitrate_mask *mask = &rtwsta_link->mask;
	u8 band = chan->band_type;
	enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
	u8 ltf, gi;

	*fix_giltf_en = true;

	/* Hardware default: 8852C at 160 MHz with SU/MU 4x HE-LTF 0.8us
	 * capable peers gets that mode, everyone else 2x HE-LTF 0.8us.
	 */
	if (rtwdev->chip->chip_id == RTL8852C &&
	    chan->band_width == RTW89_CHANNEL_WIDTH_160 &&
	    rtw89_sta_link_has_su_mu_4xhe08(link_sta))
		*fix_giltf = RTW89_GILTF_SGI_4XHE08;
	else
		*fix_giltf = RTW89_GILTF_2XHE08;

	if (!rtwsta_link->use_cfg_mask)
		return;

	if (link_sta->eht_cap.has_eht) {
		ltf = mask->control[nl_band].eht_ltf;
		gi = mask->control[nl_band].eht_gi;
	} else if (link_sta->he_cap.has_he) {
		ltf = mask->control[nl_band].he_ltf;
		gi = mask->control[nl_band].he_gi;
	} else {
		return;
	}

	switch (ltf) {
	case 2:
		if (gi == 2)
			*fix_giltf = RTW89_GILTF_LGI_4XHE32;
		else if (gi == 0)
			*fix_giltf = RTW89_GILTF_SGI_4XHE08;
		break;
	case 1:
		if (gi == 1)
			*fix_giltf = RTW89_GILTF_2XHE16;
		else if (gi == 0)
			*fix_giltf = RTW89_GILTF_2XHE08;
		break;
	case 0:
		if (gi == 1)
			*fix_giltf = RTW89_GILTF_1XHE16;
		else if (gi == 0)
			*fix_giltf = RTW89_GILTF_1XHE08;
		break;
	default:
		break;
	}
}
321 
/* Build the rate-adaptive (RA) parameters for one station link and store
 * them in rtwsta_link->ra.
 *
 * The allowed rate mask is assembled in stages:
 *  1. the station's highest supported PHY mode (EHT/HE/VHT/HT) and its
 *     per-NSS MCS capabilities,
 *  2. the legacy (CCK/OFDM) rates supported on the operating band,
 *  3. restriction to hal.tx_nss spatial streams,
 *  4. an RSSI-derived rate floor (skipped in pure-CCK mode),
 *  5. recovery from the pre-filter backup if filtering emptied the mask,
 *  6. the user-configured bitrate mask (rtw89_phy_ra_mask_cfg),
 *  7. the vif's fixed rate pattern, if enabled and not P2P (this replaces
 *     the mask built so far).
 *
 * When @csi is true, CSI reporting parameters are filled in as well.
 * Callers hold the RCU read lock protecting @link_sta.
 */
static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
				    struct rtw89_vif_link *rtwvif_link,
				    struct rtw89_sta_link *rtwsta_link,
				    struct ieee80211_link_sta *link_sta,
				    bool p2p, bool csi)
{
	struct rtw89_phy_rate_pattern *rate_pattern = &rtwvif_link->rate_pattern;
	struct rtw89_ra_info *ra = &rtwsta_link->ra;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
						       rtwvif_link->chanctx_idx);
	const u64 *high_rate_masks = rtw89_ra_mask_ht_rates;
	u8 rssi = ewma_rssi_read(&rtwsta_link->avg_rssi);
	u64 ra_mask = 0;
	u64 ra_mask_bak;
	u8 mode = 0;
	u8 csi_mode = RTW89_RA_RPT_MODE_LEGACY;
	u8 bw_mode = 0;
	u8 stbc_en = 0;
	u8 ldpc_en = 0;
	u8 fix_giltf = 0;
	u8 i;
	bool sgi = false;
	bool fix_giltf_en = false;

	memset(ra, 0, sizeof(*ra));
	/* Set the ra mask from sta's capability */
	if (link_sta->eht_cap.has_eht) {
		mode |= RTW89_RA_MODE_EHT;
		ra_mask |= get_eht_ra_mask(rtwvif_link, link_sta);

		/* optionally cap EHT at MCS11 when the HAL forbids MCS12/13 */
		if (rtwdev->hal.no_mcs_12_13)
			high_rate_masks = rtw89_ra_mask_eht_mcs0_11;
		else
			high_rate_masks = rtw89_ra_mask_eht_rates;

		rtw89_phy_ra_gi_ltf(rtwdev, rtwsta_link, link_sta,
				    chan, &fix_giltf_en, &fix_giltf);
	} else if (link_sta->he_cap.has_he) {
		mode |= RTW89_RA_MODE_HE;
		csi_mode = RTW89_RA_RPT_MODE_HE;
		ra_mask |= get_he_ra_mask(link_sta);
		high_rate_masks = rtw89_ra_mask_he_rates;
		if (link_sta->he_cap.he_cap_elem.phy_cap_info[2] &
		    IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
			stbc_en = 1;
		if (link_sta->he_cap.he_cap_elem.phy_cap_info[1] &
		    IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)
			ldpc_en = 1;
		rtw89_phy_ra_gi_ltf(rtwdev, rtwsta_link, link_sta,
				    chan, &fix_giltf_en, &fix_giltf);
	} else if (link_sta->vht_cap.vht_supported) {
		u16 mcs_map = le16_to_cpu(link_sta->vht_cap.vht_mcs.rx_mcs_map);

		mode |= RTW89_RA_MODE_VHT;
		csi_mode = RTW89_RA_RPT_MODE_VHT;
		/* MCS9 (non-20MHz), MCS8, MCS7 */
		if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20)
			ra_mask |= get_mcs_ra_mask(mcs_map, 8, 1);
		else
			ra_mask |= get_mcs_ra_mask(mcs_map, 9, 1);
		high_rate_masks = rtw89_ra_mask_vht_rates;
		if (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK)
			stbc_en = 1;
		if (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)
			ldpc_en = 1;
	} else if (link_sta->ht_cap.ht_supported) {
		mode |= RTW89_RA_MODE_HT;
		csi_mode = RTW89_RA_RPT_MODE_HT;
		/* HT rx_mask bytes map to NSS1..4 at 12-bit strides */
		ra_mask |= ((u64)link_sta->ht_cap.mcs.rx_mask[3] << 48) |
			   ((u64)link_sta->ht_cap.mcs.rx_mask[2] << 36) |
			   ((u64)link_sta->ht_cap.mcs.rx_mask[1] << 24) |
			   ((u64)link_sta->ht_cap.mcs.rx_mask[0] << 12);
		high_rate_masks = rtw89_ra_mask_ht_rates;
		if (link_sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
			stbc_en = 1;
		if (link_sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)
			ldpc_en = 1;
	}

	/* Add the legacy rates supported on the operating band; the low
	 * 4 bits of the 2 GHz supp_rates are CCK, the next 8 are OFDM.
	 */
	switch (chan->band_type) {
	case RTW89_BAND_2G:
		ra_mask |= link_sta->supp_rates[NL80211_BAND_2GHZ];
		if (link_sta->supp_rates[NL80211_BAND_2GHZ] & 0xf)
			mode |= RTW89_RA_MODE_CCK;
		if (link_sta->supp_rates[NL80211_BAND_2GHZ] & 0xff0)
			mode |= RTW89_RA_MODE_OFDM;
		break;
	case RTW89_BAND_5G:
		ra_mask |= (u64)link_sta->supp_rates[NL80211_BAND_5GHZ] << 4;
		mode |= RTW89_RA_MODE_OFDM;
		break;
	case RTW89_BAND_6G:
		ra_mask |= (u64)link_sta->supp_rates[NL80211_BAND_6GHZ] << 4;
		mode |= RTW89_RA_MODE_OFDM;
		break;
	default:
		rtw89_err(rtwdev, "Unknown band type\n");
		break;
	}

	/* Keep a backup so rtw89_phy_ra_mask_recover() can restore rates
	 * if the filters below empty the mask.
	 */
	ra_mask_bak = ra_mask;

	/* Limit HT-and-above rates to the number of TX spatial streams. */
	if (mode >= RTW89_RA_MODE_HT) {
		u64 mask = 0;
		for (i = 0; i < rtwdev->hal.tx_nss; i++)
			mask |= high_rate_masks[i];
		if (mode & RTW89_RA_MODE_OFDM)
			mask |= RA_MASK_SUBOFDM_RATES;
		if (mode & RTW89_RA_MODE_CCK)
			mask |= RA_MASK_SUBCCK_RATES;
		ra_mask &= mask;
	} else if (mode & RTW89_RA_MODE_OFDM) {
		ra_mask &= (RA_MASK_OFDM_RATES | RA_MASK_SUBCCK_RATES);
	}

	/* Apply the RSSI rate floor, then recover and apply the
	 * user-configured mask.
	 */
	if (mode != RTW89_RA_MODE_CCK)
		ra_mask &= rtw89_phy_ra_mask_rssi(rtwdev, rssi, 0);

	ra_mask = rtw89_phy_ra_mask_recover(ra_mask, ra_mask_bak);
	ra_mask &= rtw89_phy_ra_mask_cfg(rtwdev, rtwsta_link, link_sta, chan);

	/* Derive bandwidth mode and short-GI support from the station. */
	switch (link_sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		bw_mode = RTW89_CHANNEL_WIDTH_160;
		sgi = link_sta->vht_cap.vht_supported &&
		      (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160);
		break;
	case IEEE80211_STA_RX_BW_80:
		bw_mode = RTW89_CHANNEL_WIDTH_80;
		sgi = link_sta->vht_cap.vht_supported &&
		      (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80);
		break;
	case IEEE80211_STA_RX_BW_40:
		bw_mode = RTW89_CHANNEL_WIDTH_40;
		sgi = link_sta->ht_cap.ht_supported &&
		      (link_sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40);
		break;
	default:
		bw_mode = RTW89_CHANNEL_WIDTH_20;
		sgi = link_sta->ht_cap.ht_supported &&
		      (link_sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
		break;
	}

	if (link_sta->he_cap.he_cap_elem.phy_cap_info[3] &
	    IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM)
		ra->dcm_cap = 1;

	/* A fixed rate pattern (not used on P2P) replaces the derived mask
	 * and mode; only the configured mask is kept as restriction.
	 */
	if (rate_pattern->enable && !p2p) {
		ra_mask = rtw89_phy_ra_mask_cfg(rtwdev, rtwsta_link, link_sta, chan);
		ra_mask &= rate_pattern->ra_mask;
		mode = rate_pattern->ra_mode;
	}

	/* Commit the derived parameters into the RA info block. */
	ra->bw_cap = bw_mode;
	ra->er_cap = rtwsta_link->er_cap;
	ra->mode_ctrl = mode;
	ra->macid = rtwsta_link->mac_id;
	ra->stbc_cap = stbc_en;
	ra->ldpc_cap = ldpc_en;
	ra->ss_num = min(link_sta->rx_nss, rtwdev->hal.tx_nss) - 1;
	ra->en_sgi = sgi;
	ra->ra_mask = ra_mask;
	ra->fix_giltf_en = fix_giltf_en;
	ra->fix_giltf = fix_giltf;
	ra->partial_bw_er = link_sta->he_cap.has_he ?
			    !!(link_sta->he_cap.he_cap_elem.phy_cap_info[6] &
			       IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE) : 0;
	ra->band = chan->band_type;

	if (!csi)
		return;

	/* CSI reporting parameters; only filled when requested. */
	ra->fixed_csi_rate_en = false;
	ra->ra_csi_rate_en = true;
	ra->cr_tbl_sel = false;
	ra->band_num = rtwvif_link->phy_idx;
	ra->csi_bw = bw_mode;
	ra->csi_gi_ltf = RTW89_GILTF_LGI_4XHE32;
	ra->csi_mcs_ss_idx = 5;
	ra->csi_mode = csi_mode;
}
504 
/* Recompute one station link's RA parameters, translate the mac80211
 * change flags into firmware update masks, and push the result to the
 * firmware.
 */
void rtw89_phy_ra_update_sta_link(struct rtw89_dev *rtwdev,
				  struct rtw89_sta_link *rtwsta_link,
				  u32 changed)
{
	struct rtw89_vif_link *rtwvif_link = rtwsta_link->rtwvif_link;
	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
	struct rtw89_ra_info *ra = &rtwsta_link->ra;
	struct ieee80211_link_sta *link_sta;

	rcu_read_lock();
	link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, false);
	rtw89_phy_ra_sta_update(rtwdev, rtwvif_link, rtwsta_link,
				link_sta, vif->p2p, false);
	rcu_read_unlock();

	if (changed & IEEE80211_RC_SUPP_RATES_CHANGED)
		ra->upd_mask = 1;
	if (changed & (IEEE80211_RC_BW_CHANGED | IEEE80211_RC_NSS_CHANGED))
		ra->upd_bw_nss_mask = 1;

	rtw89_debug(rtwdev, RTW89_DBG_RA,
		    "ra updat: macid = %d, bw = %d, nss = %d, gi = %d %d",
		    ra->macid,
		    ra->bw_cap,
		    ra->ss_num,
		    ra->en_sgi,
		    ra->giltf);

	rtw89_fw_h2c_ra(rtwdev, ra, false);
}
537 
/* Propagate an RA update to every active link of the station. */
void rtw89_phy_ra_update_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta,
			     u32 changed)
{
	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
	struct rtw89_sta_link *link;
	unsigned int link_id;

	rtw89_sta_for_each_link(rtwsta, link, link_id)
		rtw89_phy_ra_update_sta_link(rtwdev, link, changed);
}
548 
/* Try to derive a fixed rate pattern from one configured rate-control
 * bitmap. Returns true when scanning may continue (nothing to pin, or a
 * pattern was recorded in @next) and false on a conflict: more than one
 * bit set while @force requires exactly one, or @next already holds a
 * pattern from an earlier class.
 */
static bool __check_rate_pattern(struct rtw89_phy_rate_pattern *next,
				 u16 rate_base, u64 ra_mask, u8 ra_mode,
				 u32 rate_ctrl, u32 ctrl_skip, bool force)
{
	/* The skip value (all bits of the class set) or an empty bitmap
	 * means the user pinned nothing here.
	 */
	if (rate_ctrl == ctrl_skip || rate_ctrl == 0)
		return true;

	if (force && hweight32(rate_ctrl) != 1)
		return false;

	if (next->enable)
		return false;

	/* Record the highest selected rate of this class. */
	next->rate = rate_base + __fls(rate_ctrl);
	next->ra_mode = ra_mode;
	next->ra_mask = ra_mask;
	next->enable = true;

	return true;
}
576 
/* AX-generation chips have no EHT rate codes; alias the EHT base rates to
 * RTW89_HW_RATE_INVAL so a resulting pattern is rejected by the
 * RTW89_HW_RATE_INVAL check in __rtw89_phy_rate_pattern_vif().
 */
enum __rtw89_hw_rate_invalid_bases {
	/* no EHT rate for ax chip */
	RTW89_HW_RATE_EHT_NSS1_MCS0 = RTW89_HW_RATE_INVAL,
	RTW89_HW_RATE_EHT_NSS2_MCS0 = RTW89_HW_RATE_INVAL,
	RTW89_HW_RATE_EHT_NSS3_MCS0 = RTW89_HW_RATE_INVAL,
	RTW89_HW_RATE_EHT_NSS4_MCS0 = RTW89_HW_RATE_INVAL,
};

/* Per-chip-generation HW rate code selector: AX uses the plain codes,
 * BE the _V1 variants.
 */
#define RTW89_HW_RATE_BY_CHIP_GEN(rate) \
	{ \
		[RTW89_CHIP_AX] = RTW89_HW_RATE_ ## rate, \
		[RTW89_CHIP_BE] = RTW89_HW_RATE_V1_ ## rate, \
	}
590 
/* Translate a cfg80211 bitrate mask into a fixed rate pattern for one vif
 * link. Classes are scanned EHT (BE chips only) -> HE -> VHT -> HT ->
 * legacy; the HT-and-above classes require exactly one bit set to pin a
 * rate, while legacy pins its highest selected rate whenever not all of
 * the band's bitrates are enabled. Any conflict, or nothing selected,
 * disables the pattern.
 */
static
void __rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
				  struct rtw89_vif_link *rtwvif_link,
				  const struct cfg80211_bitrate_mask *mask)
{
	struct ieee80211_supported_band *sband;
	struct rtw89_phy_rate_pattern next_pattern = {0};
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
						       rtwvif_link->chanctx_idx);
	/* Base HW rate codes per NSS, selected by chip generation. */
	static const u16 hw_rate_eht[][RTW89_CHIP_GEN_NUM] = {
		RTW89_HW_RATE_BY_CHIP_GEN(EHT_NSS1_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(EHT_NSS2_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(EHT_NSS3_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(EHT_NSS4_MCS0),
	};
	static const u16 hw_rate_he[][RTW89_CHIP_GEN_NUM] = {
		RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS1_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS2_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS3_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS4_MCS0),
	};
	static const u16 hw_rate_vht[][RTW89_CHIP_GEN_NUM] = {
		RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS1_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS2_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS3_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS4_MCS0),
	};
	static const u16 hw_rate_ht[][RTW89_CHIP_GEN_NUM] = {
		RTW89_HW_RATE_BY_CHIP_GEN(MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(MCS8),
		RTW89_HW_RATE_BY_CHIP_GEN(MCS16),
		RTW89_HW_RATE_BY_CHIP_GEN(MCS24),
	};
	u8 band = chan->band_type;
	enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
	enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
	u8 tx_nss = rtwdev->hal.tx_nss;
	u8 i;

	/* AX chips cannot fix an EHT rate; skip straight to HE. */
	if (chip_gen == RTW89_CHIP_AX)
		goto rs_11ax;

	for (i = 0; i < tx_nss; i++)
		if (!__check_rate_pattern(&next_pattern, hw_rate_eht[i][chip_gen],
					  RA_MASK_EHT_RATES, RTW89_RA_MODE_EHT,
					  mask->control[nl_band].eht_mcs[i],
					  0, true))
			goto out;

rs_11ax:
	for (i = 0; i < tx_nss; i++)
		if (!__check_rate_pattern(&next_pattern, hw_rate_he[i][chip_gen],
					  RA_MASK_HE_RATES, RTW89_RA_MODE_HE,
					  mask->control[nl_band].he_mcs[i],
					  0, true))
			goto out;

	for (i = 0; i < tx_nss; i++)
		if (!__check_rate_pattern(&next_pattern, hw_rate_vht[i][chip_gen],
					  RA_MASK_VHT_RATES, RTW89_RA_MODE_VHT,
					  mask->control[nl_band].vht_mcs[i],
					  0, true))
			goto out;

	for (i = 0; i < tx_nss; i++)
		if (!__check_rate_pattern(&next_pattern, hw_rate_ht[i][chip_gen],
					  RA_MASK_HT_RATES, RTW89_RA_MODE_HT,
					  mask->control[nl_band].ht_mcs[i],
					  0, true))
			goto out;

	/* legacy cannot be empty for nl80211_parse_tx_bitrate_mask, and
	 * require at least one basic rate for ieee80211_set_bitrate_mask,
	 * so the decision just depends on if all bitrates are set or not.
	 */
	sband = rtwdev->hw->wiphy->bands[nl_band];
	if (band == RTW89_BAND_2G) {
		if (!__check_rate_pattern(&next_pattern, RTW89_HW_RATE_CCK1,
					  RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES,
					  RTW89_RA_MODE_CCK | RTW89_RA_MODE_OFDM,
					  mask->control[nl_band].legacy,
					  BIT(sband->n_bitrates) - 1, false))
			goto out;
	} else {
		if (!__check_rate_pattern(&next_pattern, RTW89_HW_RATE_OFDM6,
					  RA_MASK_OFDM_RATES, RTW89_RA_MODE_OFDM,
					  mask->control[nl_band].legacy,
					  BIT(sband->n_bitrates) - 1, false))
			goto out;
	}

	if (!next_pattern.enable)
		goto out;

	/* e.g. an EHT pattern resolved on an AX chip via the INVAL alias */
	if (unlikely(next_pattern.rate >= RTW89_HW_RATE_INVAL)) {
		rtw89_debug(rtwdev, RTW89_DBG_RA,
			    "pattern invalid target: chip_gen %d, mode 0x%x\n",
			    chip_gen, next_pattern.ra_mode);
		goto out;
	}

	rtwvif_link->rate_pattern = next_pattern;
	rtw89_debug(rtwdev, RTW89_DBG_RA,
		    "configure pattern: rate 0x%x, mask 0x%llx, mode 0x%x\n",
		    next_pattern.rate,
		    next_pattern.ra_mask,
		    next_pattern.ra_mode);
	return;

out:
	rtwvif_link->rate_pattern.enable = false;
	rtw89_debug(rtwdev, RTW89_DBG_RA, "unset rate pattern\n");
}
704 
rtw89_phy_rate_pattern_vif(struct rtw89_dev * rtwdev,struct ieee80211_vif * vif,const struct cfg80211_bitrate_mask * mask)705 void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
706 				struct ieee80211_vif *vif,
707 				const struct cfg80211_bitrate_mask *mask)
708 {
709 	struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
710 	struct rtw89_vif_link *rtwvif_link;
711 	unsigned int link_id;
712 
713 	rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
714 		__rtw89_phy_rate_pattern_vif(rtwdev, rtwvif_link, mask);
715 }
716 
rtw89_phy_ra_update_sta_iter(void * data,struct ieee80211_sta * sta)717 static void rtw89_phy_ra_update_sta_iter(void *data, struct ieee80211_sta *sta)
718 {
719 	struct rtw89_dev *rtwdev = (struct rtw89_dev *)data;
720 
721 	rtw89_phy_ra_update_sta(rtwdev, sta, IEEE80211_RC_SUPP_RATES_CHANGED);
722 }
723 
/* Refresh RA parameters for every station known to mac80211 on this
 * device (atomic iteration; each station is handled by
 * rtw89_phy_ra_update_sta_iter()).
 */
void rtw89_phy_ra_update(struct rtw89_dev *rtwdev)
{
	ieee80211_iterate_stations_atomic(rtwdev->hw,
					  rtw89_phy_ra_update_sta_iter,
					  rtwdev);
}
730 
rtw89_phy_ra_assoc(struct rtw89_dev * rtwdev,struct rtw89_sta_link * rtwsta_link)731 void rtw89_phy_ra_assoc(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwsta_link)
732 {
733 	struct rtw89_vif_link *rtwvif_link = rtwsta_link->rtwvif_link;
734 	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
735 	struct rtw89_ra_info *ra = &rtwsta_link->ra;
736 	u8 rssi = ewma_rssi_read(&rtwsta_link->avg_rssi) >> RSSI_FACTOR;
737 	struct ieee80211_link_sta *link_sta;
738 	bool csi;
739 
740 	rcu_read_lock();
741 
742 	link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
743 	csi = rtw89_sta_has_beamformer_cap(link_sta);
744 
745 	rtw89_phy_ra_sta_update(rtwdev, rtwvif_link, rtwsta_link,
746 				link_sta, vif->p2p, csi);
747 
748 	rcu_read_unlock();
749 
750 	if (rssi > 40)
751 		ra->init_rate_lv = 1;
752 	else if (rssi > 20)
753 		ra->init_rate_lv = 2;
754 	else if (rssi > 1)
755 		ra->init_rate_lv = 3;
756 	else
757 		ra->init_rate_lv = 0;
758 	ra->upd_all = 1;
759 	rtw89_debug(rtwdev, RTW89_DBG_RA,
760 		    "ra assoc: macid = %d, mode = %d, bw = %d, nss = %d, lv = %d",
761 		    ra->macid,
762 		    ra->mode_ctrl,
763 		    ra->bw_cap,
764 		    ra->ss_num,
765 		    ra->init_rate_lv);
766 	rtw89_debug(rtwdev, RTW89_DBG_RA,
767 		    "ra assoc: dcm = %d, er = %d, ldpc = %d, stbc = %d, gi = %d %d",
768 		    ra->dcm_cap,
769 		    ra->er_cap,
770 		    ra->ldpc_cap,
771 		    ra->stbc_cap,
772 		    ra->en_sgi,
773 		    ra->giltf);
774 
775 	rtw89_fw_h2c_ra(rtwdev, ra, csi);
776 }
777 
/* Compute the TX sub-channel (TXSC) index: the position of a @dbw-wide
 * data transmission within the channel's full bandwidth, derived from the
 * primary channel's offset from the center channel.
 *
 * Returns 0 when @dbw equals the full bandwidth or the full bandwidth is
 * 20 MHz (no sub-channel selection needed), and 0xff for an inconsistent
 * 160 MHz / 40 MHz primary-channel offset.
 */
u8 rtw89_phy_get_txsc(struct rtw89_dev *rtwdev,
		      const struct rtw89_chan *chan,
		      enum rtw89_bandwidth dbw)
{
	enum rtw89_bandwidth cbw = chan->band_width;
	u8 pri_ch = chan->primary_channel;
	u8 central_ch = chan->channel;
	u8 txsc_idx = 0;
	u8 tmp = 0;

	if (cbw == dbw || cbw == RTW89_CHANNEL_WIDTH_20)
		return txsc_idx;

	switch (cbw) {
	case RTW89_CHANNEL_WIDTH_40:
		/* upper or lower 20 MHz half */
		txsc_idx = pri_ch > central_ch ? 1 : 2;
		break;
	case RTW89_CHANNEL_WIDTH_80:
		if (dbw == RTW89_CHANNEL_WIDTH_20) {
			/* 20 MHz sub-channel index from the primary's
			 * offset (channel numbers step by 2 per 10 MHz)
			 */
			if (pri_ch > central_ch)
				txsc_idx = (pri_ch - central_ch) >> 1;
			else
				txsc_idx = ((central_ch - pri_ch) >> 1) + 1;
		} else {
			/* 40 MHz half of the 80 MHz channel */
			txsc_idx = pri_ch > central_ch ? 9 : 10;
		}
		break;
	case RTW89_CHANNEL_WIDTH_160:
		/* tmp: 20 MHz sub-channel position within 160 MHz */
		if (pri_ch > central_ch)
			tmp = (pri_ch - central_ch) >> 1;
		else
			tmp = ((central_ch - pri_ch) >> 1) + 1;

		if (dbw == RTW89_CHANNEL_WIDTH_20) {
			txsc_idx = tmp;
		} else if (dbw == RTW89_CHANNEL_WIDTH_40) {
			/* map the 20 MHz position to its 40 MHz group */
			if (tmp == 1 || tmp == 3)
				txsc_idx = 9;
			else if (tmp == 5 || tmp == 7)
				txsc_idx = 11;
			else if (tmp == 2 || tmp == 4)
				txsc_idx = 10;
			else if (tmp == 6 || tmp == 8)
				txsc_idx = 12;
			else
				return 0xff;	/* inconsistent offset */
		} else {
			/* 80 MHz half of the 160 MHz channel */
			txsc_idx = pri_ch > central_ch ? 13 : 14;
		}
		break;
	case RTW89_CHANNEL_WIDTH_80_80:
		if (dbw == RTW89_CHANNEL_WIDTH_20) {
			if (pri_ch > central_ch)
				txsc_idx = (10 - (pri_ch - central_ch)) >> 1;
			else
				txsc_idx = ((central_ch - pri_ch) >> 1) + 5;
		} else if (dbw == RTW89_CHANNEL_WIDTH_40) {
			txsc_idx = pri_ch > central_ch ? 10 : 12;
		} else {
			txsc_idx = 14;
		}
		break;
	default:
		break;
	}

	return txsc_idx;
}
EXPORT_SYMBOL(rtw89_phy_get_txsc);
847 
/* Compute the TX sub-band (TXSB) index: the ordinal of the @dbw-wide
 * sub-band containing the primary channel, counted from the lowest
 * sub-band of the full bandwidth. Returns 0 when @dbw equals the full
 * bandwidth or the full bandwidth is 20 MHz.
 */
u8 rtw89_phy_get_txsb(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan,
		      enum rtw89_bandwidth dbw)
{
	enum rtw89_bandwidth cbw = chan->band_width;
	u8 pri_ch = chan->primary_channel;
	u8 central_ch = chan->channel;
	u8 txsb_idx = 0;

	if (cbw == dbw || cbw == RTW89_CHANNEL_WIDTH_20)
		return txsb_idx;

	/* The offsets below re-base (pri_ch - central_ch) so the lowest
	 * sub-band maps to 0; divisors reflect channel-number spacing per
	 * sub-band width (4 per 20 MHz, 8 per 40 MHz, 16 per 80 MHz).
	 */
	switch (cbw) {
	case RTW89_CHANNEL_WIDTH_40:
		txsb_idx = pri_ch > central_ch ? 1 : 0;
		break;
	case RTW89_CHANNEL_WIDTH_80:
		if (dbw == RTW89_CHANNEL_WIDTH_20)
			txsb_idx = (pri_ch - central_ch + 6) / 4;
		else
			txsb_idx = pri_ch > central_ch ? 1 : 0;
		break;
	case RTW89_CHANNEL_WIDTH_160:
		if (dbw == RTW89_CHANNEL_WIDTH_20)
			txsb_idx = (pri_ch - central_ch + 14) / 4;
		else if (dbw == RTW89_CHANNEL_WIDTH_40)
			txsb_idx = (pri_ch - central_ch + 12) / 8;
		else
			txsb_idx = pri_ch > central_ch ? 1 : 0;
		break;
	case RTW89_CHANNEL_WIDTH_320:
		if (dbw == RTW89_CHANNEL_WIDTH_20)
			txsb_idx = (pri_ch - central_ch + 30) / 4;
		else if (dbw == RTW89_CHANNEL_WIDTH_40)
			txsb_idx = (pri_ch - central_ch + 28) / 8;
		else if (dbw == RTW89_CHANNEL_WIDTH_80)
			txsb_idx = (pri_ch - central_ch + 24) / 16;
		else
			txsb_idx = pri_ch > central_ch ? 1 : 0;
		break;
	default:
		break;
	}

	return txsb_idx;
}
EXPORT_SYMBOL(rtw89_phy_get_txsb);
894 
rtw89_phy_check_swsi_busy(struct rtw89_dev * rtwdev)895 static bool rtw89_phy_check_swsi_busy(struct rtw89_dev *rtwdev)
896 {
897 	return !!rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, B_SWSI_W_BUSY_V1) ||
898 	       !!rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, B_SWSI_R_BUSY_V1);
899 }
900 
/* Read an RF register through its direct mirror in the PHY address space
 * (one dword per RF register, per path). Returns INV_RF_DATA for an
 * unsupported path.
 */
u32 rtw89_phy_read_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
		      u32 addr, u32 mask)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 direct_addr;

	if (rf_path >= chip->rf_path_num) {
		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return INV_RF_DATA;
	}

	/* only the low 8 address bits select the register */
	direct_addr = chip->rf_base_addr[rf_path] + ((addr & 0xff) << 2);

	return rtw89_phy_read32_mask(rtwdev, direct_addr, mask & RFREG_MASK);
}
EXPORT_SYMBOL(rtw89_phy_read_rf);
922 
/*
 * Read an RF register via the SWSI (software serial interface) engine:
 * wait for the engine to go idle, post the read request, poll for
 * completion, then fetch the data.
 * Returns the masked value, or INV_RF_DATA on a poll timeout.
 */
static u32 rtw89_phy_read_rf_a(struct rtw89_dev *rtwdev,
			       enum rtw89_rf_path rf_path, u32 addr, u32 mask)
{
	bool busy;
	bool done;
	u32 val;
	int ret;

	/* wait until any in-flight SWSI read/write has finished */
	ret = read_poll_timeout_atomic(rtw89_phy_check_swsi_busy, busy, !busy,
				       1, 30, false, rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "read rf busy swsi\n");
		return INV_RF_DATA;
	}

	mask &= RFREG_MASK;

	/* post the read request for this path/address */
	val = FIELD_PREP(B_SWSI_READ_ADDR_PATH_V1, rf_path) |
	      FIELD_PREP(B_SWSI_READ_ADDR_ADDR_V1, addr);
	rtw89_phy_write32_mask(rtwdev, R_SWSI_READ_ADDR_V1, B_SWSI_READ_ADDR_V1, val);
	udelay(2);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, done, done, 1,
				       30, false, rtwdev, R_SWSI_V1,
				       B_SWSI_R_DATA_DONE_V1);
	if (ret) {
		/* stay quiet if the device has been hot-unplugged */
		if (!test_bit(RTW89_FLAG_UNPLUGGED, rtwdev->flags))
			rtw89_err(rtwdev, "read swsi busy\n");
		return INV_RF_DATA;
	}

	return rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, mask);
}
956 
/* V1 RF read: ADSEL addresses use the direct BB window, others SWSI. */
u32 rtw89_phy_read_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
			 u32 addr, u32 mask)
{
	if (rf_path >= rtwdev->chip->rf_path_num) {
		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return INV_RF_DATA;
	}

	if (FIELD_GET(RTW89_RF_ADDR_ADSEL_MASK, addr))
		return rtw89_phy_read_rf(rtwdev, rf_path, addr, mask);

	return rtw89_phy_read_rf_a(rtwdev, rf_path, addr, mask);
}
EXPORT_SYMBOL(rtw89_phy_read_rf_v1);
973 
/*
 * Read the full value of an RF register through the per-path HWSI
 * (hardware serial interface): enable address-control mode, wait for
 * the engine to go idle, issue the read, then poll for completion.
 * Returns the raw value, or INV_RF_DATA on a poll timeout.
 */
static u32 rtw89_phy_read_full_rf_v2_a(struct rtw89_dev *rtwdev,
				       enum rtw89_rf_path rf_path, u32 addr)
{
	/* per-path HWSI status/data readback registers, indexed by RF path */
	static const u16 r_addr_ofst[2] = {0x2C24, 0x2D24};
	/* per-path HWSI address/control registers, indexed by RF path */
	static const u16 addr_ofst[2] = {0x2ADC, 0x2BDC};
	bool busy, done;
	int ret;
	u32 val;

	rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_CTL_MASK, 0x1);
	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, busy, !busy,
				       1, 3800, false,
				       rtwdev, r_addr_ofst[rf_path], B_HWSI_VAL_BUSY);
	if (ret) {
		rtw89_warn(rtwdev, "poll HWSI is busy\n");
		return INV_RF_DATA;
	}

	rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_MASK, addr);
	rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_RD, 0x1);
	udelay(2);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, done, done,
				       1, 3800, false,
				       rtwdev, r_addr_ofst[rf_path], B_HWSI_VAL_RDONE);
	if (ret) {
		rtw89_warn(rtwdev, "read HWSI is busy\n");
		val = INV_RF_DATA;
		goto out;
	}

	val = rtw89_phy_read32_mask(rtwdev, r_addr_ofst[rf_path], RFREG_MASK);
out:
	/* drop the poll/address-control mode regardless of outcome */
	rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_POLL_MASK, 0);

	return val;
}
1011 
/* Read a full HWSI register and extract the field selected by @mask. */
static u32 rtw89_phy_read_rf_v2_a(struct rtw89_dev *rtwdev,
				  enum rtw89_rf_path rf_path, u32 addr, u32 mask)
{
	u32 raw = rtw89_phy_read_full_rf_v2_a(rtwdev, rf_path, addr);

	return (raw & mask) >> __ffs(mask);
}
1021 
/* V2 RF read: ADSEL addresses use the direct BB window, others HWSI. */
u32 rtw89_phy_read_rf_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
			 u32 addr, u32 mask)
{
	if (rf_path >= rtwdev->chip->rf_path_num) {
		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return INV_RF_DATA;
	}

	if (u32_get_bits(addr, RTW89_RF_ADDR_ADSEL_MASK))
		return rtw89_phy_read_rf(rtwdev, rf_path, addr, mask);

	return rtw89_phy_read_rf_v2_a(rtwdev, rf_path, addr, mask);
}
EXPORT_SYMBOL(rtw89_phy_read_rf_v2);
1038 
/*
 * Read the full value of an RF register via the BE4 software serial
 * interface: wait for both the read and write engines to be idle, post
 * the path/address, then poll for read completion.
 * Returns the value, or INV_RF_DATA on a poll timeout.
 */
static u32 rtw89_phy_read_full_rf_v3_a(struct rtw89_dev *rtwdev,
				       enum rtw89_rf_path rf_path, u32 addr)
{
	bool done;
	u32 busy;
	int ret;
	u32 val;

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, busy, !busy,
				       1, 30, false,
				       rtwdev, R_SW_SI_DATA_BE4,
				       B_SW_SI_W_BUSY_BE4 | B_SW_SI_R_BUSY_BE4);
	if (ret) {
		rtw89_warn(rtwdev, "poll HWSI is busy\n");
		return INV_RF_DATA;
	}

	/* bits 10:8 carry the RF path, bits 7:0 the register address */
	val = u32_encode_bits(rf_path, GENMASK(10, 8)) |
	      u32_encode_bits(addr, GENMASK(7, 0));

	rtw89_phy_write32_mask(rtwdev, R_SW_SI_READ_ADDR_BE4, B_SW_SI_READ_ADDR_BE4, val);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, done, done,
				       1, 30, false,
				       rtwdev, R_SW_SI_DATA_BE4, B_SW_SI_READ_DATA_DONE_BE4);
	if (ret) {
		rtw89_warn(rtwdev, "read HWSI is busy\n");
		return INV_RF_DATA;
	}

	val = rtw89_phy_read32_mask(rtwdev, R_SW_SI_DATA_BE4, B_SW_SI_READ_DATA_BE4);

	return val;
}
1073 
/* Read a full BE4 SWSI register and extract the field selected by @mask. */
static u32 rtw89_phy_read_rf_v3_a(struct rtw89_dev *rtwdev,
				  enum rtw89_rf_path rf_path, u32 addr, u32 mask)
{
	u32 raw = rtw89_phy_read_full_rf_v3_a(rtwdev, rf_path, addr);

	return (raw & mask) >> __ffs(mask);
}
1083 
/* V3 RF read: ADSEL addresses use the direct BB window, others BE4 SWSI. */
u32 rtw89_phy_read_rf_v3(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
			 u32 addr, u32 mask)
{
	if (rf_path >= rtwdev->chip->rf_path_num) {
		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return INV_RF_DATA;
	}

	if (u32_get_bits(addr, RTW89_RF_ADDR_ADSEL_MASK))
		return rtw89_phy_read_rf(rtwdev, rf_path, addr, mask);

	return rtw89_phy_read_rf_v3_a(rtwdev, rf_path, addr, mask);
}
EXPORT_SYMBOL(rtw89_phy_read_rf_v3);
1100 
rtw89_phy_write_rf(struct rtw89_dev * rtwdev,enum rtw89_rf_path rf_path,u32 addr,u32 mask,u32 data)1101 bool rtw89_phy_write_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
1102 			u32 addr, u32 mask, u32 data)
1103 {
1104 	const struct rtw89_chip_info *chip = rtwdev->chip;
1105 	const u32 *base_addr = chip->rf_base_addr;
1106 	u32 direct_addr;
1107 
1108 	if (rf_path >= rtwdev->chip->rf_path_num) {
1109 		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
1110 		return false;
1111 	}
1112 
1113 	addr &= 0xff;
1114 	direct_addr = base_addr[rf_path] + (addr << 2);
1115 	mask &= RFREG_MASK;
1116 
1117 	rtw89_phy_write32_mask(rtwdev, direct_addr, mask, data);
1118 
1119 	/* delay to ensure writing properly */
1120 	udelay(1);
1121 
1122 	return true;
1123 }
1124 EXPORT_SYMBOL(rtw89_phy_write_rf);
1125 
/*
 * Write an RF register through the SWSI engine: wait for the engine to
 * go idle, optionally program a hardware bit mask for a partial-field
 * update, then post one write word carrying path, address and data.
 * Returns false on a busy timeout, true once the write is posted.
 */
static bool rtw89_phy_write_rf_a(struct rtw89_dev *rtwdev,
				 enum rtw89_rf_path rf_path, u32 addr, u32 mask,
				 u32 data)
{
	u8 bit_shift;
	u32 val;
	bool busy, b_msk_en = false;
	int ret;

	ret = read_poll_timeout_atomic(rtw89_phy_check_swsi_busy, busy, !busy,
				       1, 30, false, rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "write rf busy swsi\n");
		return false;
	}

	data &= RFREG_MASK;
	mask &= RFREG_MASK;

	/* partial-field write: hand the mask to hardware and shift the
	 * data into field position
	 */
	if (mask != RFREG_MASK) {
		b_msk_en = true;
		rtw89_phy_write32_mask(rtwdev, R_SWSI_BIT_MASK_V1, RFREG_MASK,
				       mask);
		bit_shift = __ffs(mask);
		data = (data << bit_shift) & RFREG_MASK;
	}

	val = FIELD_PREP(B_SWSI_DATA_BIT_MASK_EN_V1, b_msk_en) |
	      FIELD_PREP(B_SWSI_DATA_PATH_V1, rf_path) |
	      FIELD_PREP(B_SWSI_DATA_ADDR_V1, addr) |
	      FIELD_PREP(B_SWSI_DATA_VAL_V1, data);

	rtw89_phy_write32_mask(rtwdev, R_SWSI_DATA_V1, MASKDWORD, val);

	return true;
}
1162 
rtw89_phy_write_rf_v1(struct rtw89_dev * rtwdev,enum rtw89_rf_path rf_path,u32 addr,u32 mask,u32 data)1163 bool rtw89_phy_write_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
1164 			   u32 addr, u32 mask, u32 data)
1165 {
1166 	bool ad_sel = FIELD_GET(RTW89_RF_ADDR_ADSEL_MASK, addr);
1167 
1168 	if (rf_path >= rtwdev->chip->rf_path_num) {
1169 		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
1170 		return false;
1171 	}
1172 
1173 	if (ad_sel)
1174 		return rtw89_phy_write_rf(rtwdev, rf_path, addr, mask, data);
1175 	else
1176 		return rtw89_phy_write_rf_a(rtwdev, rf_path, addr, mask, data);
1177 }
1178 EXPORT_SYMBOL(rtw89_phy_write_rf_v1);
1179 
/*
 * Post a full-width RF register write through the per-path HWSI data
 * port, after waiting for the engine's status bit to clear.
 * Returns false on a busy timeout, true once the write is posted.
 */
static
bool rtw89_phy_write_full_rf_v2_a(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
				  u32 addr, u32 data)
{
	/* per-path HWSI status registers, indexed by RF path */
	static const u32 addr_is_idle[2] = {0x2C24, 0x2D24};
	/* per-path HWSI write-data ports, indexed by RF path */
	static const u32 addr_ofst[2] = {0x2AE0, 0x2BE0};
	bool busy;
	u32 val;
	int ret;

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, busy, !busy,
				       1, 3800, false,
				       rtwdev, addr_is_idle[rf_path], BIT(29));
	if (ret) {
		rtw89_warn(rtwdev, "[%s] HWSI is busy\n", __func__);
		return false;
	}

	val = u32_encode_bits(addr, B_HWSI_DATA_ADDR) |
	      u32_encode_bits(data, B_HWSI_DATA_VAL);

	rtw89_phy_write32(rtwdev, addr_ofst[rf_path], val);

	return true;
}
1205 
1206 static
rtw89_phy_write_rf_a_v2(struct rtw89_dev * rtwdev,enum rtw89_rf_path rf_path,u32 addr,u32 mask,u32 data)1207 bool rtw89_phy_write_rf_a_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
1208 			     u32 addr, u32 mask, u32 data)
1209 {
1210 	u32 val;
1211 
1212 	if (mask == RFREG_MASK) {
1213 		val = data;
1214 	} else {
1215 		val = rtw89_phy_read_full_rf_v2_a(rtwdev, rf_path, addr);
1216 		val &= ~mask;
1217 		val |= (data << __ffs(mask)) & mask;
1218 	}
1219 
1220 	return rtw89_phy_write_full_rf_v2_a(rtwdev, rf_path, addr, val);
1221 }
1222 
rtw89_phy_write_rf_v2(struct rtw89_dev * rtwdev,enum rtw89_rf_path rf_path,u32 addr,u32 mask,u32 data)1223 bool rtw89_phy_write_rf_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
1224 			   u32 addr, u32 mask, u32 data)
1225 {
1226 	bool ad_sel = u32_get_bits(addr, RTW89_RF_ADDR_ADSEL_MASK);
1227 
1228 	if (rf_path >= rtwdev->chip->rf_path_num) {
1229 		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
1230 		return INV_RF_DATA;
1231 	}
1232 
1233 	if (ad_sel)
1234 		return rtw89_phy_write_rf(rtwdev, rf_path, addr, mask, data);
1235 	else
1236 		return rtw89_phy_write_rf_a_v2(rtwdev, rf_path, addr, mask, data);
1237 }
1238 EXPORT_SYMBOL(rtw89_phy_write_rf_v2);
1239 
/*
 * Post a full-width RF register write through the BE4 software serial
 * interface, after waiting for both read and write engines to go idle.
 * Returns false on a busy timeout, true once the write is posted.
 */
static
bool rtw89_phy_write_full_rf_v3_a(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
				  u32 addr, u32 data)
{
	u32 busy;
	u32 val;
	int ret;

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, busy, !busy,
				       1, 30, false,
				       rtwdev, R_SW_SI_DATA_BE4,
				       B_SW_SI_W_BUSY_BE4 | B_SW_SI_R_BUSY_BE4);
	if (ret) {
		rtw89_warn(rtwdev, "[%s] HWSI is busy\n", __func__);
		return false;
	}

	/* one write word carries path, register address and data */
	val = u32_encode_bits(rf_path, B_SW_SI_DATA_PATH_BE4) |
	      u32_encode_bits(addr, B_SW_SI_DATA_ADR_BE4) |
	      u32_encode_bits(data, B_SW_SI_DATA_DAT_BE4);

	rtw89_phy_write32(rtwdev, R_SW_SI_WDATA_BE4, val);

	return true;
}
1265 
1266 static
rtw89_phy_write_rf_a_v3(struct rtw89_dev * rtwdev,enum rtw89_rf_path rf_path,u32 addr,u32 mask,u32 data)1267 bool rtw89_phy_write_rf_a_v3(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
1268 			     u32 addr, u32 mask, u32 data)
1269 {
1270 	u32 val;
1271 
1272 	if (mask == RFREG_MASK) {
1273 		val = data;
1274 	} else {
1275 		val = rtw89_phy_read_full_rf_v3_a(rtwdev, rf_path, addr);
1276 		val &= ~mask;
1277 		val |= (data << __ffs(mask)) & mask;
1278 	}
1279 
1280 	return rtw89_phy_write_full_rf_v3_a(rtwdev, rf_path, addr, val);
1281 }
1282 
rtw89_phy_write_rf_v3(struct rtw89_dev * rtwdev,enum rtw89_rf_path rf_path,u32 addr,u32 mask,u32 data)1283 bool rtw89_phy_write_rf_v3(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
1284 			   u32 addr, u32 mask, u32 data)
1285 {
1286 	bool ad_sel = u32_get_bits(addr, RTW89_RF_ADDR_ADSEL_MASK);
1287 
1288 	if (rf_path >= rtwdev->chip->rf_path_num) {
1289 		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
1290 		return INV_RF_DATA;
1291 	}
1292 
1293 	if (ad_sel)
1294 		return rtw89_phy_write_rf(rtwdev, rf_path, addr, mask, data);
1295 	else
1296 		return rtw89_phy_write_rf_a_v3(rtwdev, rf_path, addr, mask, data);
1297 }
1298 EXPORT_SYMBOL(rtw89_phy_write_rf_v3);
1299 
rtw89_chip_rf_v1(struct rtw89_dev * rtwdev)1300 static bool rtw89_chip_rf_v1(struct rtw89_dev *rtwdev)
1301 {
1302 	return rtwdev->chip->ops->write_rf == rtw89_phy_write_rf_v1;
1303 }
1304 
__rtw89_phy_bb_reset(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy_idx)1305 static void __rtw89_phy_bb_reset(struct rtw89_dev *rtwdev,
1306 				 enum rtw89_phy_idx phy_idx)
1307 {
1308 	const struct rtw89_chip_info *chip = rtwdev->chip;
1309 
1310 	chip->ops->bb_reset(rtwdev, phy_idx);
1311 }
1312 
rtw89_phy_bb_reset(struct rtw89_dev * rtwdev)1313 static void rtw89_phy_bb_reset(struct rtw89_dev *rtwdev)
1314 {
1315 	__rtw89_phy_bb_reset(rtwdev, RTW89_PHY_0);
1316 	if (rtwdev->dbcc_en)
1317 		__rtw89_phy_bb_reset(rtwdev, RTW89_PHY_1);
1318 }
1319 
rtw89_phy_config_bb_reg(struct rtw89_dev * rtwdev,const struct rtw89_reg2_def * reg,enum rtw89_rf_path rf_path,void * extra_data)1320 static void rtw89_phy_config_bb_reg(struct rtw89_dev *rtwdev,
1321 				    const struct rtw89_reg2_def *reg,
1322 				    enum rtw89_rf_path rf_path,
1323 				    void *extra_data)
1324 {
1325 	u32 addr;
1326 
1327 	if (reg->addr == 0xfe) {
1328 		mdelay(50);
1329 	} else if (reg->addr == 0xfd) {
1330 		mdelay(5);
1331 	} else if (reg->addr == 0xfc) {
1332 		mdelay(1);
1333 	} else if (reg->addr == 0xfb) {
1334 		udelay(50);
1335 	} else if (reg->addr == 0xfa) {
1336 		udelay(5);
1337 	} else if (reg->addr == 0xf9) {
1338 		udelay(1);
1339 	} else if (reg->data == BYPASS_CR_DATA) {
1340 		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "Bypass CR 0x%x\n", reg->addr);
1341 	} else {
1342 		addr = reg->addr;
1343 
1344 		if ((uintptr_t)extra_data == RTW89_PHY_1)
1345 			addr += rtw89_phy0_phy1_offset(rtwdev, reg->addr);
1346 
1347 		rtw89_phy_write32(rtwdev, addr, reg->data);
1348 	}
1349 }
1350 
/*
 * Decoded view of a BB gain table "address" word.  Configuration
 * metadata is packed into the 32-bit address:
 *   byte 0: entry type, or (for RPL offset entries) rxsc_start/bw nibbles
 *   byte 1: RF path
 *   byte 2: gain band
 *   byte 3: cfg_type, selecting which parser handles the data word
 */
union rtw89_phy_bb_gain_arg {
	u32 addr;
	struct {
		union {
			u8 type;
			struct {
				u8 rxsc_start:4;
				u8 bw:4;
			};
		};
		u8 path;
		u8 gain_band;
		u8 cfg_type;
	};
} __packed;
1366 
1367 static void
rtw89_phy_cfg_bb_gain_error(struct rtw89_dev * rtwdev,union rtw89_phy_bb_gain_arg arg,u32 data)1368 rtw89_phy_cfg_bb_gain_error(struct rtw89_dev *rtwdev,
1369 			    union rtw89_phy_bb_gain_arg arg, u32 data)
1370 {
1371 	struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
1372 	u8 type = arg.type;
1373 	u8 path = arg.path;
1374 	u8 gband = arg.gain_band;
1375 	int i;
1376 
1377 	switch (type) {
1378 	case 0:
1379 		for (i = 0; i < 4; i++, data >>= 8)
1380 			gain->lna_gain[gband][path][i] = data & 0xff;
1381 		break;
1382 	case 1:
1383 		for (i = 4; i < 7; i++, data >>= 8)
1384 			gain->lna_gain[gband][path][i] = data & 0xff;
1385 		break;
1386 	case 2:
1387 		for (i = 0; i < 2; i++, data >>= 8)
1388 			gain->tia_gain[gband][path][i] = data & 0xff;
1389 		break;
1390 	default:
1391 		rtw89_warn(rtwdev,
1392 			   "bb gain error {0x%x:0x%x} with unknown type: %d\n",
1393 			   arg.addr, data, type);
1394 		break;
1395 	}
1396 }
1397 
/* Starting RX subchannel index of each bandwidth slice within the
 * RPL offset tables parsed by rtw89_phy_cfg_bb_rpl_ofst().
 */
enum rtw89_phy_bb_rxsc_start_idx {
	RTW89_BB_RXSC_START_IDX_FULL = 0,
	RTW89_BB_RXSC_START_IDX_20 = 1,
	RTW89_BB_RXSC_START_IDX_20_1 = 5,
	RTW89_BB_RXSC_START_IDX_40 = 9,
	RTW89_BB_RXSC_START_IDX_80 = 13,
};
1405 
1406 static void
rtw89_phy_cfg_bb_rpl_ofst(struct rtw89_dev * rtwdev,union rtw89_phy_bb_gain_arg arg,u32 data)1407 rtw89_phy_cfg_bb_rpl_ofst(struct rtw89_dev *rtwdev,
1408 			  union rtw89_phy_bb_gain_arg arg, u32 data)
1409 {
1410 	struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
1411 	u8 rxsc_start = arg.rxsc_start;
1412 	u8 bw = arg.bw;
1413 	u8 path = arg.path;
1414 	u8 gband = arg.gain_band;
1415 	u8 rxsc;
1416 	s8 ofst;
1417 	int i;
1418 
1419 	switch (bw) {
1420 	case RTW89_CHANNEL_WIDTH_20:
1421 		gain->rpl_ofst_20[gband][path] = (s8)data;
1422 		break;
1423 	case RTW89_CHANNEL_WIDTH_40:
1424 		if (rxsc_start == RTW89_BB_RXSC_START_IDX_FULL) {
1425 			gain->rpl_ofst_40[gband][path][0] = (s8)data;
1426 		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20) {
1427 			for (i = 0; i < 2; i++, data >>= 8) {
1428 				rxsc = RTW89_BB_RXSC_START_IDX_20 + i;
1429 				ofst = (s8)(data & 0xff);
1430 				gain->rpl_ofst_40[gband][path][rxsc] = ofst;
1431 			}
1432 		}
1433 		break;
1434 	case RTW89_CHANNEL_WIDTH_80:
1435 		if (rxsc_start == RTW89_BB_RXSC_START_IDX_FULL) {
1436 			gain->rpl_ofst_80[gband][path][0] = (s8)data;
1437 		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20) {
1438 			for (i = 0; i < 4; i++, data >>= 8) {
1439 				rxsc = RTW89_BB_RXSC_START_IDX_20 + i;
1440 				ofst = (s8)(data & 0xff);
1441 				gain->rpl_ofst_80[gband][path][rxsc] = ofst;
1442 			}
1443 		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_40) {
1444 			for (i = 0; i < 2; i++, data >>= 8) {
1445 				rxsc = RTW89_BB_RXSC_START_IDX_40 + i;
1446 				ofst = (s8)(data & 0xff);
1447 				gain->rpl_ofst_80[gband][path][rxsc] = ofst;
1448 			}
1449 		}
1450 		break;
1451 	case RTW89_CHANNEL_WIDTH_160:
1452 		if (rxsc_start == RTW89_BB_RXSC_START_IDX_FULL) {
1453 			gain->rpl_ofst_160[gband][path][0] = (s8)data;
1454 		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20) {
1455 			for (i = 0; i < 4; i++, data >>= 8) {
1456 				rxsc = RTW89_BB_RXSC_START_IDX_20 + i;
1457 				ofst = (s8)(data & 0xff);
1458 				gain->rpl_ofst_160[gband][path][rxsc] = ofst;
1459 			}
1460 		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20_1) {
1461 			for (i = 0; i < 4; i++, data >>= 8) {
1462 				rxsc = RTW89_BB_RXSC_START_IDX_20_1 + i;
1463 				ofst = (s8)(data & 0xff);
1464 				gain->rpl_ofst_160[gband][path][rxsc] = ofst;
1465 			}
1466 		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_40) {
1467 			for (i = 0; i < 4; i++, data >>= 8) {
1468 				rxsc = RTW89_BB_RXSC_START_IDX_40 + i;
1469 				ofst = (s8)(data & 0xff);
1470 				gain->rpl_ofst_160[gband][path][rxsc] = ofst;
1471 			}
1472 		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_80) {
1473 			for (i = 0; i < 2; i++, data >>= 8) {
1474 				rxsc = RTW89_BB_RXSC_START_IDX_80 + i;
1475 				ofst = (s8)(data & 0xff);
1476 				gain->rpl_ofst_160[gband][path][rxsc] = ofst;
1477 			}
1478 		}
1479 		break;
1480 	default:
1481 		rtw89_warn(rtwdev,
1482 			   "bb rpl ofst {0x%x:0x%x} with unknown bw: %d\n",
1483 			   arg.addr, data, bw);
1484 		break;
1485 	}
1486 }
1487 
1488 static void
rtw89_phy_cfg_bb_gain_bypass(struct rtw89_dev * rtwdev,union rtw89_phy_bb_gain_arg arg,u32 data)1489 rtw89_phy_cfg_bb_gain_bypass(struct rtw89_dev *rtwdev,
1490 			     union rtw89_phy_bb_gain_arg arg, u32 data)
1491 {
1492 	struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
1493 	u8 type = arg.type;
1494 	u8 path = arg.path;
1495 	u8 gband = arg.gain_band;
1496 	int i;
1497 
1498 	switch (type) {
1499 	case 0:
1500 		for (i = 0; i < 4; i++, data >>= 8)
1501 			gain->lna_gain_bypass[gband][path][i] = data & 0xff;
1502 		break;
1503 	case 1:
1504 		for (i = 4; i < 7; i++, data >>= 8)
1505 			gain->lna_gain_bypass[gband][path][i] = data & 0xff;
1506 		break;
1507 	default:
1508 		rtw89_warn(rtwdev,
1509 			   "bb gain bypass {0x%x:0x%x} with unknown type: %d\n",
1510 			   arg.addr, data, type);
1511 		break;
1512 	}
1513 }
1514 
1515 static void
rtw89_phy_cfg_bb_gain_op1db(struct rtw89_dev * rtwdev,union rtw89_phy_bb_gain_arg arg,u32 data)1516 rtw89_phy_cfg_bb_gain_op1db(struct rtw89_dev *rtwdev,
1517 			    union rtw89_phy_bb_gain_arg arg, u32 data)
1518 {
1519 	struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
1520 	u8 type = arg.type;
1521 	u8 path = arg.path;
1522 	u8 gband = arg.gain_band;
1523 	int i;
1524 
1525 	switch (type) {
1526 	case 0:
1527 		for (i = 0; i < 4; i++, data >>= 8)
1528 			gain->lna_op1db[gband][path][i] = data & 0xff;
1529 		break;
1530 	case 1:
1531 		for (i = 4; i < 7; i++, data >>= 8)
1532 			gain->lna_op1db[gband][path][i] = data & 0xff;
1533 		break;
1534 	case 2:
1535 		for (i = 0; i < 4; i++, data >>= 8)
1536 			gain->tia_lna_op1db[gband][path][i] = data & 0xff;
1537 		break;
1538 	case 3:
1539 		for (i = 4; i < 8; i++, data >>= 8)
1540 			gain->tia_lna_op1db[gband][path][i] = data & 0xff;
1541 		break;
1542 	default:
1543 		rtw89_warn(rtwdev,
1544 			   "bb gain op1db {0x%x:0x%x} with unknown type: %d\n",
1545 			   arg.addr, data, type);
1546 		break;
1547 	}
1548 }
1549 
/*
 * Entry point for AX-gen BB gain tables.  The register "address" is
 * really a packed rtw89_phy_bb_gain_arg descriptor; the data word is
 * dispatched to the parser selected by cfg_type.
 */
static void rtw89_phy_config_bb_gain_ax(struct rtw89_dev *rtwdev,
					const struct rtw89_reg2_def *reg,
					enum rtw89_rf_path rf_path,
					void *extra_data)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	union rtw89_phy_bb_gain_arg arg = { .addr = reg->addr };
	struct rtw89_efuse *efuse = &rtwdev->efuse;

	/* drop entries for gain bands/paths this chip does not have */
	if (arg.gain_band >= RTW89_BB_GAIN_BAND_NR)
		return;

	if (arg.path >= chip->rf_path_num)
		return;

	/* delay pseudo-addresses make no sense inside a gain table */
	if (arg.addr >= 0xf9 && arg.addr <= 0xfe) {
		rtw89_warn(rtwdev, "bb gain table with flow ctrl\n");
		return;
	}

	switch (arg.cfg_type) {
	case 0:
		rtw89_phy_cfg_bb_gain_error(rtwdev, arg, reg->data);
		break;
	case 1:
		rtw89_phy_cfg_bb_rpl_ofst(rtwdev, arg, reg->data);
		break;
	case 2:
		rtw89_phy_cfg_bb_gain_bypass(rtwdev, arg, reg->data);
		break;
	case 3:
		rtw89_phy_cfg_bb_gain_op1db(rtwdev, arg, reg->data);
		break;
	case 4:
		/* This cfg_type is only used by rfe_type >= 50 with eFEM */
		if (efuse->rfe_type < 50)
			break;
		fallthrough;
	default:
		/* rfe_type >= 50 falls through here too, flagging that
		 * cfg_type 4 data is currently dropped
		 */
		rtw89_warn(rtwdev,
			   "bb gain {0x%x:0x%x} with unknown cfg type: %d\n",
			   arg.addr, reg->data, arg.cfg_type);
		break;
	}
}
1595 
1596 static void
rtw89_phy_cofig_rf_reg_store(struct rtw89_dev * rtwdev,const struct rtw89_reg2_def * reg,enum rtw89_rf_path rf_path,struct rtw89_fw_h2c_rf_reg_info * info)1597 rtw89_phy_cofig_rf_reg_store(struct rtw89_dev *rtwdev,
1598 			     const struct rtw89_reg2_def *reg,
1599 			     enum rtw89_rf_path rf_path,
1600 			     struct rtw89_fw_h2c_rf_reg_info *info)
1601 {
1602 	u16 idx = info->curr_idx % RTW89_H2C_RF_PAGE_SIZE;
1603 	u8 page = info->curr_idx / RTW89_H2C_RF_PAGE_SIZE;
1604 
1605 	if (page >= RTW89_H2C_RF_PAGE_NUM) {
1606 		rtw89_warn(rtwdev, "RF parameters exceed size. path=%d, idx=%d",
1607 			   rf_path, info->curr_idx);
1608 		return;
1609 	}
1610 
1611 	info->rtw89_phy_config_rf_h2c[page][idx] =
1612 		cpu_to_le32((reg->addr << 20) | reg->data);
1613 	info->curr_idx++;
1614 }
1615 
rtw89_phy_config_rf_reg_fw(struct rtw89_dev * rtwdev,struct rtw89_fw_h2c_rf_reg_info * info)1616 static int rtw89_phy_config_rf_reg_fw(struct rtw89_dev *rtwdev,
1617 				      struct rtw89_fw_h2c_rf_reg_info *info)
1618 {
1619 	u16 remain = info->curr_idx;
1620 	u16 len = 0;
1621 	u8 i;
1622 	int ret = 0;
1623 
1624 	if (remain > RTW89_H2C_RF_PAGE_NUM * RTW89_H2C_RF_PAGE_SIZE) {
1625 		rtw89_warn(rtwdev,
1626 			   "rf reg h2c total len %d larger than %d\n",
1627 			   remain, RTW89_H2C_RF_PAGE_NUM * RTW89_H2C_RF_PAGE_SIZE);
1628 		ret = -EINVAL;
1629 		goto out;
1630 	}
1631 
1632 	for (i = 0; i < RTW89_H2C_RF_PAGE_NUM && remain; i++, remain -= len) {
1633 		len = remain > RTW89_H2C_RF_PAGE_SIZE ? RTW89_H2C_RF_PAGE_SIZE : remain;
1634 		ret = rtw89_fw_h2c_rf_reg(rtwdev, info, len * 4, i);
1635 		if (ret)
1636 			goto out;
1637 	}
1638 out:
1639 	info->curr_idx = 0;
1640 
1641 	return ret;
1642 }
1643 
rtw89_phy_config_rf_reg_noio(struct rtw89_dev * rtwdev,const struct rtw89_reg2_def * reg,enum rtw89_rf_path rf_path,void * extra_data)1644 static void rtw89_phy_config_rf_reg_noio(struct rtw89_dev *rtwdev,
1645 					 const struct rtw89_reg2_def *reg,
1646 					 enum rtw89_rf_path rf_path,
1647 					 void *extra_data)
1648 {
1649 	u32 addr = reg->addr;
1650 
1651 	if (addr == 0xfe || addr == 0xfd || addr == 0xfc || addr == 0xfb ||
1652 	    addr == 0xfa || addr == 0xf9)
1653 		return;
1654 
1655 	if (rtw89_chip_rf_v1(rtwdev) && addr < 0x100)
1656 		return;
1657 
1658 	rtw89_phy_cofig_rf_reg_store(rtwdev, reg, rf_path,
1659 				     (struct rtw89_fw_h2c_rf_reg_info *)extra_data);
1660 }
1661 
rtw89_phy_config_rf_reg(struct rtw89_dev * rtwdev,const struct rtw89_reg2_def * reg,enum rtw89_rf_path rf_path,void * extra_data)1662 static void rtw89_phy_config_rf_reg(struct rtw89_dev *rtwdev,
1663 				    const struct rtw89_reg2_def *reg,
1664 				    enum rtw89_rf_path rf_path,
1665 				    void *extra_data)
1666 {
1667 	if (reg->addr == 0xfe) {
1668 		mdelay(50);
1669 	} else if (reg->addr == 0xfd) {
1670 		mdelay(5);
1671 	} else if (reg->addr == 0xfc) {
1672 		mdelay(1);
1673 	} else if (reg->addr == 0xfb) {
1674 		udelay(50);
1675 	} else if (reg->addr == 0xfa) {
1676 		udelay(5);
1677 	} else if (reg->addr == 0xf9) {
1678 		udelay(1);
1679 	} else {
1680 		rtw89_write_rf(rtwdev, rf_path, reg->addr, 0xfffff, reg->data);
1681 		rtw89_phy_cofig_rf_reg_store(rtwdev, reg, rf_path,
1682 					     (struct rtw89_fw_h2c_rf_reg_info *)extra_data);
1683 	}
1684 }
1685 
rtw89_phy_config_rf_reg_v1(struct rtw89_dev * rtwdev,const struct rtw89_reg2_def * reg,enum rtw89_rf_path rf_path,void * extra_data)1686 void rtw89_phy_config_rf_reg_v1(struct rtw89_dev *rtwdev,
1687 				const struct rtw89_reg2_def *reg,
1688 				enum rtw89_rf_path rf_path,
1689 				void *extra_data)
1690 {
1691 	rtw89_write_rf(rtwdev, rf_path, reg->addr, RFREG_MASK, reg->data);
1692 
1693 	if (reg->addr < 0x100)
1694 		return;
1695 
1696 	rtw89_phy_cofig_rf_reg_store(rtwdev, reg, rf_path,
1697 				     (struct rtw89_fw_h2c_rf_reg_info *)extra_data);
1698 }
1699 EXPORT_SYMBOL(rtw89_phy_config_rf_reg_v1);
1700 
/*
 * Pick the headline (condition header) entry of a PHY parameter table
 * that best fits this device, tried in priority order:
 *   1. RFE type and chip version (cv) both match exactly
 *   2. RFE matches, CV marked "don't care"
 *   3. RFE matches, highest CV present in the table
 *   4. RFE marked "don't care", highest CV present in the table
 * *headline_size receives the number of headline entries and
 * *headline_idx the chosen entry; returns -EINVAL when nothing fits.
 */
static int rtw89_phy_sel_headline(struct rtw89_dev *rtwdev,
				  const struct rtw89_phy_table *table,
				  u32 *headline_size, u32 *headline_idx,
				  u8 rfe, u8 cv)
{
	const struct rtw89_reg2_def *reg;
	u32 headline;
	u32 compare, target;
	u8 rfe_para, cv_para;
	u8 cv_max = 0;
	bool case_matched = false;
	u32 i;

	/* headline entries form a contiguous prefix of the table */
	for (i = 0; i < table->n_regs; i++) {
		reg = &table->regs[i];
		headline = get_phy_headline(reg->addr);
		if (headline != PHY_HEADLINE_VALID)
			break;
	}
	*headline_size = i;
	if (*headline_size == 0)
		return 0;

	/* case 1: RFE match, CV match */
	compare = get_phy_compare(rfe, cv);
	for (i = 0; i < *headline_size; i++) {
		reg = &table->regs[i];
		target = get_phy_target(reg->addr);
		if (target == compare) {
			*headline_idx = i;
			return 0;
		}
	}

	/* case 2: RFE match, CV don't care */
	compare = get_phy_compare(rfe, PHY_COND_DONT_CARE);
	for (i = 0; i < *headline_size; i++) {
		reg = &table->regs[i];
		target = get_phy_target(reg->addr);
		if (target == compare) {
			*headline_idx = i;
			return 0;
		}
	}

	/* case 3: RFE match, CV max in table */
	for (i = 0; i < *headline_size; i++) {
		reg = &table->regs[i];
		rfe_para = get_phy_cond_rfe(reg->addr);
		cv_para = get_phy_cond_cv(reg->addr);
		if (rfe_para == rfe) {
			if (cv_para >= cv_max) {
				cv_max = cv_para;
				*headline_idx = i;
				case_matched = true;
			}
		}
	}

	if (case_matched)
		return 0;

	/* case 4: RFE don't care, CV max in table */
	for (i = 0; i < *headline_size; i++) {
		reg = &table->regs[i];
		rfe_para = get_phy_cond_rfe(reg->addr);
		cv_para = get_phy_cond_cv(reg->addr);
		if (rfe_para == PHY_COND_DONT_CARE) {
			if (cv_para >= cv_max) {
				cv_max = cv_para;
				*headline_idx = i;
				case_matched = true;
			}
		}
	}

	if (case_matched)
		return 0;

	return -EINVAL;
}
1782 
/*
 * Walk a PHY parameter table and apply each entry through @config.
 * The table is a small conditional "script": headline entries select a
 * per-RFE/CV variant, and IF/ELIF/ELSE/END markers embedded in the
 * address stream gate which register writes apply to this device.
 */
static void rtw89_phy_init_reg(struct rtw89_dev *rtwdev,
			       const struct rtw89_phy_table *table,
			       void (*config)(struct rtw89_dev *rtwdev,
					      const struct rtw89_reg2_def *reg,
					      enum rtw89_rf_path rf_path,
					      void *data),
			       void *extra_data)
{
	const struct rtw89_reg2_def *reg;
	enum rtw89_rf_path rf_path = table->rf_path;
	u8 rfe = rtwdev->efuse.rfe_type;
	u8 cv = rtwdev->hal.cv;
	u32 i;
	u32 headline_size = 0, headline_idx = 0;
	u32 target = 0, cfg_target;
	u8 cond;
	bool is_matched = true;	/* entries outside any branch always apply */
	bool target_found = false;
	int ret;

	ret = rtw89_phy_sel_headline(rtwdev, table, &headline_size,
				     &headline_idx, rfe, cv);
	if (ret) {
		rtw89_err(rtwdev, "invalid PHY package: %d/%d\n", rfe, cv);
		return;
	}

	cfg_target = get_phy_target(table->regs[headline_idx].addr);
	for (i = headline_size; i < table->n_regs; i++) {
		reg = &table->regs[i];
		cond = get_phy_cond(reg->addr);
		switch (cond) {
		case PHY_COND_BRANCH_IF:
		case PHY_COND_BRANCH_ELIF:
			target = get_phy_target(reg->addr);
			break;
		case PHY_COND_BRANCH_ELSE:
			is_matched = false;
			if (!target_found) {
				/* no earlier branch matched, so the table
				 * lacks a variant for this device: bail out
				 */
				rtw89_warn(rtwdev, "failed to load CR %x/%x\n",
					   reg->addr, reg->data);
				return;
			}
			break;
		case PHY_COND_BRANCH_END:
			is_matched = true;
			target_found = false;
			break;
		case PHY_COND_CHECK:
			if (target_found) {
				is_matched = false;
				break;
			}

			if (target == cfg_target) {
				is_matched = true;
				target_found = true;
			} else {
				is_matched = false;
				target_found = false;
			}
			break;
		default:
			/* plain register entry: apply it when the current
			 * branch (if any) matched
			 */
			if (is_matched)
				config(rtwdev, reg, rf_path, extra_data);
			break;
		}
	}
}
1852 
rtw89_phy_init_bb_reg(struct rtw89_dev * rtwdev)1853 void rtw89_phy_init_bb_reg(struct rtw89_dev *rtwdev)
1854 {
1855 	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
1856 	const struct rtw89_chip_info *chip = rtwdev->chip;
1857 	const struct rtw89_phy_table *bb_table;
1858 	const struct rtw89_phy_table *bb_gain_table;
1859 
1860 	bb_table = elm_info->bb_tbl ? elm_info->bb_tbl : chip->bb_table;
1861 	rtw89_phy_init_reg(rtwdev, bb_table, rtw89_phy_config_bb_reg, NULL);
1862 	if (rtwdev->dbcc_en)
1863 		rtw89_phy_init_reg(rtwdev, bb_table, rtw89_phy_config_bb_reg,
1864 				   (void *)RTW89_PHY_1);
1865 
1866 	rtw89_chip_init_txpwr_unit(rtwdev);
1867 
1868 	bb_gain_table = elm_info->bb_gain ? elm_info->bb_gain : chip->bb_gain_table;
1869 	if (bb_gain_table)
1870 		rtw89_phy_init_reg(rtwdev, bb_gain_table,
1871 				   chip->phy_def->config_bb_gain, NULL);
1872 
1873 	rtw89_phy_bb_reset(rtwdev);
1874 }
1875 
rtw89_phy_init_bb_afe(struct rtw89_dev * rtwdev)1876 void rtw89_phy_init_bb_afe(struct rtw89_dev *rtwdev)
1877 {
1878 	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
1879 	const struct rtw89_fw_element_hdr *afe_elm = elm_info->afe;
1880 	const struct rtw89_phy_afe_info *info;
1881 	u32 action, cat, class;
1882 	u32 addr, mask, val;
1883 	u32 poll, rpt;
1884 	u32 n, i;
1885 
1886 	if (!afe_elm)
1887 		return;
1888 
1889 	n = le32_to_cpu(afe_elm->size) / sizeof(*info);
1890 
1891 	for (i = 0; i < n; i++) {
1892 		info = &afe_elm->u.afe.infos[i];
1893 
1894 		class = le32_to_cpu(info->class);
1895 		switch (class) {
1896 		case RTW89_FW_AFE_CLASS_P0:
1897 		case RTW89_FW_AFE_CLASS_P1:
1898 		case RTW89_FW_AFE_CLASS_CMN:
1899 			/* Currently support two paths */
1900 			break;
1901 		case RTW89_FW_AFE_CLASS_P2:
1902 		case RTW89_FW_AFE_CLASS_P3:
1903 		case RTW89_FW_AFE_CLASS_P4:
1904 		default:
1905 			rtw89_warn(rtwdev, "unexpected AFE class %u\n", class);
1906 			continue;
1907 		}
1908 
1909 		addr = le32_to_cpu(info->addr);
1910 		mask = le32_to_cpu(info->mask);
1911 		val = le32_to_cpu(info->val);
1912 		cat = le32_to_cpu(info->cat);
1913 		action = le32_to_cpu(info->action);
1914 
1915 		switch (action) {
1916 		case RTW89_FW_AFE_ACTION_WRITE:
1917 			switch (cat) {
1918 			case RTW89_FW_AFE_CAT_MAC:
1919 			case RTW89_FW_AFE_CAT_MAC1:
1920 				rtw89_write32_mask(rtwdev, addr, mask, val);
1921 				break;
1922 			case RTW89_FW_AFE_CAT_AFEDIG:
1923 			case RTW89_FW_AFE_CAT_AFEDIG1:
1924 				rtw89_write32_mask(rtwdev, addr, mask, val);
1925 				break;
1926 			case RTW89_FW_AFE_CAT_BB:
1927 				rtw89_phy_write32_idx(rtwdev, addr, mask, val, RTW89_PHY_0);
1928 				break;
1929 			case RTW89_FW_AFE_CAT_BB1:
1930 				rtw89_phy_write32_idx(rtwdev, addr, mask, val, RTW89_PHY_1);
1931 				break;
1932 			default:
1933 				rtw89_warn(rtwdev,
1934 					   "unexpected AFE writing action %u\n", action);
1935 				break;
1936 			}
1937 			break;
1938 		case RTW89_FW_AFE_ACTION_POLL:
1939 			for (poll = 0; poll <= 10; poll++) {
1940 				/*
1941 				 * For CAT_BB, AFE reads register with mcu_offset 0,
1942 				 * so both CAT_MAC and CAT_BB use the same method.
1943 				 */
1944 				rpt = rtw89_read32_mask(rtwdev, addr, mask);
1945 				if (rpt == val)
1946 					goto poll_done;
1947 
1948 				fsleep(1);
1949 			}
1950 			rtw89_warn(rtwdev, "failed to poll AFE cat=%u addr=0x%x mask=0x%x\n",
1951 				   cat, addr, mask);
1952 poll_done:
1953 			break;
1954 		case RTW89_FW_AFE_ACTION_DELAY:
1955 			fsleep(addr);
1956 			break;
1957 		}
1958 	}
1959 }
1960 
rtw89_phy_nctl_poll(struct rtw89_dev * rtwdev)1961 static u32 rtw89_phy_nctl_poll(struct rtw89_dev *rtwdev)
1962 {
1963 	rtw89_phy_write32(rtwdev, 0x8080, 0x4);
1964 	udelay(1);
1965 	return rtw89_phy_read32(rtwdev, 0x8080);
1966 }
1967 
/* Program the RF radio register table for every RF path. In no-IO mode
 * the registers are only staged for firmware download; otherwise they
 * are written directly (or via a table-specific config hook). Each
 * path's staged registers are then pushed to the firmware over H2C.
 */
void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev, bool noio)
{
	void (*cfg)(struct rtw89_dev *rtwdev, const struct rtw89_reg2_def *reg,
		    enum rtw89_rf_path rf_path, void *data);
	struct rtw89_fw_elm_info *elm = &rtwdev->fw.elm_info;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_fw_h2c_rf_reg_info *h2c_info;
	const struct rtw89_phy_table *tbl;
	u8 path;

	h2c_info = kzalloc_obj(*h2c_info);
	if (!h2c_info)
		return;

	for (path = RF_PATH_A; path < chip->rf_path_num; path++) {
		/* FW-provided radio table wins over the built-in one */
		tbl = elm->rf_radio[path] ?: chip->rf_table[path];
		h2c_info->rf_path = tbl->rf_path;

		if (noio)
			cfg = rtw89_phy_config_rf_reg_noio;
		else
			cfg = tbl->config ?: rtw89_phy_config_rf_reg;

		rtw89_phy_init_reg(rtwdev, tbl, cfg, (void *)h2c_info);
		if (rtw89_phy_config_rf_reg_fw(rtwdev, h2c_info))
			rtw89_warn(rtwdev, "rf path %d reg h2c config failed\n",
				   h2c_info->rf_path);
	}

	kfree(h2c_info);
}
1998 
/* Pre-initialize the RF NCTL (calibration control) block on AX chips:
 * enable IQK/DPK clocks, release the per-path resets, then poll
 * register 0x8080 until the NCTL block reports ready (0x4).
 */
static void rtw89_phy_preinit_rf_nctl_ax(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 val;
	int ret;

	/* IQK/DPK clock & reset */
	rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, 0x3);
	rtw89_phy_write32_set(rtwdev, R_GNT_BT_WGT_EN, 0x1);
	rtw89_phy_write32_set(rtwdev, R_P0_PATH_RST, 0x8000000);
	/* no path-1 reset on 8851B */
	if (chip->chip_id != RTL8851B)
		rtw89_phy_write32_set(rtwdev, R_P1_PATH_RST, 0x8000000);
	/* 8852B/8852BT need an extra IOQ IQK/DPK bit */
	if (chip->chip_id == RTL8852B || chip->chip_id == RTL8852BT)
		rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, 0x2);

	/* check 0x8080 */
	rtw89_phy_write32(rtwdev, R_NCTL_CFG, 0x8);

	ret = read_poll_timeout(rtw89_phy_nctl_poll, val, val == 0x4, 10,
				1000, false, rtwdev);
	if (ret)
		rtw89_err(rtwdev, "failed to poll nctl block\n");
}
2022 
rtw89_phy_init_rf_nctl(struct rtw89_dev * rtwdev)2023 static void rtw89_phy_init_rf_nctl(struct rtw89_dev *rtwdev)
2024 {
2025 	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
2026 	const struct rtw89_chip_info *chip = rtwdev->chip;
2027 	const struct rtw89_phy_table *nctl_table;
2028 
2029 	rtw89_phy_preinit_rf_nctl(rtwdev);
2030 
2031 	nctl_table = elm_info->rf_nctl ? elm_info->rf_nctl : chip->nctl_table;
2032 	rtw89_phy_init_reg(rtwdev, nctl_table, rtw89_phy_config_bb_reg, NULL);
2033 
2034 	if (chip->nctl_post_table)
2035 		rtw89_rfk_parser(rtwdev, chip->nctl_post_table);
2036 }
2037 
/* Return the address offset from a PHY0 register to its PHY1 mirror on
 * AX chips. Pages 0x6-0xd, 0x19-0x1b and 0x40-0x4f mirror into PHY1 at
 * +0x2000; any other page has no PHY1 counterpart and yields 0.
 */
static u32 rtw89_phy0_phy1_offset_ax(struct rtw89_dev *rtwdev, u32 addr)
{
	u32 page = addr >> 8;

	switch (page) {
	case 0x6 ... 0xd:
	case 0x19 ... 0x1b:
	case 0x40 ... 0x4f:
		return 0x2000;
	default:
		/* warning case: page is not mirrored */
		return 0;
	}
}
2068 
/* Masked PHY register write, redirected to the PHY1 mirror address
 * when DBCC is enabled and PHY1 is addressed.
 */
void rtw89_phy_write32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
			   u32 data, enum rtw89_phy_idx phy_idx)
{
	u32 eff_addr = addr;

	if (phy_idx == RTW89_PHY_1 && rtwdev->dbcc_en)
		eff_addr += rtw89_phy0_phy1_offset(rtwdev, addr);

	rtw89_phy_write32_mask(rtwdev, eff_addr, mask, data);
}
EXPORT_SYMBOL(rtw89_phy_write32_idx);
2077 
/* Set bits in a PHY register, redirected to the PHY1 mirror address
 * when DBCC is enabled and PHY1 is addressed.
 */
void rtw89_phy_write32_idx_set(struct rtw89_dev *rtwdev, u32 addr, u32 bits,
			       enum rtw89_phy_idx phy_idx)
{
	u32 eff_addr = addr;

	if (phy_idx == RTW89_PHY_1 && rtwdev->dbcc_en)
		eff_addr += rtw89_phy0_phy1_offset(rtwdev, addr);

	rtw89_phy_write32_set(rtwdev, eff_addr, bits);
}
EXPORT_SYMBOL(rtw89_phy_write32_idx_set);
2086 
/* Clear bits in a PHY register, redirected to the PHY1 mirror address
 * when DBCC is enabled and PHY1 is addressed.
 */
void rtw89_phy_write32_idx_clr(struct rtw89_dev *rtwdev, u32 addr, u32 bits,
			       enum rtw89_phy_idx phy_idx)
{
	u32 eff_addr = addr;

	if (phy_idx == RTW89_PHY_1 && rtwdev->dbcc_en)
		eff_addr += rtw89_phy0_phy1_offset(rtwdev, addr);

	rtw89_phy_write32_clr(rtwdev, eff_addr, bits);
}
EXPORT_SYMBOL(rtw89_phy_write32_idx_clr);
2095 
/* Masked PHY register read, redirected to the PHY1 mirror address
 * when DBCC is enabled and PHY1 is addressed.
 */
u32 rtw89_phy_read32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
			 enum rtw89_phy_idx phy_idx)
{
	u32 eff_addr = addr;

	if (phy_idx == RTW89_PHY_1 && rtwdev->dbcc_en)
		eff_addr += rtw89_phy0_phy1_offset(rtwdev, addr);

	return rtw89_phy_read32_mask(rtwdev, eff_addr, mask);
}
EXPORT_SYMBOL(rtw89_phy_read32_idx);
2104 
/* Apply the same masked write to PHY0 and, when DBCC is active, to the
 * PHY1 mirror as well.
 */
void rtw89_phy_set_phy_regs(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
			    u32 val)
{
	rtw89_phy_write32_idx(rtwdev, addr, mask, val, RTW89_PHY_0);

	if (rtwdev->dbcc_en)
		rtw89_phy_write32_idx(rtwdev, addr, mask, val, RTW89_PHY_1);
}
EXPORT_SYMBOL(rtw89_phy_set_phy_regs);
2116 
rtw89_phy_write_reg3_tbl(struct rtw89_dev * rtwdev,const struct rtw89_phy_reg3_tbl * tbl)2117 void rtw89_phy_write_reg3_tbl(struct rtw89_dev *rtwdev,
2118 			      const struct rtw89_phy_reg3_tbl *tbl)
2119 {
2120 	const struct rtw89_reg3_def *reg3;
2121 	int i;
2122 
2123 	for (i = 0; i < tbl->size; i++) {
2124 		reg3 = &tbl->reg3[i];
2125 		rtw89_phy_write32_mask(rtwdev, reg3->addr, reg3->mask, reg3->data);
2126 	}
2127 }
2128 EXPORT_SYMBOL(rtw89_phy_write_reg3_tbl);
2129 
/* Map an ACPI RTAG antenna-gain domain bit index to the driver's regd
 * index; unknown domains log a debug message and return RTW89_REGD_NUM
 * (out of range) so callers can skip them.
 */
static u8 rtw89_phy_ant_gain_domain_to_regd(struct rtw89_dev *rtwdev, u8 ant_gain_regd)
{
	if (ant_gain_regd == RTW89_ANT_GAIN_ETSI)
		return RTW89_ETSI;

	rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
		    "unknown antenna gain domain: %d\n",
		    ant_gain_regd);
	return RTW89_REGD_NUM;
}
2142 
/* antenna gain in unit of 0.25 dbm */
#define RTW89_ANT_GAIN_2GHZ_MIN -8
#define RTW89_ANT_GAIN_2GHZ_MAX 14
#define RTW89_ANT_GAIN_5GHZ_MIN -8
#define RTW89_ANT_GAIN_5GHZ_MAX 20
#define RTW89_ANT_GAIN_6GHZ_MIN -8
#define RTW89_ANT_GAIN_6GHZ_MAX 20

/* per-band reference gains; stored offsets are REF - clamped gain */
#define RTW89_ANT_GAIN_REF_2GHZ 14
#define RTW89_ANT_GAIN_REF_5GHZ 20
#define RTW89_ANT_GAIN_REF_6GHZ 20
2154 
rtw89_phy_ant_gain_init(struct rtw89_dev * rtwdev)2155 void rtw89_phy_ant_gain_init(struct rtw89_dev *rtwdev)
2156 {
2157 	struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
2158 	const struct rtw89_chip_info *chip = rtwdev->chip;
2159 	struct rtw89_acpi_rtag_result res = {};
2160 	u32 domain;
2161 	int ret;
2162 	u8 i, j;
2163 	u8 regd;
2164 	u8 val;
2165 
2166 	if (!chip->support_ant_gain)
2167 		return;
2168 
2169 	ret = rtw89_acpi_evaluate_rtag(rtwdev, &res);
2170 	if (ret) {
2171 		rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
2172 			    "acpi: cannot eval rtag: %d\n", ret);
2173 		return;
2174 	}
2175 
2176 	if (res.revision != 0) {
2177 		rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
2178 			    "unknown rtag revision: %d\n", res.revision);
2179 		return;
2180 	}
2181 
2182 	domain = get_unaligned_le32(&res.domain);
2183 
2184 	for (i = 0; i < RTW89_ANT_GAIN_DOMAIN_NUM; i++) {
2185 		if (!(domain & BIT(i)))
2186 			continue;
2187 
2188 		regd = rtw89_phy_ant_gain_domain_to_regd(rtwdev, i);
2189 		if (regd >= RTW89_REGD_NUM)
2190 			continue;
2191 		ant_gain->regd_enabled |= BIT(regd);
2192 	}
2193 
2194 	for (i = 0; i < RTW89_ANT_GAIN_CHAIN_NUM; i++) {
2195 		for (j = 0; j < RTW89_ANT_GAIN_SUBBAND_NR; j++) {
2196 			val = res.ant_gain_table[i][j];
2197 			switch (j) {
2198 			default:
2199 			case RTW89_ANT_GAIN_2GHZ_SUBBAND:
2200 				val = RTW89_ANT_GAIN_REF_2GHZ -
2201 				      clamp_t(s8, val,
2202 					      RTW89_ANT_GAIN_2GHZ_MIN,
2203 					      RTW89_ANT_GAIN_2GHZ_MAX);
2204 				break;
2205 			case RTW89_ANT_GAIN_5GHZ_SUBBAND_1:
2206 			case RTW89_ANT_GAIN_5GHZ_SUBBAND_2:
2207 			case RTW89_ANT_GAIN_5GHZ_SUBBAND_2E:
2208 			case RTW89_ANT_GAIN_5GHZ_SUBBAND_3_4:
2209 				val = RTW89_ANT_GAIN_REF_5GHZ -
2210 				      clamp_t(s8, val,
2211 					      RTW89_ANT_GAIN_5GHZ_MIN,
2212 					      RTW89_ANT_GAIN_5GHZ_MAX);
2213 				break;
2214 			case RTW89_ANT_GAIN_6GHZ_SUBBAND_5_L:
2215 			case RTW89_ANT_GAIN_6GHZ_SUBBAND_5_H:
2216 			case RTW89_ANT_GAIN_6GHZ_SUBBAND_6:
2217 			case RTW89_ANT_GAIN_6GHZ_SUBBAND_7_L:
2218 			case RTW89_ANT_GAIN_6GHZ_SUBBAND_7_H:
2219 			case RTW89_ANT_GAIN_6GHZ_SUBBAND_8:
2220 				val = RTW89_ANT_GAIN_REF_6GHZ -
2221 				      clamp_t(s8, val,
2222 					      RTW89_ANT_GAIN_6GHZ_MIN,
2223 					      RTW89_ANT_GAIN_6GHZ_MAX);
2224 			}
2225 			ant_gain->offset[i][j] = val;
2226 		}
2227 	}
2228 }
2229 
/* Map a channel center frequency (MHz) to its antenna-gain subband.
 * A frequency outside every known range logs a debug message and falls
 * through to the 2 GHz subband as a safe default.
 */
static
enum rtw89_ant_gain_subband rtw89_phy_ant_gain_get_subband(struct rtw89_dev *rtwdev,
							   u32 center_freq)
{
	switch (center_freq) {
	default:
		rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
			    "center freq: %u to antenna gain subband is unhandled\n",
			    center_freq);
		fallthrough;
	case 2412 ... 2484:
		return RTW89_ANT_GAIN_2GHZ_SUBBAND;
	case 5180 ... 5240:
		return RTW89_ANT_GAIN_5GHZ_SUBBAND_1;
	case 5250 ... 5320:
		return RTW89_ANT_GAIN_5GHZ_SUBBAND_2;
	case 5500 ... 5720:
		return RTW89_ANT_GAIN_5GHZ_SUBBAND_2E;
	case 5745 ... 5885:
		return RTW89_ANT_GAIN_5GHZ_SUBBAND_3_4;
	case 5955 ... 6155:
		return RTW89_ANT_GAIN_6GHZ_SUBBAND_5_L;
	case 6175 ... 6415:
		return RTW89_ANT_GAIN_6GHZ_SUBBAND_5_H;
	case 6435 ... 6515:
		return RTW89_ANT_GAIN_6GHZ_SUBBAND_6;
	case 6535 ... 6695:
		return RTW89_ANT_GAIN_6GHZ_SUBBAND_7_L;
	case 6715 ... 6855:
		return RTW89_ANT_GAIN_6GHZ_SUBBAND_7_H;

	/* freq 6875 (ch 185, 20MHz) spans RTW89_ANT_GAIN_6GHZ_SUBBAND_7_H
	 * and RTW89_ANT_GAIN_6GHZ_SUBBAND_8, so directly describe it with
	 * struct rtw89_6ghz_span.
	 */

	case 6895 ... 7115:
		return RTW89_ANT_GAIN_6GHZ_SUBBAND_8;
	}
}
2270 
rtw89_phy_ant_gain_query(struct rtw89_dev * rtwdev,enum rtw89_rf_path path,u32 center_freq)2271 static s8 rtw89_phy_ant_gain_query(struct rtw89_dev *rtwdev,
2272 				   enum rtw89_rf_path path, u32 center_freq)
2273 {
2274 	struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
2275 	enum rtw89_ant_gain_subband subband_l, subband_h;
2276 	const struct rtw89_6ghz_span *span;
2277 
2278 	span = rtw89_get_6ghz_span(rtwdev, center_freq);
2279 
2280 	if (span && RTW89_ANT_GAIN_SPAN_VALID(span)) {
2281 		subband_l = span->ant_gain_subband_low;
2282 		subband_h = span->ant_gain_subband_high;
2283 	} else {
2284 		subband_l = rtw89_phy_ant_gain_get_subband(rtwdev, center_freq);
2285 		subband_h = subband_l;
2286 	}
2287 
2288 	rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
2289 		    "center_freq %u: antenna gain subband {%u, %u}\n",
2290 		    center_freq, subband_l, subband_h);
2291 
2292 	return min(ant_gain->offset[path][subband_l],
2293 		   ant_gain->offset[path][subband_h]);
2294 }
2295 
rtw89_phy_ant_gain_offset(struct rtw89_dev * rtwdev,u32 center_freq)2296 static s8 rtw89_phy_ant_gain_offset(struct rtw89_dev *rtwdev, u32 center_freq)
2297 {
2298 	s8 offset_patha, offset_pathb;
2299 
2300 	offset_patha = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_A, center_freq);
2301 	offset_pathb = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_B, center_freq);
2302 
2303 	if (RTW89_CHK_FW_FEATURE(NO_POWER_DIFFERENCE, &rtwdev->fw))
2304 		return min(offset_patha, offset_pathb);
2305 
2306 	return max(offset_patha, offset_pathb);
2307 }
2308 
rtw89_can_apply_ant_gain(struct rtw89_dev * rtwdev,u8 band)2309 static bool rtw89_can_apply_ant_gain(struct rtw89_dev *rtwdev, u8 band)
2310 {
2311 	const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms;
2312 	struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
2313 	const struct rtw89_chip_info *chip = rtwdev->chip;
2314 	u8 regd = rtw89_regd_get(rtwdev, band);
2315 
2316 	if (!chip->support_ant_gain)
2317 		return false;
2318 
2319 	if (ant_gain->block_country || !(ant_gain->regd_enabled & BIT(regd)))
2320 		return false;
2321 
2322 	if (!rfe_parms->has_da)
2323 		return false;
2324 
2325 	return true;
2326 }
2327 
rtw89_phy_ant_gain_pwr_offset(struct rtw89_dev * rtwdev,const struct rtw89_chan * chan)2328 s16 rtw89_phy_ant_gain_pwr_offset(struct rtw89_dev *rtwdev,
2329 				  const struct rtw89_chan *chan)
2330 {
2331 	s8 offset_patha, offset_pathb;
2332 
2333 	if (!rtw89_can_apply_ant_gain(rtwdev, chan->band_type))
2334 		return 0;
2335 
2336 	if (RTW89_CHK_FW_FEATURE(NO_POWER_DIFFERENCE, &rtwdev->fw))
2337 		return 0;
2338 
2339 	offset_patha = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_A, chan->freq);
2340 	offset_pathb = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_B, chan->freq);
2341 
2342 	return rtw89_phy_txpwr_rf_to_bb(rtwdev, offset_patha - offset_pathb);
2343 }
2344 EXPORT_SYMBOL(rtw89_phy_ant_gain_pwr_offset);
2345 
/* Format the per-chain antenna-gain offsets for a channel into @buf
 * (debugfs helper); returns the number of bytes written.
 */
int rtw89_print_ant_gain(struct rtw89_dev *rtwdev, char *buf, size_t bufsz,
			 const struct rtw89_chan *chan)
{
	char *p = buf, *end = buf + bufsz;
	s8 ofs_a, ofs_b;

	if (!rtw89_can_apply_ant_gain(rtwdev, chan->band_type)) {
		p += scnprintf(p, end - p, "no DAG is applied\n");
		return p - buf;
	}

	ofs_a = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_A, chan->freq);
	ofs_b = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_B, chan->freq);

	p += scnprintf(p, end - p, "ChainA offset: %d dBm\n", ofs_a);
	p += scnprintf(p, end - p, "ChainB offset: %d dBm\n", ofs_b);

	return p - buf;
}
2366 
/* number of rate indexes per rate section on AX chips */
static const u8 rtw89_rs_idx_num_ax[] = {
	[RTW89_RS_CCK] = RTW89_RATE_CCK_NUM,
	[RTW89_RS_OFDM] = RTW89_RATE_OFDM_NUM,
	[RTW89_RS_MCS] = RTW89_RATE_MCS_NUM_AX,
	[RTW89_RS_HEDCM] = RTW89_RATE_HEDCM_NUM,
	[RTW89_RS_OFFSET] = RTW89_RATE_OFFSET_NUM_AX,
};

/* number of spatial streams per rate section on AX chips */
static const u8 rtw89_rs_nss_num_ax[] = {
	[RTW89_RS_CCK] = 1,
	[RTW89_RS_OFDM] = 1,
	[RTW89_RS_MCS] = RTW89_NSS_NUM,
	[RTW89_RS_HEDCM] = RTW89_NSS_HEDCM_NUM,
	[RTW89_RS_OFFSET] = 1,
};
2382 
rtw89_phy_raw_byr_seek(struct rtw89_dev * rtwdev,struct rtw89_txpwr_byrate * head,const struct rtw89_rate_desc * desc)2383 s8 *rtw89_phy_raw_byr_seek(struct rtw89_dev *rtwdev,
2384 			   struct rtw89_txpwr_byrate *head,
2385 			   const struct rtw89_rate_desc *desc)
2386 {
2387 	switch (desc->rs) {
2388 	case RTW89_RS_CCK:
2389 		return &head->cck[desc->idx];
2390 	case RTW89_RS_OFDM:
2391 		return &head->ofdm[desc->idx];
2392 	case RTW89_RS_MCS:
2393 		return &head->mcs[desc->ofdma][desc->nss][desc->idx];
2394 	case RTW89_RS_HEDCM:
2395 		return &head->hedcm[desc->ofdma][desc->nss][desc->idx];
2396 	case RTW89_RS_OFFSET:
2397 		return &head->offset[desc->idx];
2398 	default:
2399 		rtw89_warn(rtwdev, "unrecognized byr rs: %d\n", desc->rs);
2400 		return &head->trap;
2401 	}
2402 }
2403 
rtw89_phy_load_txpwr_byrate(struct rtw89_dev * rtwdev,const struct rtw89_txpwr_table * tbl)2404 void rtw89_phy_load_txpwr_byrate(struct rtw89_dev *rtwdev,
2405 				 const struct rtw89_txpwr_table *tbl)
2406 {
2407 	const struct rtw89_txpwr_byrate_cfg *cfg = tbl->data;
2408 	const struct rtw89_txpwr_byrate_cfg *end = cfg + tbl->size;
2409 	struct rtw89_txpwr_byrate *byr_head;
2410 	struct rtw89_rate_desc desc = {};
2411 	s8 *byr;
2412 	u32 data;
2413 	u8 i;
2414 
2415 	for (; cfg < end; cfg++) {
2416 		byr_head = &rtwdev->byr[cfg->band][0];
2417 		desc.rs = cfg->rs;
2418 		desc.nss = cfg->nss;
2419 		data = cfg->data;
2420 
2421 		for (i = 0; i < cfg->len; i++, data >>= 8) {
2422 			desc.idx = cfg->shf + i;
2423 			byr = rtw89_phy_raw_byr_seek(rtwdev, byr_head, &desc);
2424 			*byr = data & 0xff;
2425 		}
2426 	}
2427 }
2428 EXPORT_SYMBOL(rtw89_phy_load_txpwr_byrate);
2429 
/* Subtract the TSSI tolerance from a dBm value at or below the
 * deviation point; values above it pass through unchanged.
 */
static s8 rtw89_phy_txpwr_dbm_without_tolerance(s8 dbm)
{
	const u8 tssi_deviation_point = 0;
	const u8 tssi_max_deviation = 2;

	return dbm > tssi_deviation_point ? dbm : dbm - tssi_max_deviation;
}
2440 
rtw89_phy_get_tpe_constraint(struct rtw89_dev * rtwdev,u8 band)2441 static s8 rtw89_phy_get_tpe_constraint(struct rtw89_dev *rtwdev, u8 band)
2442 {
2443 	struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
2444 	const struct rtw89_reg_6ghz_tpe *tpe = &regulatory->reg_6ghz_tpe;
2445 	s8 cstr = S8_MAX;
2446 
2447 	if (band == RTW89_BAND_6G && tpe->valid)
2448 		cstr = rtw89_phy_txpwr_dbm_without_tolerance(tpe->constraint);
2449 
2450 	return rtw89_phy_txpwr_dbm_to_mac(rtwdev, cstr);
2451 }
2452 
rtw89_phy_read_txpwr_byrate(struct rtw89_dev * rtwdev,u8 band,u8 bw,const struct rtw89_rate_desc * rate_desc)2453 s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev, u8 band, u8 bw,
2454 			       const struct rtw89_rate_desc *rate_desc)
2455 {
2456 	struct rtw89_txpwr_byrate *byr_head;
2457 	s8 *byr;
2458 
2459 	if (rate_desc->rs == RTW89_RS_CCK)
2460 		band = RTW89_BAND_2G;
2461 
2462 	byr_head = &rtwdev->byr[band][bw];
2463 	byr = rtw89_phy_raw_byr_seek(rtwdev, byr_head, rate_desc);
2464 
2465 	return rtw89_phy_txpwr_rf_to_mac(rtwdev, *byr);
2466 }
2467 
/* Collapse the sparse 6 GHz channel numbering (step of 2 with gaps at
 * 31, 63, 95, ...) into a dense 0-based table index; each range below
 * continues where the previous one left off. Unknown channels warn and
 * map to index 0.
 */
static u8 rtw89_channel_6g_to_idx(struct rtw89_dev *rtwdev, u8 channel_6g)
{
	switch (channel_6g) {
	case 1 ... 29:
		return (channel_6g - 1) / 2;
	case 33 ... 61:
		return (channel_6g - 3) / 2;
	case 65 ... 93:
		return (channel_6g - 5) / 2;
	case 97 ... 125:
		return (channel_6g - 7) / 2;
	case 129 ... 157:
		return (channel_6g - 9) / 2;
	case 161 ... 189:
		return (channel_6g - 11) / 2;
	case 193 ... 221:
		return (channel_6g - 13) / 2;
	case 225 ... 253:
		return (channel_6g - 15) / 2;
	default:
		rtw89_warn(rtwdev, "unknown 6g channel: %d\n", channel_6g);
		return 0;
	}
}
2492 
/* Map a channel number to its dense tx-power-table index. 6 GHz is
 * delegated to rtw89_channel_6g_to_idx(); 2 GHz channels 1-14 map to
 * 0-13, and the 5 GHz ranges continue the index sequence (36-64 ->
 * 0-14, 100-144 -> 15-37, 149-177 -> 38-52). Unknown channels warn
 * and map to index 0.
 */
static u8 rtw89_channel_to_idx(struct rtw89_dev *rtwdev, u8 band, u8 channel)
{
	if (band == RTW89_BAND_6G)
		return rtw89_channel_6g_to_idx(rtwdev, channel);

	switch (channel) {
	case 1 ... 14:
		return channel - 1;
	case 36 ... 64:
		return (channel - 36) / 2;
	case 100 ... 144:
		return ((channel - 100) / 2) + 15;
	case 149 ... 177:
		return ((channel - 149) / 2) + 38;
	default:
		rtw89_warn(rtwdev, "unknown channel: %d\n", channel);
		return 0;
	}
}
2512 
rtw89_phy_validate_txpwr_limit_bw(struct rtw89_dev * rtwdev,u8 band,u8 bw)2513 static bool rtw89_phy_validate_txpwr_limit_bw(struct rtw89_dev *rtwdev,
2514 					      u8 band, u8 bw)
2515 {
2516 	switch (band) {
2517 	case RTW89_BAND_2G:
2518 		return bw < RTW89_2G_BW_NUM;
2519 	case RTW89_BAND_5G:
2520 		return bw < RTW89_5G_BW_NUM;
2521 	case RTW89_BAND_6G:
2522 		return bw < RTW89_6G_BW_NUM;
2523 	default:
2524 		return false;
2525 	}
2526 }
2527 
/* Resolve the effective tx power limit (in MAC units) for one band /
 * bandwidth / ntx / rate section / beamforming / channel combination.
 * The regulatory limit (falling back to the worldwide RTW89_WW entry
 * when the regd-specific entry is 0) is optionally tightened by the
 * dynamic-antenna-gain limit table, then capped by SAR and by the
 * 6 GHz TPE constraint.
 */
s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
			      u8 bw, u8 ntx, u8 rs, u8 bf, u8 ch)
{
	const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms;
	const struct rtw89_txpwr_rule_2ghz *rule_da_2ghz = &rfe_parms->rule_da_2ghz;
	const struct rtw89_txpwr_rule_5ghz *rule_da_5ghz = &rfe_parms->rule_da_5ghz;
	const struct rtw89_txpwr_rule_6ghz *rule_da_6ghz = &rfe_parms->rule_da_6ghz;
	const struct rtw89_txpwr_rule_2ghz *rule_2ghz = &rfe_parms->rule_2ghz;
	const struct rtw89_txpwr_rule_5ghz *rule_5ghz = &rfe_parms->rule_5ghz;
	const struct rtw89_txpwr_rule_6ghz *rule_6ghz = &rfe_parms->rule_6ghz;
	struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
	enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
	bool has_ant_gain = rtw89_can_apply_ant_gain(rtwdev, band);
	u32 freq = ieee80211_channel_to_frequency(ch, nl_band);
	u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch);
	s8 lmt = 0, da_lmt = S8_MAX, sar, offset = 0;
	u8 regd = rtw89_regd_get(rtwdev, band);
	u8 reg6 = regulatory->reg_6ghz_power;
	struct rtw89_sar_parm sar_parm = {
		.center_freq = freq,
		.ntx = ntx,
	};
	s8 cstr;

	if (!rtw89_phy_validate_txpwr_limit_bw(rtwdev, band, bw)) {
		rtw89_warn(rtwdev, "invalid band %u bandwidth %u\n", band, bw);
		return 0;
	}

	switch (band) {
	case RTW89_BAND_2G:
		if (has_ant_gain)
			da_lmt = (*rule_da_2ghz->lmt)[bw][ntx][rs][bf][regd][ch_idx];

		lmt = (*rule_2ghz->lmt)[bw][ntx][rs][bf][regd][ch_idx];
		if (lmt)
			break;

		/* 0 means "no entry for this regd" - use the worldwide limit */
		lmt = (*rule_2ghz->lmt)[bw][ntx][rs][bf][RTW89_WW][ch_idx];
		break;
	case RTW89_BAND_5G:
		if (has_ant_gain)
			da_lmt = (*rule_da_5ghz->lmt)[bw][ntx][rs][bf][regd][ch_idx];

		lmt = (*rule_5ghz->lmt)[bw][ntx][rs][bf][regd][ch_idx];
		if (lmt)
			break;

		lmt = (*rule_5ghz->lmt)[bw][ntx][rs][bf][RTW89_WW][ch_idx];
		break;
	case RTW89_BAND_6G:
		if (has_ant_gain)
			da_lmt = (*rule_da_6ghz->lmt)[bw][ntx][rs][bf][regd][reg6][ch_idx];

		lmt = (*rule_6ghz->lmt)[bw][ntx][rs][bf][regd][reg6][ch_idx];
		if (lmt)
			break;

		lmt = (*rule_6ghz->lmt)[bw][ntx][rs][bf][RTW89_WW]
				       [RTW89_REG_6GHZ_POWER_DFLT]
				       [ch_idx];
		break;
	default:
		rtw89_warn(rtwdev, "unknown band type: %d\n", band);
		return 0;
	}

	/* a DAG limit of 0 also means "no entry" */
	da_lmt = da_lmt ?: S8_MAX;
	if (da_lmt != S8_MAX)
		offset = rtw89_phy_ant_gain_offset(rtwdev, freq);

	lmt = rtw89_phy_txpwr_rf_to_mac(rtwdev, min(lmt + offset, da_lmt));
	sar = rtw89_query_sar(rtwdev, &sar_parm);
	cstr = rtw89_phy_get_tpe_constraint(rtwdev, band);

	return min3(lmt, sar, cstr);
}
2606 
/* Fill @ptr[0..RTW89_BF_NUM-1] with the tx power limit for each
 * beamforming state (__i is passed as the bf argument).
 */
#define __fill_txpwr_limit_nonbf_bf(ptr, band, bw, ntx, rs, ch)		\
	do {								\
		u8 __i;							\
		for (__i = 0; __i < RTW89_BF_NUM; __i++)		\
			ptr[__i] = rtw89_phy_read_txpwr_limit(rtwdev,	\
							      band,	\
							      bw, ntx,	\
							      rs, __i,	\
							      (ch));	\
	} while (0)
2617 
/* Fill the AX tx power limit structure for a 20 MHz channel: CCK at
 * 20/40 MHz, OFDM, and the single MCS 20 MHz segment, all centered on
 * @ch.
 */
static void rtw89_phy_fill_txpwr_limit_20m_ax(struct rtw89_dev *rtwdev,
					      struct rtw89_txpwr_limit_ax *lmt,
					      u8 band, u8 ntx, u8 ch)
{
	__fill_txpwr_limit_nonbf_bf(lmt->cck_20m, band, RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_CCK, ch);
	__fill_txpwr_limit_nonbf_bf(lmt->cck_40m, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_CCK, ch);
	__fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_OFDM, ch);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch);
}
2632 
/* Fill the AX tx power limit structure for a 40 MHz channel centered
 * on @ch: the two 20 MHz sub-channel segments sit at ch -/+ 2, OFDM
 * follows the primary channel, and CCK 20 MHz uses the lower
 * sub-channel (ch - 2).
 */
static void rtw89_phy_fill_txpwr_limit_40m_ax(struct rtw89_dev *rtwdev,
					      struct rtw89_txpwr_limit_ax *lmt,
					      u8 band, u8 ntx, u8 ch, u8 pri_ch)
{
	__fill_txpwr_limit_nonbf_bf(lmt->cck_20m, band, RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_CCK, ch - 2);
	__fill_txpwr_limit_nonbf_bf(lmt->cck_40m, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_CCK, ch);
	__fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_OFDM, pri_ch);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch - 2);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch + 2);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], band,
				    RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch);
}
2653 
/* Fill the AX tx power limit structure for an 80 MHz channel centered
 * on @ch: four 20 MHz segments at ch -/+ 6 and -/+ 2, two 40 MHz
 * segments at ch -/+ 4, the 80 MHz limit at ch, and the 40 MHz "0p5"
 * (half-offset) entry as the minimum of the two 40 MHz segments.
 */
static void rtw89_phy_fill_txpwr_limit_80m_ax(struct rtw89_dev *rtwdev,
					      struct rtw89_txpwr_limit_ax *lmt,
					      u8 band, u8 ntx, u8 ch, u8 pri_ch)
{
	s8 val_0p5_n[RTW89_BF_NUM];
	s8 val_0p5_p[RTW89_BF_NUM];
	u8 i;

	__fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_OFDM, pri_ch);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch - 6);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch - 2);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[2], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch + 2);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[3], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch + 6);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], band,
				    RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch - 4);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[1], band,
				    RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch + 4);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[0], band,
				    RTW89_CHANNEL_WIDTH_80,
				    ntx, RTW89_RS_MCS, ch);

	/* the 0p5 entry takes the tighter of the two 40 MHz halves */
	__fill_txpwr_limit_nonbf_bf(val_0p5_n, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch - 4);
	__fill_txpwr_limit_nonbf_bf(val_0p5_p, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch + 4);

	for (i = 0; i < RTW89_BF_NUM; i++)
		lmt->mcs_40m_0p5[i] = min_t(s8, val_0p5_n[i], val_0p5_p[i]);
}
2694 
static void rtw89_phy_fill_txpwr_limit_160m_ax(struct rtw89_dev *rtwdev,
					       struct rtw89_txpwr_limit_ax *lmt,
					       u8 band, u8 ntx, u8 ch, u8 pri_ch)
{
	/* Center offsets of the eight 20 MHz, four 40 MHz and two 80 MHz
	 * sub-channels inside a 160 MHz channel, relative to its center.
	 */
	static const int ofst_20m[] = { -14, -10, -6, -2, 2, 6, 10, 14 };
	static const int ofst_40m[] = { -12, -4, 4, 12 };
	static const int ofst_80m[] = { -8, 8 };
	s8 lo[RTW89_BF_NUM];
	s8 hi[RTW89_BF_NUM];
	u8 i;

	/* OFDM limits follow the primary 20 MHz channel. */
	__fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_OFDM, pri_ch);

	/* MCS limits for each 20 MHz sub-channel. */
	for (i = 0; i < ARRAY_SIZE(ofst_20m); i++)
		__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[i], band,
					    RTW89_CHANNEL_WIDTH_20,
					    ntx, RTW89_RS_MCS,
					    ch + ofst_20m[i]);

	/* MCS limits for each 40 MHz sub-channel. */
	for (i = 0; i < ARRAY_SIZE(ofst_40m); i++)
		__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[i], band,
					    RTW89_CHANNEL_WIDTH_40,
					    ntx, RTW89_RS_MCS,
					    ch + ofst_40m[i]);

	/* MCS limits for each 80 MHz sub-channel. */
	for (i = 0; i < ARRAY_SIZE(ofst_80m); i++)
		__fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[i], band,
					    RTW89_CHANNEL_WIDTH_80,
					    ntx, RTW89_RS_MCS,
					    ch + ofst_80m[i]);

	/* MCS limit for the full 160 MHz channel. */
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_160m, band,
				    RTW89_CHANNEL_WIDTH_160,
				    ntx, RTW89_RS_MCS, ch);

	/* mcs_40m_0p5: min of the two 40 MHz segments at ch -/+ 4. */
	__fill_txpwr_limit_nonbf_bf(lo, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch - 4);
	__fill_txpwr_limit_nonbf_bf(hi, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch + 4);
	for (i = 0; i < RTW89_BF_NUM; i++)
		lmt->mcs_40m_0p5[i] = min_t(s8, lo[i], hi[i]);

	/* mcs_40m_2p5: min of the two 40 MHz segments at ch -/+ 8. */
	__fill_txpwr_limit_nonbf_bf(lo, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch - 8);
	__fill_txpwr_limit_nonbf_bf(hi, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch + 8);
	for (i = 0; i < RTW89_BF_NUM; i++)
		lmt->mcs_40m_2p5[i] = min_t(s8, lo[i], hi[i]);
}
2780 
2781 static
rtw89_phy_fill_txpwr_limit_ax(struct rtw89_dev * rtwdev,const struct rtw89_chan * chan,struct rtw89_txpwr_limit_ax * lmt,u8 ntx)2782 void rtw89_phy_fill_txpwr_limit_ax(struct rtw89_dev *rtwdev,
2783 				   const struct rtw89_chan *chan,
2784 				   struct rtw89_txpwr_limit_ax *lmt,
2785 				   u8 ntx)
2786 {
2787 	u8 band = chan->band_type;
2788 	u8 pri_ch = chan->primary_channel;
2789 	u8 ch = chan->channel;
2790 	u8 bw = chan->band_width;
2791 
2792 	memset(lmt, 0, sizeof(*lmt));
2793 
2794 	switch (bw) {
2795 	case RTW89_CHANNEL_WIDTH_20:
2796 		rtw89_phy_fill_txpwr_limit_20m_ax(rtwdev, lmt, band, ntx, ch);
2797 		break;
2798 	case RTW89_CHANNEL_WIDTH_40:
2799 		rtw89_phy_fill_txpwr_limit_40m_ax(rtwdev, lmt, band, ntx, ch,
2800 						  pri_ch);
2801 		break;
2802 	case RTW89_CHANNEL_WIDTH_80:
2803 		rtw89_phy_fill_txpwr_limit_80m_ax(rtwdev, lmt, band, ntx, ch,
2804 						  pri_ch);
2805 		break;
2806 	case RTW89_CHANNEL_WIDTH_160:
2807 		rtw89_phy_fill_txpwr_limit_160m_ax(rtwdev, lmt, band, ntx, ch,
2808 						   pri_ch);
2809 		break;
2810 	}
2811 }
2812 
/* Look up the regulatory RU (OFDMA) TX power limit for one RU size on one
 * channel, then clamp it by the antenna-gain ("da") table when applicable,
 * by SAR, and by any TPE constraint. Returns the final limit in MAC units.
 */
s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band,
				 u8 ru, u8 ntx, u8 ch)
{
	const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms;
	const struct rtw89_txpwr_rule_2ghz *rule_da_2ghz = &rfe_parms->rule_da_2ghz;
	const struct rtw89_txpwr_rule_5ghz *rule_da_5ghz = &rfe_parms->rule_da_5ghz;
	const struct rtw89_txpwr_rule_6ghz *rule_da_6ghz = &rfe_parms->rule_da_6ghz;
	const struct rtw89_txpwr_rule_2ghz *rule_2ghz = &rfe_parms->rule_2ghz;
	const struct rtw89_txpwr_rule_5ghz *rule_5ghz = &rfe_parms->rule_5ghz;
	const struct rtw89_txpwr_rule_6ghz *rule_6ghz = &rfe_parms->rule_6ghz;
	struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
	enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
	bool has_ant_gain = rtw89_can_apply_ant_gain(rtwdev, band);
	u32 freq = ieee80211_channel_to_frequency(ch, nl_band);
	u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch);
	s8 lmt_ru = 0, da_lmt_ru = S8_MAX, sar, offset = 0;
	u8 regd = rtw89_regd_get(rtwdev, band);
	u8 reg6 = regulatory->reg_6ghz_power;
	struct rtw89_sar_parm sar_parm = {
		.center_freq = freq,
		.ntx = ntx,
	};
	s8 cstr;

	/* Per band: read the antenna-gain limit when available, then the
	 * regulatory-domain limit. A zero table entry means "no specific
	 * limit"; fall back to the worldwide (RTW89_WW) entry in that case.
	 */
	switch (band) {
	case RTW89_BAND_2G:
		if (has_ant_gain)
			da_lmt_ru = (*rule_da_2ghz->lmt_ru)[ru][ntx][regd][ch_idx];

		lmt_ru = (*rule_2ghz->lmt_ru)[ru][ntx][regd][ch_idx];
		if (lmt_ru)
			break;

		lmt_ru = (*rule_2ghz->lmt_ru)[ru][ntx][RTW89_WW][ch_idx];
		break;
	case RTW89_BAND_5G:
		if (has_ant_gain)
			da_lmt_ru = (*rule_da_5ghz->lmt_ru)[ru][ntx][regd][ch_idx];

		lmt_ru = (*rule_5ghz->lmt_ru)[ru][ntx][regd][ch_idx];
		if (lmt_ru)
			break;

		lmt_ru = (*rule_5ghz->lmt_ru)[ru][ntx][RTW89_WW][ch_idx];
		break;
	case RTW89_BAND_6G:
		/* 6 GHz tables carry an extra power-mode dimension (reg6). */
		if (has_ant_gain)
			da_lmt_ru = (*rule_da_6ghz->lmt_ru)[ru][ntx][regd][reg6][ch_idx];

		lmt_ru = (*rule_6ghz->lmt_ru)[ru][ntx][regd][reg6][ch_idx];
		if (lmt_ru)
			break;

		lmt_ru = (*rule_6ghz->lmt_ru)[ru][ntx][RTW89_WW]
					     [RTW89_REG_6GHZ_POWER_DFLT]
					     [ch_idx];
		break;
	default:
		rtw89_warn(rtwdev, "unknown band type: %d\n", band);
		return 0;
	}

	/* Zero also means "no limit" for the antenna-gain table; apply the
	 * antenna-gain offset only when a real entry was found.
	 */
	da_lmt_ru = da_lmt_ru ?: S8_MAX;
	if (da_lmt_ru != S8_MAX)
		offset = rtw89_phy_ant_gain_offset(rtwdev, freq);

	lmt_ru = rtw89_phy_txpwr_rf_to_mac(rtwdev, min(lmt_ru + offset, da_lmt_ru));
	sar = rtw89_query_sar(rtwdev, &sar_parm);
	cstr = rtw89_phy_get_tpe_constraint(rtwdev, band);

	/* The most restrictive of regulatory, SAR and TPE wins. */
	return min3(lmt_ru, sar, cstr);
}
2885 
2886 static void
rtw89_phy_fill_txpwr_limit_ru_20m_ax(struct rtw89_dev * rtwdev,struct rtw89_txpwr_limit_ru_ax * lmt_ru,u8 band,u8 ntx,u8 ch)2887 rtw89_phy_fill_txpwr_limit_ru_20m_ax(struct rtw89_dev *rtwdev,
2888 				     struct rtw89_txpwr_limit_ru_ax *lmt_ru,
2889 				     u8 band, u8 ntx, u8 ch)
2890 {
2891 	lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2892 							RTW89_RU26,
2893 							ntx, ch);
2894 	lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2895 							RTW89_RU52,
2896 							ntx, ch);
2897 	lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2898 							 RTW89_RU106,
2899 							 ntx, ch);
2900 }
2901 
2902 static void
rtw89_phy_fill_txpwr_limit_ru_40m_ax(struct rtw89_dev * rtwdev,struct rtw89_txpwr_limit_ru_ax * lmt_ru,u8 band,u8 ntx,u8 ch)2903 rtw89_phy_fill_txpwr_limit_ru_40m_ax(struct rtw89_dev *rtwdev,
2904 				     struct rtw89_txpwr_limit_ru_ax *lmt_ru,
2905 				     u8 band, u8 ntx, u8 ch)
2906 {
2907 	lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2908 							RTW89_RU26,
2909 							ntx, ch - 2);
2910 	lmt_ru->ru26[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2911 							RTW89_RU26,
2912 							ntx, ch + 2);
2913 	lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2914 							RTW89_RU52,
2915 							ntx, ch - 2);
2916 	lmt_ru->ru52[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2917 							RTW89_RU52,
2918 							ntx, ch + 2);
2919 	lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2920 							 RTW89_RU106,
2921 							 ntx, ch - 2);
2922 	lmt_ru->ru106[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2923 							 RTW89_RU106,
2924 							 ntx, ch + 2);
2925 }
2926 
2927 static void
rtw89_phy_fill_txpwr_limit_ru_80m_ax(struct rtw89_dev * rtwdev,struct rtw89_txpwr_limit_ru_ax * lmt_ru,u8 band,u8 ntx,u8 ch)2928 rtw89_phy_fill_txpwr_limit_ru_80m_ax(struct rtw89_dev *rtwdev,
2929 				     struct rtw89_txpwr_limit_ru_ax *lmt_ru,
2930 				     u8 band, u8 ntx, u8 ch)
2931 {
2932 	lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2933 							RTW89_RU26,
2934 							ntx, ch - 6);
2935 	lmt_ru->ru26[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2936 							RTW89_RU26,
2937 							ntx, ch - 2);
2938 	lmt_ru->ru26[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2939 							RTW89_RU26,
2940 							ntx, ch + 2);
2941 	lmt_ru->ru26[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2942 							RTW89_RU26,
2943 							ntx, ch + 6);
2944 	lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2945 							RTW89_RU52,
2946 							ntx, ch - 6);
2947 	lmt_ru->ru52[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2948 							RTW89_RU52,
2949 							ntx, ch - 2);
2950 	lmt_ru->ru52[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2951 							RTW89_RU52,
2952 							ntx, ch + 2);
2953 	lmt_ru->ru52[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2954 							RTW89_RU52,
2955 							ntx, ch + 6);
2956 	lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2957 							 RTW89_RU106,
2958 							 ntx, ch - 6);
2959 	lmt_ru->ru106[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2960 							 RTW89_RU106,
2961 							 ntx, ch - 2);
2962 	lmt_ru->ru106[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2963 							 RTW89_RU106,
2964 							 ntx, ch + 2);
2965 	lmt_ru->ru106[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2966 							 RTW89_RU106,
2967 							 ntx, ch + 6);
2968 }
2969 
2970 static void
rtw89_phy_fill_txpwr_limit_ru_160m_ax(struct rtw89_dev * rtwdev,struct rtw89_txpwr_limit_ru_ax * lmt_ru,u8 band,u8 ntx,u8 ch)2971 rtw89_phy_fill_txpwr_limit_ru_160m_ax(struct rtw89_dev *rtwdev,
2972 				      struct rtw89_txpwr_limit_ru_ax *lmt_ru,
2973 				      u8 band, u8 ntx, u8 ch)
2974 {
2975 	static const int ofst[] = { -14, -10, -6, -2, 2, 6, 10, 14 };
2976 	int i;
2977 
2978 	static_assert(ARRAY_SIZE(ofst) == RTW89_RU_SEC_NUM_AX);
2979 	for (i = 0; i < RTW89_RU_SEC_NUM_AX; i++) {
2980 		lmt_ru->ru26[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2981 								RTW89_RU26,
2982 								ntx,
2983 								ch + ofst[i]);
2984 		lmt_ru->ru52[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2985 								RTW89_RU52,
2986 								ntx,
2987 								ch + ofst[i]);
2988 		lmt_ru->ru106[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
2989 								 RTW89_RU106,
2990 								 ntx,
2991 								 ch + ofst[i]);
2992 	}
2993 }
2994 
2995 static
rtw89_phy_fill_txpwr_limit_ru_ax(struct rtw89_dev * rtwdev,const struct rtw89_chan * chan,struct rtw89_txpwr_limit_ru_ax * lmt_ru,u8 ntx)2996 void rtw89_phy_fill_txpwr_limit_ru_ax(struct rtw89_dev *rtwdev,
2997 				      const struct rtw89_chan *chan,
2998 				      struct rtw89_txpwr_limit_ru_ax *lmt_ru,
2999 				      u8 ntx)
3000 {
3001 	u8 band = chan->band_type;
3002 	u8 ch = chan->channel;
3003 	u8 bw = chan->band_width;
3004 
3005 	memset(lmt_ru, 0, sizeof(*lmt_ru));
3006 
3007 	switch (bw) {
3008 	case RTW89_CHANNEL_WIDTH_20:
3009 		rtw89_phy_fill_txpwr_limit_ru_20m_ax(rtwdev, lmt_ru, band, ntx,
3010 						     ch);
3011 		break;
3012 	case RTW89_CHANNEL_WIDTH_40:
3013 		rtw89_phy_fill_txpwr_limit_ru_40m_ax(rtwdev, lmt_ru, band, ntx,
3014 						     ch);
3015 		break;
3016 	case RTW89_CHANNEL_WIDTH_80:
3017 		rtw89_phy_fill_txpwr_limit_ru_80m_ax(rtwdev, lmt_ru, band, ntx,
3018 						     ch);
3019 		break;
3020 	case RTW89_CHANNEL_WIDTH_160:
3021 		rtw89_phy_fill_txpwr_limit_ru_160m_ax(rtwdev, lmt_ru, band, ntx,
3022 						      ch);
3023 		break;
3024 	}
3025 }
3026 
/* Program the per-rate TX power table into consecutive R_AX_PWR_BY_RATE
 * registers, iterating NSS x rate-section x rate-index and packing four
 * s8 power values per 32-bit register write.
 */
static void rtw89_phy_set_txpwr_byrate_ax(struct rtw89_dev *rtwdev,
					  const struct rtw89_chan *chan,
					  enum rtw89_phy_idx phy_idx)
{
	u8 max_nss_num = rtwdev->chip->rf_path_num;
	static const u8 rs[] = {
		RTW89_RS_CCK,
		RTW89_RS_OFDM,
		RTW89_RS_MCS,
		RTW89_RS_HEDCM,
	};
	struct rtw89_rate_desc cur = {};
	u8 band = chan->band_type;
	u8 ch = chan->channel;
	u32 addr, val;
	s8 v[4] = {};	/* staging for one 4-byte register word */
	u8 i;

	rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
		    "[TXPWR] set txpwr byrate with ch=%d\n", ch);

	/* The packing below flushes v[] every 4 indices, so each section's
	 * index count must be a multiple of 4.
	 */
	BUILD_BUG_ON(rtw89_rs_idx_num_ax[RTW89_RS_CCK] % 4);
	BUILD_BUG_ON(rtw89_rs_idx_num_ax[RTW89_RS_OFDM] % 4);
	BUILD_BUG_ON(rtw89_rs_idx_num_ax[RTW89_RS_MCS] % 4);
	BUILD_BUG_ON(rtw89_rs_idx_num_ax[RTW89_RS_HEDCM] % 4);

	addr = R_AX_PWR_BY_RATE;
	for (cur.nss = 0; cur.nss < max_nss_num; cur.nss++) {
		for (i = 0; i < ARRAY_SIZE(rs); i++) {
			/* skip sections that don't support this NSS */
			if (cur.nss >= rtw89_rs_nss_num_ax[rs[i]])
				continue;

			cur.rs = rs[i];
			for (cur.idx = 0; cur.idx < rtw89_rs_idx_num_ax[rs[i]];
			     cur.idx++) {
				v[cur.idx % 4] =
					rtw89_phy_read_txpwr_byrate(rtwdev,
								    band, 0,
								    &cur);

				/* write out only on every 4th value */
				if ((cur.idx + 1) % 4)
					continue;

				val = FIELD_PREP(GENMASK(7, 0), v[0]) |
				      FIELD_PREP(GENMASK(15, 8), v[1]) |
				      FIELD_PREP(GENMASK(23, 16), v[2]) |
				      FIELD_PREP(GENMASK(31, 24), v[3]);

				rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr,
							val);
				addr += 4;
			}
		}
	}
}
3082 
3083 static
rtw89_phy_set_txpwr_offset_ax(struct rtw89_dev * rtwdev,const struct rtw89_chan * chan,enum rtw89_phy_idx phy_idx)3084 void rtw89_phy_set_txpwr_offset_ax(struct rtw89_dev *rtwdev,
3085 				   const struct rtw89_chan *chan,
3086 				   enum rtw89_phy_idx phy_idx)
3087 {
3088 	struct rtw89_rate_desc desc = {
3089 		.nss = RTW89_NSS_1,
3090 		.rs = RTW89_RS_OFFSET,
3091 	};
3092 	u8 band = chan->band_type;
3093 	s8 v[RTW89_RATE_OFFSET_NUM_AX] = {};
3094 	u32 val;
3095 
3096 	rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr offset\n");
3097 
3098 	for (desc.idx = 0; desc.idx < RTW89_RATE_OFFSET_NUM_AX; desc.idx++)
3099 		v[desc.idx] = rtw89_phy_read_txpwr_byrate(rtwdev, band, 0, &desc);
3100 
3101 	BUILD_BUG_ON(RTW89_RATE_OFFSET_NUM_AX != 5);
3102 	val = FIELD_PREP(GENMASK(3, 0), v[0]) |
3103 	      FIELD_PREP(GENMASK(7, 4), v[1]) |
3104 	      FIELD_PREP(GENMASK(11, 8), v[2]) |
3105 	      FIELD_PREP(GENMASK(15, 12), v[3]) |
3106 	      FIELD_PREP(GENMASK(19, 16), v[4]);
3107 
3108 	rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_AX_PWR_RATE_OFST_CTRL,
3109 				     GENMASK(19, 0), val);
3110 }
3111 
rtw89_phy_set_txpwr_limit_ax(struct rtw89_dev * rtwdev,const struct rtw89_chan * chan,enum rtw89_phy_idx phy_idx)3112 static void rtw89_phy_set_txpwr_limit_ax(struct rtw89_dev *rtwdev,
3113 					 const struct rtw89_chan *chan,
3114 					 enum rtw89_phy_idx phy_idx)
3115 {
3116 	u8 max_ntx_num = rtwdev->chip->rf_path_num;
3117 	struct rtw89_txpwr_limit_ax lmt;
3118 	u8 ch = chan->channel;
3119 	u8 bw = chan->band_width;
3120 	const s8 *ptr;
3121 	u32 addr, val;
3122 	u8 i, j;
3123 
3124 	rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
3125 		    "[TXPWR] set txpwr limit with ch=%d bw=%d\n", ch, bw);
3126 
3127 	BUILD_BUG_ON(sizeof(struct rtw89_txpwr_limit_ax) !=
3128 		     RTW89_TXPWR_LMT_PAGE_SIZE_AX);
3129 
3130 	addr = R_AX_PWR_LMT;
3131 	for (i = 0; i < max_ntx_num; i++) {
3132 		rtw89_phy_fill_txpwr_limit_ax(rtwdev, chan, &lmt, i);
3133 
3134 		ptr = (s8 *)&lmt;
3135 		for (j = 0; j < RTW89_TXPWR_LMT_PAGE_SIZE_AX;
3136 		     j += 4, addr += 4, ptr += 4) {
3137 			val = FIELD_PREP(GENMASK(7, 0), ptr[0]) |
3138 			      FIELD_PREP(GENMASK(15, 8), ptr[1]) |
3139 			      FIELD_PREP(GENMASK(23, 16), ptr[2]) |
3140 			      FIELD_PREP(GENMASK(31, 24), ptr[3]);
3141 
3142 			rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
3143 		}
3144 	}
3145 }
3146 
rtw89_phy_set_txpwr_limit_ru_ax(struct rtw89_dev * rtwdev,const struct rtw89_chan * chan,enum rtw89_phy_idx phy_idx)3147 static void rtw89_phy_set_txpwr_limit_ru_ax(struct rtw89_dev *rtwdev,
3148 					    const struct rtw89_chan *chan,
3149 					    enum rtw89_phy_idx phy_idx)
3150 {
3151 	u8 max_ntx_num = rtwdev->chip->rf_path_num;
3152 	struct rtw89_txpwr_limit_ru_ax lmt_ru;
3153 	u8 ch = chan->channel;
3154 	u8 bw = chan->band_width;
3155 	const s8 *ptr;
3156 	u32 addr, val;
3157 	u8 i, j;
3158 
3159 	rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
3160 		    "[TXPWR] set txpwr limit ru with ch=%d bw=%d\n", ch, bw);
3161 
3162 	BUILD_BUG_ON(sizeof(struct rtw89_txpwr_limit_ru_ax) !=
3163 		     RTW89_TXPWR_LMT_RU_PAGE_SIZE_AX);
3164 
3165 	addr = R_AX_PWR_RU_LMT;
3166 	for (i = 0; i < max_ntx_num; i++) {
3167 		rtw89_phy_fill_txpwr_limit_ru_ax(rtwdev, chan, &lmt_ru, i);
3168 
3169 		ptr = (s8 *)&lmt_ru;
3170 		for (j = 0; j < RTW89_TXPWR_LMT_RU_PAGE_SIZE_AX;
3171 		     j += 4, addr += 4, ptr += 4) {
3172 			val = FIELD_PREP(GENMASK(7, 0), ptr[0]) |
3173 			      FIELD_PREP(GENMASK(15, 8), ptr[1]) |
3174 			      FIELD_PREP(GENMASK(23, 16), ptr[2]) |
3175 			      FIELD_PREP(GENMASK(31, 24), ptr[3]);
3176 
3177 			rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
3178 		}
3179 	}
3180 }
3181 
/* Context passed through the station iterator for RA report C2H events. */
struct rtw89_phy_iter_ra_data {
	struct rtw89_dev *rtwdev;	/* owning device */
	struct sk_buff *c2h;		/* raw C2H report being dispatched */
};
3186 
/* Parse one firmware RA (rate adaptation) report for a single station link:
 * decode the selected rate/bandwidth/GI, update the link's ra_report and
 * rate_info, and retune A-MSDU aggregation limits accordingly.
 */
static void __rtw89_phy_c2h_ra_rpt_iter(struct rtw89_sta_link *rtwsta_link,
					struct ieee80211_link_sta *link_sta,
					struct rtw89_phy_iter_ra_data *ra_data)
{
	struct rtw89_dev *rtwdev = ra_data->rtwdev;
	const struct rtw89_c2h_ra_rpt *c2h =
		(const struct rtw89_c2h_ra_rpt *)ra_data->c2h->data;
	struct rtw89_ra_report *ra_report = &rtwsta_link->ra_report;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	/* BE-generation chips use the extended (v1) report layout */
	bool format_v1 = chip->chip_gen == RTW89_CHIP_BE;
	u8 mode, rate, bw, giltf, mac_id;
	u16 legacy_bitrate;
	bool valid;
	u8 mcs = 0;
	u8 t;

	/* The report is broadcast to all stations; only the addressed
	 * link processes it.
	 */
	mac_id = le32_get_bits(c2h->w2, RTW89_C2H_RA_RPT_W2_MACID);
	if (mac_id != rtwsta_link->mac_id)
		return;

	rate = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_MCSNSS);
	bw = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_BW);
	giltf = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_GILTF);
	mode = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_MD_SEL);

	/* v1 layout widens rate/bw/mode by one extra bit each. */
	if (format_v1) {
		t = le32_get_bits(c2h->w2, RTW89_C2H_RA_RPT_W2_MCSNSS_B7);
		rate |= u8_encode_bits(t, BIT(7));
		t = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_BW_B2);
		bw |= u8_encode_bits(t, BIT(2));
		t = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_MD_SEL_B2);
		mode |= u8_encode_bits(t, BIT(2));
	}

	/* Bail out early on an unmappable legacy rate index. */
	if (mode == RTW89_RA_RPT_MODE_LEGACY) {
		valid = rtw89_legacy_rate_to_bitrate(rtwdev, rate, &legacy_bitrate);
		if (!valid)
			return;
	}

	memset(&ra_report->txrate, 0, sizeof(ra_report->txrate));

	/* Translate the firmware rate encoding into mac80211 rate_info. */
	switch (mode) {
	case RTW89_RA_RPT_MODE_LEGACY:
		ra_report->txrate.legacy = legacy_bitrate;
		break;
	case RTW89_RA_RPT_MODE_HT:
		ra_report->txrate.flags |= RATE_INFO_FLAGS_MCS;
		if (RTW89_CHK_FW_FEATURE(OLD_HT_RA_FORMAT, &rtwdev->fw))
			rate = RTW89_MK_HT_RATE(FIELD_GET(RTW89_RA_RATE_MASK_NSS, rate),
						FIELD_GET(RTW89_RA_RATE_MASK_MCS, rate));
		else
			rate = FIELD_GET(RTW89_RA_RATE_MASK_HT_MCS, rate);
		ra_report->txrate.mcs = rate;
		if (giltf)
			ra_report->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		/* HT: per-stream MCS (low 3 bits) feeds the fallback check */
		mcs = ra_report->txrate.mcs & 0x07;
		break;
	case RTW89_RA_RPT_MODE_VHT:
		ra_report->txrate.flags |= RATE_INFO_FLAGS_VHT_MCS;
		ra_report->txrate.mcs = format_v1 ?
			u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS_V1) :
			u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS);
		ra_report->txrate.nss = format_v1 ?
			u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS_V1) + 1 :
			u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS) + 1;
		if (giltf)
			ra_report->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		mcs = ra_report->txrate.mcs;
		break;
	case RTW89_RA_RPT_MODE_HE:
		ra_report->txrate.flags |= RATE_INFO_FLAGS_HE_MCS;
		ra_report->txrate.mcs = format_v1 ?
			u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS_V1) :
			u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS);
		ra_report->txrate.nss  = format_v1 ?
			u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS_V1) + 1 :
			u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS) + 1;
		if (giltf == RTW89_GILTF_2XHE08 || giltf == RTW89_GILTF_1XHE08)
			ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_0_8;
		else if (giltf == RTW89_GILTF_2XHE16 || giltf == RTW89_GILTF_1XHE16)
			ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_1_6;
		else
			ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_3_2;
		mcs = ra_report->txrate.mcs;
		break;
	case RTW89_RA_RPT_MODE_EHT:
		ra_report->txrate.flags |= RATE_INFO_FLAGS_EHT_MCS;
		ra_report->txrate.mcs = u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS_V1);
		ra_report->txrate.nss = u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS_V1) + 1;
		if (giltf == RTW89_GILTF_2XHE08 || giltf == RTW89_GILTF_1XHE08)
			ra_report->txrate.eht_gi = NL80211_RATE_INFO_EHT_GI_0_8;
		else if (giltf == RTW89_GILTF_2XHE16 || giltf == RTW89_GILTF_1XHE16)
			ra_report->txrate.eht_gi = NL80211_RATE_INFO_EHT_GI_1_6;
		else
			ra_report->txrate.eht_gi = NL80211_RATE_INFO_EHT_GI_3_2;
		mcs = ra_report->txrate.mcs;
		break;
	}

	ra_report->txrate.bw = rtw89_hw_to_rate_info_bw(bw);
	ra_report->bit_rate = cfg80211_calculate_bitrate(&ra_report->txrate);
	/* Re-encode mode+rate into the chip's hw_rate representation. */
	ra_report->hw_rate = format_v1 ?
			     u16_encode_bits(mode, RTW89_HW_RATE_V1_MASK_MOD) |
			     u16_encode_bits(rate, RTW89_HW_RATE_V1_MASK_VAL) :
			     u16_encode_bits(mode, RTW89_HW_RATE_MASK_MOD) |
			     u16_encode_bits(rate, RTW89_HW_RATE_MASK_VAL);
	/* Low MCS may fall back to legacy rates; see get_max_amsdu_len(). */
	ra_report->might_fallback_legacy = mcs <= 2;
	link_sta->agg.max_rc_amsdu_len = get_max_amsdu_len(rtwdev, ra_report);
	/* NOTE(review): when max_rc_amsdu_len < 1500 (get_max_amsdu_len can
	 * return 1 or 1200), this expression goes negative before the
	 * assignment — confirm max_agg_wait's type/semantics handle that.
	 */
	rtwsta_link->max_agg_wait = link_sta->agg.max_rc_amsdu_len / 1500 - 1;
}
3298 
rtw89_phy_c2h_ra_rpt_iter(void * data,struct ieee80211_sta * sta)3299 static void rtw89_phy_c2h_ra_rpt_iter(void *data, struct ieee80211_sta *sta)
3300 {
3301 	struct rtw89_phy_iter_ra_data *ra_data = (struct rtw89_phy_iter_ra_data *)data;
3302 	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
3303 	struct rtw89_sta_link *rtwsta_link;
3304 	struct ieee80211_link_sta *link_sta;
3305 	unsigned int link_id;
3306 
3307 	rcu_read_lock();
3308 
3309 	rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id) {
3310 		link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, false);
3311 		__rtw89_phy_c2h_ra_rpt_iter(rtwsta_link, link_sta, ra_data);
3312 	}
3313 
3314 	rcu_read_unlock();
3315 }
3316 
3317 static void
rtw89_phy_c2h_ra_rpt(struct rtw89_dev * rtwdev,struct sk_buff * c2h,u32 len)3318 rtw89_phy_c2h_ra_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
3319 {
3320 	struct rtw89_phy_iter_ra_data ra_data;
3321 
3322 	ra_data.rtwdev = rtwdev;
3323 	ra_data.c2h = c2h;
3324 	ieee80211_iterate_stations_atomic(rtwdev->hw,
3325 					  rtw89_phy_c2h_ra_rpt_iter,
3326 					  &ra_data);
3327 }
3328 
/* Dispatch table for RA-class PHY C2H events, indexed by function id.
 * NULL entries are events the driver recognizes but does not handle.
 */
static
void (* const rtw89_phy_c2h_ra_handler[])(struct rtw89_dev *rtwdev,
					  struct sk_buff *c2h, u32 len) = {
	[RTW89_PHY_C2H_FUNC_STS_RPT] = rtw89_phy_c2h_ra_rpt,
	[RTW89_PHY_C2H_FUNC_MU_GPTBL_RPT] = NULL,
	[RTW89_PHY_C2H_FUNC_TXSTS] = NULL,
	[RTW89_PHY_C2H_FUNC_ACCELERATE_EN] = rtw89_fw_c2h_dummy_handler,
};
3337 
/* Low-rate retry report: intentionally a no-op. The handler only exists so
 * the event is registered in rtw89_phy_c2h_dm_handler[].
 */
static void
rtw89_phy_c2h_lowrt_rty(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
}
3342 
/* Dump a firmware LPS (leisure power save) report to the debug log. The
 * payload is: fixed header, a variable-length 4-byte-aligned info section,
 * then three register dumps (BBCR, BBMCUCR, RFCR) each stored as an array
 * of 16-bit addresses followed by their 32-bit value array(s).
 */
static void
rtw89_phy_c2h_lps_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	const struct rtw89_c2h_lps_rpt *c2h_rpt = (const void *)c2h->data;
	const __le32 *data_a, *data_b;
	u16 len_info, cr_len, idx;
	const __le16 *addr;
	const u8 *info;

	/* elements size of BBCR/BBMCUCR/RFCR are 6/6/10 bytes respectively */
	cr_len = c2h_rpt->cnt_bbcr * 6 +
		 c2h_rpt->cnt_bbmcucr * 6 +
		 c2h_rpt->cnt_rfcr * 10;
	len_info = len - (sizeof(*c2h_rpt) + cr_len);

	/* Reject reports whose declared counts exceed the skb, or whose
	 * info section isn't a multiple of 4 bytes.
	 */
	if (len < sizeof(*c2h_rpt) + cr_len || len_info % 4 != 0) {
		rtw89_debug(rtwdev, RTW89_DBG_PS,
			    "Invalid LPS RPT len(%d) TYPE(%d) CRCNT: BB(%d) MCU(%d) RF(%d)\n",
			    len, c2h_rpt->type, c2h_rpt->cnt_bbcr,
			    c2h_rpt->cnt_bbmcucr, c2h_rpt->cnt_rfcr);
		return;
	}

	rtw89_debug(rtwdev, RTW89_DBG_PS,
		    "LPS RPT TYPE(%d), CRCNT: BB(%d) MCU(%d) RF(%d)\n",
		    c2h_rpt->type, c2h_rpt->cnt_bbcr,
		    c2h_rpt->cnt_bbmcucr, c2h_rpt->cnt_rfcr);

	/* Raw info words, printed byte-reversed (little endian). */
	info = &c2h_rpt->data[0];
	for (idx = 0; idx < len_info; idx += 4, info += 4)
		rtw89_debug(rtwdev, RTW89_DBG_PS,
			    "BB LPS INFO (%02d) - 0x%02x,0x%02x,0x%02x,0x%02x\n",
			    idx, info[3], info[2], info[1], info[0]);

	/* BBCR dump: cnt_bbcr addresses, then cnt_bbcr values. */
	addr = (const void *)(info);
	data_a = (const void *)(addr + c2h_rpt->cnt_bbcr);
	for (idx = 0; idx < c2h_rpt->cnt_bbcr; idx++, addr++, data_a++)
		rtw89_debug(rtwdev, RTW89_DBG_PS,
			    "LPS BB CR - 0x%04x=0x%08x\n",
			    le16_to_cpu(*addr), le32_to_cpu(*data_a));

	/* BBMCUCR dump follows the BBCR values directly. */
	addr = (const void *)data_a;
	data_a = (const void *)(addr + c2h_rpt->cnt_bbmcucr);
	for (idx = 0; idx < c2h_rpt->cnt_bbmcucr; idx++, addr++, data_a++)
		rtw89_debug(rtwdev, RTW89_DBG_PS,
			    "LPS BBMCU - 0x%04x=0x%08x\n",
			    le16_to_cpu(*addr), le32_to_cpu(*data_a));

	/* RFCR dump carries TWO value words per address (10 bytes/entry). */
	addr = (const void *)data_a;
	data_a = (const void *)(addr + c2h_rpt->cnt_rfcr);
	data_b = (const void *)(data_a + c2h_rpt->cnt_rfcr);
	for (idx = 0; idx < c2h_rpt->cnt_rfcr; idx++, addr++, data_a++, data_b++)
		rtw89_debug(rtwdev, RTW89_DBG_PS,
			    "LPS RFCR - 0x%04x=0x%05x,0x%05x\n",
			    le16_to_cpu(*addr), le32_to_cpu(*data_a),
			    le32_to_cpu(*data_b));
}
3400 
3401 static void
rtw89_phy_c2h_fw_scan_rpt(struct rtw89_dev * rtwdev,struct sk_buff * c2h,u32 len)3402 rtw89_phy_c2h_fw_scan_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
3403 {
3404 	const struct rtw89_c2h_fw_scan_rpt *c2h_rpt =
3405 		(const struct rtw89_c2h_fw_scan_rpt *)c2h->data;
3406 
3407 	rtw89_debug(rtwdev, RTW89_DBG_DIG,
3408 		    "%s: band: %u, op_chan: %u, PD_low_bd(ofdm, cck): (-%d, %d), phy_idx: %u\n",
3409 		    __func__, c2h_rpt->band, c2h_rpt->center_ch,
3410 		    PD_LOWER_BOUND_BASE - (c2h_rpt->ofdm_pd_idx << 1),
3411 		    c2h_rpt->cck_pd_idx, c2h_rpt->phy_idx);
3412 }
3413 
/* Dispatch table for DM-class PHY C2H events, indexed by function id.
 * NULL entries are events the driver recognizes but does not handle.
 */
static
void (* const rtw89_phy_c2h_dm_handler[])(struct rtw89_dev *rtwdev,
					  struct sk_buff *c2h, u32 len) = {
	[RTW89_PHY_C2H_DM_FUNC_FW_TEST] = NULL,
	[RTW89_PHY_C2H_DM_FUNC_FW_TRIG_TX_RPT] = NULL,
	[RTW89_PHY_C2H_DM_FUNC_SIGB] = NULL,
	[RTW89_PHY_C2H_DM_FUNC_LOWRT_RTY] = rtw89_phy_c2h_lowrt_rty,
	[RTW89_PHY_C2H_DM_FUNC_MCC_DIG] = NULL,
	[RTW89_PHY_C2H_DM_FUNC_LPS] = rtw89_phy_c2h_lps_rpt,
	[RTW89_PHY_C2H_DM_FUNC_ENV_MNTR] = rtw89_fw_c2h_dummy_handler,
	[RTW89_PHY_C2H_DM_FUNC_FW_SCAN] = rtw89_phy_c2h_fw_scan_rpt,
};
3426 
3427 static
rtw89_phy_c2h_rfk_tas_pwr(struct rtw89_dev * rtwdev,const struct rtw89_c2h_rf_tas_rpt_log * content)3428 void rtw89_phy_c2h_rfk_tas_pwr(struct rtw89_dev *rtwdev,
3429 			       const struct rtw89_c2h_rf_tas_rpt_log *content)
3430 {
3431 	const enum rtw89_sar_sources src = rtwdev->sar.src;
3432 	struct rtw89_tas_info *tas = &rtwdev->tas;
3433 	u64 linear = 0;
3434 	u32 i, cur_idx;
3435 	s16 txpwr;
3436 
3437 	if (!tas->enable || src == RTW89_SAR_SOURCE_NONE)
3438 		return;
3439 
3440 	cur_idx = le32_to_cpu(content->cur_idx);
3441 	for (i = 0; i < cur_idx; i++) {
3442 		txpwr = le16_to_cpu(content->txpwr_history[i]);
3443 		linear += rtw89_db_quarter_to_linear(txpwr);
3444 
3445 		rtw89_debug(rtwdev, RTW89_DBG_SAR,
3446 			    "tas: index: %u, txpwr: %d\n", i, txpwr);
3447 	}
3448 
3449 	if (cur_idx == 0)
3450 		tas->instant_txpwr = rtw89_db_to_linear(0);
3451 	else
3452 		tas->instant_txpwr = DIV_ROUND_DOWN_ULL(linear, cur_idx);
3453 }
3454 
rtw89_phy_c2h_rfk_rpt_log(struct rtw89_dev * rtwdev,enum rtw89_phy_c2h_rfk_log_func func,void * content,u16 len)3455 static void rtw89_phy_c2h_rfk_rpt_log(struct rtw89_dev *rtwdev,
3456 				      enum rtw89_phy_c2h_rfk_log_func func,
3457 				      void *content, u16 len)
3458 {
3459 	struct rtw89_c2h_rf_txgapk_rpt_log *txgapk;
3460 	struct rtw89_c2h_rf_rxdck_rpt_log *rxdck;
3461 	struct rtw89_c2h_rf_txiqk_rpt_log *txiqk;
3462 	struct rtw89_c2h_rf_cim3k_rpt_log *cim3k;
3463 	struct rtw89_c2h_rf_dack_rpt_log *dack;
3464 	struct rtw89_c2h_rf_tssi_rpt_log *tssi;
3465 	struct rtw89_c2h_rf_dpk_rpt_log *dpk;
3466 	struct rtw89_c2h_rf_iqk_rpt_log *iqk;
3467 	int i, j, k;
3468 
3469 	switch (func) {
3470 	case RTW89_PHY_C2H_RFK_LOG_FUNC_IQK:
3471 		if (len != sizeof(*iqk))
3472 			goto out;
3473 
3474 		iqk = content;
3475 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
3476 			    "[IQK] iqk->is_iqk_init = %x\n", iqk->is_iqk_init);
3477 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
3478 			    "[IQK] iqk->is_reload = %x\n", iqk->is_reload);
3479 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
3480 			    "[IQK] iqk->is_nbiqk = %x\n", iqk->is_nbiqk);
3481 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
3482 			    "[IQK] iqk->txiqk_en = %x\n", iqk->txiqk_en);
3483 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
3484 			    "[IQK] iqk->rxiqk_en = %x\n", iqk->rxiqk_en);
3485 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
3486 			    "[IQK] iqk->lok_en = %x\n", iqk->lok_en);
3487 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
3488 			    "[IQK] iqk->iqk_xym_en = %x\n", iqk->iqk_xym_en);
3489 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
3490 			    "[IQK] iqk->iqk_sram_en = %x\n", iqk->iqk_sram_en);
3491 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
3492 			    "[IQK] iqk->iqk_fft_en = %x\n", iqk->iqk_fft_en);
3493 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
3494 			    "[IQK] iqk->is_fw_iqk = %x\n", iqk->is_fw_iqk);
3495 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
3496 			    "[IQK] iqk->is_iqk_enable = %x\n", iqk->is_iqk_enable);
3497 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
3498 			    "[IQK] iqk->iqk_cfir_en = %x\n", iqk->iqk_cfir_en);
3499 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
3500 			    "[IQK] iqk->thermal_rek_en = %x\n", iqk->thermal_rek_en);
3501 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
3502 			    "[IQK] iqk->version = %x\n", iqk->version);
3503 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
3504 			    "[IQK] iqk->phy = %x\n", iqk->phy);
3505 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
3506 			    "[IQK] iqk->fwk_status = %x\n", iqk->fwk_status);
3507 
3508 		for (i = 0; i < 2; i++) {
3509 			rtw89_debug(rtwdev, RTW89_DBG_RFK,
3510 				    "[IQK] ======== Path %x  ========\n", i);
3511 			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->iqk_band[%d] = %x\n",
3512 				    i, iqk->iqk_band[i]);
3513 			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->iqk_ch[%d] = %x\n",
3514 				    i, iqk->iqk_ch[i]);
3515 			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->iqk_bw[%d] = %x\n",
3516 				    i, iqk->iqk_bw[i]);
3517 			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->rf_0x18[%d] = %x\n",
3518 				    i, le32_to_cpu(iqk->rf_0x18[i]));
3519 			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->lok_idac[%d] = %x\n",
3520 				    i, le32_to_cpu(iqk->lok_idac[i]));
3521 			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->lok_vbuf[%d] = %x\n",
3522 				    i, le32_to_cpu(iqk->lok_vbuf[i]));
3523 			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->iqk_tx_fail[%d] = %x\n",
3524 				    i, iqk->iqk_tx_fail[i]);
3525 			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->iqk_rx_fail[%d] = %x\n",
3526 				    i, iqk->iqk_rx_fail[i]);
3527 			for (j = 0; j < 6; j++)
3528 				rtw89_debug(rtwdev, RTW89_DBG_RFK,
3529 					    "[IQK] iqk->rftxgain[%d][%d] = %x\n",
3530 					    i, j, le32_to_cpu(iqk->rftxgain[i][j]));
3531 			for (j = 0; j < 6; j++)
3532 				rtw89_debug(rtwdev, RTW89_DBG_RFK,
3533 					    "[IQK] iqk->tx_xym[%d][%d] = %x\n",
3534 					    i, j, le32_to_cpu(iqk->tx_xym[i][j]));
3535 			for (j = 0; j < 6; j++)
3536 				rtw89_debug(rtwdev, RTW89_DBG_RFK,
3537 					    "[IQK] iqk->rfrxgain[%d][%d] = %x\n",
3538 					    i, j, le32_to_cpu(iqk->rfrxgain[i][j]));
3539 			for (j = 0; j < 6; j++)
3540 				rtw89_debug(rtwdev, RTW89_DBG_RFK,
3541 					    "[IQK] iqk->rx_xym[%d][%d] = %x\n",
3542 					    i, j, le32_to_cpu(iqk->rx_xym[i][j]));
3543 
3544 			if (!iqk->iqk_xym_en)
3545 				continue;
3546 
3547 			for (j = 0; j < 32; j++)
3548 				rtw89_debug(rtwdev, RTW89_DBG_RFK,
3549 					    "[IQK] iqk->rx_wb_xym[%d][%d] = %x\n",
3550 					    i, j, iqk->rx_wb_xym[i][j]);
3551 		}
3552 		return;
3553 	case RTW89_PHY_C2H_RFK_LOG_FUNC_DPK:
3554 		if (len != sizeof(*dpk))
3555 			goto out;
3556 
3557 		dpk = content;
3558 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
3559 			    "DPK ver:%d idx:%2ph band:%2ph bw:%2ph ch:%2ph path:%2ph\n",
3560 			    dpk->ver, dpk->idx, dpk->band, dpk->bw, dpk->ch, dpk->path_ok);
3561 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
3562 			    "DPK txagc:%2ph ther:%2ph gs:%2ph dc_i:%4ph dc_q:%4ph\n",
3563 			    dpk->txagc, dpk->ther, dpk->gs, dpk->dc_i, dpk->dc_q);
3564 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
3565 			    "DPK corr_v:%2ph corr_i:%2ph to:%2ph ov:%2ph\n",
3566 			    dpk->corr_val, dpk->corr_idx, dpk->is_timeout, dpk->rxbb_ov);
3567 		return;
3568 	case RTW89_PHY_C2H_RFK_LOG_FUNC_DACK:
3569 		if (len != sizeof(*dack))
3570 			goto out;
3571 
3572 		dack = content;
3573 
3574 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]FWDACK SUMMARY!!!!!\n");
3575 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
3576 			    "[DACK]FWDACK ver = 0x%x, FWDACK rpt_ver = 0x%x, driver rpt_ver = 0x%x\n",
3577 			    dack->fwdack_ver, dack->fwdack_info_ver, 0x2);
3578 
3579 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
3580 			    "[DACK]timeout code = [0x%x 0x%x 0x%x 0x%x 0x%x]\n",
3581 			    dack->addck_timeout, dack->cdack_timeout, dack->dadck_timeout,
3582 			    dack->adgaink_timeout, dack->msbk_timeout);
3583 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
3584 			    "[DACK]DACK fail = 0x%x\n", dack->dack_fail);
3585 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
3586 			    "[DACK]S0 WBADCK = [0x%x]\n", dack->wbdck_d[0]);
3587 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
3588 			    "[DACK]S1 WBADCK = [0x%x]\n", dack->wbdck_d[1]);
3589 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
3590 			    "[DACK]DRCK = [0x%x]\n", dack->rck_d);
3591 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 CDACK ic = [0x%x, 0x%x]\n",
3592 			    dack->cdack_d[0][0][0], dack->cdack_d[0][0][1]);
3593 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 CDACK qc = [0x%x, 0x%x]\n",
3594 			    dack->cdack_d[0][1][0], dack->cdack_d[0][1][1]);
3595 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 CDACK ic = [0x%x, 0x%x]\n",
3596 			    dack->cdack_d[1][0][0], dack->cdack_d[1][0][1]);
3597 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 CDACK qc = [0x%x, 0x%x]\n",
3598 			    dack->cdack_d[1][1][0], dack->cdack_d[1][1][1]);
3599 
3600 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADC_DCK ic = [0x%x, 0x%x]\n",
3601 			    ((u32)dack->addck2_hd[0][0][0] << 8) | dack->addck2_ld[0][0][0],
3602 			    ((u32)dack->addck2_hd[0][0][1] << 8) | dack->addck2_ld[0][0][1]);
3603 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADC_DCK qc = [0x%x, 0x%x]\n",
3604 			    ((u32)dack->addck2_hd[0][1][0] << 8) | dack->addck2_ld[0][1][0],
3605 			    ((u32)dack->addck2_hd[0][1][1] << 8) | dack->addck2_ld[0][1][1]);
3606 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADC_DCK ic = [0x%x, 0x%x]\n",
3607 			    ((u32)dack->addck2_hd[1][0][0] << 8) | dack->addck2_ld[1][0][0],
3608 			    ((u32)dack->addck2_hd[1][0][1] << 8) | dack->addck2_ld[1][0][1]);
3609 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADC_DCK qc = [0x%x, 0x%x]\n",
3610 			    ((u32)dack->addck2_hd[1][1][0] << 8) | dack->addck2_ld[1][1][0],
3611 			    ((u32)dack->addck2_hd[1][1][1] << 8) | dack->addck2_ld[1][1][1]);
3612 
3613 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADC_GAINK ic = 0x%x, qc = 0x%x\n",
3614 			    dack->adgaink_d[0][0], dack->adgaink_d[0][1]);
3615 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADC_GAINK ic = 0x%x, qc = 0x%x\n",
3616 			    dack->adgaink_d[1][0], dack->adgaink_d[1][1]);
3617 
3618 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n",
3619 			    dack->dadck_d[0][0], dack->dadck_d[0][1]);
3620 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 DAC_DCK ic = 0x%x, qc = 0x%x\n",
3621 			    dack->dadck_d[1][0], dack->dadck_d[1][1]);
3622 
3623 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 biask iqc = 0x%x\n",
3624 			    ((u32)dack->biask_hd[0][0] << 8) | dack->biask_ld[0][0]);
3625 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 biask iqc = 0x%x\n",
3626 			    ((u32)dack->biask_hd[1][0] << 8) | dack->biask_ld[1][0]);
3627 
3628 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK ic:\n");
3629 		for (i = 0; i < 0x10; i++)
3630 			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n",
3631 				    dack->msbk_d[0][0][i]);
3632 
3633 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK qc:\n");
3634 		for (i = 0; i < 0x10; i++)
3635 			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n",
3636 				    dack->msbk_d[0][1][i]);
3637 
3638 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK ic:\n");
3639 		for (i = 0; i < 0x10; i++)
3640 			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n",
3641 				    dack->msbk_d[1][0][i]);
3642 
3643 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK qc:\n");
3644 		for (i = 0; i < 0x10; i++)
3645 			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n",
3646 				    dack->msbk_d[1][1][i]);
3647 		return;
3648 	case RTW89_PHY_C2H_RFK_LOG_FUNC_RXDCK:
3649 		if (len != sizeof(*rxdck))
3650 			goto out;
3651 
3652 		rxdck = content;
3653 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
3654 			    "RXDCK ver:%d band:%2ph bw:%2ph ch:%2ph to:%2ph\n",
3655 			    rxdck->ver, rxdck->band, rxdck->bw, rxdck->ch,
3656 			    rxdck->timeout);
3657 		return;
3658 	case RTW89_PHY_C2H_RFK_LOG_FUNC_TSSI:
3659 		if (len != sizeof(*tssi))
3660 			goto out;
3661 
3662 		tssi = content;
3663 		for (i = 0; i < 2; i++) {
3664 			for (j = 0; j < 2; j++) {
3665 				for (k = 0; k < 4; k++) {
3666 					rtw89_debug(rtwdev, RTW89_DBG_RFK,
3667 						    "[TSSI] alignment_power_cw_h[%d][%d][%d]=%d\n",
3668 						    i, j, k, tssi->alignment_power_cw_h[i][j][k]);
3669 					rtw89_debug(rtwdev, RTW89_DBG_RFK,
3670 						    "[TSSI] alignment_power_cw_l[%d][%d][%d]=%d\n",
3671 						    i, j, k, tssi->alignment_power_cw_l[i][j][k]);
3672 					rtw89_debug(rtwdev, RTW89_DBG_RFK,
3673 						    "[TSSI] alignment_power[%d][%d][%d]=%d\n",
3674 						    i, j, k, tssi->alignment_power[i][j][k]);
3675 					rtw89_debug(rtwdev, RTW89_DBG_RFK,
3676 						    "[TSSI] alignment_power_cw[%d][%d][%d]=%d\n",
3677 						    i, j, k,
3678 						    (tssi->alignment_power_cw_h[i][j][k] << 8) +
3679 						     tssi->alignment_power_cw_l[i][j][k]);
3680 				}
3681 
3682 				rtw89_debug(rtwdev, RTW89_DBG_RFK,
3683 					    "[TSSI] tssi_alimk_state[%d][%d]=%d\n",
3684 					    i, j, tssi->tssi_alimk_state[i][j]);
3685 				rtw89_debug(rtwdev, RTW89_DBG_RFK,
3686 					    "[TSSI] default_txagc_offset[%d]=%d\n",
3687 					    j, tssi->default_txagc_offset[0][j]);
3688 			}
3689 		}
3690 		return;
3691 	case RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK:
3692 		if (len != sizeof(*txgapk))
3693 			goto out;
3694 
3695 		txgapk = content;
3696 		rtw89_debug(rtwdev, RTW89_DBG_RFK,
3697 			    "[TXGAPK]rpt r0x8010[0]=0x%x, r0x8010[1]=0x%x\n",
3698 			    le32_to_cpu(txgapk->r0x8010[0]),
3699 			    le32_to_cpu(txgapk->r0x8010[1]));
3700 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt chk_id = %d\n",
3701 			    txgapk->chk_id);
3702 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt chk_cnt = %d\n",
3703 			    le32_to_cpu(txgapk->chk_cnt));
3704 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt ver = 0x%x\n",
3705 			    txgapk->ver);
3706 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt d_bnd_ok = %d\n",
3707 			    txgapk->d_bnd_ok);
3708 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt stage[0] = 0x%x\n",
3709 			    le32_to_cpu(txgapk->stage[0]));
3710 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt stage[1] = 0x%x\n",
3711 			    le32_to_cpu(txgapk->stage[1]));
3712 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]failcode[0] = 0x%x\n",
3713 			    le16_to_cpu(txgapk->failcode[0]));
3714 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]failcode[1] = 0x%x\n",
3715 			    le16_to_cpu(txgapk->failcode[1]));
3716 
3717 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt track_d[0] = %*ph\n",
3718 			    (int)sizeof(txgapk->track_d[0]), txgapk->track_d[0]);
3719 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt power_d[0] = %*ph\n",
3720 			    (int)sizeof(txgapk->power_d[0]), txgapk->power_d[0]);
3721 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt track_d[1] = %*ph\n",
3722 			    (int)sizeof(txgapk->track_d[1]), txgapk->track_d[1]);
3723 		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt power_d[1] = %*ph\n",
3724 			    (int)sizeof(txgapk->power_d[1]), txgapk->power_d[1]);
3725 		return;
3726 	case RTW89_PHY_C2H_RFK_LOG_FUNC_TAS_PWR:
3727 		if (len != sizeof(struct rtw89_c2h_rf_tas_rpt_log))
3728 			goto out;
3729 
3730 		rtw89_phy_c2h_rfk_tas_pwr(rtwdev, content);
3731 		return;
3732 	case RTW89_PHY_C2H_RFK_LOG_FUNC_TXIQK:
3733 		if (len != sizeof(*txiqk))
3734 			goto out;
3735 		return;
3736 	case RTW89_PHY_C2H_RFK_LOG_FUNC_CIM3K:
3737 		if (len != sizeof(*cim3k))
3738 			goto out;
3739 		return;
3740 	default:
3741 		break;
3742 	}
3743 
3744 out:
3745 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
3746 		    "unexpected RFK func %d report log with length %d\n", func, len);
3747 }
3748 
/* Try to pretty-print one firmware RFK "run log" entry using the printf-style
 * format strings shipped in the RFK log-format firmware element.
 *
 * @func: RFK function the log belongs to; indexes the per-function element.
 * @content: payload of the log chunk, expected to be a
 *	struct rtw89_c2h_rf_run_log (a format index plus four u32 arguments).
 * @len: payload length; must exactly match sizeof(struct rtw89_c2h_rf_run_log).
 *
 * Return: true if the entry was formatted and printed; false when the size is
 * wrong or no usable format element/slot exists, in which case the caller
 * falls back to a raw hex dump.
 */
static bool rtw89_phy_c2h_rfk_run_log(struct rtw89_dev *rtwdev,
				      enum rtw89_phy_c2h_rfk_log_func func,
				      void *content, u16 len)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	const struct rtw89_c2h_rf_run_log *log = content;
	const struct rtw89_fw_element_hdr *elm;
	u32 fmt_idx;
	u16 offset;

	if (sizeof(*log) != len)
		return false;

	if (!elm_info->rfk_log_fmt)
		return false;

	elm = elm_info->rfk_log_fmt->elm[func];
	fmt_idx = le32_to_cpu(log->fmt_idx);
	if (!elm || fmt_idx >= elm->u.rfk_log_fmt.nr)
		return false;

	/* a zero offset marks an unused format-string slot */
	offset = le16_to_cpu(elm->u.rfk_log_fmt.offset[fmt_idx]);
	if (offset == 0)
		return false;

	/* format string comes from the firmware element, args from the C2H */
	rtw89_debug(rtwdev, RTW89_DBG_RFK, &elm->u.common.contents[offset],
		    le32_to_cpu(log->arg[0]), le32_to_cpu(log->arg[1]),
		    le32_to_cpu(log->arg[2]), le32_to_cpu(log->arg[3]));

	return true;
}
3780 
/* Parse an RFK log C2H: skip the C2H header, then walk the sequence of
 * variable-length chunks (struct rtw89_c2h_rf_log_hdr + payload).  Run-log
 * chunks are rendered via the firmware format table when possible, report
 * chunks are decoded by rtw89_phy_c2h_rfk_rpt_log().  Parsing stops on a
 * truncated chunk or an unknown chunk type.
 */
static void rtw89_phy_c2h_rfk_log(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
				  u32 len, enum rtw89_phy_c2h_rfk_log_func func,
				  const char *rfk_name)
{
	struct rtw89_c2h_hdr *c2h_hdr = (struct rtw89_c2h_hdr *)c2h->data;
	struct rtw89_c2h_rf_log_hdr *log_hdr;
	void *log_ptr = c2h_hdr;
	u16 content_len;
	u16 chunk_len;
	bool handled;

	log_ptr += sizeof(*c2h_hdr);
	len -= sizeof(*c2h_hdr);

	while (len > sizeof(*log_hdr)) {
		log_hdr = log_ptr;
		content_len = le16_to_cpu(log_hdr->len);
		chunk_len = content_len + sizeof(*log_hdr);

		/* chunk claims more bytes than remain: stop parsing */
		if (chunk_len > len)
			break;

		switch (log_hdr->type) {
		case RTW89_RF_RUN_LOG:
			handled = rtw89_phy_c2h_rfk_run_log(rtwdev, func,
							    log_hdr->content, content_len);
			if (handled)
				break;

			/* no usable format element: dump raw payload instead */
			rtw89_debug(rtwdev, RTW89_DBG_RFK, "%s run: %*ph\n",
				    rfk_name, content_len, log_hdr->content);
			break;
		case RTW89_RF_RPT_LOG:
			rtw89_phy_c2h_rfk_rpt_log(rtwdev, func,
						  log_hdr->content, content_len);
			break;
		default:
			/* unknown chunk type: remaining layout is untrusted */
			return;
		}

		log_ptr += chunk_len;
		len -= chunk_len;
	}
}
3825 
/* C2H handler: parse and print IQK calibration log chunks. */
static void
rtw89_phy_c2h_rfk_log_iqk(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
			      RTW89_PHY_C2H_RFK_LOG_FUNC_IQK, "IQK");
}
3832 
/* C2H handler: parse and print DPK calibration log chunks. */
static void
rtw89_phy_c2h_rfk_log_dpk(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
			      RTW89_PHY_C2H_RFK_LOG_FUNC_DPK, "DPK");
}
3839 
/* C2H handler: parse and print DACK calibration log chunks. */
static void
rtw89_phy_c2h_rfk_log_dack(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
			      RTW89_PHY_C2H_RFK_LOG_FUNC_DACK, "DACK");
}
3846 
/* C2H handler: parse and print RX DCK calibration log chunks. */
static void
rtw89_phy_c2h_rfk_log_rxdck(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
			      RTW89_PHY_C2H_RFK_LOG_FUNC_RXDCK, "RX_DCK");
}
3853 
/* C2H handler: parse and print TSSI calibration log chunks. */
static void
rtw89_phy_c2h_rfk_log_tssi(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
			      RTW89_PHY_C2H_RFK_LOG_FUNC_TSSI, "TSSI");
}
3860 
/* C2H handler: parse and print TX gap-K calibration log chunks. */
static void
rtw89_phy_c2h_rfk_log_txgapk(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
			      RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK, "TXGAPK");
}
3867 
/* C2H handler: parse and print TAS power log chunks. */
static void
rtw89_phy_c2h_rfk_log_tas_pwr(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
			      RTW89_PHY_C2H_RFK_LOG_FUNC_TAS_PWR, "TAS");
}
3874 
/* C2H handler: parse and print TX IQK calibration log chunks. */
static void
rtw89_phy_c2h_rfk_log_txiqk(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
			      RTW89_PHY_C2H_RFK_LOG_FUNC_TXIQK, "TXIQK");
}
3881 
/* C2H handler: parse and print CIM3K calibration log chunks. */
static void
rtw89_phy_c2h_rfk_log_cim3k(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
			      RTW89_PHY_C2H_RFK_LOG_FUNC_CIM3K, "CIM3K");
}
3888 
/* RFK log C2H dispatch table, indexed by enum rtw89_phy_c2h_rfk_log_func. */
static
void (* const rtw89_phy_c2h_rfk_log_handler[])(struct rtw89_dev *rtwdev,
					       struct sk_buff *c2h, u32 len) = {
	[RTW89_PHY_C2H_RFK_LOG_FUNC_IQK] = rtw89_phy_c2h_rfk_log_iqk,
	[RTW89_PHY_C2H_RFK_LOG_FUNC_DPK] = rtw89_phy_c2h_rfk_log_dpk,
	[RTW89_PHY_C2H_RFK_LOG_FUNC_DACK] = rtw89_phy_c2h_rfk_log_dack,
	[RTW89_PHY_C2H_RFK_LOG_FUNC_RXDCK] = rtw89_phy_c2h_rfk_log_rxdck,
	[RTW89_PHY_C2H_RFK_LOG_FUNC_TSSI] = rtw89_phy_c2h_rfk_log_tssi,
	[RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK] = rtw89_phy_c2h_rfk_log_txgapk,
	[RTW89_PHY_C2H_RFK_LOG_FUNC_TAS_PWR] = rtw89_phy_c2h_rfk_log_tas_pwr,
	[RTW89_PHY_C2H_RFK_LOG_FUNC_TXIQK] = rtw89_phy_c2h_rfk_log_txiqk,
	[RTW89_PHY_C2H_RFK_LOG_FUNC_CIM3K] = rtw89_phy_c2h_rfk_log_cim3k,
};
3902 
/* Arm the RFK wait object before issuing an RFK H2C command so a later
 * rtw89_phy_rfk_report_wait() can pend on the firmware's completion C2H.
 */
static
void rtw89_phy_rfk_report_prep(struct rtw89_dev *rtwdev)
{
	struct rtw89_rfk_wait_info *wait = &rtwdev->rfk_wait;

	wait->state = RTW89_RFK_STATE_START;
	wait->start_time = ktime_get();
	reinit_completion(&wait->completion);
}
3912 
/* Wait up to @ms for the firmware's RFK completion C2H.
 *
 * During SER (system error recovery) no C2H can be received, so instead
 * sleep for half the budget and treat the calibration as done.
 *
 * Return: 0 on success, -ETIMEDOUT if no report arrived in time, or
 * -EFAULT if the firmware reported a state other than RTW89_RFK_STATE_OK.
 */
static
int rtw89_phy_rfk_report_wait(struct rtw89_dev *rtwdev, const char *rfk_name,
			      unsigned int ms)
{
	struct rtw89_rfk_wait_info *wait = &rtwdev->rfk_wait;
	unsigned long time_left;

	/* Since we can't receive C2H event during SER, use a fixed delay. */
	if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags)) {
		fsleep(1000 * ms / 2);
		goto out;
	}

	time_left = wait_for_completion_timeout(&wait->completion,
						msecs_to_jiffies(ms));
	if (time_left == 0) {
		rtw89_warn(rtwdev, "failed to wait RF %s\n", rfk_name);
		return -ETIMEDOUT;
	} else if (wait->state != RTW89_RFK_STATE_OK) {
		rtw89_warn(rtwdev, "failed to do RF %s result from state %d\n",
			   rfk_name, wait->state);
		return -EFAULT;
	}

out:
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "RF %s takes %lld ms to complete\n",
		    rfk_name, ktime_ms_delta(ktime_get(), wait->start_time));

	return 0;
}
3943 
/* C2H handler: record the firmware's RFK result state/version and wake any
 * rtw89_phy_rfk_report_wait() sleeper.
 */
static void
rtw89_phy_c2h_rfk_report_state(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	const struct rtw89_c2h_rfk_report *report =
		(const struct rtw89_c2h_rfk_report *)c2h->data;
	struct rtw89_rfk_wait_info *wait = &rtwdev->rfk_wait;

	wait->state = report->state;
	wait->version = report->version;

	complete(&wait->completion);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "RFK report state %d with version %d (%*ph)\n",
		    wait->state, wait->version,
		    (int)(len - sizeof(report->hdr)), &report->state);
}
3961 
/* C2H handler: forward a TAS power report to the common TAS decoder. */
static void
rtw89_phy_c2h_rfk_report_tas_pwr(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	const struct rtw89_c2h_rf_tas_info *report =
		(const struct rtw89_c2h_rf_tas_info *)c2h->data;

	rtw89_phy_c2h_rfk_tas_pwr(rtwdev, &report->content);
}
3970 
/* RFK report C2H dispatch table, indexed by the report function id. */
static
void (* const rtw89_phy_c2h_rfk_report_handler[])(struct rtw89_dev *rtwdev,
						  struct sk_buff *c2h, u32 len) = {
	[RTW89_PHY_C2H_RFK_REPORT_FUNC_STATE] = rtw89_phy_c2h_rfk_report_state,
	[RTW89_PHY_C2H_RFK_REPORT_FUNC_TAS_PWR] = rtw89_phy_c2h_rfk_report_tas_pwr,
};
3977 
rtw89_phy_c2h_chk_atomic(struct rtw89_dev * rtwdev,u8 class,u8 func)3978 bool rtw89_phy_c2h_chk_atomic(struct rtw89_dev *rtwdev, u8 class, u8 func)
3979 {
3980 	switch (class) {
3981 	case RTW89_PHY_C2H_RFK_LOG:
3982 		switch (func) {
3983 		case RTW89_PHY_C2H_RFK_LOG_FUNC_IQK:
3984 		case RTW89_PHY_C2H_RFK_LOG_FUNC_DPK:
3985 		case RTW89_PHY_C2H_RFK_LOG_FUNC_DACK:
3986 		case RTW89_PHY_C2H_RFK_LOG_FUNC_RXDCK:
3987 		case RTW89_PHY_C2H_RFK_LOG_FUNC_TSSI:
3988 		case RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK:
3989 		case RTW89_PHY_C2H_RFK_LOG_FUNC_TXIQK:
3990 			return true;
3991 		default:
3992 			return false;
3993 		}
3994 	case RTW89_PHY_C2H_RFK_REPORT:
3995 		switch (func) {
3996 		case RTW89_PHY_C2H_RFK_REPORT_FUNC_STATE:
3997 			return true;
3998 		default:
3999 			return false;
4000 		}
4001 	default:
4002 		return false;
4003 	}
4004 }
4005 
/* Dispatch a PHY C2H event to the per-class handler table entry selected by
 * @func. Unsupported (class, func) pairs are reported once and dropped.
 */
void rtw89_phy_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
			  u32 len, u8 class, u8 func)
{
	void (*fn)(struct rtw89_dev *rtwdev,
		   struct sk_buff *c2h, u32 len) = NULL;

	switch (class) {
	case RTW89_PHY_C2H_CLASS_RA:
		if (func < ARRAY_SIZE(rtw89_phy_c2h_ra_handler))
			fn = rtw89_phy_c2h_ra_handler[func];
		break;
	case RTW89_PHY_C2H_RFK_LOG:
		if (func < ARRAY_SIZE(rtw89_phy_c2h_rfk_log_handler))
			fn = rtw89_phy_c2h_rfk_log_handler[func];
		break;
	case RTW89_PHY_C2H_RFK_REPORT:
		if (func < ARRAY_SIZE(rtw89_phy_c2h_rfk_report_handler))
			fn = rtw89_phy_c2h_rfk_report_handler[func];
		break;
	case RTW89_PHY_C2H_CLASS_DM:
		if (func < ARRAY_SIZE(rtw89_phy_c2h_dm_handler))
			fn = rtw89_phy_c2h_dm_handler[func];
		break;
	default:
		break;
	}

	if (fn) {
		fn(rtwdev, skb, len);
		return;
	}

	rtw89_info_once(rtwdev, "PHY c2h class %d func %d not support\n",
			class, func);
}
4039 
rtw89_phy_rfk_pre_ntfy_and_wait(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy_idx,unsigned int ms)4040 int rtw89_phy_rfk_pre_ntfy_and_wait(struct rtw89_dev *rtwdev,
4041 				    enum rtw89_phy_idx phy_idx,
4042 				    unsigned int ms)
4043 {
4044 	int ret;
4045 
4046 	if (RTW89_CHK_FW_FEATURE_GROUP(WITH_RFK_PRE_NOTIFY, &rtwdev->fw)) {
4047 		rtw89_phy_rfk_report_prep(rtwdev);
4048 		rtw89_fw_h2c_rf_pre_ntfy(rtwdev, phy_idx);
4049 		ret = rtw89_phy_rfk_report_wait(rtwdev, "PRE_NTFY", ms);
4050 		if (ret)
4051 			return ret;
4052 	}
4053 
4054 	if (RTW89_CHK_FW_FEATURE_GROUP(WITH_RFK_PRE_NOTIFY_MCC, &rtwdev->fw)) {
4055 		ret = rtw89_fw_h2c_rf_pre_ntfy_mcc(rtwdev, phy_idx);
4056 		if (ret)
4057 			return ret;
4058 	}
4059 
4060 	return 0;
4061 
4062 }
4063 EXPORT_SYMBOL(rtw89_phy_rfk_pre_ntfy_and_wait);
4064 
/* Trigger TSSI calibration in firmware and wait up to @ms for its
 * completion report.  Return: 0 on success or a negative error code.
 */
int rtw89_phy_rfk_tssi_and_wait(struct rtw89_dev *rtwdev,
				enum rtw89_phy_idx phy_idx,
				const struct rtw89_chan *chan,
				enum rtw89_tssi_mode tssi_mode,
				unsigned int ms)
{
	int ret;

	rtw89_phy_rfk_report_prep(rtwdev);

	ret = rtw89_fw_h2c_rf_tssi(rtwdev, phy_idx, chan, tssi_mode);
	return ret ? ret : rtw89_phy_rfk_report_wait(rtwdev, "TSSI", ms);
}
EXPORT_SYMBOL(rtw89_phy_rfk_tssi_and_wait);
4082 
/* Trigger IQK calibration in firmware and wait up to @ms for its
 * completion report.  Return: 0 on success or a negative error code.
 */
int rtw89_phy_rfk_iqk_and_wait(struct rtw89_dev *rtwdev,
			       enum rtw89_phy_idx phy_idx,
			       const struct rtw89_chan *chan,
			       unsigned int ms)
{
	int ret;

	rtw89_phy_rfk_report_prep(rtwdev);

	ret = rtw89_fw_h2c_rf_iqk(rtwdev, phy_idx, chan);
	return ret ? ret : rtw89_phy_rfk_report_wait(rtwdev, "IQK", ms);
}
EXPORT_SYMBOL(rtw89_phy_rfk_iqk_and_wait);
4099 
/* Trigger DPK calibration in firmware and wait up to @ms for its
 * completion report.  Return: 0 on success or a negative error code.
 */
int rtw89_phy_rfk_dpk_and_wait(struct rtw89_dev *rtwdev,
			       enum rtw89_phy_idx phy_idx,
			       const struct rtw89_chan *chan,
			       unsigned int ms)
{
	int ret;

	rtw89_phy_rfk_report_prep(rtwdev);

	ret = rtw89_fw_h2c_rf_dpk(rtwdev, phy_idx, chan);
	return ret ? ret : rtw89_phy_rfk_report_wait(rtwdev, "DPK", ms);
}
EXPORT_SYMBOL(rtw89_phy_rfk_dpk_and_wait);
4116 
/* Trigger TX gap-K calibration in firmware and wait up to @ms for its
 * completion report.  Return: 0 on success or a negative error code.
 */
int rtw89_phy_rfk_txgapk_and_wait(struct rtw89_dev *rtwdev,
				  enum rtw89_phy_idx phy_idx,
				  const struct rtw89_chan *chan,
				  unsigned int ms)
{
	int ret;

	rtw89_phy_rfk_report_prep(rtwdev);

	ret = rtw89_fw_h2c_rf_txgapk(rtwdev, phy_idx, chan);
	return ret ? ret : rtw89_phy_rfk_report_wait(rtwdev, "TXGAPK", ms);
}
EXPORT_SYMBOL(rtw89_phy_rfk_txgapk_and_wait);
4133 
/* Trigger DACK calibration in firmware and wait up to @ms for its
 * completion report.  Return: 0 on success or a negative error code.
 */
int rtw89_phy_rfk_dack_and_wait(struct rtw89_dev *rtwdev,
				enum rtw89_phy_idx phy_idx,
				const struct rtw89_chan *chan,
				unsigned int ms)
{
	int ret;

	rtw89_phy_rfk_report_prep(rtwdev);

	ret = rtw89_fw_h2c_rf_dack(rtwdev, phy_idx, chan);
	return ret ? ret : rtw89_phy_rfk_report_wait(rtwdev, "DACK", ms);
}
EXPORT_SYMBOL(rtw89_phy_rfk_dack_and_wait);
4150 
/* Trigger RX DCK calibration in firmware and wait up to @ms for its
 * completion report.  @is_chl_k selects the channel-K flavor of the H2C.
 * Return: 0 on success or a negative error code.
 */
int rtw89_phy_rfk_rxdck_and_wait(struct rtw89_dev *rtwdev,
				 enum rtw89_phy_idx phy_idx,
				 const struct rtw89_chan *chan,
				 bool is_chl_k, unsigned int ms)
{
	int ret;

	rtw89_phy_rfk_report_prep(rtwdev);

	ret = rtw89_fw_h2c_rf_rxdck(rtwdev, phy_idx, chan, is_chl_k);
	return ret ? ret : rtw89_phy_rfk_report_wait(rtwdev, "RX_DCK", ms);
}
EXPORT_SYMBOL(rtw89_phy_rfk_rxdck_and_wait);
4167 
/* Trigger TX IQK calibration in firmware and wait up to @ms for its
 * completion report.  Return: 0 on success or a negative error code.
 */
int rtw89_phy_rfk_txiqk_and_wait(struct rtw89_dev *rtwdev,
				 enum rtw89_phy_idx phy_idx,
				 const struct rtw89_chan *chan,
				 unsigned int ms)
{
	int ret;

	rtw89_phy_rfk_report_prep(rtwdev);

	ret = rtw89_fw_h2c_rf_txiqk(rtwdev, phy_idx, chan);
	return ret ? ret : rtw89_phy_rfk_report_wait(rtwdev, "TX_IQK", ms);
}
EXPORT_SYMBOL(rtw89_phy_rfk_txiqk_and_wait);
4184 
/* Trigger CIM3K calibration in firmware and wait up to @ms for its
 * completion report.  Return: 0 on success or a negative error code.
 */
int rtw89_phy_rfk_cim3k_and_wait(struct rtw89_dev *rtwdev,
				 enum rtw89_phy_idx phy_idx,
				 const struct rtw89_chan *chan,
				 unsigned int ms)
{
	int ret;

	rtw89_phy_rfk_report_prep(rtwdev);

	ret = rtw89_fw_h2c_rf_cim3k(rtwdev, phy_idx, chan);
	return ret ? ret : rtw89_phy_rfk_report_wait(rtwdev, "CIM3k", ms);
}
EXPORT_SYMBOL(rtw89_phy_rfk_cim3k_and_wait);
4201 
phy_tssi_get_cck_group(u8 ch)4202 static u32 phy_tssi_get_cck_group(u8 ch)
4203 {
4204 	switch (ch) {
4205 	case 1 ... 2:
4206 		return 0;
4207 	case 3 ... 5:
4208 		return 1;
4209 	case 6 ... 8:
4210 		return 2;
4211 	case 9 ... 11:
4212 		return 3;
4213 	case 12 ... 13:
4214 		return 4;
4215 	case 14:
4216 		return 5;
4217 	}
4218 
4219 	return 0;
4220 }
4221 
/* A channel that falls between two adjacent TSSI groups is encoded with the
 * EXTRA bit set on the lower group's index; IDX1 recovers that lower group
 * and IDX2 (= IDX1 + 1) the next one.  Callers presumably combine values
 * from both neighboring groups for such channels.
 */
#define PHY_TSSI_EXTRA_GROUP_BIT BIT(31)
#define PHY_TSSI_EXTRA_GROUP(idx) (PHY_TSSI_EXTRA_GROUP_BIT | (idx))
#define PHY_IS_TSSI_EXTRA_GROUP(group) ((group) & PHY_TSSI_EXTRA_GROUP_BIT)
#define PHY_TSSI_EXTRA_GET_GROUP_IDX1(group) \
	((group) & ~PHY_TSSI_EXTRA_GROUP_BIT)
#define PHY_TSSI_EXTRA_GET_GROUP_IDX2(group) \
	(PHY_TSSI_EXTRA_GET_GROUP_IDX1(group) + 1)
4229 
/* Map a 2 GHz / 5 GHz OFDM channel to its TSSI group (0..18).  Channels in
 * the gaps between two groups return PHY_TSSI_EXTRA_GROUP() so callers can
 * use both neighboring groups; unknown channels fall back to group 0.
 */
static u32 phy_tssi_get_ofdm_group(u8 ch)
{
	switch (ch) {
	case 1 ... 2:
		return 0;
	case 3 ... 5:
		return 1;
	case 6 ... 8:
		return 2;
	case 9 ... 11:
		return 3;
	case 12 ... 14:
		return 4;
	case 36 ... 40:
		return 5;
	case 41 ... 43:
		return PHY_TSSI_EXTRA_GROUP(5);
	case 44 ... 48:
		return 6;
	case 49 ... 51:
		return PHY_TSSI_EXTRA_GROUP(6);
	case 52 ... 56:
		return 7;
	case 57 ... 59:
		return PHY_TSSI_EXTRA_GROUP(7);
	case 60 ... 64:
		return 8;
	case 100 ... 104:
		return 9;
	case 105 ... 107:
		return PHY_TSSI_EXTRA_GROUP(9);
	case 108 ... 112:
		return 10;
	case 113 ... 115:
		return PHY_TSSI_EXTRA_GROUP(10);
	case 116 ... 120:
		return 11;
	case 121 ... 123:
		return PHY_TSSI_EXTRA_GROUP(11);
	case 124 ... 128:
		return 12;
	case 129 ... 131:
		return PHY_TSSI_EXTRA_GROUP(12);
	case 132 ... 136:
		return 13;
	case 137 ... 139:
		return PHY_TSSI_EXTRA_GROUP(13);
	case 140 ... 144:
		return 14;
	case 149 ... 153:
		return 15;
	case 154 ... 156:
		return PHY_TSSI_EXTRA_GROUP(15);
	case 157 ... 161:
		return 16;
	case 162 ... 164:
		return PHY_TSSI_EXTRA_GROUP(16);
	case 165 ... 169:
		return 17;
	case 170 ... 172:
		return PHY_TSSI_EXTRA_GROUP(17);
	case 173 ... 177:
		return 18;
	}

	return 0;
}
4297 
/* Map a 6 GHz OFDM channel to its TSSI group (0..31).  Channels in the gaps
 * between two groups return PHY_TSSI_EXTRA_GROUP(); unknown channels fall
 * back to group 0.
 */
static u32 phy_tssi_get_6g_ofdm_group(u8 ch)
{
	switch (ch) {
	case 1 ... 5:
		return 0;
	case 6 ... 8:
		return PHY_TSSI_EXTRA_GROUP(0);
	case 9 ... 13:
		return 1;
	case 14 ... 16:
		return PHY_TSSI_EXTRA_GROUP(1);
	case 17 ... 21:
		return 2;
	case 22 ... 24:
		return PHY_TSSI_EXTRA_GROUP(2);
	case 25 ... 29:
		return 3;
	case 33 ... 37:
		return 4;
	case 38 ... 40:
		return PHY_TSSI_EXTRA_GROUP(4);
	case 41 ... 45:
		return 5;
	case 46 ... 48:
		return PHY_TSSI_EXTRA_GROUP(5);
	case 49 ... 53:
		return 6;
	case 54 ... 56:
		return PHY_TSSI_EXTRA_GROUP(6);
	case 57 ... 61:
		return 7;
	case 65 ... 69:
		return 8;
	case 70 ... 72:
		return PHY_TSSI_EXTRA_GROUP(8);
	case 73 ... 77:
		return 9;
	case 78 ... 80:
		return PHY_TSSI_EXTRA_GROUP(9);
	case 81 ... 85:
		return 10;
	case 86 ... 88:
		return PHY_TSSI_EXTRA_GROUP(10);
	case 89 ... 93:
		return 11;
	case 97 ... 101:
		return 12;
	case 102 ... 104:
		return PHY_TSSI_EXTRA_GROUP(12);
	case 105 ... 109:
		return 13;
	case 110 ... 112:
		return PHY_TSSI_EXTRA_GROUP(13);
	case 113 ... 117:
		return 14;
	case 118 ... 120:
		return PHY_TSSI_EXTRA_GROUP(14);
	case 121 ... 125:
		return 15;
	case 129 ... 133:
		return 16;
	case 134 ... 136:
		return PHY_TSSI_EXTRA_GROUP(16);
	case 137 ... 141:
		return 17;
	case 142 ... 144:
		return PHY_TSSI_EXTRA_GROUP(17);
	case 145 ... 149:
		return 18;
	case 150 ... 152:
		return PHY_TSSI_EXTRA_GROUP(18);
	case 153 ... 157:
		return 19;
	case 161 ... 165:
		return 20;
	case 166 ... 168:
		return PHY_TSSI_EXTRA_GROUP(20);
	case 169 ... 173:
		return 21;
	case 174 ... 176:
		return PHY_TSSI_EXTRA_GROUP(21);
	case 177 ... 181:
		return 22;
	case 182 ... 184:
		return PHY_TSSI_EXTRA_GROUP(22);
	case 185 ... 189:
		return 23;
	case 193 ... 197:
		return 24;
	case 198 ... 200:
		return PHY_TSSI_EXTRA_GROUP(24);
	case 201 ... 205:
		return 25;
	case 206 ... 208:
		return PHY_TSSI_EXTRA_GROUP(25);
	case 209 ... 213:
		return 26;
	case 214 ... 216:
		return PHY_TSSI_EXTRA_GROUP(26);
	case 217 ... 221:
		return 27;
	case 225 ... 229:
		return 28;
	case 230 ... 232:
		return PHY_TSSI_EXTRA_GROUP(28);
	case 233 ... 237:
		return 29;
	case 238 ... 240:
		return PHY_TSSI_EXTRA_GROUP(29);
	case 241 ... 245:
		return 30;
	case 246 ... 248:
		return PHY_TSSI_EXTRA_GROUP(30);
	case 249 ... 253:
		return 31;
	}

	return 0;
}
4417 
phy_tssi_get_trim_group(u8 ch)4418 static u32 phy_tssi_get_trim_group(u8 ch)
4419 {
4420 	switch (ch) {
4421 	case 1 ... 8:
4422 		return 0;
4423 	case 9 ... 14:
4424 		return 1;
4425 	case 36 ... 48:
4426 		return 2;
4427 	case 49 ... 51:
4428 		return PHY_TSSI_EXTRA_GROUP(2);
4429 	case 52 ... 64:
4430 		return 3;
4431 	case 100 ... 112:
4432 		return 4;
4433 	case 113 ... 115:
4434 		return PHY_TSSI_EXTRA_GROUP(4);
4435 	case 116 ... 128:
4436 		return 5;
4437 	case 132 ... 144:
4438 		return 6;
4439 	case 149 ... 177:
4440 		return 7;
4441 	}
4442 
4443 	return 0;
4444 }
4445 
phy_tssi_get_6g_trim_group(u8 ch)4446 static u32 phy_tssi_get_6g_trim_group(u8 ch)
4447 {
4448 	switch (ch) {
4449 	case 1 ... 13:
4450 		return 0;
4451 	case 14 ... 16:
4452 		return PHY_TSSI_EXTRA_GROUP(0);
4453 	case 17 ... 29:
4454 		return 1;
4455 	case 33 ... 45:
4456 		return 2;
4457 	case 46 ... 48:
4458 		return PHY_TSSI_EXTRA_GROUP(2);
4459 	case 49 ... 61:
4460 		return 3;
4461 	case 65 ... 77:
4462 		return 4;
4463 	case 78 ... 80:
4464 		return PHY_TSSI_EXTRA_GROUP(4);
4465 	case 81 ... 93:
4466 		return 5;
4467 	case 97 ... 109:
4468 		return 6;
4469 	case 110 ... 112:
4470 		return PHY_TSSI_EXTRA_GROUP(6);
4471 	case 113 ... 125:
4472 		return 7;
4473 	case 129 ... 141:
4474 		return 8;
4475 	case 142 ... 144:
4476 		return PHY_TSSI_EXTRA_GROUP(8);
4477 	case 145 ... 157:
4478 		return 9;
4479 	case 161 ... 173:
4480 		return 10;
4481 	case 174 ... 176:
4482 		return PHY_TSSI_EXTRA_GROUP(10);
4483 	case 177 ... 189:
4484 		return 11;
4485 	case 193 ... 205:
4486 		return 12;
4487 	case 206 ... 208:
4488 		return PHY_TSSI_EXTRA_GROUP(12);
4489 	case 209 ... 221:
4490 		return 13;
4491 	case 225 ... 237:
4492 		return 14;
4493 	case 238 ... 240:
4494 		return PHY_TSSI_EXTRA_GROUP(14);
4495 	case 241 ... 253:
4496 		return 15;
4497 	}
4498 
4499 	return 0;
4500 }
4501 
phy_tssi_get_ofdm_de(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,const struct rtw89_chan * chan,enum rtw89_rf_path path)4502 static s8 phy_tssi_get_ofdm_de(struct rtw89_dev *rtwdev,
4503 			       enum rtw89_phy_idx phy,
4504 			       const struct rtw89_chan *chan,
4505 			       enum rtw89_rf_path path)
4506 {
4507 	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
4508 	enum rtw89_band band = chan->band_type;
4509 	u8 ch = chan->channel;
4510 	u32 gidx_1st;
4511 	u32 gidx_2nd;
4512 	s8 de_1st;
4513 	s8 de_2nd;
4514 	u32 gidx;
4515 	s8 val;
4516 
4517 	if (band == RTW89_BAND_6G)
4518 		goto calc_6g;
4519 
4520 	gidx = phy_tssi_get_ofdm_group(ch);
4521 
4522 	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
4523 		    "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n",
4524 		    path, gidx);
4525 
4526 	if (PHY_IS_TSSI_EXTRA_GROUP(gidx)) {
4527 		gidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(gidx);
4528 		gidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(gidx);
4529 		de_1st = tssi_info->tssi_mcs[path][gidx_1st];
4530 		de_2nd = tssi_info->tssi_mcs[path][gidx_2nd];
4531 		val = (de_1st + de_2nd) / 2;
4532 
4533 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
4534 			    "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
4535 			    path, val, de_1st, de_2nd);
4536 	} else {
4537 		val = tssi_info->tssi_mcs[path][gidx];
4538 
4539 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
4540 			    "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
4541 	}
4542 
4543 	return val;
4544 
4545 calc_6g:
4546 	gidx = phy_tssi_get_6g_ofdm_group(ch);
4547 
4548 	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
4549 		    "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n",
4550 		    path, gidx);
4551 
4552 	if (PHY_IS_TSSI_EXTRA_GROUP(gidx)) {
4553 		gidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(gidx);
4554 		gidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(gidx);
4555 		de_1st = tssi_info->tssi_6g_mcs[path][gidx_1st];
4556 		de_2nd = tssi_info->tssi_6g_mcs[path][gidx_2nd];
4557 		val = (de_1st + de_2nd) / 2;
4558 
4559 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
4560 			    "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
4561 			    path, val, de_1st, de_2nd);
4562 	} else {
4563 		val = tssi_info->tssi_6g_mcs[path][gidx];
4564 
4565 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
4566 			    "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
4567 	}
4568 
4569 	return val;
4570 }
4571 
phy_tssi_get_ofdm_trim_de(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy,const struct rtw89_chan * chan,enum rtw89_rf_path path)4572 static s8 phy_tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev,
4573 				    enum rtw89_phy_idx phy,
4574 				    const struct rtw89_chan *chan,
4575 				    enum rtw89_rf_path path)
4576 {
4577 	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
4578 	enum rtw89_band band = chan->band_type;
4579 	u8 ch = chan->channel;
4580 	u32 tgidx_1st;
4581 	u32 tgidx_2nd;
4582 	s8 tde_1st;
4583 	s8 tde_2nd;
4584 	u32 tgidx;
4585 	s8 val;
4586 
4587 	if (band == RTW89_BAND_6G)
4588 		goto calc_6g;
4589 
4590 	tgidx = phy_tssi_get_trim_group(ch);
4591 
4592 	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
4593 		    "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
4594 		    path, tgidx);
4595 
4596 	if (PHY_IS_TSSI_EXTRA_GROUP(tgidx)) {
4597 		tgidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
4598 		tgidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
4599 		tde_1st = tssi_info->tssi_trim[path][tgidx_1st];
4600 		tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd];
4601 		val = (tde_1st + tde_2nd) / 2;
4602 
4603 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
4604 			    "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
4605 			    path, val, tde_1st, tde_2nd);
4606 	} else {
4607 		val = tssi_info->tssi_trim[path][tgidx];
4608 
4609 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
4610 			    "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
4611 			    path, val);
4612 	}
4613 
4614 	return val;
4615 
4616 calc_6g:
4617 	tgidx = phy_tssi_get_6g_trim_group(ch);
4618 
4619 	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
4620 		    "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
4621 		    path, tgidx);
4622 
4623 	if (PHY_IS_TSSI_EXTRA_GROUP(tgidx)) {
4624 		tgidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
4625 		tgidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
4626 		tde_1st = tssi_info->tssi_trim_6g[path][tgidx_1st];
4627 		tde_2nd = tssi_info->tssi_trim_6g[path][tgidx_2nd];
4628 		val = (tde_1st + tde_2nd) / 2;
4629 
4630 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
4631 			    "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
4632 			    path, val, tde_1st, tde_2nd);
4633 	} else {
4634 		val = tssi_info->tssi_trim_6g[path][tgidx];
4635 
4636 		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
4637 			    "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
4638 			    path, val);
4639 	}
4640 
4641 	return val;
4642 }
4643 
/* Fill the per-path TSSI DE (power offset) fields of an RF TSSI H2C
 * firmware command from efuse calibration data plus the per-channel
 * trim adjustment, for RF paths A and B.
 *
 * For RTL8922A the combined value goes into the per-bandwidth DE fields
 * and the legacy single DE field is zeroed; other chips use only the
 * legacy field.  The raw efuse values are also echoed back to the
 * firmware in the curr_tssi_efuse_* fields.
 */
void rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(struct rtw89_dev *rtwdev,
					       enum rtw89_phy_idx phy,
					       const struct rtw89_chan *chan,
					       struct rtw89_h2c_rf_tssi *h2c)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u8 ch = chan->channel;
	s8 trim_de;
	s8 ofdm_de;
	s8 cck_de;
	u8 gidx;
	s8 val;
	int i;

	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n",
		    phy, ch);

	for (i = RF_PATH_A; i <= RF_PATH_B; i++) {
		/* per-channel trim offset applied on top of both CCK and OFDM DE */
		trim_de = phy_tssi_get_ofdm_trim_de(rtwdev, phy, chan, i);
		h2c->curr_tssi_trim_de[i] = trim_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d trim_de=0x%x\n", i, trim_de);

		gidx = phy_tssi_get_cck_group(ch);
		cck_de = tssi_info->tssi_cck[i][gidx];
		/* truncate the trimmed sum to the 8-bit field the fw expects */
		val = u32_get_bits(cck_de + trim_de, 0xff);

		if (chip->chip_id == RTL8922A) {
			h2c->curr_tssi_cck_de[i] = 0x0;
			h2c->curr_tssi_cck_de_20m[i] = val;
			h2c->curr_tssi_cck_de_40m[i] = val;
		} else {
			h2c->curr_tssi_cck_de[i] = val;
		}

		h2c->curr_tssi_efuse_cck_de[i] = cck_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d cck_de=0x%x\n", i, cck_de);

		ofdm_de = phy_tssi_get_ofdm_de(rtwdev, phy, chan, i);
		val = u32_get_bits(ofdm_de + trim_de, 0xff);

		if (chip->chip_id == RTL8922A) {
			/* 8922A: same DE replicated for every bandwidth */
			h2c->curr_tssi_ofdm_de[i] = 0x0;
			h2c->curr_tssi_ofdm_de_20m[i] = val;
			h2c->curr_tssi_ofdm_de_40m[i] = val;
			h2c->curr_tssi_ofdm_de_80m[i] = val;
			h2c->curr_tssi_ofdm_de_160m[i] = val;
			h2c->curr_tssi_ofdm_de_320m[i] = val;
		} else {
			h2c->curr_tssi_ofdm_de[i] = val;
		}

		h2c->curr_tssi_efuse_ofdm_de[i] = ofdm_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d ofdm_de=0x%x\n", i, ofdm_de);
	}
}
4706 
/* Build the thermal-compensation table (ftable) of an RF TSSI H2C command.
 *
 * Picks the per-band/per-path power-tracking delta curves from the
 * firmware element, expands them into a 128-entry signed offset table
 * centered on the efuse-programmed thermal value (indices 0..63 follow
 * the "up" curve, 127..64 the negated "down" curve), and packs the table
 * into the command with a 4-byte swizzle.
 */
void rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(struct rtw89_dev *rtwdev,
					      enum rtw89_phy_idx phy,
					      const struct rtw89_chan *chan,
					      struct rtw89_h2c_rf_tssi *h2c)
{
	struct rtw89_fw_txpwr_track_cfg *trk = rtwdev->fw.elm_info.txpwr_trk;
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const s8 *thm_up[RF_PATH_B + 1] = {};
	const s8 *thm_down[RF_PATH_B + 1] = {};
	u8 subband = chan->subband_type;
	s8 thm_ofst[128] = {};
	int multiplier;
	u8 thermal;
	u8 path;
	u8 i, j;

	/* select the up/down delta-swing curves for this subband and path */
	switch (subband) {
	default:
	case RTW89_CH_2G:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_P][0];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_N][0];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_P][0];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_N][0];
		break;
	case RTW89_CH_5G_BAND_1:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][0];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][0];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][0];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][0];
		break;
	case RTW89_CH_5G_BAND_3:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][1];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][1];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][1];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][1];
		break;
	case RTW89_CH_5G_BAND_4:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][2];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][2];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][2];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][2];
		break;
	case RTW89_CH_6G_BAND_IDX0:
	case RTW89_CH_6G_BAND_IDX1:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][0];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][0];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][0];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][0];
		break;
	case RTW89_CH_6G_BAND_IDX2:
	case RTW89_CH_6G_BAND_IDX3:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][1];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][1];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][1];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][1];
		break;
	case RTW89_CH_6G_BAND_IDX4:
	case RTW89_CH_6G_BAND_IDX5:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][2];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][2];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][2];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][2];
		break;
	case RTW89_CH_6G_BAND_IDX6:
	case RTW89_CH_6G_BAND_IDX7:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][3];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][3];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][3];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][3];
		break;
	}

	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
		    "[TSSI] tmeter tbl on subband: %u\n", subband);

	/* 8922A firmware expects the offsets with the opposite sign
	 * convention from the AX-generation chips
	 */
	if (chip->chip_id == RTL8922A)
		multiplier = 1;
	else
		multiplier = -1;

	for (path = RF_PATH_A; path <= RF_PATH_B; path++) {
		thermal = tssi_info->thermal[path];
		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "path: %u, pg thermal: 0x%x\n", path, thermal);

		/* 0xff means the efuse thermal value was never programmed:
		 * use a fixed default and an all-zero compensation table
		 */
		if (thermal == 0xff) {
			h2c->pg_thermal[path] = 0x38;
			memset(h2c->ftable[path], 0, sizeof(h2c->ftable[path]));
			continue;
		}

		h2c->pg_thermal[path] = thermal;

		/* indices 0..63: "up" curve, clamped at its last entry */
		i = 0;
		for (j = 0; j < 64; j++) {
			thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
				      thm_up[path][i++] :
				      thm_up[path][DELTA_SWINGIDX_SIZE - 1];
			thm_ofst[j] *= multiplier;
		}

		/* indices 127..64: negated "down" curve, mirrored backwards;
		 * i starts at 1 so index 127 uses the curve's second entry
		 */
		i = 1;
		for (j = 127; j >= 64; j--) {
			thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
				      -thm_down[path][i++] :
				      -thm_down[path][DELTA_SWINGIDX_SIZE - 1];
			thm_ofst[j] *= multiplier;
		}

		/* pack with byte order reversed within each 4-byte word */
		for (i = 0; i < 128; i += 4) {
			h2c->ftable[path][i + 0] = thm_ofst[i + 3];
			h2c->ftable[path][i + 1] = thm_ofst[i + 2];
			h2c->ftable[path][i + 2] = thm_ofst[i + 1];
			h2c->ftable[path][i + 3] = thm_ofst[i + 0];

			rtw89_debug(rtwdev, RTW89_DBG_TSSI,
				    "thm ofst [%x]: %02x %02x %02x %02x\n",
				    i, thm_ofst[i], thm_ofst[i + 1],
				    thm_ofst[i + 2], thm_ofst[i + 3]);
		}
	}
}
4830 
rtw89_phy_cfo_get_xcap_reg(struct rtw89_dev * rtwdev,bool sc_xo)4831 static u8 rtw89_phy_cfo_get_xcap_reg(struct rtw89_dev *rtwdev, bool sc_xo)
4832 {
4833 	const struct rtw89_xtal_info *xtal = rtwdev->chip->xtal_info;
4834 	u32 reg_mask;
4835 
4836 	if (sc_xo)
4837 		reg_mask = xtal->sc_xo_mask;
4838 	else
4839 		reg_mask = xtal->sc_xi_mask;
4840 
4841 	return (u8)rtw89_read32_mask(rtwdev, xtal->xcap_reg, reg_mask);
4842 }
4843 
/* Write a crystal-cap value into the XO or XI field of the chip's
 * xcap register.
 */
static void rtw89_phy_cfo_set_xcap_reg(struct rtw89_dev *rtwdev, bool sc_xo,
				       u8 val)
{
	const struct rtw89_xtal_info *xtal = rtwdev->chip->xtal_info;
	u32 mask = sc_xo ? xtal->sc_xo_mask : xtal->sc_xi_mask;

	rtw89_write32_mask(rtwdev, xtal->xcap_reg, mask, val);
}
4857 
/* Program a new crystal-cap value into both the XO and XI fields and
 * refresh the cached tracking state.  No-op when the value is already
 * current unless @force is set.
 *
 * 8852A/8851B program the cap through direct xcap register access;
 * other chips go through the XTAL SI interface.  The value read back
 * from the XI field becomes the new cached crystal_cap.
 */
static void rtw89_phy_cfo_set_crystal_cap(struct rtw89_dev *rtwdev,
					  u8 crystal_cap, bool force)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u8 sc_xi_val, sc_xo_val;

	if (!force && cfo->crystal_cap == crystal_cap)
		return;
	if (chip->chip_id == RTL8852A || chip->chip_id == RTL8851B) {
		/* direct register path: write both fields, then read back */
		rtw89_phy_cfo_set_xcap_reg(rtwdev, true, crystal_cap);
		rtw89_phy_cfo_set_xcap_reg(rtwdev, false, crystal_cap);
		sc_xo_val = rtw89_phy_cfo_get_xcap_reg(rtwdev, true);
		sc_xi_val = rtw89_phy_cfo_get_xcap_reg(rtwdev, false);
	} else {
		/* XTAL SI path used by the other chip generations */
		rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XO,
					crystal_cap, XTAL_SC_XO_MASK);
		rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XI,
					crystal_cap, XTAL_SC_XI_MASK);
		rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XO, &sc_xo_val);
		rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XI, &sc_xi_val);
	}
	/* cache what the hardware actually took, not what was requested */
	cfo->crystal_cap = sc_xi_val;
	cfo->x_cap_ofst = (s8)((int)cfo->crystal_cap - cfo->def_x_cap);

	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Set sc_xi=0x%x\n", sc_xi_val);
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Set sc_xo=0x%x\n", sc_xo_val);
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Get xcap_ofst=%d\n",
		    cfo->x_cap_ofst);
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Set xcap OK\n");
}
4889 
rtw89_phy_cfo_reset(struct rtw89_dev * rtwdev)4890 static void rtw89_phy_cfo_reset(struct rtw89_dev *rtwdev)
4891 {
4892 	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
4893 	u8 cap;
4894 
4895 	cfo->def_x_cap = cfo->crystal_cap_default & B_AX_XTAL_SC_MASK;
4896 	cfo->is_adjust = false;
4897 	if (cfo->crystal_cap == cfo->def_x_cap)
4898 		return;
4899 	cap = cfo->crystal_cap;
4900 	cap += (cap > cfo->def_x_cap ? -1 : 1);
4901 	rtw89_phy_cfo_set_crystal_cap(rtwdev, cap, false);
4902 	rtw89_debug(rtwdev, RTW89_DBG_CFO,
4903 		    "(0x%x) approach to dflt_val=(0x%x)\n", cfo->crystal_cap,
4904 		    cfo->def_x_cap);
4905 }
4906 
/* Apply digital CFO compensation: fold the current average CFO into the
 * running compensation register value (in 312.5 kHz-derived steps) and
 * write it back.  Skipped when the chip has no DCFO register, when no
 * station is associated, or when the measured CFO is zero.
 */
static void rtw89_dcfo_comp(struct rtw89_dev *rtwdev, s32 curr_cfo)
{
	const struct rtw89_reg_def *dcfo_comp = rtwdev->chip->dcfo_comp;
	bool is_linked = rtwdev->total_sta_assoc > 0;
	s32 comp_val;
	s32 avg_312;

	if (!dcfo_comp)
		return;

	if (!is_linked) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "DCFO: is_linked=%d\n",
			    is_linked);
		return;
	}

	rtw89_debug(rtwdev, RTW89_DBG_CFO, "DCFO: curr_cfo=%d\n", curr_cfo);
	if (curr_cfo == 0)
		return;

	comp_val = rtw89_phy_read32_mask(rtwdev, R_DCFO, B_DCFO);
	/* accumulate in the direction of the measured CFO */
	if (curr_cfo > 0)
		avg_312 = curr_cfo / 625 + comp_val;
	else
		avg_312 = curr_cfo / 625 - comp_val;
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "avg_cfo_312=%d step\n", avg_312);

	/* 8852A CBV hardware takes the compensation with inverted sign */
	if (rtwdev->chip->chip_id == RTL8852A && rtwdev->hal.cv == CHIP_CBV)
		avg_312 = -avg_312;
	rtw89_phy_set_phy_regs(rtwdev, dcfo_comp->addr, dcfo_comp->mask,
			       avg_312);
}
4935 
rtw89_dcfo_comp_init(struct rtw89_dev * rtwdev)4936 static void rtw89_dcfo_comp_init(struct rtw89_dev *rtwdev)
4937 {
4938 	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
4939 	const struct rtw89_chip_info *chip = rtwdev->chip;
4940 	const struct rtw89_cfo_regs *cfo = phy->cfo;
4941 
4942 	rtw89_phy_set_phy_regs(rtwdev, cfo->comp_seg0, cfo->valid_0_mask, 1);
4943 	rtw89_phy_set_phy_regs(rtwdev, cfo->comp, cfo->weighting_mask, 8);
4944 
4945 	if (chip->chip_gen == RTW89_CHIP_AX) {
4946 		if (chip->cfo_hw_comp) {
4947 			rtw89_write32_mask(rtwdev, R_AX_PWR_UL_CTRL2,
4948 					   B_AX_PWR_UL_CFO_MASK, 0x6);
4949 		} else {
4950 			rtw89_phy_set_phy_regs(rtwdev, R_DCFO, B_DCFO, 1);
4951 			rtw89_write32_clr(rtwdev, R_AX_PWR_UL_CTRL2,
4952 					  B_AX_PWR_UL_CFO_MASK);
4953 		}
4954 	}
4955 }
4956 
rtw89_phy_cfo_init(struct rtw89_dev * rtwdev)4957 static void rtw89_phy_cfo_init(struct rtw89_dev *rtwdev)
4958 {
4959 	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
4960 	struct rtw89_efuse *efuse = &rtwdev->efuse;
4961 
4962 	cfo->crystal_cap_default = efuse->xtal_cap & B_AX_XTAL_SC_MASK;
4963 	cfo->crystal_cap = cfo->crystal_cap_default;
4964 	cfo->def_x_cap = cfo->crystal_cap;
4965 	cfo->x_cap_ub = min_t(int, cfo->def_x_cap + CFO_BOUND, 0x7f);
4966 	cfo->x_cap_lb = max_t(int, cfo->def_x_cap - CFO_BOUND, 0x1);
4967 	cfo->is_adjust = false;
4968 	cfo->divergence_lock_en = false;
4969 	cfo->x_cap_ofst = 0;
4970 	cfo->lock_cnt = 0;
4971 	cfo->rtw89_multi_cfo_mode = RTW89_TP_BASED_AVG_MODE;
4972 	cfo->apply_compensation = false;
4973 	cfo->residual_cfo_acc = 0;
4974 	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Default xcap=%0x\n",
4975 		    cfo->crystal_cap_default);
4976 	rtw89_phy_cfo_set_crystal_cap(rtwdev, cfo->crystal_cap_default, true);
4977 	rtw89_dcfo_comp_init(rtwdev);
4978 	cfo->cfo_timer_ms = 2000;
4979 	cfo->cfo_trig_by_timer_en = false;
4980 	cfo->phy_cfo_trk_cnt = 0;
4981 	cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL;
4982 	cfo->cfo_ul_ofdma_acc_mode = RTW89_CFO_UL_OFDMA_ACC_ENABLE;
4983 }
4984 
/* Adjust the crystal cap toward compensating the measured CFO.
 *
 * Uses hysteresis: tracking starts only once |CFO| exceeds
 * CFO_TRK_ENABLE_TH and stops again once it drops to CFO_TRK_STOP_TH.
 * The step size grows with |CFO|.  (The TH_4/TH_3 and TH_2/TH_1
 * branches currently apply the same delta; kept as-is.)
 */
static void rtw89_phy_cfo_crystal_cap_adjust(struct rtw89_dev *rtwdev,
					     s32 curr_cfo)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	int crystal_cap = cfo->crystal_cap;
	s32 cfo_abs = abs(curr_cfo);
	int step;

	if (curr_cfo == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "curr_cfo=0\n");
		return;
	}

	/* hysteresis on the adjust-enable flag */
	if (cfo->is_adjust) {
		if (cfo_abs <= CFO_TRK_STOP_TH)
			cfo->is_adjust = false;
	} else {
		if (cfo_abs > CFO_TRK_ENABLE_TH)
			cfo->is_adjust = true;
	}

	if (!cfo->is_adjust) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Stop CFO tracking\n");
		return;
	}

	if (cfo_abs > CFO_TRK_STOP_TH_4)
		step = 3;
	else if (cfo_abs > CFO_TRK_STOP_TH_3)
		step = 3;
	else if (cfo_abs > CFO_TRK_STOP_TH_2)
		step = 1;
	else if (cfo_abs > CFO_TRK_STOP_TH_1)
		step = 1;
	else
		return;

	crystal_cap += curr_cfo > 0 ? step : -step;
	crystal_cap = clamp(crystal_cap, 0, 127);
	rtw89_phy_cfo_set_crystal_cap(rtwdev, (u8)crystal_cap, false);
	rtw89_debug(rtwdev, RTW89_DBG_CFO,
		    "X_cap{Curr,Default}={0x%x,0x%x}\n",
		    cfo->crystal_cap, cfo->def_x_cap);
}
5026 
/* Compute the average CFO (kHz) for the single-associated-STA case.
 *
 * Sums the accumulated per-macid CFO tails and packet counts and returns
 * their quotient; also refreshes dcfo_avg (scaled by dcfo_comp_sft) and
 * the per-entry pre_cfo_avg snapshot.  Returns 0 unless exactly one
 * station is associated.
 */
static s32 rtw89_phy_average_cfo_calc(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	s32 cfo_khz_all = 0;
	s32 cfo_cnt_all = 0;
	s32 cfo_all_avg = 0;
	u8 i;

	if (rtwdev->total_sta_assoc != 1)
		return 0;
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "one_entry_only\n");
	for (i = 0; i < CFO_TRACK_MAX_USER; i++) {
		if (cfo->cfo_cnt[i] == 0)
			continue;
		cfo_khz_all += cfo->cfo_tail[i];
		cfo_cnt_all += cfo->cfo_cnt[i];
		cfo_all_avg = phy_div(cfo_khz_all, cfo_cnt_all);
		cfo->pre_cfo_avg[i] = cfo->cfo_avg[i];
		cfo->dcfo_avg = phy_div(cfo_khz_all << chip->dcfo_comp_sft,
					cfo_cnt_all);
	}
	/* NOTE(review): i equals CFO_TRACK_MAX_USER here (loop ran to
	 * completion), so this prints the table size rather than the
	 * active macid — looks like a stale debug message; confirm intent.
	 */
	rtw89_debug(rtwdev, RTW89_DBG_CFO,
		    "CFO track for macid = %d\n", i);
	rtw89_debug(rtwdev, RTW89_DBG_CFO,
		    "Total cfo=%dK, pkt_cnt=%d, avg_cfo=%dK\n",
		    cfo_khz_all, cfo_cnt_all, cfo_all_avg);
	return cfo_all_avg;
}
5056 
/* Derive a single target CFO when multiple stations are associated.
 *
 * Three averaging strategies, selected by rtw89_multi_cfo_mode:
 *  - PKT_BASED: one average weighted by per-macid packet counts.
 *  - ENTRY_BASED: per-entry averages, then averaged across entries.
 *  - TP_BASED: per-entry averages bounded by a tolerance window; the
 *    result is clamped into the window intersection, or falls back to a
 *    plain entry average when the windows do not intersect.
 */
static s32 rtw89_phy_multi_sta_cfo_calc(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	struct rtw89_traffic_stats *stats = &rtwdev->stats;
	s32 target_cfo = 0;
	s32 cfo_khz_all = 0;
	s32 cfo_khz_all_tp_wgt = 0;
	s32 cfo_avg = 0;
	s32 max_cfo_lb = BIT(31);
	s32 min_cfo_ub = GENMASK(30, 0);
	u16 cfo_cnt_all = 0;
	u8 active_entry_cnt = 0;
	u8 sta_cnt = 0;
	u32 tp_all = 0;
	u8 i;
	u8 cfo_tol = 0;

	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Multi entry cfo_trk\n");
	if (cfo->rtw89_multi_cfo_mode == RTW89_PKT_BASED_AVG_MODE) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Pkt based avg mode\n");
		for (i = 0; i < CFO_TRACK_MAX_USER; i++) {
			if (cfo->cfo_cnt[i] == 0)
				continue;
			cfo_khz_all += cfo->cfo_tail[i];
			cfo_cnt_all += cfo->cfo_cnt[i];
			cfo_avg = phy_div(cfo_khz_all, (s32)cfo_cnt_all);
			rtw89_debug(rtwdev, RTW89_DBG_CFO,
				    "Msta cfo=%d, pkt_cnt=%d, avg_cfo=%d\n",
				    cfo_khz_all, cfo_cnt_all, cfo_avg);
			target_cfo = cfo_avg;
		}
	} else if (cfo->rtw89_multi_cfo_mode == RTW89_ENTRY_BASED_AVG_MODE) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Entry based avg mode\n");
		for (i = 0; i < CFO_TRACK_MAX_USER; i++) {
			if (cfo->cfo_cnt[i] == 0)
				continue;
			cfo->cfo_avg[i] = phy_div(cfo->cfo_tail[i],
						  (s32)cfo->cfo_cnt[i]);
			cfo_khz_all += cfo->cfo_avg[i];
			rtw89_debug(rtwdev, RTW89_DBG_CFO,
				    "Macid=%d, cfo_avg=%d\n", i,
				    cfo->cfo_avg[i]);
		}
		sta_cnt = rtwdev->total_sta_assoc;
		cfo_avg = phy_div(cfo_khz_all, (s32)sta_cnt);
		rtw89_debug(rtwdev, RTW89_DBG_CFO,
			    "Msta cfo_acc=%d, ent_cnt=%d, avg_cfo=%d\n",
			    cfo_khz_all, sta_cnt, cfo_avg);
		target_cfo = cfo_avg;
	} else if (cfo->rtw89_multi_cfo_mode == RTW89_TP_BASED_AVG_MODE) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "TP based avg mode\n");
		cfo_tol = cfo->sta_cfo_tolerance;
		for (i = 0; i < CFO_TRACK_MAX_USER; i++) {
			sta_cnt++;
			if (cfo->cfo_cnt[i] != 0) {
				cfo->cfo_avg[i] = phy_div(cfo->cfo_tail[i],
							  (s32)cfo->cfo_cnt[i]);
				active_entry_cnt++;
			} else {
				/* no fresh samples: reuse the previous average */
				cfo->cfo_avg[i] = cfo->pre_cfo_avg[i];
			}
			max_cfo_lb = max(cfo->cfo_avg[i] - cfo_tol, max_cfo_lb);
			min_cfo_ub = min(cfo->cfo_avg[i] + cfo_tol, min_cfo_ub);
			cfo_khz_all += cfo->cfo_avg[i];
			/* need tp for each entry */
			rtw89_debug(rtwdev, RTW89_DBG_CFO,
				    "[%d] cfo_avg=%d, tp=tbd\n",
				    i, cfo->cfo_avg[i]);
			if (sta_cnt >= rtwdev->total_sta_assoc)
				break;
		}
		/* NOTE(review): cfo_khz_all_tp_wgt is never accumulated, so
		 * this cfo_avg is always 0 and the clamp below effectively
		 * clamps 0 into the window; per-entry throughput weighting
		 * is unimplemented (see "need tp" comments) — confirm.
		 */
		tp_all = stats->rx_throughput; /* need tp for each entry */
		cfo_avg =  phy_div(cfo_khz_all_tp_wgt, (s32)tp_all);

		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Assoc sta cnt=%d\n",
			    sta_cnt);
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Active sta cnt=%d\n",
			    active_entry_cnt);
		rtw89_debug(rtwdev, RTW89_DBG_CFO,
			    "Msta cfo with tp_wgt=%d, avg_cfo=%d\n",
			    cfo_khz_all_tp_wgt, cfo_avg);
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "cfo_lb=%d,cfo_ub=%d\n",
			    max_cfo_lb, min_cfo_ub);
		if (max_cfo_lb <= min_cfo_ub) {
			rtw89_debug(rtwdev, RTW89_DBG_CFO,
				    "cfo win_size=%d\n",
				    min_cfo_ub - max_cfo_lb);
			target_cfo = clamp(cfo_avg, max_cfo_lb, min_cfo_ub);
		} else {
			rtw89_debug(rtwdev, RTW89_DBG_CFO,
				    "No intersection of cfo tolerance windows\n");
			target_cfo = phy_div(cfo_khz_all, (s32)sta_cnt);
		}
		for (i = 0; i < CFO_TRACK_MAX_USER; i++)
			cfo->pre_cfo_avg[i] = cfo->cfo_avg[i];
	}
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Target cfo=%d\n", target_cfo);
	return target_cfo;
}
5156 
rtw89_phy_cfo_statistics_reset(struct rtw89_dev * rtwdev)5157 static void rtw89_phy_cfo_statistics_reset(struct rtw89_dev *rtwdev)
5158 {
5159 	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
5160 
5161 	memset(&cfo->cfo_tail, 0, sizeof(cfo->cfo_tail));
5162 	memset(&cfo->cfo_cnt, 0, sizeof(cfo->cfo_cnt));
5163 	cfo->packet_count = 0;
5164 	cfo->packet_count_pre = 0;
5165 	cfo->cfo_avg_pre = 0;
5166 }
5167 
/* Main CFO tracking step, run periodically.
 *
 * Resets tracking when no station is associated (or in 1+1 MLO);
 * otherwise computes a new average CFO (single- or multi-STA path),
 * handles the divergence lock (crystal cap pinned at its bounds), and
 * applies crystal-cap adjustment plus digital compensation before
 * clearing the sample statistics.
 */
static void rtw89_phy_cfo_dm(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	s32 new_cfo = 0;
	bool x_cap_update = false;
	u8 pre_x_cap = cfo->crystal_cap;
	u8 dcfo_comp_sft = rtwdev->chip->dcfo_comp_sft;

	cfo->dcfo_avg = 0;
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "CFO:total_sta_assoc=%d\n",
		    rtwdev->total_sta_assoc);
	if (rtwdev->total_sta_assoc == 0 || rtw89_is_mlo_1_1(rtwdev)) {
		rtw89_phy_cfo_reset(rtwdev);
		return;
	}
	/* nothing received this period: no data to act on */
	if (cfo->packet_count == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Pkt cnt = 0\n");
		return;
	}
	if (cfo->packet_count == cfo->packet_count_pre) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Pkt cnt doesn't change\n");
		return;
	}
	if (rtwdev->total_sta_assoc == 1)
		new_cfo = rtw89_phy_average_cfo_calc(rtwdev);
	else
		new_cfo = rtw89_phy_multi_sta_cfo_calc(rtwdev);
	/* divergence lock: hold off adjustments for CFO_PERIOD_CNT periods
	 * after the crystal cap hit a bound, resetting each period
	 */
	if (cfo->divergence_lock_en) {
		cfo->lock_cnt++;
		if (cfo->lock_cnt > CFO_PERIOD_CNT) {
			cfo->divergence_lock_en = false;
			cfo->lock_cnt = 0;
		} else {
			rtw89_phy_cfo_reset(rtwdev);
		}
		return;
	}
	if (cfo->crystal_cap >= cfo->x_cap_ub ||
	    cfo->crystal_cap <= cfo->x_cap_lb) {
		cfo->divergence_lock_en = true;
		rtw89_phy_cfo_reset(rtwdev);
		return;
	}

	rtw89_phy_cfo_crystal_cap_adjust(rtwdev, new_cfo);
	cfo->cfo_avg_pre = new_cfo;
	cfo->dcfo_avg_pre = cfo->dcfo_avg;
	x_cap_update =  cfo->crystal_cap != pre_x_cap;
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Xcap_up=%d\n", x_cap_update);
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Xcap: D:%x C:%x->%x, ofst=%d\n",
		    cfo->def_x_cap, pre_x_cap, cfo->crystal_cap,
		    cfo->x_cap_ofst);
	if (x_cap_update) {
		/* the cap step already compensates part of the CFO; bias the
		 * digital compensation toward zero by one fine-tune step
		 */
		if (cfo->dcfo_avg > 0)
			cfo->dcfo_avg -= CFO_SW_COMP_FINE_TUNE << dcfo_comp_sft;
		else
			cfo->dcfo_avg += CFO_SW_COMP_FINE_TUNE << dcfo_comp_sft;
	}
	rtw89_dcfo_comp(rtwdev, cfo->dcfo_avg);
	rtw89_phy_cfo_statistics_reset(rtwdev);
}
5229 
rtw89_phy_cfo_track_work(struct wiphy * wiphy,struct wiphy_work * work)5230 void rtw89_phy_cfo_track_work(struct wiphy *wiphy, struct wiphy_work *work)
5231 {
5232 	struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
5233 						cfo_track_work.work);
5234 	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
5235 
5236 	lockdep_assert_wiphy(wiphy);
5237 
5238 	if (!cfo->cfo_trig_by_timer_en)
5239 		return;
5240 	rtw89_leave_ps_mode(rtwdev);
5241 	rtw89_phy_cfo_dm(rtwdev);
5242 	wiphy_delayed_work_queue(wiphy, &rtwdev->cfo_track_work,
5243 				 msecs_to_jiffies(cfo->cfo_timer_ms));
5244 }
5245 
rtw89_phy_cfo_start_work(struct rtw89_dev * rtwdev)5246 static void rtw89_phy_cfo_start_work(struct rtw89_dev *rtwdev)
5247 {
5248 	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
5249 
5250 	wiphy_delayed_work_queue(rtwdev->hw->wiphy, &rtwdev->cfo_track_work,
5251 				 msecs_to_jiffies(cfo->cfo_timer_ms));
5252 }
5253 
/* Watchdog entry point for CFO tracking.
 *
 * Drives a three-state machine (NORMAL -> ENHANCE -> HOLD) based on TX
 * throughput and UL-OFDMA trigger-frame activity: high throughput
 * switches tracking to timer-triggered mode; sustained UL-OFDMA activity
 * parks it in HOLD; low throughput returns to NORMAL.  When the timer
 * mode is off, a tracking step runs directly from this watchdog call.
 */
void rtw89_phy_cfo_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	struct rtw89_traffic_stats *stats = &rtwdev->stats;
	bool is_ul_ofdma = false, ofdma_acc_en = false;

	if (stats->rx_tf_periodic > CFO_TF_CNT_TH)
		is_ul_ofdma = true;
	if (cfo->cfo_ul_ofdma_acc_mode == RTW89_CFO_UL_OFDMA_ACC_ENABLE &&
	    is_ul_ofdma)
		ofdma_acc_en = true;

	switch (cfo->phy_cfo_status) {
	case RTW89_PHY_DCFO_STATE_NORMAL:
		/* high TX throughput: switch to faster, timer-driven tracking */
		if (stats->tx_throughput >= CFO_TP_UPPER) {
			cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_ENHANCE;
			cfo->cfo_trig_by_timer_en = true;
			cfo->cfo_timer_ms = CFO_COMP_PERIOD;
			rtw89_phy_cfo_start_work(rtwdev);
		}
		break;
	case RTW89_PHY_DCFO_STATE_ENHANCE:
		if (stats->tx_throughput <= CFO_TP_LOWER)
			cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL;
		else if (ofdma_acc_en &&
			 cfo->phy_cfo_trk_cnt >= CFO_PERIOD_CNT)
			cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_HOLD;
		else
			cfo->phy_cfo_trk_cnt++;

		if (cfo->phy_cfo_status == RTW89_PHY_DCFO_STATE_NORMAL) {
			cfo->phy_cfo_trk_cnt = 0;
			cfo->cfo_trig_by_timer_en = false;
		}
		break;
	case RTW89_PHY_DCFO_STATE_HOLD:
		if (stats->tx_throughput <= CFO_TP_LOWER) {
			cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL;
			cfo->phy_cfo_trk_cnt = 0;
			cfo->cfo_trig_by_timer_en = false;
		} else {
			cfo->phy_cfo_trk_cnt++;
		}
		break;
	default:
		/* unknown state: fall back to NORMAL */
		cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL;
		cfo->phy_cfo_trk_cnt = 0;
		break;
	}
	rtw89_debug(rtwdev, RTW89_DBG_CFO,
		    "[CFO]WatchDog tp=%d,state=%d,timer_en=%d,trk_cnt=%d,thermal=%ld\n",
		    stats->tx_throughput, cfo->phy_cfo_status,
		    cfo->cfo_trig_by_timer_en, cfo->phy_cfo_trk_cnt,
		    ewma_thermal_read(&rtwdev->phystat.avg_thermal[0]));
	if (cfo->cfo_trig_by_timer_en)
		return;
	rtw89_phy_cfo_dm(rtwdev);
}
5312 
/* Accumulate one CFO sample from an RX PPDU into the per-STA totals used
 * by the CFO tracking algorithm.
 */
void rtw89_phy_cfo_parse(struct rtw89_dev *rtwdev, s16 cfo_val,
			 struct rtw89_rx_phy_ppdu *phy_ppdu)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	u8 idx = phy_ppdu->mac_id;

	if (idx >= CFO_TRACK_MAX_USER) {
		rtw89_warn(rtwdev, "mac_id %d is out of range\n", idx);
		return;
	}

	cfo->cfo_tail[idx] += cfo_val;
	cfo->cfo_cnt[idx]++;
	cfo->packet_count++;
}
5328 
rtw89_phy_ul_tb_assoc(struct rtw89_dev * rtwdev,struct rtw89_vif_link * rtwvif_link)5329 void rtw89_phy_ul_tb_assoc(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link)
5330 {
5331 	const struct rtw89_chip_info *chip = rtwdev->chip;
5332 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
5333 						       rtwvif_link->chanctx_idx);
5334 	struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info;
5335 
5336 	if (!chip->ul_tb_waveform_ctrl)
5337 		return;
5338 
5339 	rtwvif_link->def_tri_idx =
5340 		rtw89_phy_read32_mask(rtwdev, R_DCFO_OPT, B_TXSHAPE_TRIANGULAR_CFG);
5341 
5342 	if (chip->chip_id == RTL8852B && rtwdev->hal.cv > CHIP_CBV)
5343 		rtwvif_link->dyn_tb_bedge_en = false;
5344 	else if (chan->band_type >= RTW89_BAND_5G &&
5345 		 chan->band_width >= RTW89_CHANNEL_WIDTH_40)
5346 		rtwvif_link->dyn_tb_bedge_en = true;
5347 	else
5348 		rtwvif_link->dyn_tb_bedge_en = false;
5349 
5350 	rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
5351 		    "[ULTB] def_if_bandedge=%d, def_tri_idx=%d\n",
5352 		    ul_tb_info->def_if_bandedge, rtwvif_link->def_tri_idx);
5353 	rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
5354 		    "[ULTB] dyn_tb_begde_en=%d, dyn_tb_tri_en=%d\n",
5355 		    rtwvif_link->dyn_tb_bedge_en, ul_tb_info->dyn_tb_tri_en);
5356 }
5357 
/* UL-TB (uplink trigger-based) state collected per tracking round across
 * vif links, consumed by rtw89_phy_ul_tb_waveform_ctrl().
 */
struct rtw89_phy_ul_tb_check_data {
	bool valid;		/* a waveform-ctrl capable STA link was seen */
	bool high_tf_client;	/* rx_tf_periodic above UL_TB_TF_CNT_L2H_TH */
	bool low_tf_client;	/* rx_tf_periodic below UL_TB_TF_CNT_H2L_TH */
	bool dyn_tb_bedge_en;	/* band-edge toggling allowed for this link */
	u8 def_tri_idx;		/* default triangular TX-shape index to restore */
};
5365 
/* Register field values applied by rtw89_phy_ofdma_power_diff(); the table
 * there holds one entry per pwr_diff_en state (0 = disabled, 1 = enabled).
 */
struct rtw89_phy_power_diff {
	u32 q_00;		/* B_Q_MATRIX_00_REAL of R_Q_MATRIX_00 */
	u32 q_11;		/* B_Q_MATRIX_11_REAL of R_Q_MATRIX_11 */
	u32 q_matrix_en;	/* B_CUSTOMIZE_Q_MATRIX_EN enable bit */
	u32 ultb_1t_norm_160;	/* B_AX_PWR_UL_TB_1T_NORM_BW160 */
	u32 ultb_2t_norm_160;	/* B_AX_PWR_UL_TB_2T_NORM_BW160 */
	u32 com1_norm_1sts;	/* B_AX_PATH_COM1_NORM_1STS */
	u32 com2_resp_1sts_path;	/* B_AX_PATH_COM2_RESP_1STS_PATH */
};
5375 
/* Apply the OFDMA power-difference register set for the link's current
 * pwr_diff_en state. Writes both BB (Q-matrix) and per-mac_idx MAC power
 * normalization registers.
 */
static void rtw89_phy_ofdma_power_diff(struct rtw89_dev *rtwdev,
				       struct rtw89_vif_link *rtwvif_link)
{
	/* index 0 = power diff disabled, index 1 = enabled */
	static const struct rtw89_phy_power_diff table[2] = {
		{0x0, 0x0, 0x0, 0x0, 0xf4, 0x3, 0x3},
		{0xb50, 0xb50, 0x1, 0xc, 0x0, 0x1, 0x1},
	};
	const struct rtw89_phy_power_diff *param;
	u32 reg;

	if (!rtwdev->chip->ul_tb_pwr_diff)
		return;

	/* NOTE(review): when the requested state equals the previously
	 * applied one, the flag is cleared and no registers are touched -
	 * this makes the update one-shot per state transition; confirm
	 * that resetting pwr_diff_en here is intentional.
	 */
	if (rtwvif_link->pwr_diff_en == rtwvif_link->pre_pwr_diff_en) {
		rtwvif_link->pwr_diff_en = false;
		return;
	}

	rtwvif_link->pre_pwr_diff_en = rtwvif_link->pwr_diff_en;
	param = &table[rtwvif_link->pwr_diff_en];

	/* BB: customized Q-matrix (real parts of 00/11 plus enable bit) */
	rtw89_phy_write32_mask(rtwdev, R_Q_MATRIX_00, B_Q_MATRIX_00_REAL,
			       param->q_00);
	rtw89_phy_write32_mask(rtwdev, R_Q_MATRIX_11, B_Q_MATRIX_11_REAL,
			       param->q_11);
	rtw89_phy_write32_mask(rtwdev, R_CUSTOMIZE_Q_MATRIX,
			       B_CUSTOMIZE_Q_MATRIX_EN, param->q_matrix_en);

	/* MAC: UL TB power normalization and path settings, per mac_idx */
	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_1T, rtwvif_link->mac_idx);
	rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_1T_NORM_BW160,
			   param->ultb_1t_norm_160);

	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_2T, rtwvif_link->mac_idx);
	rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_2T_NORM_BW160,
			   param->ultb_2t_norm_160);

	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PATH_COM1, rtwvif_link->mac_idx);
	rtw89_write32_mask(rtwdev, reg, B_AX_PATH_COM1_NORM_1STS,
			   param->com1_norm_1sts);

	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PATH_COM2, rtwvif_link->mac_idx);
	rtw89_write32_mask(rtwdev, reg, B_AX_PATH_COM2_RESP_1STS_PATH,
			   param->com2_resp_1sts_path);
}
5420 
5421 static
rtw89_phy_ul_tb_ctrl_check(struct rtw89_dev * rtwdev,struct rtw89_vif_link * rtwvif_link,struct rtw89_phy_ul_tb_check_data * ul_tb_data)5422 void rtw89_phy_ul_tb_ctrl_check(struct rtw89_dev *rtwdev,
5423 				struct rtw89_vif_link *rtwvif_link,
5424 				struct rtw89_phy_ul_tb_check_data *ul_tb_data)
5425 {
5426 	struct rtw89_traffic_stats *stats = &rtwdev->stats;
5427 	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
5428 
5429 	if (rtwvif_link->wifi_role != RTW89_WIFI_ROLE_STATION)
5430 		return;
5431 
5432 	if (!vif->cfg.assoc)
5433 		return;
5434 
5435 	if (rtwdev->chip->ul_tb_waveform_ctrl) {
5436 		if (stats->rx_tf_periodic > UL_TB_TF_CNT_L2H_TH)
5437 			ul_tb_data->high_tf_client = true;
5438 		else if (stats->rx_tf_periodic < UL_TB_TF_CNT_H2L_TH)
5439 			ul_tb_data->low_tf_client = true;
5440 
5441 		ul_tb_data->valid = true;
5442 		ul_tb_data->def_tri_idx = rtwvif_link->def_tri_idx;
5443 		ul_tb_data->dyn_tb_bedge_en = rtwvif_link->dyn_tb_bedge_en;
5444 	}
5445 
5446 	rtw89_phy_ofdma_power_diff(rtwdev, rtwvif_link);
5447 }
5448 
/* Adjust band-edge and triangular TX-shape settings based on the current
 * trigger-frame load: heavy UL OFDMA clients turn both off, light clients
 * restore the defaults latched at association/init time.
 */
static void rtw89_phy_ul_tb_waveform_ctrl(struct rtw89_dev *rtwdev,
					  struct rtw89_phy_ul_tb_check_data *ul_tb_data)
{
	struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info;

	if (!rtwdev->chip->ul_tb_waveform_ctrl)
		return;

	/* band-edge filter toggling, only when allowed for this link */
	if (ul_tb_data->dyn_tb_bedge_en) {
		if (ul_tb_data->high_tf_client) {
			rtw89_phy_write32_mask(rtwdev, R_BANDEDGE, B_BANDEDGE_EN, 0);
			rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
				    "[ULTB] Turn off if_bandedge\n");
		} else if (ul_tb_data->low_tf_client) {
			rtw89_phy_write32_mask(rtwdev, R_BANDEDGE, B_BANDEDGE_EN,
					       ul_tb_info->def_if_bandedge);
			rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
				    "[ULTB] Set to default if_bandedge = %d\n",
				    ul_tb_info->def_if_bandedge);
		}
	}

	/* triangular TX-shape toggling */
	if (ul_tb_info->dyn_tb_tri_en) {
		if (ul_tb_data->high_tf_client) {
			rtw89_phy_write32_mask(rtwdev, R_DCFO_OPT,
					       B_TXSHAPE_TRIANGULAR_CFG, 0);
			rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
				    "[ULTB] Turn off Tx triangle\n");
		} else if (ul_tb_data->low_tf_client) {
			rtw89_phy_write32_mask(rtwdev, R_DCFO_OPT,
					       B_TXSHAPE_TRIANGULAR_CFG,
					       ul_tb_data->def_tri_idx);
			rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
				    "[ULTB] Set to default tx_shap_idx = %d\n",
				    ul_tb_data->def_tri_idx);
		}
	}
}
5487 
rtw89_phy_ul_tb_ctrl_track(struct rtw89_dev * rtwdev)5488 void rtw89_phy_ul_tb_ctrl_track(struct rtw89_dev *rtwdev)
5489 {
5490 	const struct rtw89_chip_info *chip = rtwdev->chip;
5491 	struct rtw89_phy_ul_tb_check_data ul_tb_data = {};
5492 	struct rtw89_vif_link *rtwvif_link;
5493 	struct rtw89_vif *rtwvif;
5494 	unsigned int link_id;
5495 
5496 	if (!chip->ul_tb_waveform_ctrl && !chip->ul_tb_pwr_diff)
5497 		return;
5498 
5499 	if (rtwdev->total_sta_assoc != 1)
5500 		return;
5501 
5502 	rtw89_for_each_rtwvif(rtwdev, rtwvif)
5503 		rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
5504 			rtw89_phy_ul_tb_ctrl_check(rtwdev, rtwvif_link, &ul_tb_data);
5505 
5506 	if (!ul_tb_data.valid)
5507 		return;
5508 
5509 	rtw89_phy_ul_tb_waveform_ctrl(rtwdev, &ul_tb_data);
5510 }
5511 
rtw89_phy_ul_tb_info_init(struct rtw89_dev * rtwdev)5512 static void rtw89_phy_ul_tb_info_init(struct rtw89_dev *rtwdev)
5513 {
5514 	const struct rtw89_chip_info *chip = rtwdev->chip;
5515 	struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info;
5516 
5517 	if (!chip->ul_tb_waveform_ctrl)
5518 		return;
5519 
5520 	ul_tb_info->dyn_tb_tri_en = true;
5521 	ul_tb_info->def_if_bandedge =
5522 		rtw89_phy_read32_mask(rtwdev, R_BANDEDGE, B_BANDEDGE_EN);
5523 }
5524 
5525 static
rtw89_phy_antdiv_sts_instance_reset(struct rtw89_antdiv_stats * antdiv_sts)5526 void rtw89_phy_antdiv_sts_instance_reset(struct rtw89_antdiv_stats *antdiv_sts)
5527 {
5528 	ewma_rssi_init(&antdiv_sts->cck_rssi_avg);
5529 	ewma_rssi_init(&antdiv_sts->ofdm_rssi_avg);
5530 	ewma_rssi_init(&antdiv_sts->non_legacy_rssi_avg);
5531 	antdiv_sts->pkt_cnt_cck = 0;
5532 	antdiv_sts->pkt_cnt_ofdm = 0;
5533 	antdiv_sts->pkt_cnt_non_legacy = 0;
5534 	antdiv_sts->evm = 0;
5535 }
5536 
rtw89_phy_antdiv_sts_instance_add(struct rtw89_dev * rtwdev,struct rtw89_rx_phy_ppdu * phy_ppdu,struct rtw89_antdiv_stats * stats)5537 static void rtw89_phy_antdiv_sts_instance_add(struct rtw89_dev *rtwdev,
5538 					      struct rtw89_rx_phy_ppdu *phy_ppdu,
5539 					      struct rtw89_antdiv_stats *stats)
5540 {
5541 	if (rtw89_get_data_rate_mode(rtwdev, phy_ppdu->rate) == DATA_RATE_MODE_NON_HT) {
5542 		if (phy_ppdu->rate < RTW89_HW_RATE_OFDM6) {
5543 			ewma_rssi_add(&stats->cck_rssi_avg, phy_ppdu->rssi_avg);
5544 			stats->pkt_cnt_cck++;
5545 		} else {
5546 			ewma_rssi_add(&stats->ofdm_rssi_avg, phy_ppdu->rssi_avg);
5547 			stats->pkt_cnt_ofdm++;
5548 			stats->evm += phy_ppdu->ofdm.evm_min;
5549 		}
5550 	} else {
5551 		ewma_rssi_add(&stats->non_legacy_rssi_avg, phy_ppdu->rssi_avg);
5552 		stats->pkt_cnt_non_legacy++;
5553 		stats->evm += phy_ppdu->ofdm.evm_min;
5554 	}
5555 }
5556 
rtw89_phy_antdiv_sts_instance_get_rssi(struct rtw89_antdiv_stats * stats)5557 static u8 rtw89_phy_antdiv_sts_instance_get_rssi(struct rtw89_antdiv_stats *stats)
5558 {
5559 	if (stats->pkt_cnt_non_legacy >= stats->pkt_cnt_cck &&
5560 	    stats->pkt_cnt_non_legacy >= stats->pkt_cnt_ofdm)
5561 		return ewma_rssi_read(&stats->non_legacy_rssi_avg);
5562 	else if (stats->pkt_cnt_ofdm >= stats->pkt_cnt_cck &&
5563 		 stats->pkt_cnt_ofdm >= stats->pkt_cnt_non_legacy)
5564 		return ewma_rssi_read(&stats->ofdm_rssi_avg);
5565 	else
5566 		return ewma_rssi_read(&stats->cck_rssi_avg);
5567 }
5568 
static u8 rtw89_phy_antdiv_sts_instance_get_evm(struct rtw89_antdiv_stats *stats)
{
	/* Mean EVM over the packets that contributed to stats->evm
	 * (OFDM + non-legacy; CCK packets add no EVM in _instance_add).
	 * phy_div() is presumably a zero-safe divide - confirm in phy.h.
	 */
	return phy_div(stats->evm, stats->pkt_cnt_non_legacy + stats->pkt_cnt_ofdm);
}
5573 
rtw89_phy_antdiv_parse(struct rtw89_dev * rtwdev,struct rtw89_rx_phy_ppdu * phy_ppdu)5574 void rtw89_phy_antdiv_parse(struct rtw89_dev *rtwdev,
5575 			    struct rtw89_rx_phy_ppdu *phy_ppdu)
5576 {
5577 	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
5578 	struct rtw89_hal *hal = &rtwdev->hal;
5579 
5580 	if (!hal->ant_diversity || hal->ant_diversity_fixed)
5581 		return;
5582 
5583 	rtw89_phy_antdiv_sts_instance_add(rtwdev, phy_ppdu, &antdiv->target_stats);
5584 
5585 	if (!antdiv->get_stats)
5586 		return;
5587 
5588 	if (hal->antenna_rx == RF_A)
5589 		rtw89_phy_antdiv_sts_instance_add(rtwdev, phy_ppdu, &antdiv->main_stats);
5590 	else if (hal->antenna_rx == RF_B)
5591 		rtw89_phy_antdiv_sts_instance_add(rtwdev, phy_ppdu, &antdiv->aux_stats);
5592 }
5593 
/* One-time BB register setup for antenna diversity on PHY 0: put antenna
 * selection under software control and disable the BT-driven overrides.
 */
static void rtw89_phy_antdiv_reg_init(struct rtw89_dev *rtwdev)
{
	/* disable antenna training and TX antenna selection */
	rtw89_phy_write32_idx(rtwdev, R_P0_TRSW, B_P0_ANT_TRAIN_EN,
			      0x0, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_TRSW, B_P0_TX_ANT_SEL,
			      0x0, RTW89_PHY_0);

	rtw89_phy_write32_idx(rtwdev, R_P0_ANT_SW, B_P0_TRSW_TX_EXTEND,
			      0x0, RTW89_PHY_0);
	/* keep HW antenna switching active even when GNT_BT is asserted */
	rtw89_phy_write32_idx(rtwdev, R_P0_ANT_SW, B_P0_HW_ANTSW_DIS_BY_GNT_BT,
			      0x0, RTW89_PHY_0);

	rtw89_phy_write32_idx(rtwdev, R_P0_TRSW, B_P0_BT_FORCE_ANTIDX_EN,
			      0x0, RTW89_PHY_0);

	/* NOTE(review): 0x0100 presumably maps RF switch control to antenna
	 * 0/1 - confirm the mapping encoding against the register spec.
	 */
	rtw89_phy_write32_idx(rtwdev, R_RFSW_CTRL_ANT0_BASE, B_RFSW_CTRL_ANT_MAPPING,
			      0x0100, RTW89_PHY_0);

	/* software-controlled antenna selection (HW control disabled) */
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_BTG_TRX,
			      0x1, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_HW_CTRL,
			      0x0, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_SW_2G,
			      0x0, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_SW_5G,
			      0x0, RTW89_PHY_0);
}
5621 
rtw89_phy_antdiv_sts_reset(struct rtw89_dev * rtwdev)5622 static void rtw89_phy_antdiv_sts_reset(struct rtw89_dev *rtwdev)
5623 {
5624 	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
5625 
5626 	rtw89_phy_antdiv_sts_instance_reset(&antdiv->target_stats);
5627 	rtw89_phy_antdiv_sts_instance_reset(&antdiv->main_stats);
5628 	rtw89_phy_antdiv_sts_instance_reset(&antdiv->aux_stats);
5629 }
5630 
rtw89_phy_antdiv_init(struct rtw89_dev * rtwdev)5631 static void rtw89_phy_antdiv_init(struct rtw89_dev *rtwdev)
5632 {
5633 	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
5634 	struct rtw89_hal *hal = &rtwdev->hal;
5635 
5636 	if (!hal->ant_diversity)
5637 		return;
5638 
5639 	antdiv->get_stats = false;
5640 	antdiv->rssi_pre = 0;
5641 	rtw89_phy_antdiv_sts_reset(rtwdev);
5642 	rtw89_phy_antdiv_reg_init(rtwdev);
5643 }
5644 
rtw89_phy_thermal_protect(struct rtw89_dev * rtwdev)5645 static void rtw89_phy_thermal_protect(struct rtw89_dev *rtwdev)
5646 {
5647 	struct rtw89_phy_stat *phystat = &rtwdev->phystat;
5648 	struct rtw89_hal *hal = &rtwdev->hal;
5649 	u8 th_max = phystat->last_thermal_max;
5650 	u8 lv = hal->thermal_prot_lv;
5651 
5652 	if (!hal->thermal_prot_th ||
5653 	    (hal->disabled_dm_bitmap & BIT(RTW89_DM_THERMAL_PROTECT)))
5654 		return;
5655 
5656 	if (th_max > hal->thermal_prot_th && lv < RTW89_THERMAL_PROT_LV_MAX)
5657 		lv++;
5658 	else if (th_max < hal->thermal_prot_th - 2 && lv > 0)
5659 		lv--;
5660 	else
5661 		return;
5662 
5663 	hal->thermal_prot_lv = lv;
5664 
5665 	rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, "thermal protection lv=%d\n", lv);
5666 
5667 	rtw89_fw_h2c_tx_duty(rtwdev, hal->thermal_prot_lv);
5668 }
5669 
rtw89_phy_stat_thermal_update(struct rtw89_dev * rtwdev)5670 static void rtw89_phy_stat_thermal_update(struct rtw89_dev *rtwdev)
5671 {
5672 	struct rtw89_phy_stat *phystat = &rtwdev->phystat;
5673 	u8 th, th_max = 0;
5674 	int i;
5675 
5676 	for (i = 0; i < rtwdev->chip->rf_path_num; i++) {
5677 		th = rtw89_chip_get_thermal(rtwdev, i);
5678 		if (th)
5679 			ewma_thermal_add(&phystat->avg_thermal[i], th);
5680 
5681 		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
5682 			    "path(%d) thermal cur=%u avg=%ld", i, th,
5683 			    ewma_thermal_read(&phystat->avg_thermal[i]));
5684 
5685 		th_max = max(th_max, th);
5686 	}
5687 
5688 	phystat->last_thermal_max = th_max;
5689 }
5690 
/* Context threaded through ieee80211_iterate_stations_atomic() by
 * rtw89_phy_stat_rssi_update().
 */
struct rtw89_phy_iter_rssi_data {
	struct rtw89_dev *rtwdev;
	bool rssi_changed;	/* some link's RSSI moved beyond 3 (scaled) units */
};
5695 
5696 static
__rtw89_phy_stat_rssi_update_iter(struct rtw89_sta_link * rtwsta_link,struct rtw89_phy_iter_rssi_data * rssi_data)5697 void __rtw89_phy_stat_rssi_update_iter(struct rtw89_sta_link *rtwsta_link,
5698 				       struct rtw89_phy_iter_rssi_data *rssi_data)
5699 {
5700 	struct rtw89_vif_link *rtwvif_link = rtwsta_link->rtwvif_link;
5701 	struct rtw89_dev *rtwdev = rssi_data->rtwdev;
5702 	struct rtw89_phy_ch_info *ch_info;
5703 	struct rtw89_bb_ctx *bb;
5704 	unsigned long rssi_curr;
5705 
5706 	rssi_curr = ewma_rssi_read(&rtwsta_link->avg_rssi);
5707 	bb = rtw89_get_bb_ctx(rtwdev, rtwvif_link->phy_idx);
5708 	ch_info = &bb->ch_info;
5709 
5710 	if (rssi_curr < ch_info->rssi_min) {
5711 		ch_info->rssi_min = rssi_curr;
5712 		ch_info->rssi_min_macid = rtwsta_link->mac_id;
5713 	}
5714 
5715 	if (rtwsta_link->prev_rssi == 0) {
5716 		rtwsta_link->prev_rssi = rssi_curr;
5717 	} else if (abs((int)rtwsta_link->prev_rssi - (int)rssi_curr) >
5718 		   (3 << RSSI_FACTOR)) {
5719 		rtwsta_link->prev_rssi = rssi_curr;
5720 		rssi_data->rssi_changed = true;
5721 	}
5722 }
5723 
rtw89_phy_stat_rssi_update_iter(void * data,struct ieee80211_sta * sta)5724 static void rtw89_phy_stat_rssi_update_iter(void *data,
5725 					    struct ieee80211_sta *sta)
5726 {
5727 	struct rtw89_phy_iter_rssi_data *rssi_data =
5728 					(struct rtw89_phy_iter_rssi_data *)data;
5729 	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
5730 	struct rtw89_sta_link *rtwsta_link;
5731 	unsigned int link_id;
5732 
5733 	rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id)
5734 		__rtw89_phy_stat_rssi_update_iter(rtwsta_link, rssi_data);
5735 }
5736 
rtw89_phy_stat_rssi_update(struct rtw89_dev * rtwdev)5737 static void rtw89_phy_stat_rssi_update(struct rtw89_dev *rtwdev)
5738 {
5739 	struct rtw89_phy_iter_rssi_data rssi_data = {};
5740 	struct rtw89_bb_ctx *bb;
5741 
5742 	rssi_data.rtwdev = rtwdev;
5743 	rtw89_for_each_active_bb(rtwdev, bb)
5744 		bb->ch_info.rssi_min = U8_MAX;
5745 
5746 	ieee80211_iterate_stations_atomic(rtwdev->hw,
5747 					  rtw89_phy_stat_rssi_update_iter,
5748 					  &rssi_data);
5749 	if (rssi_data.rssi_changed)
5750 		rtw89_btc_ntfy_wl_sta(rtwdev);
5751 }
5752 
rtw89_phy_stat_init(struct rtw89_dev * rtwdev)5753 static void rtw89_phy_stat_init(struct rtw89_dev *rtwdev)
5754 {
5755 	struct rtw89_phy_stat *phystat = &rtwdev->phystat;
5756 	int i;
5757 
5758 	for (i = 0; i < rtwdev->chip->rf_path_num; i++)
5759 		ewma_thermal_init(&phystat->avg_thermal[i]);
5760 
5761 	rtw89_phy_stat_thermal_update(rtwdev);
5762 
5763 	memset(&phystat->cur_pkt_stat, 0, sizeof(phystat->cur_pkt_stat));
5764 	memset(&phystat->last_pkt_stat, 0, sizeof(phystat->last_pkt_stat));
5765 
5766 	ewma_rssi_init(&phystat->bcn_rssi);
5767 
5768 	rtwdev->hal.thermal_prot_lv = 0;
5769 }
5770 
/* Periodic PHY statistics refresh. Thermal must be sampled before the
 * protection logic, which consumes phystat->last_thermal_max.
 */
void rtw89_phy_stat_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_phy_stat *phystat = &rtwdev->phystat;

	rtw89_phy_stat_thermal_update(rtwdev);
	rtw89_phy_thermal_protect(rtwdev);
	rtw89_phy_stat_rssi_update(rtwdev);

	/* rotate per-period packet counters: current becomes last */
	phystat->last_pkt_stat = phystat->cur_pkt_stat;
	memset(&phystat->cur_pkt_stat, 0, sizeof(phystat->cur_pkt_stat));
}
5782 
rtw89_phy_ccx_us_to_idx(struct rtw89_dev * rtwdev,struct rtw89_bb_ctx * bb,u32 time_us)5783 static u16 rtw89_phy_ccx_us_to_idx(struct rtw89_dev *rtwdev,
5784 				   struct rtw89_bb_ctx *bb, u32 time_us)
5785 {
5786 	struct rtw89_env_monitor_info *env = &bb->env_monitor;
5787 
5788 	return time_us >> (ilog2(CCX_US_BASE_RATIO) + env->ccx_unit_idx);
5789 }
5790 
rtw89_phy_ccx_idx_to_us(struct rtw89_dev * rtwdev,struct rtw89_bb_ctx * bb,u16 idx)5791 static u32 rtw89_phy_ccx_idx_to_us(struct rtw89_dev *rtwdev,
5792 				   struct rtw89_bb_ctx *bb, u16 idx)
5793 {
5794 	struct rtw89_env_monitor_info *env = &bb->env_monitor;
5795 
5796 	return idx << (ilog2(CCX_US_BASE_RATIO) + env->ccx_unit_idx);
5797 }
5798 
/* Baseline CCX (channel/clear-channel assessment) configuration for one
 * BB context: reset the software state and enable the measurement engine
 * with its default trigger and EDCCA options.
 */
static void rtw89_phy_ccx_top_setting_init(struct rtw89_dev *rtwdev,
					   struct rtw89_bb_ctx *bb)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &bb->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;

	/* software bookkeeping starts clean; default unit is 32 us */
	env->ccx_manual_ctrl = false;
	env->ccx_ongoing = false;
	env->ccx_rac_lv = RTW89_RAC_RELEASE;
	env->ccx_period = 0;
	env->ccx_unit_idx = RTW89_CCX_32_US;

	rtw89_phy_write32_idx(rtwdev, ccx->setting_addr, ccx->en_mask, 1, bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, ccx->setting_addr, ccx->trig_opt_mask, 1,
			      bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 1,
			      bb->phy_idx);
	/* EDCCA measured on the primary 20 MHz segment */
	rtw89_phy_write32_idx(rtwdev, ccx->setting_addr, ccx->edcca_opt_mask,
			      RTW89_CCX_EDCCA_BW20_0, bb->phy_idx);
}
5820 
/* Scale a raw CCX counter onto a 0..score-1 range relative to the
 * measurement period, with round-to-nearest division.
 */
static u16 rtw89_phy_ccx_get_report(struct rtw89_dev *rtwdev,
				    struct rtw89_bb_ctx *bb,
				    u16 report, u16 score)
{
	struct rtw89_env_monitor_info *env = &bb->env_monitor;
	u32 scaled;
	u16 val = 0;

	/* +period/2 implements rounding; guard against a zero period */
	scaled = report * score + (env->ccx_period >> 1);
	if (env->ccx_period)
		val = scaled / env->ccx_period;

	return val >= score ? score - 1 : val;
}
5835 
/* Translate a monitor time in milliseconds into a CCX period register
 * value plus the smallest time-unit index that can cover it.
 */
static void rtw89_phy_ccx_ms_to_period_unit(struct rtw89_dev *rtwdev,
					    u16 time_ms, u32 *period,
					    u32 *unit_idx)
{
	u8 quotient;
	u32 idx;

	time_ms = min_t(u16, time_ms, CCX_MAX_PERIOD);

	quotient = CCX_MAX_PERIOD_UNIT * time_ms / CCX_MAX_PERIOD;

	/* pick the coarsest unit the requested time requires */
	if (quotient >= 16)
		idx = RTW89_CCX_32_US;
	else if (quotient >= 8)
		idx = RTW89_CCX_16_US;
	else if (quotient >= 4)
		idx = RTW89_CCX_8_US;
	else
		idx = RTW89_CCX_4_US;

	*unit_idx = idx;
	*period = (time_ms * MS_TO_4US_RATIO) >> idx;

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "[Trigger Time] period:%d, unit_idx:%d\n",
		    *period, *unit_idx);
}
5864 
/* Release the CCX racing lock: the debug line reports the old level
 * before the state is reset, so keep the print first.
 */
static void rtw89_phy_ccx_racing_release(struct rtw89_dev *rtwdev,
					 struct rtw89_bb_ctx *bb)
{
	struct rtw89_env_monitor_info *env = &bb->env_monitor;

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "lv:(%d)->(0)\n", env->ccx_rac_lv);

	/* any requester may now (re)arm a measurement */
	env->ccx_ongoing = false;
	env->ccx_rac_lv = RTW89_RAC_RELEASE;
	env->ifs_clm_app = RTW89_IFS_CLM_BACKGROUND;
}
5877 
/* Recompute the four IFS-CLM sampling thresholds when the requesting
 * application changed. Returns true when the thresholds were updated and
 * need to be written to hardware (rtw89_phy_ifs_clm_set_th_reg()).
 */
static bool rtw89_phy_ifs_clm_th_update_check(struct rtw89_dev *rtwdev,
					      struct rtw89_bb_ctx *bb,
					      struct rtw89_ccx_para_info *para)
{
	struct rtw89_env_monitor_info *env = &bb->env_monitor;
	bool is_update = env->ifs_clm_app != para->ifs_clm_app;
	u8 i = 0;
	u16 *ifs_th_l = env->ifs_clm_th_l;
	u16 *ifs_th_h = env->ifs_clm_th_h;
	u32 ifs_th0_us = 0, ifs_th_times = 0;
	u32 ifs_th_h_us[RTW89_IFS_CLM_NUM] = {0};

	if (!is_update)
		goto ifs_update_finished;

	/* pick the base threshold and multiplier for the new application;
	 * only the manual-debug app supplies its own values
	 */
	switch (para->ifs_clm_app) {
	case RTW89_IFS_CLM_INIT:
	case RTW89_IFS_CLM_BACKGROUND:
	case RTW89_IFS_CLM_ACS:
	case RTW89_IFS_CLM_DBG:
	case RTW89_IFS_CLM_DIG:
	case RTW89_IFS_CLM_TDMA_DIG:
		ifs_th0_us = IFS_CLM_TH0_UPPER;
		ifs_th_times = IFS_CLM_TH_MUL;
		break;
	case RTW89_IFS_CLM_DBG_MANUAL:
		ifs_th0_us = para->ifs_clm_manual_th0;
		ifs_th_times = para->ifs_clm_manual_th_times;
		break;
	default:
		break;
	}

	/* Set sampling threshold for 4 different regions, unit in idx_cnt.
	 * low[i] = high[i-1] + 1
	 * high[i] = high[i-1] * ifs_th_times
	 */
	ifs_th_l[IFS_CLM_TH_START_IDX] = 0;
	ifs_th_h_us[IFS_CLM_TH_START_IDX] = ifs_th0_us;
	ifs_th_h[IFS_CLM_TH_START_IDX] = rtw89_phy_ccx_us_to_idx(rtwdev, bb,
								 ifs_th0_us);
	for (i = 1; i < RTW89_IFS_CLM_NUM; i++) {
		ifs_th_l[i] = ifs_th_h[i - 1] + 1;
		ifs_th_h_us[i] = ifs_th_h_us[i - 1] * ifs_th_times;
		ifs_th_h[i] = rtw89_phy_ccx_us_to_idx(rtwdev, bb, ifs_th_h_us[i]);
	}

ifs_update_finished:
	if (!is_update)
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "No need to update IFS_TH\n");

	return is_update;
}
5932 
/* Write the cached IFS-CLM low/high thresholds (computed by
 * rtw89_phy_ifs_clm_th_update_check()) into the four T1..T4 registers.
 */
static void rtw89_phy_ifs_clm_set_th_reg(struct rtw89_dev *rtwdev,
					 struct rtw89_bb_ctx *bb)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &bb->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;
	u8 i = 0;

	/* low thresholds, T1..T4 */
	rtw89_phy_write32_idx(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_th_l_mask,
			      env->ifs_clm_th_l[0], bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_th_l_mask,
			      env->ifs_clm_th_l[1], bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_th_l_mask,
			      env->ifs_clm_th_l[2], bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_th_l_mask,
			      env->ifs_clm_th_l[3], bb->phy_idx);

	/* high thresholds, T1..T4 */
	rtw89_phy_write32_idx(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_th_h_mask,
			      env->ifs_clm_th_h[0], bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_th_h_mask,
			      env->ifs_clm_th_h[1], bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_th_h_mask,
			      env->ifs_clm_th_h[2], bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_th_h_mask,
			      env->ifs_clm_th_h[3], bb->phy_idx);

	for (i = 0; i < RTW89_IFS_CLM_NUM; i++)
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "Update IFS_T%d_th{low, high} : {%d, %d}\n",
			    i + 1, env->ifs_clm_th_l[i], env->ifs_clm_th_h[i]);
}
5964 
__rtw89_phy_nhm_setting_init(struct rtw89_dev * rtwdev,struct rtw89_bb_ctx * bb)5965 static void __rtw89_phy_nhm_setting_init(struct rtw89_dev *rtwdev,
5966 					 struct rtw89_bb_ctx *bb)
5967 {
5968 	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
5969 	struct rtw89_env_monitor_info *env = &bb->env_monitor;
5970 	const struct rtw89_ccx_regs *ccx = phy->ccx;
5971 
5972 	env->nhm_include_cca = false;
5973 	env->nhm_mntr_time = 0;
5974 	env->nhm_sum = 0;
5975 
5976 	rtw89_phy_write32_idx_set(rtwdev, ccx->nhm_config, ccx->nhm_en_mask, bb->phy_idx);
5977 	rtw89_phy_write32_idx_set(rtwdev, ccx->nhm_method, ccx->nhm_pwr_method_msk,
5978 				  bb->phy_idx);
5979 }
5980 
rtw89_phy_nhm_setting_init(struct rtw89_dev * rtwdev)5981 void rtw89_phy_nhm_setting_init(struct rtw89_dev *rtwdev)
5982 {
5983 	const struct rtw89_chip_info *chip = rtwdev->chip;
5984 	struct rtw89_bb_ctx *bb;
5985 
5986 	if (!chip->support_noise)
5987 		return;
5988 
5989 	rtw89_for_each_active_bb(rtwdev, bb)
5990 		__rtw89_phy_nhm_setting_init(rtwdev, bb);
5991 }
5992 
/* Initialize IFS-CLM for one BB: program the default thresholds and enable
 * collection on all four T1..T4 bins.
 */
static void rtw89_phy_ifs_clm_setting_init(struct rtw89_dev *rtwdev,
					   struct rtw89_bb_ctx *bb)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &bb->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;
	struct rtw89_ccx_para_info para = {};

	env->ifs_clm_app = RTW89_IFS_CLM_BACKGROUND;
	env->ifs_clm_mntr_time = 0;

	/* INIT differs from the stored BACKGROUND app, so this forces the
	 * first threshold computation and register write
	 */
	para.ifs_clm_app = RTW89_IFS_CLM_INIT;
	if (rtw89_phy_ifs_clm_th_update_check(rtwdev, bb, &para))
		rtw89_phy_ifs_clm_set_th_reg(rtwdev, bb);

	rtw89_phy_write32_idx(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_collect_en_mask, true,
			      bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_en_mask, true,
			      bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_en_mask, true,
			      bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_en_mask, true,
			      bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_en_mask, true,
			      bb->phy_idx);
}
6019 
rtw89_phy_ccx_racing_ctrl(struct rtw89_dev * rtwdev,struct rtw89_bb_ctx * bb,enum rtw89_env_racing_lv level)6020 static int rtw89_phy_ccx_racing_ctrl(struct rtw89_dev *rtwdev,
6021 				     struct rtw89_bb_ctx *bb,
6022 				     enum rtw89_env_racing_lv level)
6023 {
6024 	struct rtw89_env_monitor_info *env = &bb->env_monitor;
6025 	int ret = 0;
6026 
6027 	if (level >= RTW89_RAC_MAX_NUM) {
6028 		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
6029 			    "[WARNING] Wrong LV=%d\n", level);
6030 		return -EINVAL;
6031 	}
6032 
6033 	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
6034 		    "ccx_ongoing=%d, level:(%d)->(%d)\n", env->ccx_ongoing,
6035 		    env->ccx_rac_lv, level);
6036 
6037 	if (env->ccx_ongoing) {
6038 		if (level <= env->ccx_rac_lv)
6039 			ret = -EINVAL;
6040 		else
6041 			env->ccx_ongoing = false;
6042 	}
6043 
6044 	if (ret == 0)
6045 		env->ccx_rac_lv = level;
6046 
6047 	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "ccx racing success=%d\n",
6048 		    !ret);
6049 
6050 	return ret;
6051 }
6052 
/* (Re)start a CCX measurement. Each control bit is written 0 first and
 * then 1, so the hardware sees a fresh rising edge regardless of the
 * bit's prior state - keep the clear-then-set ordering intact.
 * @sel: bitmask of optional engines to restart (RTW89_PHY_ENV_MON_NHM).
 */
static void rtw89_phy_ccx_trigger(struct rtw89_dev *rtwdev,
				  struct rtw89_bb_ctx *bb, u8 sel)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &bb->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;

	/* phase 1: drop counter-clear, trigger, and (optionally) NHM enable */
	rtw89_phy_write32_idx(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_clm_cnt_clear_mask, 0,
			      bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 0,
			      bb->phy_idx);
	if (sel & RTW89_PHY_ENV_MON_NHM)
		rtw89_phy_write32_idx_clr(rtwdev, ccx->nhm_config,
					  ccx->nhm_en_mask, bb->phy_idx);

	/* phase 2: raise them again to start the new measurement */
	rtw89_phy_write32_idx(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_clm_cnt_clear_mask, 1,
			      bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 1,
			      bb->phy_idx);
	if (sel & RTW89_PHY_ENV_MON_NHM)
		rtw89_phy_write32_idx_set(rtwdev, ccx->nhm_config,
					  ccx->nhm_en_mask, bb->phy_idx);

	env->ccx_ongoing = true;
}
6078 
/* Derive the human-readable IFS-CLM utilization figures (percent/permil
 * ratios and per-bin average times) from the raw counters collected during
 * the last measurement period.
 */
static void rtw89_phy_ifs_clm_get_utility(struct rtw89_dev *rtwdev,
					  struct rtw89_bb_ctx *bb)
{
	struct rtw89_env_monitor_info *env = &bb->env_monitor;
	u8 i = 0;
	u32 res = 0;

	env->ifs_clm_tx_ratio =
		rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_tx, PERCENT);
	env->ifs_clm_edcca_excl_cca_ratio =
		rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_edcca_excl_cca,
					 PERCENT);
	env->ifs_clm_cck_fa_ratio =
		rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_cckfa, PERCENT);
	env->ifs_clm_ofdm_fa_ratio =
		rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_ofdmfa, PERCENT);
	env->ifs_clm_cck_cca_excl_fa_ratio =
		rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_cckcca_excl_fa,
					 PERCENT);
	env->ifs_clm_ofdm_cca_excl_fa_ratio =
		rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_ofdmcca_excl_fa,
					 PERCENT);
	/* false-alarm rates additionally in permil for finer resolution */
	env->ifs_clm_cck_fa_permil =
		rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_cckfa, PERMIL);
	env->ifs_clm_ofdm_fa_permil =
		rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_ofdmfa, PERMIL);

	for (i = 0; i < RTW89_IFS_CLM_NUM; i++) {
		/* histogram overflow: flag the average as invalid */
		if (env->ifs_clm_his[i] > ENV_MNTR_IFSCLM_HIS_MAX) {
			env->ifs_clm_ifs_avg[i] = ENV_MNTR_FAIL_DWORD;
		} else {
			env->ifs_clm_ifs_avg[i] =
				rtw89_phy_ccx_idx_to_us(rtwdev, bb,
							env->ifs_clm_avg[i]);
		}

		/* rounded average CCA time per histogram entry, in us */
		res = rtw89_phy_ccx_idx_to_us(rtwdev, bb, env->ifs_clm_cca[i]);
		res += env->ifs_clm_his[i] >> 1;
		if (env->ifs_clm_his[i])
			res /= env->ifs_clm_his[i];
		else
			res = 0;
		env->ifs_clm_cca_avg[i] = res;
	}

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM ratio {Tx, EDCCA_exclu_cca} = {%d, %d}\n",
		    env->ifs_clm_tx_ratio, env->ifs_clm_edcca_excl_cca_ratio);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM FA ratio {CCK, OFDM} = {%d, %d}\n",
		    env->ifs_clm_cck_fa_ratio, env->ifs_clm_ofdm_fa_ratio);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM FA permil {CCK, OFDM} = {%d, %d}\n",
		    env->ifs_clm_cck_fa_permil, env->ifs_clm_ofdm_fa_permil);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM CCA_exclu_FA ratio {CCK, OFDM} = {%d, %d}\n",
		    env->ifs_clm_cck_cca_excl_fa_ratio,
		    env->ifs_clm_ofdm_cca_excl_fa_ratio);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "Time:[his, ifs_avg(us), cca_avg(us)]\n");
	for (i = 0; i < RTW89_IFS_CLM_NUM; i++)
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "T%d:[%d, %d, %d]\n",
			    i + 1, env->ifs_clm_his[i], env->ifs_clm_ifs_avg[i],
			    env->ifs_clm_cca_avg[i]);
}
6144 
/* Collapse the NHM histogram into a single weighted-average power value.
 *
 * Each report bucket is weighted by the midpoint of its threshold window
 * (lowest/highest buckets use the edge threshold offset by
 * RTW89_NHM_WEIGHT_OFFSET).  Returns 0 when no samples were collected,
 * otherwise the weighted average scaled down by RTW89_NHM_TH_FACTOR — the
 * same scale the thresholds were programmed in (see rtw89_phy_nhm_th_update).
 */
static u8 rtw89_nhm_weighted_avg(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
{
	struct rtw89_env_monitor_info *env = &bb->env_monitor;
	u8 nhm_weight[RTW89_NHM_RPT_NUM];
	u32 nhm_weighted_sum = 0;
	u8 weight_zero;
	u8 i;

	/* no valid samples: avoid the division by zero below */
	if (env->nhm_sum == 0)
		return 0;

	/* weight of the lowest bucket; the clamp guards against wrap when
	 * nhm_th[0] < RTW89_NHM_WEIGHT_OFFSET
	 */
	weight_zero = clamp_t(u16, env->nhm_th[0] - RTW89_NHM_WEIGHT_OFFSET, 0, U8_MAX);

	for (i = 0; i < RTW89_NHM_RPT_NUM; i++) {
		if (i == 0)
			nhm_weight[i] = weight_zero;
		else if (i == (RTW89_NHM_RPT_NUM - 1))
			nhm_weight[i] = env->nhm_th[i - 1] + RTW89_NHM_WEIGHT_OFFSET;
		else
			nhm_weight[i] = (env->nhm_th[i - 1] + env->nhm_th[i]) / 2;
	}

	/* 8852A/B/C workaround: when the top threshold was forced to
	 * RTW89_NHM_WA_TH by rtw89_phy_nhm_th_update(), reuse the weight of
	 * the second-to-last bucket for the last one and fold the top
	 * bucket's count into bucket 0 so it does not skew the average.
	 * Must run before the weighted sum below.
	 */
	if (rtwdev->chip->chip_id == RTL8852A || rtwdev->chip->chip_id == RTL8852B ||
	    rtwdev->chip->chip_id == RTL8852C) {
		if (env->nhm_th[RTW89_NHM_TH_NUM - 1] == RTW89_NHM_WA_TH) {
			nhm_weight[RTW89_NHM_RPT_NUM - 1] =
				env->nhm_th[RTW89_NHM_TH_NUM - 2] +
				RTW89_NHM_WEIGHT_OFFSET;
			nhm_weight[RTW89_NHM_RPT_NUM - 2] =
				nhm_weight[RTW89_NHM_RPT_NUM - 1];
		}

		env->nhm_result[0] += env->nhm_result[RTW89_NHM_RPT_NUM - 1];
		env->nhm_result[RTW89_NHM_RPT_NUM - 1] = 0;
	}

	for (i = 0; i < RTW89_NHM_RPT_NUM; i++)
		nhm_weighted_sum += env->nhm_result[i] * nhm_weight[i];

	return (nhm_weighted_sum / env->nhm_sum) >> RTW89_NHM_TH_FACTOR;
}
6186 
/* Read the NHM report for one BB and record it per channel.
 *
 * Pulls every histogram bucket from hardware, computes the weighted noise
 * average, and — when @ch_hw_value is non-zero — stores the result in the
 * per-band/per-channel history and links it into the report list (only once;
 * an already-linked entry is just updated in place).  A @ch_hw_value of 0
 * means "measurement only, do not attach to a channel".
 */
static void __rtw89_phy_nhm_get_result(struct rtw89_dev *rtwdev,
				       struct rtw89_bb_ctx *bb, enum rtw89_band hw_band,
				       u16 ch_hw_value)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &bb->env_monitor;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_ccx_regs *ccx = phy->ccx;
	struct ieee80211_supported_band *sband;
	const struct rtw89_reg_def *nhm_rpt;
	enum nl80211_band band;
	u32 sum = 0;
	u8 chan_idx;
	u8 nhm_pwr;
	u8 i;

	/* hardware has not finished the measurement yet */
	if (!rtw89_phy_read32_idx(rtwdev, ccx->nhm, ccx->nhm_ready, bb->phy_idx)) {
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,  "[NHM] Get NHM report Fail\n");
		return;
	}

	/* collect all histogram buckets and their total sample count */
	for (i = 0; i < RTW89_NHM_RPT_NUM; i++) {
		nhm_rpt = &(*chip->nhm_report)[i];

		env->nhm_result[i] =
			rtw89_phy_read32_idx(rtwdev, nhm_rpt->addr,
					     nhm_rpt->mask, bb->phy_idx);
		sum += env->nhm_result[i];
	}
	env->nhm_sum = sum;
	nhm_pwr = rtw89_nhm_weighted_avg(rtwdev, bb);

	if (!ch_hw_value)
		return;

	band = rtw89_hw_to_nl80211_band(hw_band);
	sband = rtwdev->hw->wiphy->bands[band];
	if (!sband)
		return;

	/* find the mac80211 channel matching the measured hw channel */
	for (chan_idx = 0; chan_idx < sband->n_channels; chan_idx++) {
		struct ieee80211_channel *channel;
		struct rtw89_nhm_report *rpt;
		struct list_head *nhm_list;

		channel = &sband->channels[chan_idx];
		if (channel->hw_value != ch_hw_value)
			continue;

		rpt = &env->nhm_his[hw_band][chan_idx];
		nhm_list = &env->nhm_rpt_list;

		rpt->channel = channel;
		rpt->noise = nhm_pwr;

		/* link the entry only on first use; later calls just update it */
		if (list_empty(&rpt->list))
			list_add_tail(&rpt->list, nhm_list);

		return;
	}

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "[NHM] channel not found\n");
}
6250 
/* Collect NHM reports from every active BB, if the chip supports noise
 * measurement.
 */
void rtw89_phy_nhm_get_result(struct rtw89_dev *rtwdev, enum rtw89_band hw_band,
			      u16 ch_hw_value)
{
	struct rtw89_bb_ctx *bb;

	if (!rtwdev->chip->support_noise)
		return;

	rtw89_for_each_active_bb(rtwdev, bb)
		__rtw89_phy_nhm_get_result(rtwdev, bb, hw_band, ch_hw_value);
}
6263 
/* Fetch the completed IFS-CLM measurement from hardware.
 *
 * Reads TX/EDCCA/FA/CCA counters, the four per-level IFS histograms with
 * their average and CCA times, and the total IFS count, then post-processes
 * them via rtw89_phy_ifs_clm_get_utility().
 *
 * Returns true on success, false if the measurement is not done yet.
 */
static bool rtw89_phy_ifs_clm_get_result(struct rtw89_dev *rtwdev,
					 struct rtw89_bb_ctx *bb)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &bb->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;
	u8 i = 0;

	/* bail out if the counting window has not completed */
	if (rtw89_phy_read32_idx(rtwdev, ccx->ifs_total_addr,
				 ccx->ifs_cnt_done_mask, bb->phy_idx) == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "Get IFS_CLM report Fail\n");
		return false;
	}

	/* raw airtime counters: TX, EDCCA, CCA and false-alarm events */
	env->ifs_clm_tx =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_clm_tx_cnt_addr,
				     ccx->ifs_clm_tx_cnt_msk, bb->phy_idx);
	env->ifs_clm_edcca_excl_cca =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_clm_tx_cnt_addr,
				     ccx->ifs_clm_edcca_excl_cca_fa_mask, bb->phy_idx);
	env->ifs_clm_cckcca_excl_fa =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_clm_cca_addr,
				     ccx->ifs_clm_cckcca_excl_fa_mask, bb->phy_idx);
	env->ifs_clm_ofdmcca_excl_fa =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_clm_cca_addr,
				     ccx->ifs_clm_ofdmcca_excl_fa_mask, bb->phy_idx);
	env->ifs_clm_cckfa =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_clm_fa_addr,
				     ccx->ifs_clm_cck_fa_mask, bb->phy_idx);
	env->ifs_clm_ofdmfa =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_clm_fa_addr,
				     ccx->ifs_clm_ofdm_fa_mask, bb->phy_idx);

	/* IFS histogram: event counts for threshold levels T1..T4 */
	env->ifs_clm_his[0] =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_his_addr,
				     ccx->ifs_t1_his_mask, bb->phy_idx);
	env->ifs_clm_his[1] =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_his_addr,
				     ccx->ifs_t2_his_mask, bb->phy_idx);

	env->ifs_clm_his[2] =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_his_addr2,
				     ccx->ifs_t3_his_mask, bb->phy_idx);
	env->ifs_clm_his[3] =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_his_addr2,
				     ccx->ifs_t4_his_mask, bb->phy_idx);

	/* average idle time per level (raw index, converted to us later) */
	env->ifs_clm_avg[0] =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_avg_l_addr,
				     ccx->ifs_t1_avg_mask, bb->phy_idx);
	env->ifs_clm_avg[1] =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_avg_l_addr,
				     ccx->ifs_t2_avg_mask, bb->phy_idx);
	env->ifs_clm_avg[2] =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_avg_h_addr,
				     ccx->ifs_t3_avg_mask, bb->phy_idx);
	env->ifs_clm_avg[3] =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_avg_h_addr,
				     ccx->ifs_t4_avg_mask, bb->phy_idx);

	/* accumulated CCA time per level (raw index) */
	env->ifs_clm_cca[0] =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_cca_l_addr,
				     ccx->ifs_t1_cca_mask, bb->phy_idx);
	env->ifs_clm_cca[1] =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_cca_l_addr,
				     ccx->ifs_t2_cca_mask, bb->phy_idx);
	env->ifs_clm_cca[2] =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_cca_h_addr,
				     ccx->ifs_t3_cca_mask, bb->phy_idx);
	env->ifs_clm_cca[3] =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_cca_h_addr,
				     ccx->ifs_t4_cca_mask, bb->phy_idx);

	env->ifs_clm_total_ifs =
		rtw89_phy_read32_idx(rtwdev, ccx->ifs_total_addr,
				     ccx->ifs_total_mask, bb->phy_idx);

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "IFS-CLM total_ifs = %d\n",
		    env->ifs_clm_total_ifs);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "{Tx, EDCCA_exclu_cca} = {%d, %d}\n",
		    env->ifs_clm_tx, env->ifs_clm_edcca_excl_cca);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM FA{CCK, OFDM} = {%d, %d}\n",
		    env->ifs_clm_cckfa, env->ifs_clm_ofdmfa);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM CCA_exclu_FA{CCK, OFDM} = {%d, %d}\n",
		    env->ifs_clm_cckcca_excl_fa, env->ifs_clm_ofdmcca_excl_fa);

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "Time:[his, avg, cca]\n");
	for (i = 0; i < RTW89_IFS_CLM_NUM; i++)
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "T%d:[%d, %d, %d]\n", i + 1, env->ifs_clm_his[i],
			    env->ifs_clm_avg[i], env->ifs_clm_cca[i]);

	/* derive ratios/averages from the raw counters just read */
	rtw89_phy_ifs_clm_get_utility(rtwdev, bb);

	return true;
}
6364 
/* Program the NHM power thresholds into hardware.
 *
 * The 802.11k-style threshold table is scaled by RTW89_NHM_TH_FACTOR before
 * being written.  On 8852A/B/C the top threshold is replaced by
 * RTW89_NHM_WA_TH as a hardware workaround; rtw89_nhm_weighted_avg()
 * compensates for this when computing the average.
 */
static void rtw89_phy_nhm_th_update(struct rtw89_dev *rtwdev,
				    struct rtw89_bb_ctx *bb)
{
	struct rtw89_env_monitor_info *env = &bb->env_monitor;
	static const u8 nhm_th_11k[RTW89_NHM_RPT_NUM] = {
		18, 21, 24, 27, 30, 35, 40, 45, 50, 55, 60, 0
	};
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_reg_def *nhm_th;
	u8 i;

	for (i = 0; i < RTW89_NHM_RPT_NUM; i++)
		env->nhm_th[i] = nhm_th_11k[i] << RTW89_NHM_TH_FACTOR;

	/* chip workaround: force the last threshold on 8852 family */
	if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B ||
	    chip->chip_id == RTL8852C)
		env->nhm_th[RTW89_NHM_TH_NUM - 1] = RTW89_NHM_WA_TH;

	for (i = 0; i < RTW89_NHM_TH_NUM; i++) {
		nhm_th = &(*chip->nhm_th)[i];

		rtw89_phy_write32_idx(rtwdev, nhm_th->addr, nhm_th->mask,
				      env->nhm_th[i], bb->phy_idx);
	}
}
6390 
rtw89_phy_nhm_set(struct rtw89_dev * rtwdev,struct rtw89_bb_ctx * bb,struct rtw89_ccx_para_info * para)6391 static int rtw89_phy_nhm_set(struct rtw89_dev *rtwdev,
6392 			     struct rtw89_bb_ctx *bb,
6393 			     struct rtw89_ccx_para_info *para)
6394 {
6395 	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
6396 	struct rtw89_env_monitor_info *env = &bb->env_monitor;
6397 	const struct rtw89_ccx_regs *ccx = phy->ccx;
6398 	u32 unit_idx = 0;
6399 	u32 period = 0;
6400 
6401 	if (para->mntr_time == 0) {
6402 		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
6403 			    "[NHM] MNTR_TIME is 0\n");
6404 		return -EINVAL;
6405 	}
6406 
6407 	if (rtw89_phy_ccx_racing_ctrl(rtwdev, bb, para->rac_lv))
6408 		return -EINVAL;
6409 
6410 	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
6411 		    "[NHM]nhm_incld_cca=%d, mntr_time=%d ms\n",
6412 		    para->nhm_incld_cca, para->mntr_time);
6413 
6414 	if (para->mntr_time != env->nhm_mntr_time) {
6415 		rtw89_phy_ccx_ms_to_period_unit(rtwdev, para->mntr_time,
6416 						&period, &unit_idx);
6417 		rtw89_phy_write32_idx(rtwdev, ccx->nhm_config,
6418 				      ccx->nhm_period_mask, period, bb->phy_idx);
6419 		rtw89_phy_write32_idx(rtwdev, ccx->nhm_config,
6420 				      ccx->nhm_unit_mask, period, bb->phy_idx);
6421 
6422 		env->nhm_mntr_time = para->mntr_time;
6423 		env->ccx_period = period;
6424 		env->ccx_unit_idx = unit_idx;
6425 	}
6426 
6427 	if (para->nhm_incld_cca != env->nhm_include_cca) {
6428 		rtw89_phy_write32_idx(rtwdev, ccx->nhm_config,
6429 				      ccx->nhm_include_cca_mask, para->nhm_incld_cca,
6430 				      bb->phy_idx);
6431 
6432 		env->nhm_include_cca = para->nhm_incld_cca;
6433 	}
6434 
6435 	rtw89_phy_nhm_th_update(rtwdev, bb);
6436 
6437 	return 0;
6438 }
6439 
__rtw89_phy_nhm_trigger(struct rtw89_dev * rtwdev,struct rtw89_bb_ctx * bb)6440 static void __rtw89_phy_nhm_trigger(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
6441 {
6442 	struct rtw89_ccx_para_info para = {
6443 		.mntr_time = RTW89_NHM_MNTR_TIME,
6444 		.rac_lv = RTW89_RAC_LV_1,
6445 		.nhm_incld_cca = true,
6446 	};
6447 
6448 	rtw89_phy_ccx_racing_release(rtwdev, bb);
6449 
6450 	rtw89_phy_nhm_set(rtwdev, bb, &para);
6451 	rtw89_phy_ccx_trigger(rtwdev, bb, RTW89_PHY_ENV_MON_NHM);
6452 }
6453 
rtw89_phy_nhm_trigger(struct rtw89_dev * rtwdev)6454 void rtw89_phy_nhm_trigger(struct rtw89_dev *rtwdev)
6455 {
6456 	const struct rtw89_chip_info *chip = rtwdev->chip;
6457 	struct rtw89_bb_ctx *bb;
6458 
6459 	if (!chip->support_noise)
6460 		return;
6461 
6462 	rtw89_for_each_active_bb(rtwdev, bb)
6463 		__rtw89_phy_nhm_trigger(rtwdev, bb);
6464 }
6465 
/* Configure the IFS-CLM measurement engine for one BB.
 *
 * Validates @para, arbitrates CCX ownership, then programs the monitor
 * period/unit and the per-application IFS thresholds.  Register writes are
 * skipped when the cached configuration already matches.
 *
 * Returns 0 on success, -EINVAL on bad parameters or lost CCX arbitration.
 */
static int rtw89_phy_ifs_clm_set(struct rtw89_dev *rtwdev,
				 struct rtw89_bb_ctx *bb,
				 struct rtw89_ccx_para_info *para)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &bb->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;
	u32 period = 0;
	u32 unit_idx = 0;

	if (para->mntr_time == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "[WARN] MNTR_TIME is 0\n");
		return -EINVAL;
	}

	if (rtw89_phy_ccx_racing_ctrl(rtwdev, bb, para->rac_lv))
		return -EINVAL;

	/* reprogram period/unit only when the monitor time changed */
	if (para->mntr_time != env->ifs_clm_mntr_time) {
		rtw89_phy_ccx_ms_to_period_unit(rtwdev, para->mntr_time,
						&period, &unit_idx);
		rtw89_phy_write32_idx(rtwdev, ccx->ifs_cnt_addr,
				      ccx->ifs_clm_period_mask, period, bb->phy_idx);
		rtw89_phy_write32_idx(rtwdev, ccx->ifs_cnt_addr,
				      ccx->ifs_clm_cnt_unit_mask,
				      unit_idx, bb->phy_idx);

		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "Update IFS-CLM time ((%d)) -> ((%d))\n",
			    env->ifs_clm_mntr_time, para->mntr_time);

		env->ifs_clm_mntr_time = para->mntr_time;
		env->ccx_period = (u16)period;
		env->ccx_unit_idx = (u8)unit_idx;
	}

	/* rewrite thresholds only when the application profile changed */
	if (rtw89_phy_ifs_clm_th_update_check(rtwdev, bb, para)) {
		env->ifs_clm_app = para->ifs_clm_app;
		rtw89_phy_ifs_clm_set_th_reg(rtwdev, bb);
	}

	return 0;
}
6510 
/* Periodic environment-monitor work for one BB.
 *
 * Harvests the previous IFS-CLM result, releases CCX arbitration, and
 * re-arms a background IFS-CLM measurement.  Does nothing while debugfs
 * manual CCX control is active.
 */
static void __rtw89_phy_env_monitor_track(struct rtw89_dev *rtwdev,
					  struct rtw89_bb_ctx *bb)
{
	struct rtw89_env_monitor_info *env = &bb->env_monitor;
	struct rtw89_ccx_para_info para = {};
	u8 chk_result = RTW89_PHY_ENV_MON_CCX_FAIL;

	env->ccx_watchdog_result = RTW89_PHY_ENV_MON_CCX_FAIL;
	if (env->ccx_manual_ctrl) {
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "CCX in manual ctrl\n");
		return;
	}

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "BB-%d env_monitor track\n", bb->phy_idx);

	/* only ifs_clm for now */
	if (rtw89_phy_ifs_clm_get_result(rtwdev, bb))
		env->ccx_watchdog_result |= RTW89_PHY_ENV_MON_IFS_CLM;

	/* re-arm the next background measurement window */
	rtw89_phy_ccx_racing_release(rtwdev, bb);
	para.mntr_time = 1900;
	para.rac_lv = RTW89_RAC_LV_1;
	para.ifs_clm_app = RTW89_IFS_CLM_BACKGROUND;

	if (rtw89_phy_ifs_clm_set(rtwdev, bb, &para) == 0)
		chk_result |= RTW89_PHY_ENV_MON_IFS_CLM;
	if (chk_result)
		rtw89_phy_ccx_trigger(rtwdev, bb, chk_result);

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "get_result=0x%x, chk_result:0x%x\n",
		    env->ccx_watchdog_result, chk_result);
}
6546 
/* Run the environment-monitor watchdog on every active BB. */
void rtw89_phy_env_monitor_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_bb_ctx *bb;

	rtw89_for_each_active_bb(rtwdev, bb)
		__rtw89_phy_env_monitor_track(rtwdev, bb);
}
6554 
rtw89_physts_ie_page_valid(struct rtw89_dev * rtwdev,enum rtw89_phy_status_bitmap * ie_page)6555 static bool rtw89_physts_ie_page_valid(struct rtw89_dev *rtwdev,
6556 				       enum rtw89_phy_status_bitmap *ie_page)
6557 {
6558 	const struct rtw89_chip_info *chip = rtwdev->chip;
6559 
6560 	if (*ie_page >= RTW89_PHYSTS_BITMAP_NUM ||
6561 	    *ie_page == RTW89_RSVD_9)
6562 		return false;
6563 	else if (*ie_page > RTW89_RSVD_9 && *ie_page < RTW89_EHT_PKT)
6564 		*ie_page -= 1;
6565 
6566 	if (*ie_page == RTW89_EHT_PKT && chip->chip_gen == RTW89_CHIP_AX)
6567 		return false;
6568 
6569 	return true;
6570 }
6571 
rtw89_phy_get_ie_bitmap_addr(struct rtw89_dev * rtwdev,enum rtw89_phy_status_bitmap ie_page)6572 static u32 rtw89_phy_get_ie_bitmap_addr(struct rtw89_dev *rtwdev,
6573 					enum rtw89_phy_status_bitmap ie_page)
6574 {
6575 	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
6576 	static const u8 ie_page_shift = 2;
6577 
6578 	if (ie_page == RTW89_EHT_PKT)
6579 		return phy->physt_bmp_eht;
6580 
6581 	return phy->physt_bmp_start + (ie_page << ie_page_shift);
6582 }
6583 
rtw89_physts_get_ie_bitmap(struct rtw89_dev * rtwdev,enum rtw89_phy_status_bitmap ie_page,enum rtw89_phy_idx phy_idx)6584 static u32 rtw89_physts_get_ie_bitmap(struct rtw89_dev *rtwdev,
6585 				      enum rtw89_phy_status_bitmap ie_page,
6586 				      enum rtw89_phy_idx phy_idx)
6587 {
6588 	u32 addr;
6589 
6590 	if (!rtw89_physts_ie_page_valid(rtwdev, &ie_page))
6591 		return 0;
6592 
6593 	addr = rtw89_phy_get_ie_bitmap_addr(rtwdev, ie_page);
6594 
6595 	return rtw89_phy_read32_idx(rtwdev, addr, MASKDWORD, phy_idx);
6596 }
6597 
/* Write the IE enable bitmap of one PHY-status page.  Invalid pages are
 * ignored; on RTL8852A only the supported subset of bits is written.
 */
static void rtw89_physts_set_ie_bitmap(struct rtw89_dev *rtwdev,
				       enum rtw89_phy_status_bitmap ie_page,
				       u32 val, enum rtw89_phy_idx phy_idx)
{
	u32 addr;

	if (!rtw89_physts_ie_page_valid(rtwdev, &ie_page))
		return;

	if (rtwdev->chip->chip_id == RTL8852A)
		val &= B_PHY_STS_BITMAP_MSK_52A;

	addr = rtw89_phy_get_ie_bitmap_addr(rtwdev, ie_page);
	rtw89_phy_write32_idx(rtwdev, addr, MASKDWORD, val, phy_idx);
}
6614 
/* Enable/disable PHY-status reports for failed/broken triggers.
 * Reporting is enabled by CLEARING the two "disable trigger" bits.
 */
static void rtw89_physts_enable_fail_report(struct rtw89_dev *rtwdev,
					    bool enable,
					    enum rtw89_phy_idx phy_idx)
{
	const struct rtw89_physts_regs *physts = rtwdev->chip->phy_def->physts;

	if (enable) {
		rtw89_phy_write32_idx_clr(rtwdev, physts->setting_addr,
					  physts->dis_trigger_fail_mask, phy_idx);
		rtw89_phy_write32_idx_clr(rtwdev, physts->setting_addr,
					  physts->dis_trigger_brk_mask, phy_idx);
		return;
	}

	rtw89_phy_write32_idx_set(rtwdev, physts->setting_addr,
				  physts->dis_trigger_fail_mask, phy_idx);
	rtw89_phy_write32_idx_set(rtwdev, physts->setting_addr,
				  physts->dis_trigger_brk_mask, phy_idx);
}
6634 
rtw89_physts_enable_hdr_2(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy_idx)6635 static void rtw89_physts_enable_hdr_2(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
6636 {
6637 	const struct rtw89_chip_info *chip = rtwdev->chip;
6638 
6639 	if (chip->chip_gen == RTW89_CHIP_AX || chip->chip_id == RTL8922A)
6640 		return;
6641 
6642 	rtw89_phy_write32_idx_set(rtwdev, R_STS_HDR2_PARSING_BE4,
6643 				  B_STS_HDR2_PARSING_BE4, phy_idx);
6644 }
6645 
/* Initialize PHY-status parsing for one PHY.
 *
 * Disables fail/broken trigger reports, enables hdr_2 where supported, then
 * walks every IE page and adjusts which IEs are reported: DL-MU pages get
 * the MU definition IE, trigger-based PPDUs additionally the common OFDM IE,
 * and per-packet pages drop the extended-path IEs.
 */
static void __rtw89_physts_parsing_init(struct rtw89_dev *rtwdev,
					enum rtw89_phy_idx phy_idx)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 val;
	u8 i;

	rtw89_physts_enable_fail_report(rtwdev, false, phy_idx);

	/* enable hdr_2 for 8922D (PHYSTS_BE_GEN2 above) */
	rtw89_physts_enable_hdr_2(rtwdev, phy_idx);

	for (i = 0; i < RTW89_PHYSTS_BITMAP_NUM; i++) {
		/* skip the pages rtw89_physts_ie_page_valid() would reject */
		if (i == RTW89_RSVD_9 ||
		    (i == RTW89_EHT_PKT && chip->chip_gen == RTW89_CHIP_AX))
			continue;

		val = rtw89_physts_get_ie_bitmap(rtwdev, i, phy_idx);
		if (i == RTW89_HE_MU || i == RTW89_VHT_MU) {
			val |= BIT(RTW89_PHYSTS_IE13_DL_MU_DEF);
		} else if (i == RTW89_TRIG_BASE_PPDU) {
			val |= BIT(RTW89_PHYSTS_IE13_DL_MU_DEF) |
			       BIT(RTW89_PHYSTS_IE01_CMN_OFDM);
		} else if (i >= RTW89_CCK_PKT) {
			/* per-packet pages do not need the ext-path IEs */
			val &= ~(GENMASK(RTW89_PHYSTS_IE07_CMN_EXT_PATH_D,
					 RTW89_PHYSTS_IE04_CMN_EXT_PATH_A));

			if (i == RTW89_CCK_PKT)
				val |= BIT(RTW89_PHYSTS_IE01_CMN_OFDM);
			else if (i >= RTW89_HT_PKT)
				val |= BIT(RTW89_PHYSTS_IE20_DBG_OFDM_FD_USER_SEG_0);
		}

		rtw89_physts_set_ie_bitmap(rtwdev, i, val, phy_idx);
	}
}
6682 
/* Initialize PHY-status parsing on PHY 0 and, when DBCC is on, PHY 1. */
static void rtw89_physts_parsing_init(struct rtw89_dev *rtwdev)
{
	__rtw89_physts_parsing_init(rtwdev, RTW89_PHY_0);
	if (rtwdev->dbcc_en)
		__rtw89_physts_parsing_init(rtwdev, RTW89_PHY_1);
}
6689 
/* Read one hardware gain-error table (LNA or TIA, 2G or 5G) into the DIG
 * state.
 *
 * Each register field holds a signed 4-bit gain error; it is sign-extended
 * and added to the nominal gain of the corresponding index, which steps by
 * DIG_GAIN per entry.
 */
static void rtw89_phy_dig_read_gain_table(struct rtw89_dev *rtwdev,
					  struct rtw89_bb_ctx *bb, int type)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_phy_dig_gain_cfg *cfg;
	struct rtw89_dig_info *dig = &bb->dig;
	const char *msg;
	u8 i;
	s8 gain_base;
	s8 *gain_arr;
	u32 tmp;

	/* pick destination array, nominal base gain and register table */
	switch (type) {
	case RTW89_DIG_GAIN_LNA_G:
		gain_arr = dig->lna_gain_g;
		gain_base = LNA0_GAIN;
		cfg = chip->dig_table->cfg_lna_g;
		msg = "lna_gain_g";
		break;
	case RTW89_DIG_GAIN_TIA_G:
		gain_arr = dig->tia_gain_g;
		gain_base = TIA0_GAIN_G;
		cfg = chip->dig_table->cfg_tia_g;
		msg = "tia_gain_g";
		break;
	case RTW89_DIG_GAIN_LNA_A:
		gain_arr = dig->lna_gain_a;
		gain_base = LNA0_GAIN;
		cfg = chip->dig_table->cfg_lna_a;
		msg = "lna_gain_a";
		break;
	case RTW89_DIG_GAIN_TIA_A:
		gain_arr = dig->tia_gain_a;
		gain_base = TIA0_GAIN_A;
		cfg = chip->dig_table->cfg_tia_a;
		msg = "tia_gain_a";
		break;
	default:
		return;
	}

	for (i = 0; i < cfg->size; i++) {
		tmp = rtw89_phy_read32_idx(rtwdev, cfg->table[i].addr,
					   cfg->table[i].mask, bb->phy_idx);
		tmp >>= DIG_GAIN_SHIFT;
		/* signed 4-bit error on top of the nominal gain */
		gain_arr[i] = sign_extend32(tmp, U4_MAX_BIT) + gain_base;
		gain_base += DIG_GAIN;

		rtw89_debug(rtwdev, RTW89_DBG_DIG, "%s[%d]=%d\n",
			    msg, i, gain_arr[i]);
	}
}
6742 
/* Refresh all DIG gain parameters from hardware: the in-band packet power
 * reference and every LNA/TIA gain table.  Skipped on chips without IGI
 * support.
 */
static void rtw89_phy_dig_update_gain_para(struct rtw89_dev *rtwdev,
					   struct rtw89_bb_ctx *bb)
{
	struct rtw89_dig_info *dig = &bb->dig;
	u32 tmp;
	u8 i;

	if (!rtwdev->hal.support_igi)
		return;

	tmp = rtw89_phy_read32_idx(rtwdev, R_PATH0_IB_PKPW,
				   B_PATH0_IB_PKPW_MSK, bb->phy_idx);
	/* signed 8-bit in-band packet power */
	dig->ib_pkpwr = sign_extend32(tmp >> DIG_GAIN_SHIFT, U8_MAX_BIT);
	dig->ib_pbk = rtw89_phy_read32_idx(rtwdev, R_PATH0_IB_PBK,
					   B_PATH0_IB_PBK_MSK, bb->phy_idx);
	rtw89_debug(rtwdev, RTW89_DBG_DIG, "ib_pkpwr=%d, ib_pbk=%d\n",
		    dig->ib_pkpwr, dig->ib_pbk);

	for (i = RTW89_DIG_GAIN_LNA_G; i < RTW89_DIG_GAIN_MAX; i++)
		rtw89_phy_dig_read_gain_table(rtwdev, bb, i);
}
6764 
/* RSSI assumed by DIG while nothing is associated */
static const u8 rssi_nolink = 22;
/* RSSI thresholds selecting LNA/TIA gain indexes (driver RSSI scale —
 * assumed half-dB units, TODO confirm against rssi_min usage)
 */
static const u8 igi_rssi_th[IGI_RSSI_TH_NUM] = {68, 84, 90, 98, 104};
/* false-alarm thresholds (permil of airtime) grading channel noise,
 * per band and for the unassociated case
 */
static const u16 fa_th_2g[FA_TH_NUM] = {22, 44, 66, 88};
static const u16 fa_th_5g[FA_TH_NUM] = {4, 8, 12, 16};
static const u16 fa_th_nolink[FA_TH_NUM] = {196, 352, 440, 528};
6770 
rtw89_phy_dig_update_rssi_info(struct rtw89_dev * rtwdev,struct rtw89_bb_ctx * bb)6771 static void rtw89_phy_dig_update_rssi_info(struct rtw89_dev *rtwdev,
6772 					   struct rtw89_bb_ctx *bb)
6773 {
6774 	struct rtw89_phy_ch_info *ch_info = &bb->ch_info;
6775 	struct rtw89_dig_info *dig = &bb->dig;
6776 	bool is_linked = rtwdev->total_sta_assoc > 0;
6777 
6778 	if (is_linked) {
6779 		dig->igi_rssi = ch_info->rssi_min >> 1;
6780 	} else {
6781 		rtw89_debug(rtwdev, RTW89_DBG_DIG, "RSSI update : NO Link\n");
6782 		dig->igi_rssi = rssi_nolink;
6783 	}
6784 }
6785 
rtw89_phy_dig_update_para(struct rtw89_dev * rtwdev,struct rtw89_bb_ctx * bb)6786 static void rtw89_phy_dig_update_para(struct rtw89_dev *rtwdev,
6787 				      struct rtw89_bb_ctx *bb)
6788 {
6789 	const struct rtw89_chan *chan = rtw89_mgnt_chan_get(rtwdev, bb->phy_idx);
6790 	struct rtw89_dig_info *dig = &bb->dig;
6791 	bool is_linked = rtwdev->total_sta_assoc > 0;
6792 	const u16 *fa_th_src = NULL;
6793 
6794 	switch (chan->band_type) {
6795 	case RTW89_BAND_2G:
6796 		dig->lna_gain = dig->lna_gain_g;
6797 		dig->tia_gain = dig->tia_gain_g;
6798 		fa_th_src = is_linked ? fa_th_2g : fa_th_nolink;
6799 		dig->force_gaincode_idx_en = false;
6800 		dig->dyn_pd_th_en = true;
6801 		break;
6802 	case RTW89_BAND_5G:
6803 	default:
6804 		dig->lna_gain = dig->lna_gain_a;
6805 		dig->tia_gain = dig->tia_gain_a;
6806 		fa_th_src = is_linked ? fa_th_5g : fa_th_nolink;
6807 		dig->force_gaincode_idx_en = true;
6808 		dig->dyn_pd_th_en = true;
6809 		break;
6810 	}
6811 	memcpy(dig->fa_th, fa_th_src, sizeof(dig->fa_th));
6812 	memcpy(dig->igi_rssi_th, igi_rssi_th, sizeof(dig->igi_rssi_th));
6813 }
6814 
/* PD lower threshold offset and IGI bounds used by dynamic DIG */
static const u8 pd_low_th_offset = 16, dynamic_igi_min = 0x20;
static const u8 igi_max_performance_mode = 0x5a;
/* zero-initialized: dynamic PD threshold starts disabled/at minimum */
static const u8 dynamic_pd_threshold_max;
6818 
rtw89_phy_dig_para_reset(struct rtw89_dev * rtwdev,struct rtw89_bb_ctx * bb)6819 static void rtw89_phy_dig_para_reset(struct rtw89_dev *rtwdev,
6820 				     struct rtw89_bb_ctx *bb)
6821 {
6822 	struct rtw89_dig_info *dig = &bb->dig;
6823 
6824 	dig->cur_gaincode.lna_idx = LNA_IDX_MAX;
6825 	dig->cur_gaincode.tia_idx = TIA_IDX_MAX;
6826 	dig->cur_gaincode.rxb_idx = RXB_IDX_MAX;
6827 	dig->force_gaincode.lna_idx = LNA_IDX_MAX;
6828 	dig->force_gaincode.tia_idx = TIA_IDX_MAX;
6829 	dig->force_gaincode.rxb_idx = RXB_IDX_MAX;
6830 
6831 	dig->dyn_igi_max = igi_max_performance_mode;
6832 	dig->dyn_igi_min = dynamic_igi_min;
6833 	dig->dyn_pd_th_max = dynamic_pd_threshold_max;
6834 	dig->pd_low_th_ofst = pd_low_th_offset;
6835 	dig->is_linked_pre = false;
6836 }
6837 
/* Initialize DIG for one BB: load gain parameters, then reset state. */
static void __rtw89_phy_dig_init(struct rtw89_dev *rtwdev,
				 struct rtw89_bb_ctx *bb)
{
	rtw89_debug(rtwdev, RTW89_DBG_DIG, "BB-%d dig_init\n", bb->phy_idx);

	rtw89_phy_dig_update_gain_para(rtwdev, bb);
	rtw89_phy_dig_reset(rtwdev, bb);
}
6846 
/* Initialize DIG on every BB the hardware provides. */
static void rtw89_phy_dig_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_bb_ctx *bb;

	rtw89_for_each_capab_bb(rtwdev, bb)
		__rtw89_phy_dig_init(rtwdev, bb);
}
6854 
/* Map an RSSI value onto an LNA gain index: the stronger the signal, the
 * lower the index (IDX1 = strongest bucket, IDX6 = weakest).
 */
static u8 rtw89_phy_dig_lna_idx_by_rssi(struct rtw89_dev *rtwdev,
					struct rtw89_bb_ctx *bb, u8 rssi)
{
	struct rtw89_dig_info *dig = &bb->dig;

	if (rssi >= dig->igi_rssi_th[4])
		return RTW89_DIG_GAIN_LNA_IDX1;
	if (rssi >= dig->igi_rssi_th[3])
		return RTW89_DIG_GAIN_LNA_IDX2;
	if (rssi >= dig->igi_rssi_th[2])
		return RTW89_DIG_GAIN_LNA_IDX3;
	if (rssi >= dig->igi_rssi_th[1])
		return RTW89_DIG_GAIN_LNA_IDX4;
	if (rssi >= dig->igi_rssi_th[0])
		return RTW89_DIG_GAIN_LNA_IDX5;

	return RTW89_DIG_GAIN_LNA_IDX6;
}
6876 
/* Map an RSSI value onto a TIA gain index: only signals below the lowest
 * threshold need the higher-gain setting (IDX1).
 */
static u8 rtw89_phy_dig_tia_idx_by_rssi(struct rtw89_dev *rtwdev,
					struct rtw89_bb_ctx *bb, u8 rssi)
{
	struct rtw89_dig_info *dig = &bb->dig;

	return rssi < dig->igi_rssi_th[0] ? RTW89_DIG_GAIN_TIA_IDX1 :
					    RTW89_DIG_GAIN_TIA_IDX0;
}
6890 
6891 #define IB_PBK_BASE 110
6892 #define WB_RSSI_BASE 10
rtw89_phy_dig_rxb_idx_by_rssi(struct rtw89_dev * rtwdev,struct rtw89_bb_ctx * bb,u8 rssi,struct rtw89_agc_gaincode_set * set)6893 static u8 rtw89_phy_dig_rxb_idx_by_rssi(struct rtw89_dev *rtwdev,
6894 					struct rtw89_bb_ctx *bb, u8 rssi,
6895 					struct rtw89_agc_gaincode_set *set)
6896 {
6897 	struct rtw89_dig_info *dig = &bb->dig;
6898 	s8 lna_gain = dig->lna_gain[set->lna_idx];
6899 	s8 tia_gain = dig->tia_gain[set->tia_idx];
6900 	s32 wb_rssi = rssi + lna_gain + tia_gain;
6901 	s32 rxb_idx_tmp = IB_PBK_BASE + WB_RSSI_BASE;
6902 	u8 rxb_idx;
6903 
6904 	rxb_idx_tmp += dig->ib_pkpwr - dig->ib_pbk - wb_rssi;
6905 	rxb_idx = clamp_t(s32, rxb_idx_tmp, RXB_IDX_MIN, RXB_IDX_MAX);
6906 
6907 	rtw89_debug(rtwdev, RTW89_DBG_DIG, "wb_rssi=%03d, rxb_idx_tmp=%03d\n",
6908 		    wb_rssi, rxb_idx_tmp);
6909 
6910 	return rxb_idx;
6911 }
6912 
/* Fill a complete AGC gain-code set (LNA, TIA, RXB) for the given RSSI.
 * The RXB index depends on the LNA/TIA choices, so it is computed last.
 */
static void rtw89_phy_dig_gaincode_by_rssi(struct rtw89_dev *rtwdev,
					   struct rtw89_bb_ctx *bb, u8 rssi,
					   struct rtw89_agc_gaincode_set *set)
{
	set->lna_idx = rtw89_phy_dig_lna_idx_by_rssi(rtwdev, bb, rssi);
	set->tia_idx = rtw89_phy_dig_tia_idx_by_rssi(rtwdev, bb, rssi);
	set->rxb_idx = rtw89_phy_dig_rxb_idx_by_rssi(rtwdev, bb, rssi, set);

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "final_rssi=%03d, (lna,tia,rab)=(%d,%d,%02d)\n",
		    rssi, set->lna_idx, set->tia_idx, set->rxb_idx);
}
6925 
#define IGI_OFFSET_MAX 25
#define IGI_OFFSET_MUL 2
/* Adapt the IGI offset to the measured noise level.
 *
 * Grades the summed CCK+OFDM false-alarm permil against the band-specific
 * thresholds, then raises the previous offset by IGI_OFFSET_MUL per noisy
 * level (capped at IGI_OFFSET_MAX).  A quiet environment with an already
 * small offset snaps the offset back to 0.
 */
static void rtw89_phy_dig_igi_offset_by_env(struct rtw89_dev *rtwdev,
					    struct rtw89_bb_ctx *bb)
{
	struct rtw89_dig_info *dig = &bb->dig;
	struct rtw89_env_monitor_info *env = &bb->env_monitor;
	enum rtw89_dig_noisy_level noisy_lv;
	u8 igi_offset = dig->fa_rssi_ofst;
	u16 fa_ratio = 0;

	fa_ratio = env->ifs_clm_cck_fa_permil + env->ifs_clm_ofdm_fa_permil;

	if (fa_ratio < dig->fa_th[0])
		noisy_lv = RTW89_DIG_NOISY_LEVEL0;
	else if (fa_ratio < dig->fa_th[1])
		noisy_lv = RTW89_DIG_NOISY_LEVEL1;
	else if (fa_ratio < dig->fa_th[2])
		noisy_lv = RTW89_DIG_NOISY_LEVEL2;
	else if (fa_ratio < dig->fa_th[3])
		noisy_lv = RTW89_DIG_NOISY_LEVEL3;
	else
		noisy_lv = RTW89_DIG_NOISY_LEVEL_MAX;

	/* NOTE(review): the else-branch multiplies the enum value directly;
	 * assumes the NOISY_LEVEL constants are numeric step counts
	 */
	if (noisy_lv == RTW89_DIG_NOISY_LEVEL0 && igi_offset < 2)
		igi_offset = 0;
	else
		igi_offset += noisy_lv * IGI_OFFSET_MUL;

	igi_offset = min_t(u8, igi_offset, IGI_OFFSET_MAX);
	dig->fa_rssi_ofst = igi_offset;

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "fa_th: [+6 (%d) +4 (%d) +2 (%d) 0 (%d) -2 ]\n",
		    dig->fa_th[3], dig->fa_th[2], dig->fa_th[1], dig->fa_th[0]);

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "fa(CCK,OFDM,ALL)=(%d,%d,%d)%%, noisy_lv=%d, ofst=%d\n",
		    env->ifs_clm_cck_fa_permil, env->ifs_clm_ofdm_fa_permil,
		    env->ifs_clm_cck_fa_permil + env->ifs_clm_ofdm_fa_permil,
		    noisy_lv, igi_offset);
}
6968 
/* Program the initial LNA gain index into both RF paths (p0 and p1). */
static void rtw89_phy_dig_set_lna_idx(struct rtw89_dev *rtwdev,
				      struct rtw89_bb_ctx *bb, u8 lna_idx)
{
	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;

	rtw89_phy_write32_idx(rtwdev, dig_regs->p0_lna_init.addr,
			      dig_regs->p0_lna_init.mask, lna_idx, bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, dig_regs->p1_lna_init.addr,
			      dig_regs->p1_lna_init.mask, lna_idx, bb->phy_idx);
}
6979 
/* Program the initial TIA gain index into both RF paths (p0 and p1). */
static void rtw89_phy_dig_set_tia_idx(struct rtw89_dev *rtwdev,
				      struct rtw89_bb_ctx *bb, u8 tia_idx)
{
	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;

	rtw89_phy_write32_idx(rtwdev, dig_regs->p0_tia_init.addr,
			      dig_regs->p0_tia_init.mask, tia_idx, bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, dig_regs->p1_tia_init.addr,
			      dig_regs->p1_tia_init.mask, tia_idx, bb->phy_idx);
}
6990 
/* Program the initial RXB (baseband gain) index into both RF paths. */
static void rtw89_phy_dig_set_rxb_idx(struct rtw89_dev *rtwdev,
				      struct rtw89_bb_ctx *bb, u8 rxb_idx)
{
	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;

	rtw89_phy_write32_idx(rtwdev, dig_regs->p0_rxb_init.addr,
			      dig_regs->p0_rxb_init.mask, rxb_idx, bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, dig_regs->p1_rxb_init.addr,
			      dig_regs->p1_rxb_init.mask, rxb_idx, bb->phy_idx);
}
7001 
/* Write a full gain-code set (LNA, TIA, RXB) to the IGI control registers.
 * No-op on hardware that does not support manual IGI control.
 */
static void rtw89_phy_dig_set_igi_cr(struct rtw89_dev *rtwdev,
				     struct rtw89_bb_ctx *bb,
				     const struct rtw89_agc_gaincode_set set)
{
	if (!rtwdev->hal.support_igi)
		return;

	rtw89_phy_dig_set_lna_idx(rtwdev, bb, set.lna_idx);
	rtw89_phy_dig_set_tia_idx(rtwdev, bb, set.tia_idx);
	rtw89_phy_dig_set_rxb_idx(rtwdev, bb, set.rxb_idx);

	rtw89_debug(rtwdev, RTW89_DBG_DIG, "Set (lna,tia,rxb)=((%d,%d,%02d))\n",
		    set.lna_idx, set.tia_idx, set.rxb_idx);
}
7016 
/* Enable/disable "sub-band digital AGC follows packet AGC" on primary and
 * secondary 20MHz segments of both paths. AX-generation chips only; later
 * generations do not expose these controls, so bail out for them.
 */
static void rtw89_phy_dig_sdagc_follow_pagc_config(struct rtw89_dev *rtwdev,
						   struct rtw89_bb_ctx *bb,
						   bool enable)
{
	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;

	if (rtwdev->chip->chip_gen != RTW89_CHIP_AX)
		return;

	rtw89_phy_write32_idx(rtwdev, dig_regs->p0_p20_pagcugc_en.addr,
			      dig_regs->p0_p20_pagcugc_en.mask, enable, bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, dig_regs->p0_s20_pagcugc_en.addr,
			      dig_regs->p0_s20_pagcugc_en.mask, enable, bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, dig_regs->p1_p20_pagcugc_en.addr,
			      dig_regs->p1_p20_pagcugc_en.mask, enable, bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, dig_regs->p1_s20_pagcugc_en.addr,
			      dig_regs->p1_s20_pagcugc_en.mask, enable, bb->phy_idx);

	rtw89_debug(rtwdev, RTW89_DBG_DIG, "sdagc_follow_pagc=%d\n", enable);
}
7037 
/* Apply the IGI gain codes: either a debugfs-forced set, or the set derived
 * from the current FA-adjusted RSSI. No-op when IGI is unsupported.
 */
static void rtw89_phy_dig_config_igi(struct rtw89_dev *rtwdev,
				     struct rtw89_bb_ctx *bb)
{
	struct rtw89_dig_info *dig = &bb->dig;

	if (!rtwdev->hal.support_igi)
		return;

	if (dig->force_gaincode_idx_en) {
		rtw89_phy_dig_set_igi_cr(rtwdev, bb, dig->force_gaincode);
		rtw89_debug(rtwdev, RTW89_DBG_DIG,
			    "Force gaincode index enabled.\n");
	} else {
		rtw89_phy_dig_gaincode_by_rssi(rtwdev, bb, dig->igi_fa_rssi,
					       &dig->cur_gaincode);
		rtw89_phy_dig_set_igi_cr(rtwdev, bb, dig->cur_gaincode);
	}
}
7056 
rtw89_phy_dig_cal_under_region(struct rtw89_dev * rtwdev,struct rtw89_bb_ctx * bb,const struct rtw89_chan * chan)7057 static u8 rtw89_phy_dig_cal_under_region(struct rtw89_dev *rtwdev,
7058 					 struct rtw89_bb_ctx *bb,
7059 					 const struct rtw89_chan *chan)
7060 {
7061 	enum rtw89_bandwidth cbw = chan->band_width;
7062 	struct rtw89_dig_info *dig = &bb->dig;
7063 	u8 under_region = dig->pd_low_th_ofst;
7064 
7065 	if (rtwdev->chip->chip_gen == RTW89_CHIP_AX)
7066 		under_region += PD_TH_SB_FLTR_CMP_VAL;
7067 
7068 	switch (cbw) {
7069 	case RTW89_CHANNEL_WIDTH_40:
7070 		under_region += PD_TH_BW40_CMP_VAL;
7071 		break;
7072 	case RTW89_CHANNEL_WIDTH_80:
7073 		under_region += PD_TH_BW80_CMP_VAL;
7074 		break;
7075 	case RTW89_CHANNEL_WIDTH_160:
7076 		under_region += PD_TH_BW160_CMP_VAL;
7077 		break;
7078 	case RTW89_CHANNEL_WIDTH_20:
7079 		fallthrough;
7080 	default:
7081 		under_region += PD_TH_BW20_CMP_VAL;
7082 		break;
7083 	}
7084 
7085 	return under_region;
7086 }
7087 
__rtw89_phy_dig_dyn_pd_th(struct rtw89_dev * rtwdev,struct rtw89_bb_ctx * bb,u8 rssi,bool enable,const struct rtw89_chan * chan)7088 static u32 __rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev,
7089 				     struct rtw89_bb_ctx *bb,
7090 				     u8 rssi, bool enable,
7091 				     const struct rtw89_chan *chan)
7092 {
7093 	struct rtw89_dig_info *dig = &bb->dig;
7094 	u8 ofdm_cca_th, under_region;
7095 	u8 final_rssi;
7096 	u32 pd_val;
7097 
7098 	under_region = rtw89_phy_dig_cal_under_region(rtwdev, bb, chan);
7099 	dig->dyn_pd_th_max = dig->igi_rssi;
7100 
7101 	final_rssi = min_t(u8, rssi, dig->igi_rssi);
7102 	ofdm_cca_th = clamp_t(u8, final_rssi, PD_TH_MIN_RSSI + under_region,
7103 			      PD_TH_MAX_RSSI + under_region);
7104 
7105 	if (enable) {
7106 		pd_val = (ofdm_cca_th - under_region - PD_TH_MIN_RSSI) >> 1;
7107 		rtw89_debug(rtwdev, RTW89_DBG_DIG,
7108 			    "igi=%d, ofdm_ccaTH=%d, backoff=%d, PD_low=%d\n",
7109 			    final_rssi, ofdm_cca_th, under_region, pd_val);
7110 	} else {
7111 		pd_val = 0;
7112 		rtw89_debug(rtwdev, RTW89_DBG_DIG,
7113 			    "Dynamic PD th disabled, Set PD_low_bd=0\n");
7114 	}
7115 
7116 	return pd_val;
7117 }
7118 
/* Apply the dynamic packet-detect thresholds to hardware.
 * Writes the OFDM PD lower bound (backed up in dig->bak_dig so a later
 * resume can restore it), then — only on hardware with CCK PD support —
 * derives and programs the CCK (b-mode) PD threshold.
 */
static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev,
				    struct rtw89_bb_ctx *bb,
				    u8 rssi, bool enable)
{
	const struct rtw89_chan *chan = rtw89_mgnt_chan_get(rtwdev, bb->phy_idx);
	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
	struct rtw89_dig_info *dig = &bb->dig;
	u8 final_rssi, under_region = dig->pd_low_th_ofst;
	s8 cck_cca_th;
	u32 pd_val;

	pd_val = __rtw89_phy_dig_dyn_pd_th(rtwdev, bb, rssi, enable, chan);
	dig->bak_dig = pd_val;

	rtw89_phy_write32_idx(rtwdev, dig_regs->seg0_pd_reg,
			      dig_regs->pd_lower_bound_mask, pd_val, bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, dig_regs->seg0_pd_reg,
			      dig_regs->pd_spatial_reuse_en, enable, bb->phy_idx);

	if (!rtwdev->hal.support_cckpd)
		return;

	final_rssi = min_t(u8, rssi, dig->igi_rssi);
	under_region = rtw89_phy_dig_cal_under_region(rtwdev, bb, chan);
	cck_cca_th = max_t(s8, final_rssi - under_region, CCKPD_TH_MIN_RSSI);
	/* register value is the (negative) dB offset relative to IGI_RSSI_MAX,
	 * two's-complement encoded in an unsigned field
	 */
	pd_val = (u32)(cck_cca_th - IGI_RSSI_MAX);

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "igi=%d, cck_ccaTH=%d, backoff=%d, cck_PD_low=((%d))dB\n",
		    final_rssi, cck_cca_th, under_region, pd_val);

	rtw89_phy_write32_idx(rtwdev, dig_regs->bmode_pd_reg,
			      dig_regs->bmode_cca_rssi_limit_en, enable, bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, dig_regs->bmode_pd_lower_bound_reg,
			      dig_regs->bmode_rssi_nocca_low_th_mask, pd_val, bb->phy_idx);
}
7155 
/* Reset DIG to its no-link defaults: clear the bypass flag, restore default
 * parameters and gain codes, disable dynamic PD (rssi_nolink is a file-scope
 * default), stop SDAGC-follow-PAGC, and reload the parameter set.
 */
void rtw89_phy_dig_reset(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
{
	struct rtw89_dig_info *dig = &bb->dig;

	dig->bypass_dig = false;
	rtw89_phy_dig_para_reset(rtwdev, bb);
	rtw89_phy_dig_set_igi_cr(rtwdev, bb, dig->force_gaincode);
	rtw89_phy_dig_dyn_pd_th(rtwdev, bb, rssi_nolink, false);
	rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, bb, false);
	rtw89_phy_dig_update_para(rtwdev, bb);
}
7167 
#define IGI_RSSI_MIN 10
#define ABS_IGI_MIN 0xc
/* Compute the FA-adjusted IGI RSSI and its dynamic [min, max] window.
 * The window floor is the tracked RSSI minus IGI_RSSI_MIN (never below
 * ABS_IGI_MIN); the ceiling adds IGI_OFFSET_MAX but never exceeds the
 * file-scope igi_max_performance_mode. The environment-derived offset is
 * then folded in and the result clamped to the window; if the window is
 * inverted, the ceiling wins.
 */
static
void rtw89_phy_cal_igi_fa_rssi(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
{
	struct rtw89_dig_info *dig = &bb->dig;
	u8 igi_min;

	rtw89_phy_dig_igi_offset_by_env(rtwdev, bb);

	igi_min = max_t(int, dig->igi_rssi - IGI_RSSI_MIN, 0);
	dig->dyn_igi_max = min(igi_min + IGI_OFFSET_MAX, igi_max_performance_mode);
	dig->dyn_igi_min = max(igi_min, ABS_IGI_MIN);

	if (dig->dyn_igi_max >= dig->dyn_igi_min) {
		dig->igi_fa_rssi += dig->fa_rssi_ofst;
		dig->igi_fa_rssi = clamp(dig->igi_fa_rssi, dig->dyn_igi_min,
					 dig->dyn_igi_max);
	} else {
		dig->igi_fa_rssi = dig->dyn_igi_max;
	}
}
7190 
/* Accumulator passed through the station iterator when computing per-role
 * DIG input in MCC mode.
 */
struct rtw89_phy_iter_mcc_dig {
	struct rtw89_vif_link *rtwvif_link; /* link whose stations we match */
	bool has_sta;                       /* at least one matching station seen */
	u8 rssi_min;                        /* minimum average RSSI across them */
};
7196 
/* Compute and push the DIG PD value for one MCC role via firmware H2C.
 *
 * @rssi_min: minimum averaged RSSI among the role's stations (raw units,
 *            halved here — presumably half-dB resolution; TODO confirm)
 * @mcc_role_idx: index of the role within the MCC arrangement
 * @is_linked: whether the role currently has an associated station
 */
static void rtw89_phy_set_mcc_dig(struct rtw89_dev *rtwdev,
				  struct rtw89_vif_link *rtwvif_link,
				  struct rtw89_bb_ctx *bb,
				  u8 rssi_min, u8 mcc_role_idx,
				  bool is_linked)
{
	struct rtw89_dig_info *dig = &bb->dig;
	const struct rtw89_chan *chan;
	u8 pd_val;

	if (is_linked) {
		dig->igi_rssi = rssi_min >> 1;
	} else {
		rtw89_debug(rtwdev, RTW89_DBG_DIG, "RSSI update : NO Link\n");
		dig->igi_rssi = rssi_nolink;
	}
	/* both branches seed the FA-adjusted RSSI from the tracked RSSI */
	dig->igi_fa_rssi = dig->igi_rssi;

	chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
	rtw89_phy_cal_igi_fa_rssi(rtwdev, bb);
	pd_val = __rtw89_phy_dig_dyn_pd_th(rtwdev, bb, dig->igi_fa_rssi,
					   is_linked, chan);
	rtw89_fw_h2c_mcc_dig(rtwdev, rtwvif_link->chanctx_idx,
			     mcc_role_idx, pd_val, true);

	/* fix: message previously lacked the trailing newline, letting the
	 * next log line run into it
	 */
	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "MCC chanctx_idx %d chan %d rssi %d pd_val %d\n",
		    rtwvif_link->chanctx_idx, chan->primary_channel,
		    dig->igi_rssi, pd_val);
}
7228 
rtw89_phy_set_mcc_dig_iter(void * data,struct ieee80211_sta * sta)7229 static void rtw89_phy_set_mcc_dig_iter(void *data, struct ieee80211_sta *sta)
7230 {
7231 	struct rtw89_phy_iter_mcc_dig *mcc_dig = (struct rtw89_phy_iter_mcc_dig *)data;
7232 	unsigned int link_id = mcc_dig->rtwvif_link->link_id;
7233 	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
7234 	struct rtw89_sta_link *rtwsta_link;
7235 
7236 	if (rtwsta->rtwvif != mcc_dig->rtwvif_link->rtwvif)
7237 		return;
7238 
7239 	rtwsta_link = rtwsta->links[link_id];
7240 	if (!rtwsta_link)
7241 		return;
7242 
7243 	mcc_dig->has_sta = true;
7244 	if (ewma_rssi_read(&rtwsta_link->avg_rssi) < mcc_dig->rssi_min)
7245 		mcc_dig->rssi_min = ewma_rssi_read(&rtwsta_link->avg_rssi);
7246 }
7247 
rtw89_phy_dig_mcc(struct rtw89_dev * rtwdev,struct rtw89_bb_ctx * bb)7248 static void rtw89_phy_dig_mcc(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
7249 {
7250 	struct rtw89_phy_iter_mcc_dig mcc_dig;
7251 	struct rtw89_vif_link *rtwvif_link;
7252 	struct rtw89_mcc_links_info info;
7253 	int i;
7254 
7255 	rtw89_mcc_get_links(rtwdev, &info);
7256 	for (i = 0; i < ARRAY_SIZE(info.links); i++) {
7257 		rtwvif_link = info.links[i];
7258 		if (!rtwvif_link)
7259 			continue;
7260 
7261 		memset(&mcc_dig, 0, sizeof(mcc_dig));
7262 		mcc_dig.rtwvif_link = rtwvif_link;
7263 		mcc_dig.has_sta = false;
7264 		mcc_dig.rssi_min = U8_MAX;
7265 		ieee80211_iterate_stations_atomic(rtwdev->hw,
7266 						  rtw89_phy_set_mcc_dig_iter,
7267 						  &mcc_dig);
7268 
7269 		rtw89_phy_set_mcc_dig(rtwdev, rtwvif_link, bb,
7270 				      mcc_dig.rssi_min, i, mcc_dig.has_sta);
7271 	}
7272 }
7273 
/* Pause or resume DIG on one BB context.
 *
 * On pause, the PD lower bound is forced to 0 and spatial reuse disabled.
 * On resume, spatial reuse is re-enabled only if stations are associated,
 * and @restore selects whether the backed-up PD value is re-applied.
 * Idempotent: a repeated pause/resume request is ignored.
 */
static void rtw89_phy_dig_ctrl(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb,
			       bool pause_dig, bool restore)
{
	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
	struct rtw89_dig_info *dig = &bb->dig;
	bool en_dig;
	u32 pd_val;

	if (dig->pause_dig == pause_dig)
		return;

	if (pause_dig) {
		en_dig = false;
		pd_val = 0;
	} else {
		en_dig = rtwdev->total_sta_assoc > 0;
		pd_val = restore ? dig->bak_dig : 0;
	}

	/* fix: message previously lacked the trailing newline, letting the
	 * next log line run into it
	 */
	rtw89_debug(rtwdev, RTW89_DBG_DIG, "%s <%s> PD_low=%d\n", __func__,
		    pause_dig ? "suspend" : "resume", pd_val);

	rtw89_phy_write32_idx(rtwdev, dig_regs->seg0_pd_reg,
			      dig_regs->pd_lower_bound_mask, pd_val, bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, dig_regs->seg0_pd_reg,
			      dig_regs->pd_spatial_reuse_en, en_dig, bb->phy_idx);

	dig->pause_dig = pause_dig;
}
7303 
/* Pause DIG on every active BB context (e.g. around operations that must
 * not race with dynamic gain changes).
 */
void rtw89_phy_dig_suspend(struct rtw89_dev *rtwdev)
{
	struct rtw89_bb_ctx *bb;

	rtw89_for_each_active_bb(rtwdev, bb)
		rtw89_phy_dig_ctrl(rtwdev, bb, true, false);
}
7311 
/* Resume DIG on every active BB context; @restore re-applies the PD value
 * backed up before the suspend instead of starting from 0.
 */
void rtw89_phy_dig_resume(struct rtw89_dev *rtwdev, bool restore)
{
	struct rtw89_bb_ctx *bb;

	rtw89_for_each_active_bb(rtwdev, bb)
		rtw89_phy_dig_ctrl(rtwdev, bb, false, restore);
}
7319 
/* Periodic DIG tracking for one BB context.
 * Order matters: RSSI info is refreshed first; MCC mode takes a dedicated
 * path; a pause request short-circuits; link transitions reload parameters;
 * then the FA-adjusted IGI is computed and applied, followed by the dynamic
 * PD threshold and the SDAGC-follow-PAGC decision.
 */
static void __rtw89_phy_dig(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
{
	struct rtw89_dig_info *dig = &bb->dig;
	bool is_linked = rtwdev->total_sta_assoc > 0;
	enum rtw89_entity_mode mode;

	/* one-shot skip, armed elsewhere (e.g. after a manual override) */
	if (unlikely(dig->bypass_dig)) {
		dig->bypass_dig = false;
		return;
	}

	rtw89_debug(rtwdev, RTW89_DBG_DIG, "BB-%d dig track\n", bb->phy_idx);

	rtw89_phy_dig_update_rssi_info(rtwdev, bb);

	mode = rtw89_get_entity_mode(rtwdev);
	if (mode == RTW89_ENTITY_MODE_MCC) {
		rtw89_phy_dig_mcc(rtwdev, bb);
		return;
	}

	if (unlikely(dig->pause_dig))
		return;

	/* reload parameters on any link-state transition */
	if (!dig->is_linked_pre && is_linked) {
		rtw89_debug(rtwdev, RTW89_DBG_DIG, "First connected\n");
		rtw89_phy_dig_update_para(rtwdev, bb);
		dig->igi_fa_rssi = dig->igi_rssi;
	} else if (dig->is_linked_pre && !is_linked) {
		rtw89_debug(rtwdev, RTW89_DBG_DIG, "First disconnected\n");
		rtw89_phy_dig_update_para(rtwdev, bb);
		dig->igi_fa_rssi = dig->igi_rssi;
	}
	dig->is_linked_pre = is_linked;

	rtw89_phy_cal_igi_fa_rssi(rtwdev, bb);

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "rssi=%03d, dyn_joint(max,min)=(%d,%d), final_rssi=%d\n",
		    dig->igi_rssi, dig->dyn_igi_max, dig->dyn_igi_min,
		    dig->igi_fa_rssi);

	rtw89_phy_dig_config_igi(rtwdev, bb);

	rtw89_phy_dig_dyn_pd_th(rtwdev, bb, dig->igi_fa_rssi, dig->dyn_pd_th_en);

	/* let sub-band DAGC follow packet AGC only when the adjusted IGI
	 * exceeds the dynamic PD ceiling
	 */
	if (dig->dyn_pd_th_en && dig->igi_fa_rssi > dig->dyn_pd_th_max)
		rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, bb, true);
	else
		rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, bb, false);
}
7371 
/* Run DIG tracking on every active BB context. */
void rtw89_phy_dig(struct rtw89_dev *rtwdev)
{
	struct rtw89_bb_ctx *bb;

	rtw89_for_each_active_bb(rtwdev, bb)
		__rtw89_phy_dig(rtwdev, bb);
}
7379 
/* Decide the TX path for one station link based on per-path RSSI.
 * A path must beat the other by RTW89_TX_DIV_RSSI_RAW_TH to win; on a
 * change, the CMAC table is updated via H2C and the RF mode mux registers
 * are reprogrammed for the selected path.
 */
static void __rtw89_phy_tx_path_div_sta_iter(struct rtw89_dev *rtwdev,
					     struct rtw89_sta_link *rtwsta_link)
{
	struct rtw89_hal *hal = &rtwdev->hal;
	u8 rssi_a, rssi_b;
	u32 candidate;

	rssi_a = ewma_rssi_read(&rtwsta_link->rssi[RF_PATH_A]);
	rssi_b = ewma_rssi_read(&rtwsta_link->rssi[RF_PATH_B]);

	/* hysteresis: require a clear margin before switching */
	if (rssi_a > rssi_b + RTW89_TX_DIV_RSSI_RAW_TH)
		candidate = RF_A;
	else if (rssi_b > rssi_a + RTW89_TX_DIV_RSSI_RAW_TH)
		candidate = RF_B;
	else
		return;

	if (hal->antenna_tx == candidate)
		return;

	hal->antenna_tx = candidate;
	rtw89_fw_h2c_txpath_cmac_tbl(rtwdev, rtwsta_link);

	if (hal->antenna_tx == RF_A) {
		rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE, B_P0_RFMODE_MUX, 0x12);
		rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE, B_P1_RFMODE_MUX, 0x11);
	} else if (hal->antenna_tx == RF_B) {
		rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE, B_P0_RFMODE_MUX, 0x11);
		rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE, B_P1_RFMODE_MUX, 0x12);
	}
}
7411 
/* Station iterator for TX path diversity: find the first non-TDLS station
 * link acting as a STA and run path selection on it. @data points to a bool
 * "done" flag so only one station is processed per iteration pass.
 * TX path diversity is not defined for MLD — warn and bail in that case.
 */
static void rtw89_phy_tx_path_div_sta_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
	struct rtw89_dev *rtwdev = rtwsta->rtwdev;
	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
	struct rtw89_vif_link *rtwvif_link;
	struct rtw89_sta_link *rtwsta_link;
	unsigned int link_id;
	bool *done = data;

	if (WARN(ieee80211_vif_is_mld(vif), "MLD mix path_div\n"))
		return;

	if (sta->tdls)
		return;

	if (*done)
		return;

	rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id) {
		rtwvif_link = rtwsta_link->rtwvif_link;
		if (rtwvif_link->wifi_role != RTW89_WIFI_ROLE_STATION)
			continue;

		*done = true;
		__rtw89_phy_tx_path_div_sta_iter(rtwdev, rtwsta_link);
		return;
	}
}
7442 
/* Periodic TX path diversity tracking entry point; no-op unless the
 * hardware supports TX path diversity.
 */
void rtw89_phy_tx_path_div_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_hal *hal = &rtwdev->hal;
	bool done = false;

	if (!hal->tx_path_diversity)
		return;

	ieee80211_iterate_stations_atomic(rtwdev->hw,
					  rtw89_phy_tx_path_div_sta_iter,
					  &done);
}
7455 
/* Antenna-select encodings used by the antenna-diversity registers */
#define ANTDIV_MAIN 0
#define ANTDIV_AUX 1

/* Program the antenna-diversity selection registers according to
 * hal->antenna_tx: RF_B maps to the AUX antenna as default, anything else
 * to MAIN. RX origin/alternate and TX origin all follow the default
 * antenna; only the RX alternate uses the other one. PHY0-only.
 */
static void rtw89_phy_antdiv_set_ant(struct rtw89_dev *rtwdev)
{
	struct rtw89_hal *hal = &rtwdev->hal;
	u8 default_ant, optional_ant;

	if (!hal->ant_diversity || hal->antenna_tx == 0)
		return;

	if (hal->antenna_tx == RF_B) {
		default_ant = ANTDIV_AUX;
		optional_ant = ANTDIV_MAIN;
	} else {
		default_ant = ANTDIV_MAIN;
		optional_ant = ANTDIV_AUX;
	}

	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_CGCS_CTRL,
			      default_ant, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_RX_ORI,
			      default_ant, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_RX_ALT,
			      optional_ant, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_TX_ORI,
			      default_ant, RTW89_PHY_0);
}
7484 
rtw89_phy_swap_hal_antenna(struct rtw89_dev * rtwdev)7485 static void rtw89_phy_swap_hal_antenna(struct rtw89_dev *rtwdev)
7486 {
7487 	struct rtw89_hal *hal = &rtwdev->hal;
7488 
7489 	hal->antenna_rx = hal->antenna_rx == RF_A ? RF_B : RF_A;
7490 	hal->antenna_tx = hal->antenna_rx;
7491 }
7492 
rtw89_phy_antdiv_decision_state(struct rtw89_dev * rtwdev)7493 static void rtw89_phy_antdiv_decision_state(struct rtw89_dev *rtwdev)
7494 {
7495 	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
7496 	struct rtw89_hal *hal = &rtwdev->hal;
7497 	bool no_change = false;
7498 	u8 main_rssi, aux_rssi;
7499 	u8 main_evm, aux_evm;
7500 	u32 candidate;
7501 
7502 	antdiv->get_stats = false;
7503 	antdiv->training_count = 0;
7504 
7505 	main_rssi = rtw89_phy_antdiv_sts_instance_get_rssi(&antdiv->main_stats);
7506 	main_evm = rtw89_phy_antdiv_sts_instance_get_evm(&antdiv->main_stats);
7507 	aux_rssi = rtw89_phy_antdiv_sts_instance_get_rssi(&antdiv->aux_stats);
7508 	aux_evm = rtw89_phy_antdiv_sts_instance_get_evm(&antdiv->aux_stats);
7509 
7510 	if (main_evm > aux_evm + ANTDIV_EVM_DIFF_TH)
7511 		candidate = RF_A;
7512 	else if (aux_evm > main_evm + ANTDIV_EVM_DIFF_TH)
7513 		candidate = RF_B;
7514 	else if (main_rssi > aux_rssi + RTW89_TX_DIV_RSSI_RAW_TH)
7515 		candidate = RF_A;
7516 	else if (aux_rssi > main_rssi + RTW89_TX_DIV_RSSI_RAW_TH)
7517 		candidate = RF_B;
7518 	else
7519 		no_change = true;
7520 
7521 	if (no_change) {
7522 		/* swap back from training antenna to original */
7523 		rtw89_phy_swap_hal_antenna(rtwdev);
7524 		return;
7525 	}
7526 
7527 	hal->antenna_tx = candidate;
7528 	hal->antenna_rx = candidate;
7529 }
7530 
/* Advance one step of the antenna-diversity training state machine.
 * Even steps collect statistics (resetting them on the very first step);
 * odd steps stop collection, swap to the other antenna, and apply it.
 * Each step re-queues the work with the appropriate delay.
 */
static void rtw89_phy_antdiv_training_state(struct rtw89_dev *rtwdev)
{
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
	u64 state_period;

	if (antdiv->training_count % 2 == 0) {
		if (antdiv->training_count == 0)
			rtw89_phy_antdiv_sts_reset(rtwdev);

		antdiv->get_stats = true;
		state_period = msecs_to_jiffies(ANTDIV_TRAINNING_INTVL);
	} else {
		antdiv->get_stats = false;
		state_period = msecs_to_jiffies(ANTDIV_DELAY);

		rtw89_phy_swap_hal_antenna(rtwdev);
		rtw89_phy_antdiv_set_ant(rtwdev);
	}

	antdiv->training_count++;
	wiphy_delayed_work_queue(rtwdev->hw->wiphy, &rtwdev->antdiv_work,
				 state_period);
}
7554 
/* Antenna-diversity delayed work: keep stepping the training state machine
 * until enough rounds ran, then make the final decision and apply it.
 * Runs under the wiphy mutex.
 */
void rtw89_phy_antdiv_work(struct wiphy *wiphy, struct wiphy_work *work)
{
	struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
						antdiv_work.work);
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;

	lockdep_assert_wiphy(wiphy);

	if (antdiv->training_count <= ANTDIV_TRAINNING_CNT) {
		rtw89_phy_antdiv_training_state(rtwdev);
	} else {
		rtw89_phy_antdiv_decision_state(rtwdev);
		rtw89_phy_antdiv_set_ant(rtwdev);
	}
}
7570 
/* Periodic antenna-diversity trigger: when the target RSSI moved by at
 * least ANTDIV_RSSI_DIFF_TH since the previous sample, restart training by
 * queueing the antdiv work immediately. No-op if diversity is unsupported
 * or pinned by the user.
 */
void rtw89_phy_antdiv_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
	struct rtw89_hal *hal = &rtwdev->hal;
	u8 rssi, rssi_pre;

	if (!hal->ant_diversity || hal->ant_diversity_fixed)
		return;

	rssi = rtw89_phy_antdiv_sts_instance_get_rssi(&antdiv->target_stats);
	rssi_pre = antdiv->rssi_pre;
	antdiv->rssi_pre = rssi;
	rtw89_phy_antdiv_sts_instance_reset(&antdiv->target_stats);

	if (abs((int)rssi - (int)rssi_pre) < ANTDIV_RSSI_DIFF_TH)
		return;

	antdiv->training_count = 0;
	wiphy_delayed_work_queue(rtwdev->hw->wiphy, &rtwdev->antdiv_work, 0);
}
7591 
/* Initialize the environment monitor (CCX top block and IFS CLM counters)
 * for one BB context.
 */
static void __rtw89_phy_env_monitor_init(struct rtw89_dev *rtwdev,
					 struct rtw89_bb_ctx *bb)
{
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "BB-%d env_monitor init\n", bb->phy_idx);

	rtw89_phy_ccx_top_setting_init(rtwdev, bb);
	rtw89_phy_ifs_clm_setting_init(rtwdev, bb);
}
7601 
/* Initialize the environment monitor on every capable BB context. */
static void rtw89_phy_env_monitor_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_bb_ctx *bb;

	rtw89_for_each_capab_bb(rtwdev, bb)
		__rtw89_phy_env_monitor_init(rtwdev, bb);
}
7609 
/* Initialize EDCCA state for one BB context: clear the backup, apply the
 * 8922A CAV-specific register workaround sequence (order matters — segsnd
 * and the DFS FFT enable are toggled off then back on), and set the TX
 * collision T2R state.
 */
static void __rtw89_phy_edcca_init(struct rtw89_dev *rtwdev,
				   struct rtw89_bb_ctx *bb)
{
	const struct rtw89_edcca_regs *edcca_regs = rtwdev->chip->edcca_regs;
	struct rtw89_edcca_bak *edcca_bak = &bb->edcca_bak;

	rtw89_debug(rtwdev, RTW89_DBG_EDCCA, "BB-%d edcca init\n", bb->phy_idx);

	memset(edcca_bak, 0, sizeof(*edcca_bak));

	if (rtwdev->chip->chip_id == RTL8922A && rtwdev->hal.cv == CHIP_CAV) {
		rtw89_phy_set_phy_regs(rtwdev, R_TXGATING, B_TXGATING_EN, 0);
		rtw89_phy_set_phy_regs(rtwdev, R_CTLTOP, B_CTLTOP_VAL, 2);
		rtw89_phy_set_phy_regs(rtwdev, R_CTLTOP, B_CTLTOP_ON, 1);
		rtw89_phy_set_phy_regs(rtwdev, R_SPOOF_CG, B_SPOOF_CG_EN, 0);
		rtw89_phy_set_phy_regs(rtwdev, R_DFS_FFT_CG, B_DFS_CG_EN, 0);
		rtw89_phy_set_phy_regs(rtwdev, R_DFS_FFT_CG, B_DFS_FFT_EN, 0);
		rtw89_phy_set_phy_regs(rtwdev, R_SEGSND, B_SEGSND_EN, 0);
		rtw89_phy_set_phy_regs(rtwdev, R_SEGSND, B_SEGSND_EN, 1);
		rtw89_phy_set_phy_regs(rtwdev, R_DFS_FFT_CG, B_DFS_FFT_EN, 1);
	}

	rtw89_phy_write32_idx(rtwdev, edcca_regs->tx_collision_t2r_st,
			      edcca_regs->tx_collision_t2r_st_mask, 0x29, bb->phy_idx);
}
7635 
/* Initialize EDCCA on every capable BB context. */
static void rtw89_phy_edcca_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_bb_ctx *bb;

	rtw89_for_each_capab_bb(rtwdev, bb)
		__rtw89_phy_edcca_init(rtwdev, bb);
}
7643 
/* Full dynamic-mechanism bring-up at device init.
 * The sequence is order-dependent: BB hardware setup first, then the
 * individual tracking mechanisms, then RF calibration (RFK) and TX power
 * control last.
 */
void rtw89_phy_dm_init(struct rtw89_dev *rtwdev)
{
	rtw89_phy_stat_init(rtwdev);

	rtw89_chip_bb_sethw(rtwdev);

	rtw89_phy_env_monitor_init(rtwdev);
	rtw89_phy_nhm_setting_init(rtwdev);
	rtw89_physts_parsing_init(rtwdev);
	rtw89_phy_dig_init(rtwdev);
	rtw89_phy_cfo_init(rtwdev);
	rtw89_phy_bb_wrap_init(rtwdev);
	rtw89_phy_edcca_init(rtwdev);
	rtw89_phy_ch_info_init(rtwdev);
	rtw89_phy_ul_tb_info_init(rtwdev);
	rtw89_phy_antdiv_init(rtwdev);
	rtw89_chip_rfe_gpio(rtwdev);
	rtw89_phy_antdiv_set_ant(rtwdev);

	rtw89_chip_rfk_hw_init(rtwdev);
	rtw89_phy_init_rf_nctl(rtwdev);
	rtw89_chip_rfk_init(rtwdev);
	rtw89_chip_set_txpwr_ctrl(rtwdev);
	rtw89_chip_power_trim(rtwdev);
	rtw89_chip_cfg_txrx_path(rtwdev);
}
7670 
/* Partial re-init of dynamic mechanisms (e.g. after a reset): only the
 * environment monitor and PHY status parsing need to be redone.
 */
void rtw89_phy_dm_reinit(struct rtw89_dev *rtwdev)
{
	rtw89_phy_env_monitor_init(rtwdev);
	rtw89_physts_parsing_init(rtwdev);
}
7676 
__rtw89_phy_dm_init_data(struct rtw89_dev * rtwdev,struct rtw89_bb_ctx * bb)7677 static void __rtw89_phy_dm_init_data(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
7678 {
7679 	struct rtw89_env_monitor_info *env = &bb->env_monitor;
7680 	const struct rtw89_chip_info *chip = rtwdev->chip;
7681 	struct ieee80211_supported_band *sband;
7682 	enum rtw89_band hw_band;
7683 	enum nl80211_band band;
7684 	u8 idx;
7685 
7686 	if (!chip->support_noise)
7687 		return;
7688 
7689 	for (band = 0; band < NUM_NL80211_BANDS; band++) {
7690 		sband = rtwdev->hw->wiphy->bands[band];
7691 		if (!sband)
7692 			continue;
7693 
7694 		hw_band = rtw89_nl80211_to_hw_band(band);
7695 		env->nhm_his[hw_band] =
7696 			devm_kcalloc(rtwdev->dev, sband->n_channels,
7697 				     sizeof(*env->nhm_his[0]), GFP_KERNEL);
7698 
7699 		for (idx = 0; idx < sband->n_channels; idx++)
7700 			INIT_LIST_HEAD(&env->nhm_his[hw_band][idx].list);
7701 
7702 		INIT_LIST_HEAD(&env->nhm_rpt_list);
7703 	}
7704 }
7705 
/* Allocate dynamic-mechanism data (NHM history) on every capable BB context. */
void rtw89_phy_dm_init_data(struct rtw89_dev *rtwdev)
{
	struct rtw89_bb_ctx *bb;

	rtw89_for_each_capab_bb(rtwdev, bb)
		__rtw89_phy_dm_init_data(rtwdev, bb);
}
7713 
/* Program the HE BSS color filter for an associated link.
 * The BSS color is read from bss_conf under RCU and copied out before the
 * register writes, which happen after the read-side critical section ends.
 * No-op unless the BSS supports HE and the vif is associated.
 */
void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev,
			     struct rtw89_vif_link *rtwvif_link)
{
	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_reg_def *bss_clr_vld = &chip->bss_clr_vld;
	enum rtw89_phy_idx phy_idx = rtwvif_link->phy_idx;
	struct ieee80211_bss_conf *bss_conf;
	u8 bss_color;

	rcu_read_lock();

	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
	if (!bss_conf->he_support || !vif->cfg.assoc) {
		rcu_read_unlock();
		return;
	}

	bss_color = bss_conf->he_bss_color.color;

	rcu_read_unlock();

	/* mark the color valid, then set target color and our STA ID */
	rtw89_phy_write32_idx(rtwdev, bss_clr_vld->addr, bss_clr_vld->mask, 0x1,
			      phy_idx);
	rtw89_phy_write32_idx(rtwdev, chip->bss_clr_map_reg, B_BSS_CLR_MAP_TGT,
			      bss_color, phy_idx);
	rtw89_phy_write32_idx(rtwdev, chip->bss_clr_map_reg, B_BSS_CLR_MAP_STAID,
			      vif->cfg.aid, phy_idx);
}
7743 
/* An RFK channel descriptor is valid iff its channel number is non-zero. */
static bool rfk_chan_validate_desc(const struct rtw89_rfk_chan_desc *desc)
{
	return desc->ch != 0;
}
7748 
rfk_chan_is_equivalent(const struct rtw89_rfk_chan_desc * desc,const struct rtw89_chan * chan)7749 static bool rfk_chan_is_equivalent(const struct rtw89_rfk_chan_desc *desc,
7750 				   const struct rtw89_chan *chan)
7751 {
7752 	if (!rfk_chan_validate_desc(desc))
7753 		return false;
7754 
7755 	if (desc->ch != chan->channel)
7756 		return false;
7757 
7758 	if (desc->has_band && desc->band != chan->band_type)
7759 		return false;
7760 
7761 	if (desc->has_bw && desc->bw != chan->band_width)
7762 		return false;
7763 
7764 	return true;
7765 }
7766 
/* Iterator context for counting active channels equivalent to @desc. */
struct rfk_chan_iter_data {
	const struct rtw89_rfk_chan_desc desc; /* descriptor to match against */
	unsigned int found;                    /* number of matching channels */
};
7771 
/* Entity-channel iterator callback: count channels equivalent to the
 * descriptor in @data. Always returns 0 to keep iterating.
 */
static int rfk_chan_iter_search(const struct rtw89_chan *chan, void *data)
{
	struct rfk_chan_iter_data *iter_data = data;

	if (rfk_chan_is_equivalent(&iter_data->desc, chan))
		iter_data->found++;

	return 0;
}
7781 
/* Pick an RFK table entry index for @target_chan.
 * Returns the first descriptor equivalent to the target channel if one
 * exists; otherwise returns the first "idle" entry — one whose channel is
 * not currently in use by any entity channel — and as a last resort falls
 * back to index 0.
 */
u8 rtw89_rfk_chan_lookup(struct rtw89_dev *rtwdev,
			 const struct rtw89_rfk_chan_desc *desc, u8 desc_nr,
			 const struct rtw89_chan *target_chan)
{
	int sel = -1;
	u8 i;

	for (i = 0; i < desc_nr; i++) {
		struct rfk_chan_iter_data iter_data = {
			.desc = desc[i],
		};

		if (rfk_chan_is_equivalent(&desc[i], target_chan))
			return i;

		/* remember the first entry no active channel still uses */
		rtw89_iterate_entity_chan(rtwdev, rfk_chan_iter_search, &iter_data);
		if (!iter_data.found && sel == -1)
			sel = i;
	}

	if (sel == -1) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "no idle rfk entry; force replace the first\n");
		sel = 0;
	}

	return sel;
}
7810 EXPORT_SYMBOL(rtw89_rfk_chan_lookup);
7811 
/* RTW89_RFK_F_WRF handler: write an RF register field on def->path. */
static void
_rfk_write_rf(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	rtw89_write_rf(rtwdev, def->path, def->addr, def->mask, def->data);
}
7817 
/* RTW89_RFK_F_WM handler: masked write to a BB/PHY register. */
static void
_rfk_write32_mask(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	rtw89_phy_write32_mask(rtwdev, def->addr, def->mask, def->data);
}
7823 
/* RTW89_RFK_F_WS handler: set bits of def->mask in a BB/PHY register. */
static void
_rfk_write32_set(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	rtw89_phy_write32_set(rtwdev, def->addr, def->mask);
}
7829 
/* RTW89_RFK_F_WC handler: clear bits of def->mask in a BB/PHY register. */
static void
_rfk_write32_clr(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	rtw89_phy_write32_clr(rtwdev, def->addr, def->mask);
}
7835 
/* RTW89_RFK_F_DELAY handler: busy-wait def->data microseconds. */
static void
_rfk_delay(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	udelay(def->data);
}
7841 
/* Dispatch table indexed by rtw89_reg5_def::flag — one handler per
 * RTW89_RFK_F_* opcode appearing in RFK parameter tables.
 */
static void
(*_rfk_handler[])(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def) = {
	[RTW89_RFK_F_WRF] = _rfk_write_rf,
	[RTW89_RFK_F_WM] = _rfk_write32_mask,
	[RTW89_RFK_F_WS] = _rfk_write32_set,
	[RTW89_RFK_F_WC] = _rfk_write32_clr,
	[RTW89_RFK_F_DELAY] = _rfk_delay,
};

/* The table must cover every opcode, or the parser would jump to NULL. */
static_assert(ARRAY_SIZE(_rfk_handler) == RTW89_RFK_F_NUM);
7852 
7853 void
rtw89_rfk_parser(struct rtw89_dev * rtwdev,const struct rtw89_rfk_tbl * tbl)7854 rtw89_rfk_parser(struct rtw89_dev *rtwdev, const struct rtw89_rfk_tbl *tbl)
7855 {
7856 	const struct rtw89_reg5_def *p = tbl->defs;
7857 	const struct rtw89_reg5_def *end = tbl->defs + tbl->size;
7858 
7859 	for (; p < end; p++)
7860 		_rfk_handler[p->flag](rtwdev, p);
7861 }
7862 EXPORT_SYMBOL(rtw89_rfk_parser);
7863 
#define RTW89_TSSI_FAST_MODE_NUM 4

/* TSSI fast-mode register fields (flat band-edge configuration).
 * NOTE(review): field ordering mirrors the hardware layout — confirm
 * against the chip register map before reordering.
 */
static const struct rtw89_reg_def rtw89_tssi_fastmode_regs_flat[RTW89_TSSI_FAST_MODE_NUM] = {
	{0xD934, 0xff0000},
	{0xD934, 0xff000000},
	{0xD938, 0xff},
	{0xD934, 0xff00},
};

/* TSSI fast-mode register fields (level band-edge configuration). */
static const struct rtw89_reg_def rtw89_tssi_fastmode_regs_level[RTW89_TSSI_FAST_MODE_NUM] = {
	{0xD930, 0xff0000},
	{0xD930, 0xff000000},
	{0xD934, 0xff},
	{0xD930, 0xff00},
};
7879 
7880 static
rtw89_phy_tssi_ctrl_set_fast_mode_cfg(struct rtw89_dev * rtwdev,enum rtw89_mac_idx mac_idx,enum rtw89_tssi_bandedge_cfg bandedge_cfg,u32 val)7881 void rtw89_phy_tssi_ctrl_set_fast_mode_cfg(struct rtw89_dev *rtwdev,
7882 					   enum rtw89_mac_idx mac_idx,
7883 					   enum rtw89_tssi_bandedge_cfg bandedge_cfg,
7884 					   u32 val)
7885 {
7886 	const struct rtw89_reg_def *regs;
7887 	u32 reg;
7888 	int i;
7889 
7890 	if (bandedge_cfg == RTW89_TSSI_BANDEDGE_FLAT)
7891 		regs = rtw89_tssi_fastmode_regs_flat;
7892 	else
7893 		regs = rtw89_tssi_fastmode_regs_level;
7894 
7895 	for (i = 0; i < RTW89_TSSI_FAST_MODE_NUM; i++) {
7896 		reg = rtw89_mac_reg_by_idx(rtwdev, regs[i].addr, mac_idx);
7897 		rtw89_write32_mask(rtwdev, reg, regs[i].mask, val);
7898 	}
7899 }
7900 
/* TSSI band-edge register fields (flat configuration), one entry per
 * RTW89_TSSI_SBW_* sub-bandwidth slot.
 */
static const struct rtw89_reg_def rtw89_tssi_bandedge_regs_flat[RTW89_TSSI_SBW_NUM] = {
	{0xD91C, 0xff000000},
	{0xD920, 0xff},
	{0xD920, 0xff00},
	{0xD920, 0xff0000},
	{0xD920, 0xff000000},
	{0xD924, 0xff},
	{0xD924, 0xff00},
	{0xD914, 0xff000000},
	{0xD918, 0xff},
	{0xD918, 0xff00},
	{0xD918, 0xff0000},
	{0xD918, 0xff000000},
	{0xD91C, 0xff},
	{0xD91C, 0xff00},
	{0xD91C, 0xff0000},
};

/* TSSI band-edge register fields (level configuration). */
static const struct rtw89_reg_def rtw89_tssi_bandedge_regs_level[RTW89_TSSI_SBW_NUM] = {
	{0xD910, 0xff},
	{0xD910, 0xff00},
	{0xD910, 0xff0000},
	{0xD910, 0xff000000},
	{0xD914, 0xff},
	{0xD914, 0xff00},
	{0xD914, 0xff0000},
	{0xD908, 0xff},
	{0xD908, 0xff00},
	{0xD908, 0xff0000},
	{0xD908, 0xff000000},
	{0xD90C, 0xff},
	{0xD90C, 0xff00},
	{0xD90C, 0xff0000},
	{0xD90C, 0xff000000},
};
7936 
rtw89_phy_tssi_ctrl_set_bandedge_cfg(struct rtw89_dev * rtwdev,enum rtw89_mac_idx mac_idx,enum rtw89_tssi_bandedge_cfg bandedge_cfg)7937 void rtw89_phy_tssi_ctrl_set_bandedge_cfg(struct rtw89_dev *rtwdev,
7938 					  enum rtw89_mac_idx mac_idx,
7939 					  enum rtw89_tssi_bandedge_cfg bandedge_cfg)
7940 {
7941 	const struct rtw89_chip_info *chip = rtwdev->chip;
7942 	const struct rtw89_reg_def *regs;
7943 	const u32 *data;
7944 	u32 reg;
7945 	int i;
7946 
7947 	if (bandedge_cfg >= RTW89_TSSI_CFG_NUM)
7948 		return;
7949 
7950 	if (bandedge_cfg == RTW89_TSSI_BANDEDGE_FLAT)
7951 		regs = rtw89_tssi_bandedge_regs_flat;
7952 	else
7953 		regs = rtw89_tssi_bandedge_regs_level;
7954 
7955 	data = chip->tssi_dbw_table->data[bandedge_cfg];
7956 
7957 	for (i = 0; i < RTW89_TSSI_SBW_NUM; i++) {
7958 		reg = rtw89_mac_reg_by_idx(rtwdev, regs[i].addr, mac_idx);
7959 		rtw89_write32_mask(rtwdev, reg, regs[i].mask, data[i]);
7960 	}
7961 
7962 	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_BANDEDGE_CFG, mac_idx);
7963 	rtw89_write32_mask(rtwdev, reg, B_AX_BANDEDGE_CFG_IDX_MASK, bandedge_cfg);
7964 
7965 	rtw89_phy_tssi_ctrl_set_fast_mode_cfg(rtwdev, mac_idx, bandedge_cfg,
7966 					      data[RTW89_TSSI_SBW20]);
7967 }
7968 EXPORT_SYMBOL(rtw89_phy_tssi_ctrl_set_bandedge_cfg);
7969 
/* Base channel numbers used for the compact channel-index encoding:
 * slot 0 is 2GHz, slots 2..5 are 5GHz bases, slots 7..14 are 6GHz bases,
 * and 0xff entries are band separators/sentinels.
 */
static
const u8 rtw89_ch_base_table[16] = {1, 0xff,
				    36, 100, 132, 149, 0xff,
				    1, 33, 65, 97, 129, 161, 193, 225, 0xff};
#define RTW89_CH_BASE_IDX_2G		0
#define RTW89_CH_BASE_IDX_5G_FIRST	2
#define RTW89_CH_BASE_IDX_5G_LAST	5
#define RTW89_CH_BASE_IDX_6G_FIRST	7
#define RTW89_CH_BASE_IDX_6G_LAST	14

/* Encoded channel index layout: high nibble = base index, low nibble =
 * (channel - base) / 2.
 */
#define RTW89_CH_BASE_IDX_MASK		GENMASK(7, 4)
#define RTW89_CH_OFFSET_MASK		GENMASK(3, 0)
7982 
/* Encode (band, central channel) into the compact 8-bit channel index:
 * high nibble selects a base entry of rtw89_ch_base_table, low nibble is
 * the channel offset in units of two.  Returns 0 on unsupported input.
 */
u8 rtw89_encode_chan_idx(struct rtw89_dev *rtwdev, u8 central_ch, u8 band)
{
	u8 first, last;
	u8 base_idx;

	switch (band) {
	case RTW89_BAND_2G:
		/* 2 GHz channels are stored directly in the offset nibble. */
		return FIELD_PREP(RTW89_CH_BASE_IDX_MASK, RTW89_CH_BASE_IDX_2G) |
		       FIELD_PREP(RTW89_CH_OFFSET_MASK, central_ch);
	case RTW89_BAND_5G:
		first = RTW89_CH_BASE_IDX_5G_FIRST;
		last = RTW89_CH_BASE_IDX_5G_LAST;
		break;
	case RTW89_BAND_6G:
		first = RTW89_CH_BASE_IDX_6G_FIRST;
		last = RTW89_CH_BASE_IDX_6G_LAST;
		break;
	default:
		rtw89_warn(rtwdev, "Unsupported band %d\n", band);
		return 0;
	}

	/* Find the highest base channel not above central_ch. */
	for (base_idx = last; base_idx >= first; base_idx--)
		if (central_ch >= rtw89_ch_base_table[base_idx])
			break;

	if (base_idx < first) {
		rtw89_warn(rtwdev, "Unknown band %d channel %d\n", band, central_ch);
		return 0;
	}

	return FIELD_PREP(RTW89_CH_BASE_IDX_MASK, base_idx) |
	       FIELD_PREP(RTW89_CH_OFFSET_MASK,
			  (central_ch - rtw89_ch_base_table[base_idx]) >> 1);
}
EXPORT_SYMBOL(rtw89_encode_chan_idx);
8022 
/* Inverse of rtw89_encode_chan_idx(): recover the channel number and
 * nl80211 band from the compact 8-bit channel index.
 */
void rtw89_decode_chan_idx(struct rtw89_dev *rtwdev, u8 chan_idx,
			   u8 *ch, enum nl80211_band *band)
{
	u8 base_idx = FIELD_GET(RTW89_CH_BASE_IDX_MASK, chan_idx);
	u8 offset = FIELD_GET(RTW89_CH_OFFSET_MASK, chan_idx);

	if (base_idx == RTW89_CH_BASE_IDX_2G) {
		/* 2 GHz stores the channel number directly in the offset. */
		*band = NL80211_BAND_2GHZ;
		*ch = offset;
		return;
	}

	/* Offset counts in units of two channels above the base channel. */
	*band = base_idx <= RTW89_CH_BASE_IDX_5G_LAST ? NL80211_BAND_5GHZ :
							NL80211_BAND_6GHZ;
	*ch = rtw89_ch_base_table[base_idx] + (offset << 1);
}
EXPORT_SYMBOL(rtw89_decode_chan_idx);
8041 
/* Switch EDCCA thresholds around a scan: on entry (@scan == true) save
 * the current a/p/ppdu levels and raise them all to EDCCA_MAX; on exit
 * restore the saved values.
 */
void rtw89_phy_config_edcca(struct rtw89_dev *rtwdev,
			    struct rtw89_bb_ctx *bb, bool scan)
{
	const struct rtw89_edcca_regs *regs = rtwdev->chip->edcca_regs;
	struct rtw89_edcca_bak *bak = &bb->edcca_bak;

	if (!scan) {
		/* Leaving scan: restore the thresholds saved on entry. */
		rtw89_phy_write32_idx(rtwdev, regs->edcca_level,
				      regs->edcca_mask, bak->a, bb->phy_idx);
		rtw89_phy_write32_idx(rtwdev, regs->edcca_level,
				      regs->edcca_p_mask, bak->p, bb->phy_idx);
		rtw89_phy_write32_idx(rtwdev, regs->ppdu_level,
				      regs->ppdu_mask, bak->ppdu, bb->phy_idx);
		return;
	}

	/* Entering scan: snapshot current thresholds ... */
	bak->a = rtw89_phy_read32_idx(rtwdev, regs->edcca_level,
				      regs->edcca_mask, bb->phy_idx);
	bak->p = rtw89_phy_read32_idx(rtwdev, regs->edcca_level,
				      regs->edcca_p_mask, bb->phy_idx);
	bak->ppdu = rtw89_phy_read32_idx(rtwdev, regs->ppdu_level,
					 regs->ppdu_mask, bb->phy_idx);

	/* ... then push every level to the maximum for the scan period. */
	rtw89_phy_write32_idx(rtwdev, regs->edcca_level,
			      regs->edcca_mask, EDCCA_MAX, bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, regs->edcca_level,
			      regs->edcca_p_mask, EDCCA_MAX, bb->phy_idx);
	rtw89_phy_write32_idx(rtwdev, regs->ppdu_level,
			      regs->ppdu_mask, EDCCA_MAX, bb->phy_idx);
}
8077 
/* Dump the EDCCA hardware report for debugging: busy flags, per-20MHz
 * PWDB readings and sub-band power values.  Only runs when the EDCCA
 * debug level is enabled; writes only the report-select registers.
 */
static void rtw89_phy_edcca_log(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
{
	const struct rtw89_edcca_regs *edcca_regs = rtwdev->chip->edcca_regs;
	const struct rtw89_edcca_p_regs *edcca_p_regs;
	bool flag_fb, flag_p20, flag_s20, flag_s40, flag_s80;
	s8 pwdb_fb, pwdb_p20, pwdb_s20, pwdb_s40, pwdb_s80;
	u8 path, per20_bitmap = 0;
	u8 pwdb_sel = 5;
	u8 pwdb[8];
	u32 tmp;

	/* Avoid the register traffic entirely when logging is off. */
	if (!rtw89_debug_is_enabled(rtwdev, RTW89_DBG_EDCCA))
		return;

	/* Pick the per-PHY report register block. */
	if (bb->phy_idx == RTW89_PHY_1)
		edcca_p_regs = &edcca_regs->p[RTW89_PHY_1];
	else
		edcca_p_regs = &edcca_regs->p[RTW89_PHY_0];

	rtw89_phy_write32_mask(rtwdev, edcca_p_regs->rpt_sel,
			       edcca_p_regs->rpt_sel_mask, 0);
	if (rtwdev->chip->chip_id == RTL8922A || rtwdev->chip->chip_id == RTL8922D) {
		/* These chips have an extra report selector and expose the
		 * per-20MHz EDCCA bitmap in rpt_a.
		 */
		rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel_be,
				       edcca_regs->rpt_sel_be_mask, 0);
		per20_bitmap = rtw89_phy_read32_mask(rtwdev, edcca_p_regs->rpt_a,
						     MASKBYTE0);
	}
	/* Report selector 0: busy flags plus fb/p20/s20 power readings. */
	tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_b);
	path = u32_get_bits(tmp, B_EDCCA_RPT_B_PATH_MASK);
	flag_s80 = u32_get_bits(tmp, B_EDCCA_RPT_B_S80);
	flag_s40 = u32_get_bits(tmp, B_EDCCA_RPT_B_S40);
	flag_s20 = u32_get_bits(tmp, B_EDCCA_RPT_B_S20);
	flag_p20 = u32_get_bits(tmp, B_EDCCA_RPT_B_P20);
	flag_fb = u32_get_bits(tmp, B_EDCCA_RPT_B_FB);
	pwdb_s20 = u32_get_bits(tmp, MASKBYTE1);
	pwdb_p20 = u32_get_bits(tmp, MASKBYTE2);
	pwdb_fb = u32_get_bits(tmp, MASKBYTE3);

	/* NOTE(review): RTL8922D reads the s80/s40 report from selector 2
	 * instead of 5 — selector meanings are not visible in this file.
	 */
	if (rtwdev->chip->chip_id == RTL8922D)
		pwdb_sel = 2;

	rtw89_phy_write32_mask(rtwdev, edcca_p_regs->rpt_sel,
			       edcca_p_regs->rpt_sel_mask, pwdb_sel);
	tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_b);
	pwdb_s80 = u32_get_bits(tmp, MASKBYTE1);
	pwdb_s40 = u32_get_bits(tmp, MASKBYTE2);

	if (rtwdev->chip->chip_id == RTL8922A || rtwdev->chip->chip_id == RTL8922D) {
		/* BE path: two selector values yield four per-20MHz PWDB
		 * bytes each.
		 */
		rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel_be,
				       edcca_regs->rpt_sel_be_mask, 4);
		tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_b);
		pwdb[0] = u32_get_bits(tmp, MASKBYTE3);
		pwdb[1] = u32_get_bits(tmp, MASKBYTE2);
		pwdb[2] = u32_get_bits(tmp, MASKBYTE1);
		pwdb[3] = u32_get_bits(tmp, MASKBYTE0);

		rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel_be,
				       edcca_regs->rpt_sel_be_mask, 5);
		tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_b);
		pwdb[4] = u32_get_bits(tmp, MASKBYTE3);
		pwdb[5] = u32_get_bits(tmp, MASKBYTE2);
		pwdb[6] = u32_get_bits(tmp, MASKBYTE1);
		pwdb[7] = u32_get_bits(tmp, MASKBYTE0);
	} else {
		/* AX path: four selector values yield two PWDB bytes each. */
		rtw89_phy_write32_mask(rtwdev, edcca_p_regs->rpt_sel,
				       edcca_p_regs->rpt_sel_mask, 0);
		tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_a);
		pwdb[0] = u32_get_bits(tmp, MASKBYTE3);
		pwdb[1] = u32_get_bits(tmp, MASKBYTE2);

		rtw89_phy_write32_mask(rtwdev, edcca_p_regs->rpt_sel,
				       edcca_p_regs->rpt_sel_mask, 5);
		tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_a);
		pwdb[2] = u32_get_bits(tmp, MASKBYTE3);
		pwdb[3] = u32_get_bits(tmp, MASKBYTE2);

		rtw89_phy_write32_mask(rtwdev, edcca_p_regs->rpt_sel,
				       edcca_p_regs->rpt_sel_mask, 2);
		tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_a);
		pwdb[4] = u32_get_bits(tmp, MASKBYTE3);
		pwdb[5] = u32_get_bits(tmp, MASKBYTE2);

		rtw89_phy_write32_mask(rtwdev, edcca_p_regs->rpt_sel,
				       edcca_p_regs->rpt_sel_mask, 3);
		tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_a);
		pwdb[6] = u32_get_bits(tmp, MASKBYTE3);
		pwdb[7] = u32_get_bits(tmp, MASKBYTE2);
	}

	rtw89_debug(rtwdev, RTW89_DBG_EDCCA,
		    "[EDCCA]: edcca_bitmap = %04x\n", per20_bitmap);

	rtw89_debug(rtwdev, RTW89_DBG_EDCCA,
		    "[EDCCA]: pwdb per20{0,1,2,3,4,5,6,7} = {%d,%d,%d,%d,%d,%d,%d,%d}(dBm)\n",
		    pwdb[0], pwdb[1], pwdb[2], pwdb[3], pwdb[4], pwdb[5],
		    pwdb[6], pwdb[7]);

	rtw89_debug(rtwdev, RTW89_DBG_EDCCA,
		    "[EDCCA]: path=%d, flag {FB,p20,s20,s40,s80} = {%d,%d,%d,%d,%d}\n",
		    path, flag_fb, flag_p20, flag_s20, flag_s40, flag_s80);

	rtw89_debug(rtwdev, RTW89_DBG_EDCCA,
		    "[EDCCA]: pwdb {FB,p20,s20,s40,s80} = {%d,%d,%d,%d,%d}(dBm)\n",
		    pwdb_fb, pwdb_p20, pwdb_s20, pwdb_s40, pwdb_s80);
}
8183 
rtw89_phy_edcca_get_thre_by_rssi(struct rtw89_dev * rtwdev,struct rtw89_bb_ctx * bb)8184 static u8 rtw89_phy_edcca_get_thre_by_rssi(struct rtw89_dev *rtwdev,
8185 					   struct rtw89_bb_ctx *bb)
8186 {
8187 	struct rtw89_phy_ch_info *ch_info = &bb->ch_info;
8188 	bool is_linked = rtwdev->total_sta_assoc > 0;
8189 	u8 rssi_min = ch_info->rssi_min >> 1;
8190 	u8 edcca_thre;
8191 
8192 	if (!is_linked) {
8193 		edcca_thre = EDCCA_MAX;
8194 	} else {
8195 		edcca_thre = rssi_min - RSSI_UNIT_CONVER + EDCCA_UNIT_CONVER -
8196 			     EDCCA_TH_REF;
8197 		edcca_thre = max_t(u8, edcca_thre, EDCCA_TH_L2H_LB);
8198 	}
8199 
8200 	return edcca_thre;
8201 }
8202 
rtw89_phy_edcca_thre_calc(struct rtw89_dev * rtwdev,struct rtw89_bb_ctx * bb)8203 void rtw89_phy_edcca_thre_calc(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
8204 {
8205 	const struct rtw89_edcca_regs *edcca_regs = rtwdev->chip->edcca_regs;
8206 	struct rtw89_edcca_bak *edcca_bak = &bb->edcca_bak;
8207 	u8 th;
8208 
8209 	th = rtw89_phy_edcca_get_thre_by_rssi(rtwdev, bb);
8210 	if (th == edcca_bak->th_old)
8211 		return;
8212 
8213 	edcca_bak->th_old = th;
8214 
8215 	rtw89_debug(rtwdev, RTW89_DBG_EDCCA,
8216 		    "[EDCCA]: Normal Mode, EDCCA_th = %d\n", th);
8217 
8218 	rtw89_phy_write32_idx(rtwdev, edcca_regs->edcca_level,
8219 			      edcca_regs->edcca_mask, th, bb->phy_idx);
8220 	rtw89_phy_write32_idx(rtwdev, edcca_regs->edcca_level,
8221 			      edcca_regs->edcca_p_mask, th, bb->phy_idx);
8222 	rtw89_phy_write32_idx(rtwdev, edcca_regs->ppdu_level,
8223 			      edcca_regs->ppdu_mask, th, bb->phy_idx);
8224 }
8225 
/* Per-baseband EDCCA tracking step: update the threshold, then dump the
 * hardware report when EDCCA debugging is enabled.
 */
static
void __rtw89_phy_edcca_track(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb)
{
	rtw89_debug(rtwdev, RTW89_DBG_EDCCA, "BB-%d edcca track\n", bb->phy_idx);

	rtw89_phy_edcca_thre_calc(rtwdev, bb);
	rtw89_phy_edcca_log(rtwdev, bb);
}
8234 
/* Run EDCCA tracking on every active baseband, unless dynamic EDCCA has
 * been disabled via the DM bitmap.
 */
void rtw89_phy_edcca_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_hal *hal = &rtwdev->hal;
	struct rtw89_bb_ctx *bb;

	if (hal->disabled_dm_bitmap & BIT(RTW89_DM_DYNAMIC_EDCCA))
		return;

	rtw89_for_each_active_bb(rtwdev, bb)
		__rtw89_phy_edcca_track(rtwdev, bb);
}
8246 
rtw89_phy_get_kpath(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy_idx)8247 enum rtw89_rf_path_bit rtw89_phy_get_kpath(struct rtw89_dev *rtwdev,
8248 					   enum rtw89_phy_idx phy_idx)
8249 {
8250 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
8251 		    "[RFK] kpath dbcc_en: 0x%x, mode=0x%x, PHY%d\n",
8252 		    rtwdev->dbcc_en, rtwdev->mlo_dbcc_mode, phy_idx);
8253 
8254 	switch (rtwdev->mlo_dbcc_mode) {
8255 	case MLO_1_PLUS_1_1RF:
8256 		if (phy_idx == RTW89_PHY_0)
8257 			return RF_A;
8258 		else
8259 			return RF_B;
8260 	case MLO_1_PLUS_1_2RF:
8261 		if (phy_idx == RTW89_PHY_0)
8262 			return RF_A;
8263 		else
8264 			return RF_D;
8265 	case MLO_0_PLUS_2_1RF:
8266 	case MLO_2_PLUS_0_1RF:
8267 		/* for both PHY 0/1 */
8268 		return RF_AB;
8269 	case MLO_0_PLUS_2_2RF:
8270 	case MLO_2_PLUS_0_2RF:
8271 	case MLO_2_PLUS_2_2RF:
8272 	default:
8273 		if (phy_idx == RTW89_PHY_0)
8274 			return RF_AB;
8275 		else
8276 			return RF_CD;
8277 	}
8278 }
8279 EXPORT_SYMBOL(rtw89_phy_get_kpath);
8280 
rtw89_phy_get_syn_sel(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy_idx)8281 enum rtw89_rf_path rtw89_phy_get_syn_sel(struct rtw89_dev *rtwdev,
8282 					 enum rtw89_phy_idx phy_idx)
8283 {
8284 	rtw89_debug(rtwdev, RTW89_DBG_RFK,
8285 		    "[RFK] kpath dbcc_en: 0x%x, mode=0x%x, PHY%d\n",
8286 		    rtwdev->dbcc_en, rtwdev->mlo_dbcc_mode, phy_idx);
8287 
8288 	switch (rtwdev->mlo_dbcc_mode) {
8289 	case MLO_1_PLUS_1_1RF:
8290 		if (phy_idx == RTW89_PHY_0)
8291 			return RF_PATH_A;
8292 		else
8293 			return RF_PATH_B;
8294 	case MLO_1_PLUS_1_2RF:
8295 		if (phy_idx == RTW89_PHY_0)
8296 			return RF_PATH_A;
8297 		else
8298 			return RF_PATH_D;
8299 	case MLO_0_PLUS_2_1RF:
8300 	case MLO_2_PLUS_0_1RF:
8301 		if (phy_idx == RTW89_PHY_0)
8302 			return RF_PATH_A;
8303 		else
8304 			return RF_PATH_B;
8305 	case MLO_0_PLUS_2_2RF:
8306 	case MLO_2_PLUS_0_2RF:
8307 	case MLO_2_PLUS_2_2RF:
8308 	default:
8309 		if (phy_idx == RTW89_PHY_0)
8310 			return RF_PATH_A;
8311 		else
8312 			return RF_PATH_C;
8313 	}
8314 }
8315 EXPORT_SYMBOL(rtw89_phy_get_syn_sel);
8316 
/* CCX (channel measurement) register map for AX-generation chips:
 * trigger/enable controls, IFS-CLM period and per-type (T1..T4)
 * threshold/histogram/average/CCA fields, FA counters and NHM control.
 */
static const struct rtw89_ccx_regs rtw89_ccx_regs_ax = {
	.setting_addr = R_CCX,
	.edcca_opt_mask = B_CCX_EDCCA_OPT_MSK,
	.measurement_trig_mask = B_MEASUREMENT_TRIG_MSK,
	.trig_opt_mask = B_CCX_TRIG_OPT_MSK,
	.en_mask = B_CCX_EN_MSK,
	.ifs_cnt_addr = R_IFS_COUNTER,
	.ifs_clm_period_mask = B_IFS_CLM_PERIOD_MSK,
	.ifs_clm_cnt_unit_mask = B_IFS_CLM_COUNTER_UNIT_MSK,
	.ifs_clm_cnt_clear_mask = B_IFS_COUNTER_CLR_MSK,
	.ifs_collect_en_mask = B_IFS_COLLECT_EN,
	.ifs_t1_addr = R_IFS_T1,
	.ifs_t1_th_h_mask = B_IFS_T1_TH_HIGH_MSK,
	.ifs_t1_en_mask = B_IFS_T1_EN_MSK,
	.ifs_t1_th_l_mask = B_IFS_T1_TH_LOW_MSK,
	.ifs_t2_addr = R_IFS_T2,
	.ifs_t2_th_h_mask = B_IFS_T2_TH_HIGH_MSK,
	.ifs_t2_en_mask = B_IFS_T2_EN_MSK,
	.ifs_t2_th_l_mask = B_IFS_T2_TH_LOW_MSK,
	.ifs_t3_addr = R_IFS_T3,
	.ifs_t3_th_h_mask = B_IFS_T3_TH_HIGH_MSK,
	.ifs_t3_en_mask = B_IFS_T3_EN_MSK,
	.ifs_t3_th_l_mask = B_IFS_T3_TH_LOW_MSK,
	.ifs_t4_addr = R_IFS_T4,
	.ifs_t4_th_h_mask = B_IFS_T4_TH_HIGH_MSK,
	.ifs_t4_en_mask = B_IFS_T4_EN_MSK,
	.ifs_t4_th_l_mask = B_IFS_T4_TH_LOW_MSK,
	.ifs_clm_tx_cnt_addr = R_IFS_CLM_TX_CNT,
	.ifs_clm_edcca_excl_cca_fa_mask = B_IFS_CLM_EDCCA_EXCLUDE_CCA_FA_MSK,
	.ifs_clm_tx_cnt_msk = B_IFS_CLM_TX_CNT_MSK,
	.ifs_clm_cca_addr = R_IFS_CLM_CCA,
	.ifs_clm_ofdmcca_excl_fa_mask = B_IFS_CLM_OFDMCCA_EXCLUDE_FA_MSK,
	.ifs_clm_cckcca_excl_fa_mask = B_IFS_CLM_CCKCCA_EXCLUDE_FA_MSK,
	.ifs_clm_fa_addr = R_IFS_CLM_FA,
	.ifs_clm_ofdm_fa_mask = B_IFS_CLM_OFDM_FA_MSK,
	.ifs_clm_cck_fa_mask = B_IFS_CLM_CCK_FA_MSK,
	.ifs_his_addr = R_IFS_HIS,
	.ifs_his_addr2 = R_IFS_HIS,
	.ifs_t4_his_mask = B_IFS_T4_HIS_MSK,
	.ifs_t3_his_mask = B_IFS_T3_HIS_MSK,
	.ifs_t2_his_mask = B_IFS_T2_HIS_MSK,
	.ifs_t1_his_mask = B_IFS_T1_HIS_MSK,
	.ifs_avg_l_addr = R_IFS_AVG_L,
	.ifs_t2_avg_mask = B_IFS_T2_AVG_MSK,
	.ifs_t1_avg_mask = B_IFS_T1_AVG_MSK,
	.ifs_avg_h_addr = R_IFS_AVG_H,
	.ifs_t4_avg_mask = B_IFS_T4_AVG_MSK,
	.ifs_t3_avg_mask = B_IFS_T3_AVG_MSK,
	.ifs_cca_l_addr = R_IFS_CCA_L,
	.ifs_t2_cca_mask = B_IFS_T2_CCA_MSK,
	.ifs_t1_cca_mask = B_IFS_T1_CCA_MSK,
	.ifs_cca_h_addr = R_IFS_CCA_H,
	.ifs_t4_cca_mask = B_IFS_T4_CCA_MSK,
	.ifs_t3_cca_mask = B_IFS_T3_CCA_MSK,
	.ifs_total_addr = R_IFSCNT,
	.ifs_cnt_done_mask = B_IFSCNT_DONE_MSK,
	.ifs_total_mask = B_IFSCNT_TOTAL_CNT_MSK,
	.nhm = R_NHM_AX,
	.nhm_ready = B_NHM_READY_MSK,
	.nhm_config = R_NHM_CFG,
	.nhm_period_mask = B_NHM_PERIOD_MSK,
	.nhm_unit_mask = B_NHM_COUNTER_MSK,
	.nhm_include_cca_mask = B_NHM_INCLUDE_CCA_MSK,
	.nhm_en_mask = B_NHM_EN_MSK,
	.nhm_method = R_NHM_TH9,
	.nhm_pwr_method_msk = B_NHM_PWDB_METHOD_MSK,
};
8384 
/* PHY-status (PLCP histogram) register map for AX-generation chips. */
static const struct rtw89_physts_regs rtw89_physts_regs_ax = {
	.setting_addr = R_PLCP_HISTOGRAM,
	.dis_trigger_fail_mask = B_STS_DIS_TRIG_BY_FAIL,
	.dis_trigger_brk_mask = B_STS_DIS_TRIG_BY_BRK,
};
8390 
/* CFO (carrier frequency offset) compensation register map for AX chips. */
static const struct rtw89_cfo_regs rtw89_cfo_regs_ax = {
	.comp = R_DCFO_WEIGHT,
	.weighting_mask = B_DCFO_WEIGHT_MSK,
	.comp_seg0 = R_DCFO_OPT,
	.valid_0_mask = B_DCFO_OPT_EN,
};
8397 
/* PHY generation descriptor for AX-generation chips: register bases,
 * per-generation register tables and function hooks.  NULL hooks mean
 * the feature does not exist on this generation.
 */
const struct rtw89_phy_gen_def rtw89_phy_gen_ax = {
	.cr_base = 0x10000,
	.physt_bmp_start = R_PHY_STS_BITMAP_ADDR_START,
	.physt_bmp_eht = 0xfc,
	.ccx = &rtw89_ccx_regs_ax,
	.physts = &rtw89_physts_regs_ax,
	.cfo = &rtw89_cfo_regs_ax,
	.bb_wrap = NULL,
	.phy0_phy1_offset = rtw89_phy0_phy1_offset_ax,
	.config_bb_gain = rtw89_phy_config_bb_gain_ax,
	.preinit_rf_nctl = rtw89_phy_preinit_rf_nctl_ax,
	.bb_wrap_init = NULL,
	.ch_info_init = NULL,

	.set_txpwr_byrate = rtw89_phy_set_txpwr_byrate_ax,
	.set_txpwr_offset = rtw89_phy_set_txpwr_offset_ax,
	.set_txpwr_limit = rtw89_phy_set_txpwr_limit_ax,
	.set_txpwr_limit_ru = rtw89_phy_set_txpwr_limit_ru_ax,
};
EXPORT_SYMBOL(rtw89_phy_gen_ax);
8418