// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2019-2020 Realtek Corporation
 */

#include "chan.h"
#include "coex.h"
#include "debug.h"
#include "fw.h"
#include "mac.h"
#include "phy.h"
#include "ps.h"
#include "reg.h"
#include "sar.h"
#include "txrx.h"
#include "util.h"

static u32 rtw89_phy0_phy1_offset(struct rtw89_dev *rtwdev, u32 addr)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;

	return phy->phy0_phy1_offset(rtwdev, addr);
}

static u16 get_max_amsdu_len(struct rtw89_dev *rtwdev,
			     const struct rtw89_ra_report *report)
{
	u32 bit_rate = report->bit_rate;

	/* lower than ofdm, do not aggregate */
	if (bit_rate < 550)
		return 1;

	/* avoid AMSDU for legacy rate */
	if (report->might_fallback_legacy)
		return 1;

	/* lower than 20M vht 2ss mcs8, make it small */
	if (bit_rate < 1800)
		return 1200;

	/* lower than 40M vht 2ss mcs9, make it medium */
	if (bit_rate < 4000)
		return 2600;

	/* not yet 80M vht 2ss mcs8/9, make it twice regular packet size */
	if (bit_rate < 7000)
		return 3500;

	return rtwdev->chip->max_amsdu_limit;
}

static u64 get_mcs_ra_mask(u16 mcs_map, u8 highest_mcs, u8 gap)
{
	u64 ra_mask = 0;
	u8 mcs_cap;
	int i, nss;

	for (i = 0, nss = 12; i < 4; i++, mcs_map >>= 2, nss += 12) {
		mcs_cap = mcs_map & 0x3;
		switch (mcs_cap) {
		case 2:
			ra_mask |= GENMASK_ULL(highest_mcs, 0) << nss;
			break;
		case 1:
			ra_mask |= GENMASK_ULL(highest_mcs - gap, 0) << nss;
			break;
		case 0:
			ra_mask |= GENMASK_ULL(highest_mcs - gap * 2, 0) << nss;
			break;
		default:
			break;
		}
	}

	return ra_mask;
}

static u64 get_he_ra_mask(struct ieee80211_link_sta *link_sta)
{
	struct ieee80211_sta_he_cap cap = link_sta->he_cap;
	u16 mcs_map;

	switch (link_sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		if (cap.he_cap_elem.phy_cap_info[0] &
		    IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
			mcs_map = le16_to_cpu(cap.he_mcs_nss_supp.rx_mcs_80p80);
		else
			mcs_map = le16_to_cpu(cap.he_mcs_nss_supp.rx_mcs_160);
		break;
	default:
		mcs_map = le16_to_cpu(cap.he_mcs_nss_supp.rx_mcs_80);
	}

	/* MCS11, MCS9, MCS7 */
	return get_mcs_ra_mask(mcs_map, 11, 2);
}

static u64 get_eht_mcs_ra_mask(u8 *max_nss, u8 start_mcs, u8 n_nss)
{
	u64 nss_mcs_shift;
	u64 nss_mcs_val;
	u64 mask = 0;
	int i, j;
	u8 nss;

	for (i = 0; i < n_nss; i++) {
		nss = u8_get_bits(max_nss[i], IEEE80211_EHT_MCS_NSS_RX);
		if (!nss)
			continue;

		nss_mcs_val = GENMASK_ULL(start_mcs + i * 2, 0);

		for (j = 0, nss_mcs_shift = 12; j < nss; j++, nss_mcs_shift += 16)
			mask |= nss_mcs_val << nss_mcs_shift;
	}

	return mask;
}

static u64 get_eht_ra_mask(struct ieee80211_link_sta *link_sta)
{
	struct ieee80211_sta_eht_cap *eht_cap = &link_sta->eht_cap;
	struct ieee80211_eht_mcs_nss_supp_20mhz_only *mcs_nss_20mhz;
	struct ieee80211_eht_mcs_nss_supp_bw *mcs_nss;
	u8 *he_phy_cap = link_sta->he_cap.he_cap_elem.phy_cap_info;

	switch (link_sta->bandwidth) {
	case IEEE80211_STA_RX_BW_320:
		mcs_nss = &eht_cap->eht_mcs_nss_supp.bw._320;
		/* MCS 9, 11, 13 */
		return get_eht_mcs_ra_mask(mcs_nss->rx_tx_max_nss, 9, 3);
	case IEEE80211_STA_RX_BW_160:
		mcs_nss = &eht_cap->eht_mcs_nss_supp.bw._160;
		/* MCS 9, 11, 13 */
		return get_eht_mcs_ra_mask(mcs_nss->rx_tx_max_nss, 9, 3);
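	/* A 20MHz-only EHT STA (no HE channel-width bits set) reports its
	 * MCS/NSS support in only_20mhz rather than in the per-bandwidth sets.
	 */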
	case IEEE80211_STA_RX_BW_20:
		if (!(he_phy_cap[0] &
		      IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL)) {
			mcs_nss_20mhz = &eht_cap->eht_mcs_nss_supp.only_20mhz;
			/* MCS 7, 9, 11, 13 */
			return get_eht_mcs_ra_mask(mcs_nss_20mhz->rx_tx_max_nss, 7, 4);
		}
		fallthrough;
	case IEEE80211_STA_RX_BW_80:
	default:
		mcs_nss = &eht_cap->eht_mcs_nss_supp.bw._80;
		/* MCS 9, 11, 13 */
		return get_eht_mcs_ra_mask(mcs_nss->rx_tx_max_nss, 9, 3);
	}
}

#define RA_FLOOR_TABLE_SIZE 7
#define RA_FLOOR_UP_GAP 3
static u64 rtw89_phy_ra_mask_rssi(struct rtw89_dev *rtwdev, u8 rssi,
				  u8 ratr_state)
{
	u8 rssi_lv_t[RA_FLOOR_TABLE_SIZE] = {30, 44, 48, 52, 56, 60, 100};
	u8 rssi_lv = 0;
	u8 i;

	rssi >>= 1;
	for (i = 0; i < RA_FLOOR_TABLE_SIZE; i++) {
		if (i >= ratr_state)
			rssi_lv_t[i] += RA_FLOOR_UP_GAP;
		if (rssi < rssi_lv_t[i]) {
			rssi_lv = i;
			break;
		}
	}
	if (rssi_lv == 0)
		return 0xffffffffffffffffULL;
	else if (rssi_lv == 1)
		return 0xfffffffffffffff0ULL;
	else if (rssi_lv == 2)
		return 0xffffffffffffefe0ULL;
	else if (rssi_lv == 3)
		return 0xffffffffffffcfc0ULL;
	else if (rssi_lv == 4)
		return 0xffffffffffff8f80ULL;
	else if (rssi_lv >= 5)
		return 0xffffffffffff0f00ULL;

	return 0xffffffffffffffffULL;
}

static u64 rtw89_phy_ra_mask_recover(u64 ra_mask, u64 ra_mask_bak)
{
	if ((ra_mask & ~(RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES)) == 0)
		ra_mask |= (ra_mask_bak & ~(RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES));

	if (ra_mask == 0)
		ra_mask |= (ra_mask_bak & (RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES));

	return ra_mask;
}

static u64 rtw89_phy_ra_mask_cfg(struct rtw89_dev *rtwdev,
				 struct rtw89_sta_link *rtwsta_link,
				 struct ieee80211_link_sta *link_sta,
				 const struct rtw89_chan *chan)
{
	struct cfg80211_bitrate_mask *mask = &rtwsta_link->mask;
	enum nl80211_band band;
	u64 cfg_mask;

	if (!rtwsta_link->use_cfg_mask)
		return -1;

	switch (chan->band_type) {
	case RTW89_BAND_2G:
		band = NL80211_BAND_2GHZ;
		cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_2GHZ].legacy,
					   RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES);
		break;
	case RTW89_BAND_5G:
		band = NL80211_BAND_5GHZ;
		cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_5GHZ].legacy,
					   RA_MASK_OFDM_RATES);
		break;
	case RTW89_BAND_6G:
		band = NL80211_BAND_6GHZ;
		cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_6GHZ].legacy,
					   RA_MASK_OFDM_RATES);
		break;
	default:
		rtw89_warn(rtwdev, "unhandled band type %d\n", chan->band_type);
		return -1;
	}

	if (link_sta->he_cap.has_he) {
		cfg_mask |= u64_encode_bits(mask->control[band].he_mcs[0],
					    RA_MASK_HE_1SS_RATES);
		cfg_mask |= u64_encode_bits(mask->control[band].he_mcs[1],
					    RA_MASK_HE_2SS_RATES);
	} else if (link_sta->vht_cap.vht_supported) {
		cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[0],
					    RA_MASK_VHT_1SS_RATES);
		cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[1],
					    RA_MASK_VHT_2SS_RATES);
	} else if (link_sta->ht_cap.ht_supported) {
		cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[0],
					    RA_MASK_HT_1SS_RATES);
		cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[1],
					    RA_MASK_HT_2SS_RATES);
	}

	return cfg_mask;
}
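
/* Per-spatial-stream rate masks, indexed by NSS - 1; used below to trim
 * ra_mask down to the number of TX spatial streams this device supports.
 */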
static const u64
rtw89_ra_mask_ht_rates[4] = {RA_MASK_HT_1SS_RATES, RA_MASK_HT_2SS_RATES,
			     RA_MASK_HT_3SS_RATES, RA_MASK_HT_4SS_RATES};
static const u64
rtw89_ra_mask_vht_rates[4] = {RA_MASK_VHT_1SS_RATES, RA_MASK_VHT_2SS_RATES,
			      RA_MASK_VHT_3SS_RATES, RA_MASK_VHT_4SS_RATES};
static const u64
rtw89_ra_mask_he_rates[4] = {RA_MASK_HE_1SS_RATES, RA_MASK_HE_2SS_RATES,
			     RA_MASK_HE_3SS_RATES, RA_MASK_HE_4SS_RATES};
static const u64
rtw89_ra_mask_eht_rates[4] = {RA_MASK_EHT_1SS_RATES, RA_MASK_EHT_2SS_RATES,
			      RA_MASK_EHT_3SS_RATES, RA_MASK_EHT_4SS_RATES};

static void rtw89_phy_ra_gi_ltf(struct rtw89_dev *rtwdev,
				struct rtw89_sta_link *rtwsta_link,
				const struct rtw89_chan *chan,
				bool *fix_giltf_en, u8 *fix_giltf)
{
	struct cfg80211_bitrate_mask *mask = &rtwsta_link->mask;
	u8 band = chan->band_type;
	enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
	u8 he_gi = mask->control[nl_band].he_gi;
	u8 he_ltf = mask->control[nl_band].he_ltf;

	if (!rtwsta_link->use_cfg_mask)
		return;

	if (he_ltf == 2 && he_gi == 2) {
		*fix_giltf = RTW89_GILTF_LGI_4XHE32;
	} else if (he_ltf == 2 && he_gi == 0) {
		*fix_giltf = RTW89_GILTF_SGI_4XHE08;
	} else if (he_ltf == 1 && he_gi == 1) {
		*fix_giltf = RTW89_GILTF_2XHE16;
	} else if (he_ltf == 1 && he_gi == 0) {
		*fix_giltf = RTW89_GILTF_2XHE08;
	} else if (he_ltf == 0 && he_gi == 1) {
		*fix_giltf = RTW89_GILTF_1XHE16;
	} else if (he_ltf == 0 && he_gi == 0) {
		*fix_giltf = RTW89_GILTF_1XHE08;
	} else {
		*fix_giltf_en = false;
		return;
	}

	*fix_giltf_en = true;
}

static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
				    struct rtw89_vif_link *rtwvif_link,
				    struct rtw89_sta_link *rtwsta_link,
				    struct ieee80211_link_sta *link_sta,
				    bool p2p, bool csi)
{
	struct rtw89_phy_rate_pattern *rate_pattern = &rtwvif_link->rate_pattern;
	struct rtw89_ra_info *ra = &rtwsta_link->ra;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
						       rtwvif_link->chanctx_idx);
	const u64 *high_rate_masks = rtw89_ra_mask_ht_rates;
	u8 rssi = ewma_rssi_read(&rtwsta_link->avg_rssi);
	u64 ra_mask = 0;
	u64 ra_mask_bak;
	u8 mode = 0;
	u8 csi_mode = RTW89_RA_RPT_MODE_LEGACY;
	u8 bw_mode = 0;
	u8 stbc_en = 0;
	u8 ldpc_en = 0;
	u8 fix_giltf = 0;
	u8 i;
	bool sgi = false;
	bool fix_giltf_en = false;

	memset(ra, 0, sizeof(*ra));
	/* Set the ra mask from sta's capability */
	if (link_sta->eht_cap.has_eht) {
		mode |= RTW89_RA_MODE_EHT;
		ra_mask |= get_eht_ra_mask(link_sta);
		high_rate_masks = rtw89_ra_mask_eht_rates;
	} else if (link_sta->he_cap.has_he) {
		mode |= RTW89_RA_MODE_HE;
		csi_mode = RTW89_RA_RPT_MODE_HE;
		ra_mask |= get_he_ra_mask(link_sta);
		high_rate_masks = rtw89_ra_mask_he_rates;
		if (link_sta->he_cap.he_cap_elem.phy_cap_info[2] &
		    IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
			stbc_en = 1;
		if (link_sta->he_cap.he_cap_elem.phy_cap_info[1] &
		    IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)
			ldpc_en = 1;
		rtw89_phy_ra_gi_ltf(rtwdev, rtwsta_link, chan, &fix_giltf_en, &fix_giltf);
	} else if (link_sta->vht_cap.vht_supported) {
		u16 mcs_map = le16_to_cpu(link_sta->vht_cap.vht_mcs.rx_mcs_map);

		mode |= RTW89_RA_MODE_VHT;
		csi_mode = RTW89_RA_RPT_MODE_VHT;
		/* MCS9 (non-20MHz), MCS8, MCS7 */
		if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20)
			ra_mask |= get_mcs_ra_mask(mcs_map, 8, 1);
		else
			ra_mask |= get_mcs_ra_mask(mcs_map, 9, 1);
		high_rate_masks = rtw89_ra_mask_vht_rates;
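		/* enable TX STBC/LDPC only if the peer can receive them */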
		if (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK)
			stbc_en = 1;
		if (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)
			ldpc_en = 1;
	} else if (link_sta->ht_cap.ht_supported) {
		mode |= RTW89_RA_MODE_HT;
		csi_mode = RTW89_RA_RPT_MODE_HT;
		ra_mask |= ((u64)link_sta->ht_cap.mcs.rx_mask[3] << 48) |
			   ((u64)link_sta->ht_cap.mcs.rx_mask[2] << 36) |
			   ((u64)link_sta->ht_cap.mcs.rx_mask[1] << 24) |
			   ((u64)link_sta->ht_cap.mcs.rx_mask[0] << 12);
		high_rate_masks = rtw89_ra_mask_ht_rates;
		if (link_sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
			stbc_en = 1;
		if (link_sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)
			ldpc_en = 1;
	}

	switch (chan->band_type) {
	case RTW89_BAND_2G:
		ra_mask |= link_sta->supp_rates[NL80211_BAND_2GHZ];
		if (link_sta->supp_rates[NL80211_BAND_2GHZ] & 0xf)
			mode |= RTW89_RA_MODE_CCK;
		if (link_sta->supp_rates[NL80211_BAND_2GHZ] & 0xff0)
			mode |= RTW89_RA_MODE_OFDM;
		break;
	case RTW89_BAND_5G:
		ra_mask |= (u64)link_sta->supp_rates[NL80211_BAND_5GHZ] << 4;
		mode |= RTW89_RA_MODE_OFDM;
		break;
	case RTW89_BAND_6G:
		ra_mask |= (u64)link_sta->supp_rates[NL80211_BAND_6GHZ] << 4;
		mode |= RTW89_RA_MODE_OFDM;
		break;
	default:
		rtw89_err(rtwdev, "Unknown band type\n");
		break;
	}

	ra_mask_bak = ra_mask;

	if (mode >= RTW89_RA_MODE_HT) {
		u64 mask = 0;

		for (i = 0; i < rtwdev->hal.tx_nss; i++)
			mask |= high_rate_masks[i];
		if (mode & RTW89_RA_MODE_OFDM)
			mask |= RA_MASK_SUBOFDM_RATES;
		if (mode & RTW89_RA_MODE_CCK)
			mask |= RA_MASK_SUBCCK_RATES;
		ra_mask &= mask;
	} else if (mode & RTW89_RA_MODE_OFDM) {
		ra_mask &= (RA_MASK_OFDM_RATES | RA_MASK_SUBCCK_RATES);
	}

	if (mode != RTW89_RA_MODE_CCK)
		ra_mask &= rtw89_phy_ra_mask_rssi(rtwdev, rssi, 0);

	ra_mask = rtw89_phy_ra_mask_recover(ra_mask, ra_mask_bak);
	ra_mask &= rtw89_phy_ra_mask_cfg(rtwdev, rtwsta_link, link_sta, chan);

	switch (link_sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		bw_mode = RTW89_CHANNEL_WIDTH_160;
		sgi = link_sta->vht_cap.vht_supported &&
		      (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160);
		break;
	case IEEE80211_STA_RX_BW_80:
		bw_mode = RTW89_CHANNEL_WIDTH_80;
		sgi = link_sta->vht_cap.vht_supported &&
		      (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80);
		break;
	case IEEE80211_STA_RX_BW_40:
		bw_mode = RTW89_CHANNEL_WIDTH_40;
		sgi = link_sta->ht_cap.ht_supported &&
		      (link_sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40);
		break;
	default:
		bw_mode = RTW89_CHANNEL_WIDTH_20;
		sgi = link_sta->ht_cap.ht_supported &&
		      (link_sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
		break;
	}

	if (link_sta->he_cap.he_cap_elem.phy_cap_info[3] &
	    IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM)
		ra->dcm_cap = 1;

	if (rate_pattern->enable && !p2p) {
		ra_mask = rtw89_phy_ra_mask_cfg(rtwdev, rtwsta_link, link_sta, chan);
		ra_mask &= rate_pattern->ra_mask;
		mode = rate_pattern->ra_mode;
	}

	ra->bw_cap = bw_mode;
	ra->er_cap = rtwsta_link->er_cap;
	ra->mode_ctrl = mode;
	ra->macid = rtwsta_link->mac_id;
	ra->stbc_cap = stbc_en;
	ra->ldpc_cap = ldpc_en;
	ra->ss_num = min(link_sta->rx_nss, rtwdev->hal.tx_nss) - 1;
	ra->en_sgi = sgi;
	ra->ra_mask = ra_mask;
	ra->fix_giltf_en = fix_giltf_en;
	ra->fix_giltf = fix_giltf;
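
	/* The fields below configure CSI report rates and are only consumed
	 * when the peer is a beamformer (csi == true); see rtw89_phy_ra_assoc().
	 */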
	if (!csi)
		return;

	ra->fixed_csi_rate_en = false;
	ra->ra_csi_rate_en = true;
	ra->cr_tbl_sel = false;
	ra->band_num = rtwvif_link->phy_idx;
	ra->csi_bw = bw_mode;
	ra->csi_gi_ltf = RTW89_GILTF_LGI_4XHE32;
	ra->csi_mcs_ss_idx = 5;
	ra->csi_mode = csi_mode;
}

static void __rtw89_phy_ra_update_sta(struct rtw89_dev *rtwdev,
				      struct rtw89_vif_link *rtwvif_link,
				      struct rtw89_sta_link *rtwsta_link,
				      u32 changed)
{
	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
	struct rtw89_ra_info *ra = &rtwsta_link->ra;
	struct ieee80211_link_sta *link_sta;

	rcu_read_lock();

	link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, false);
	rtw89_phy_ra_sta_update(rtwdev, rtwvif_link, rtwsta_link,
				link_sta, vif->p2p, false);

	rcu_read_unlock();

	if (changed & IEEE80211_RC_SUPP_RATES_CHANGED)
		ra->upd_mask = 1;
	if (changed & (IEEE80211_RC_BW_CHANGED | IEEE80211_RC_NSS_CHANGED))
		ra->upd_bw_nss_mask = 1;

	rtw89_debug(rtwdev, RTW89_DBG_RA,
		    "ra update: macid = %d, bw = %d, nss = %d, gi = %d %d",
		    ra->macid,
		    ra->bw_cap,
		    ra->ss_num,
		    ra->en_sgi,
		    ra->giltf);

	rtw89_fw_h2c_ra(rtwdev, ra, false);
}

void rtw89_phy_ra_update_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta,
			     u32 changed)
{
	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
	struct rtw89_vif_link *rtwvif_link;
	struct rtw89_sta_link *rtwsta_link;
	unsigned int link_id;

	rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id) {
		rtwvif_link = rtwsta_link->rtwvif_link;
		__rtw89_phy_ra_update_sta(rtwdev, rtwvif_link, rtwsta_link, changed);
	}
}

static bool __check_rate_pattern(struct rtw89_phy_rate_pattern *next,
				 u16 rate_base, u64 ra_mask, u8 ra_mode,
				 u32 rate_ctrl, u32 ctrl_skip, bool force)
{
	u8 n, c;

	if (rate_ctrl == ctrl_skip)
		return true;

	n = hweight32(rate_ctrl);
	if (n == 0)
		return true;

	if (force && n != 1)
		return false;

	if (next->enable)
		return false;

	c = __fls(rate_ctrl);
	next->rate = rate_base + c;
	next->ra_mode = ra_mode;
	next->ra_mask = ra_mask;
	next->enable = true;

	return true;
}

#define RTW89_HW_RATE_BY_CHIP_GEN(rate) \
	{ \
		[RTW89_CHIP_AX] = RTW89_HW_RATE_ ## rate, \
		[RTW89_CHIP_BE] = RTW89_HW_RATE_V1_ ## rate, \
	}

static
void __rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
				  struct rtw89_vif_link *rtwvif_link,
				  const struct cfg80211_bitrate_mask *mask)
{
	struct ieee80211_supported_band *sband;
	struct rtw89_phy_rate_pattern next_pattern = {0};
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
						       rtwvif_link->chanctx_idx);
	static const u16 hw_rate_he[][RTW89_CHIP_GEN_NUM] = {
		RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS1_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS2_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS3_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS4_MCS0),
	};
	static const u16 hw_rate_vht[][RTW89_CHIP_GEN_NUM] = {
		RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS1_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS2_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS3_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS4_MCS0),
	};
	static const u16 hw_rate_ht[][RTW89_CHIP_GEN_NUM] = {
		RTW89_HW_RATE_BY_CHIP_GEN(MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(MCS8),
		RTW89_HW_RATE_BY_CHIP_GEN(MCS16),
		RTW89_HW_RATE_BY_CHIP_GEN(MCS24),
	};
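	/* Each table above holds the base HW rate code per NSS, indexed by
	 * chip generation: BE chips use the V1 rate-code space (see
	 * RTW89_HW_RATE_BY_CHIP_GEN).
	 */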
	u8 band = chan->band_type;
	enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
	enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
	u8 tx_nss = rtwdev->hal.tx_nss;
	u8 i;

	for (i = 0; i < tx_nss; i++)
		if (!__check_rate_pattern(&next_pattern, hw_rate_he[i][chip_gen],
					  RA_MASK_HE_RATES, RTW89_RA_MODE_HE,
					  mask->control[nl_band].he_mcs[i],
					  0, true))
			goto out;

	for (i = 0; i < tx_nss; i++)
		if (!__check_rate_pattern(&next_pattern, hw_rate_vht[i][chip_gen],
					  RA_MASK_VHT_RATES, RTW89_RA_MODE_VHT,
					  mask->control[nl_band].vht_mcs[i],
					  0, true))
			goto out;

	for (i = 0; i < tx_nss; i++)
		if (!__check_rate_pattern(&next_pattern, hw_rate_ht[i][chip_gen],
					  RA_MASK_HT_RATES, RTW89_RA_MODE_HT,
					  mask->control[nl_band].ht_mcs[i],
					  0, true))
			goto out;

	/* legacy cannot be empty for nl80211_parse_tx_bitrate_mask, and
	 * ieee80211_set_bitrate_mask requires at least one basic rate, so
	 * the decision depends only on whether all bitrates are set.
	 */
	sband = rtwdev->hw->wiphy->bands[nl_band];
	if (band == RTW89_BAND_2G) {
		if (!__check_rate_pattern(&next_pattern, RTW89_HW_RATE_CCK1,
					  RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES,
					  RTW89_RA_MODE_CCK | RTW89_RA_MODE_OFDM,
					  mask->control[nl_band].legacy,
					  BIT(sband->n_bitrates) - 1, false))
			goto out;
	} else {
		if (!__check_rate_pattern(&next_pattern, RTW89_HW_RATE_OFDM6,
					  RA_MASK_OFDM_RATES, RTW89_RA_MODE_OFDM,
					  mask->control[nl_band].legacy,
					  BIT(sband->n_bitrates) - 1, false))
			goto out;
	}

	if (!next_pattern.enable)
		goto out;

	rtwvif_link->rate_pattern = next_pattern;
	rtw89_debug(rtwdev, RTW89_DBG_RA,
		    "configure pattern: rate 0x%x, mask 0x%llx, mode 0x%x\n",
		    next_pattern.rate,
		    next_pattern.ra_mask,
		    next_pattern.ra_mode);
	return;

out:
	rtwvif_link->rate_pattern.enable = false;
	rtw89_debug(rtwdev, RTW89_DBG_RA, "unset rate pattern\n");
}

void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
				struct ieee80211_vif *vif,
				const struct cfg80211_bitrate_mask *mask)
{
	struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
	struct rtw89_vif_link *rtwvif_link;
	unsigned int link_id;

	rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
		__rtw89_phy_rate_pattern_vif(rtwdev, rtwvif_link, mask);
}

static void rtw89_phy_ra_update_sta_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw89_dev *rtwdev = (struct rtw89_dev *)data;

	rtw89_phy_ra_update_sta(rtwdev, sta, IEEE80211_RC_SUPP_RATES_CHANGED);
}

void rtw89_phy_ra_update(struct rtw89_dev *rtwdev)
{
	ieee80211_iterate_stations_atomic(rtwdev->hw,
					  rtw89_phy_ra_update_sta_iter,
					  rtwdev);
}

void rtw89_phy_ra_assoc(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwsta_link)
{
	struct rtw89_vif_link *rtwvif_link = rtwsta_link->rtwvif_link;
	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
	struct rtw89_ra_info *ra = &rtwsta_link->ra;
	u8 rssi = ewma_rssi_read(&rtwsta_link->avg_rssi) >> RSSI_FACTOR;
	struct ieee80211_link_sta *link_sta;
	bool csi;

	rcu_read_lock();

	link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
	csi = rtw89_sta_has_beamformer_cap(link_sta);

	rtw89_phy_ra_sta_update(rtwdev, rtwvif_link, rtwsta_link,
				link_sta, vif->p2p, csi);

	rcu_read_unlock();

	if (rssi > 40)
		ra->init_rate_lv = 1;
	else if (rssi > 20)
		ra->init_rate_lv = 2;
	else if (rssi > 1)
		ra->init_rate_lv = 3;
	else
		ra->init_rate_lv = 0;
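
	/* On association, push the complete RA configuration to the firmware. */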
	ra->upd_all = 1;
	rtw89_debug(rtwdev, RTW89_DBG_RA,
		    "ra assoc: macid = %d, mode = %d, bw = %d, nss = %d, lv = %d",
		    ra->macid,
		    ra->mode_ctrl,
		    ra->bw_cap,
		    ra->ss_num,
		    ra->init_rate_lv);
	rtw89_debug(rtwdev, RTW89_DBG_RA,
		    "ra assoc: dcm = %d, er = %d, ldpc = %d, stbc = %d, gi = %d %d",
		    ra->dcm_cap,
		    ra->er_cap,
		    ra->ldpc_cap,
		    ra->stbc_cap,
		    ra->en_sgi,
		    ra->giltf);

	rtw89_fw_h2c_ra(rtwdev, ra, csi);
}

u8 rtw89_phy_get_txsc(struct rtw89_dev *rtwdev,
		      const struct rtw89_chan *chan,
		      enum rtw89_bandwidth dbw)
{
	enum rtw89_bandwidth cbw = chan->band_width;
	u8 pri_ch = chan->primary_channel;
	u8 central_ch = chan->channel;
	u8 txsc_idx = 0;
	u8 tmp = 0;

	if (cbw == dbw || cbw == RTW89_CHANNEL_WIDTH_20)
		return txsc_idx;

	switch (cbw) {
	case RTW89_CHANNEL_WIDTH_40:
		txsc_idx = pri_ch > central_ch ? 1 : 2;
		break;
	case RTW89_CHANNEL_WIDTH_80:
		if (dbw == RTW89_CHANNEL_WIDTH_20) {
			if (pri_ch > central_ch)
				txsc_idx = (pri_ch - central_ch) >> 1;
			else
				txsc_idx = ((central_ch - pri_ch) >> 1) + 1;
		} else {
			txsc_idx = pri_ch > central_ch ? 9 : 10;
		}
		break;
	case RTW89_CHANNEL_WIDTH_160:
		if (pri_ch > central_ch)
			tmp = (pri_ch - central_ch) >> 1;
		else
			tmp = ((central_ch - pri_ch) >> 1) + 1;

		if (dbw == RTW89_CHANNEL_WIDTH_20) {
			txsc_idx = tmp;
		} else if (dbw == RTW89_CHANNEL_WIDTH_40) {
			if (tmp == 1 || tmp == 3)
				txsc_idx = 9;
			else if (tmp == 5 || tmp == 7)
				txsc_idx = 11;
			else if (tmp == 2 || tmp == 4)
				txsc_idx = 10;
			else if (tmp == 6 || tmp == 8)
				txsc_idx = 12;
			else
				return 0xff;
		} else {
			txsc_idx = pri_ch > central_ch ? 13 : 14;
		}
		break;
	case RTW89_CHANNEL_WIDTH_80_80:
		if (dbw == RTW89_CHANNEL_WIDTH_20) {
			if (pri_ch > central_ch)
				txsc_idx = (10 - (pri_ch - central_ch)) >> 1;
			else
				txsc_idx = ((central_ch - pri_ch) >> 1) + 5;
		} else if (dbw == RTW89_CHANNEL_WIDTH_40) {
			txsc_idx = pri_ch > central_ch ? 10 : 12;
		} else {
			txsc_idx = 14;
		}
		break;
	default:
		break;
	}

	return txsc_idx;
}
EXPORT_SYMBOL(rtw89_phy_get_txsc);

u8 rtw89_phy_get_txsb(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan,
		      enum rtw89_bandwidth dbw)
{
	enum rtw89_bandwidth cbw = chan->band_width;
	u8 pri_ch = chan->primary_channel;
	u8 central_ch = chan->channel;
	u8 txsb_idx = 0;

	if (cbw == dbw || cbw == RTW89_CHANNEL_WIDTH_20)
		return txsb_idx;

	switch (cbw) {
	case RTW89_CHANNEL_WIDTH_40:
		txsb_idx = pri_ch > central_ch ? 1 : 0;
		break;
	case RTW89_CHANNEL_WIDTH_80:
		if (dbw == RTW89_CHANNEL_WIDTH_20)
			txsb_idx = (pri_ch - central_ch + 6) / 4;
		else
			txsb_idx = pri_ch > central_ch ? 1 : 0;
		break;
	case RTW89_CHANNEL_WIDTH_160:
		if (dbw == RTW89_CHANNEL_WIDTH_20)
			txsb_idx = (pri_ch - central_ch + 14) / 4;
		else if (dbw == RTW89_CHANNEL_WIDTH_40)
			txsb_idx = (pri_ch - central_ch + 12) / 8;
		else
			txsb_idx = pri_ch > central_ch ? 1 : 0;
		break;
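	/* 320 MHz bandwidth only exists on BE (Wi-Fi 7) generation chips. */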
	case RTW89_CHANNEL_WIDTH_320:
		if (dbw == RTW89_CHANNEL_WIDTH_20)
			txsb_idx = (pri_ch - central_ch + 30) / 4;
		else if (dbw == RTW89_CHANNEL_WIDTH_40)
			txsb_idx = (pri_ch - central_ch + 28) / 8;
		else if (dbw == RTW89_CHANNEL_WIDTH_80)
			txsb_idx = (pri_ch - central_ch + 24) / 16;
		else
			txsb_idx = pri_ch > central_ch ? 1 : 0;
		break;
	default:
		break;
	}

	return txsb_idx;
}
EXPORT_SYMBOL(rtw89_phy_get_txsb);

static bool rtw89_phy_check_swsi_busy(struct rtw89_dev *rtwdev)
{
	return !!rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, B_SWSI_W_BUSY_V1) ||
	       !!rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, B_SWSI_R_BUSY_V1);
}

u32 rtw89_phy_read_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
		      u32 addr, u32 mask)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const u32 *base_addr = chip->rf_base_addr;
	u32 val, direct_addr;

	if (rf_path >= rtwdev->chip->rf_path_num) {
		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return INV_RF_DATA;
	}

	addr &= 0xff;
	direct_addr = base_addr[rf_path] + (addr << 2);
	mask &= RFREG_MASK;

	val = rtw89_phy_read32_mask(rtwdev, direct_addr, mask);

	return val;
}
EXPORT_SYMBOL(rtw89_phy_read_rf);

static u32 rtw89_phy_read_rf_a(struct rtw89_dev *rtwdev,
			       enum rtw89_rf_path rf_path, u32 addr, u32 mask)
{
	bool busy;
	bool done;
	u32 val;
	int ret;

	ret = read_poll_timeout_atomic(rtw89_phy_check_swsi_busy, busy, !busy,
				       1, 30, false, rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "read rf busy swsi\n");
		return INV_RF_DATA;
	}

	mask &= RFREG_MASK;

	val = FIELD_PREP(B_SWSI_READ_ADDR_PATH_V1, rf_path) |
	      FIELD_PREP(B_SWSI_READ_ADDR_ADDR_V1, addr);
	rtw89_phy_write32_mask(rtwdev, R_SWSI_READ_ADDR_V1, B_SWSI_READ_ADDR_V1, val);
	udelay(2);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, done, done, 1,
				       30, false, rtwdev, R_SWSI_V1,
				       B_SWSI_R_DATA_DONE_V1);
	if (ret) {
		rtw89_err(rtwdev, "read swsi busy\n");
		return INV_RF_DATA;
	}

	return rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, mask);
}

u32 rtw89_phy_read_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
			 u32 addr, u32 mask)
{
	bool ad_sel = FIELD_GET(RTW89_RF_ADDR_ADSEL_MASK, addr);

	if (rf_path >= rtwdev->chip->rf_path_num) {
		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return INV_RF_DATA;
	}

	if (ad_sel)
		return rtw89_phy_read_rf(rtwdev, rf_path, addr, mask);
	else
		return rtw89_phy_read_rf_a(rtwdev, rf_path, addr, mask);
}
EXPORT_SYMBOL(rtw89_phy_read_rf_v1);

static u32 rtw89_phy_read_full_rf_v2_a(struct rtw89_dev *rtwdev,
				       enum rtw89_rf_path rf_path, u32 addr)
{
	static const u16 r_addr_ofst[2] = {0x2C24, 0x2D24};
	static const u16 addr_ofst[2] = {0x2ADC, 0x2BDC};
	bool busy, done;
	int ret;
	u32 val;

	rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_CTL_MASK, 0x1);
	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, busy, !busy,
				       1, 3800, false,
				       rtwdev, r_addr_ofst[rf_path], B_HWSI_VAL_BUSY);
	if (ret) {
		rtw89_warn(rtwdev, "poll HWSI is busy\n");
		return INV_RF_DATA;
	}

	rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_MASK, addr);
	rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_RD, 0x1);
	udelay(2);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, done, done,
				       1, 3800, false,
				       rtwdev, r_addr_ofst[rf_path], B_HWSI_VAL_RDONE);
	if (ret) {
		rtw89_warn(rtwdev, "read HWSI is busy\n");
		val = INV_RF_DATA;
		goto out;
	}

	val = rtw89_phy_read32_mask(rtwdev, r_addr_ofst[rf_path], RFREG_MASK);
out:
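	/* Clear the address/poll control even on timeout so the HWSI
	 * interface is left ready for the next access.
	 */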
	rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_POLL_MASK, 0);

	return val;
}

static u32 rtw89_phy_read_rf_v2_a(struct rtw89_dev *rtwdev,
				  enum rtw89_rf_path rf_path, u32 addr, u32 mask)
{
	u32 val;

	val = rtw89_phy_read_full_rf_v2_a(rtwdev, rf_path, addr);

	return (val & mask) >> __ffs(mask);
}

u32 rtw89_phy_read_rf_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
			 u32 addr, u32 mask)
{
	bool ad_sel = u32_get_bits(addr, RTW89_RF_ADDR_ADSEL_MASK);

	if (rf_path >= rtwdev->chip->rf_path_num) {
		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return INV_RF_DATA;
	}

	if (ad_sel)
		return rtw89_phy_read_rf(rtwdev, rf_path, addr, mask);
	else
		return rtw89_phy_read_rf_v2_a(rtwdev, rf_path, addr, mask);
}
EXPORT_SYMBOL(rtw89_phy_read_rf_v2);

bool rtw89_phy_write_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
			u32 addr, u32 mask, u32 data)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const u32 *base_addr = chip->rf_base_addr;
	u32 direct_addr;

	if (rf_path >= rtwdev->chip->rf_path_num) {
		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return false;
	}

	addr &= 0xff;
	direct_addr = base_addr[rf_path] + (addr << 2);
	mask &= RFREG_MASK;

	rtw89_phy_write32_mask(rtwdev, direct_addr, mask, data);

	/* delay to ensure writing properly */
	udelay(1);

	return true;
}
EXPORT_SYMBOL(rtw89_phy_write_rf);

static bool rtw89_phy_write_rf_a(struct rtw89_dev *rtwdev,
				 enum rtw89_rf_path rf_path, u32 addr, u32 mask,
				 u32 data)
{
	u8 bit_shift;
	u32 val;
	bool busy, b_msk_en = false;
	int ret;

	ret = read_poll_timeout_atomic(rtw89_phy_check_swsi_busy, busy, !busy,
				       1, 30, false, rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "write rf busy swsi\n");
		return false;
	}

	data &= RFREG_MASK;
	mask &= RFREG_MASK;

	if (mask != RFREG_MASK) {
		b_msk_en = true;
		rtw89_phy_write32_mask(rtwdev, R_SWSI_BIT_MASK_V1, RFREG_MASK,
				       mask);
		bit_shift = __ffs(mask);
		data = (data << bit_shift) & RFREG_MASK;
	}

	val = FIELD_PREP(B_SWSI_DATA_BIT_MASK_EN_V1, b_msk_en) |
	      FIELD_PREP(B_SWSI_DATA_PATH_V1, rf_path) |
	      FIELD_PREP(B_SWSI_DATA_ADDR_V1, addr) |
	      FIELD_PREP(B_SWSI_DATA_VAL_V1, data);

	rtw89_phy_write32_mask(rtwdev, R_SWSI_DATA_V1, MASKDWORD, val);

	return true;
}

bool rtw89_phy_write_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
			   u32 addr, u32 mask, u32 data)
{
	bool ad_sel = FIELD_GET(RTW89_RF_ADDR_ADSEL_MASK, addr);

	if (rf_path >= rtwdev->chip->rf_path_num) {
		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return false;
	}

	if (ad_sel)
		return rtw89_phy_write_rf(rtwdev, rf_path, addr, mask, data);
	else
		return rtw89_phy_write_rf_a(rtwdev, rf_path, addr, mask, data);
}
EXPORT_SYMBOL(rtw89_phy_write_rf_v1);

static
bool rtw89_phy_write_full_rf_v2_a(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
				  u32 addr, u32 data)
{
	static const u32 addr_is_idle[2] = {0x2C24, 0x2D24};
	static const u32 addr_ofst[2] = {0x2AE0, 0x2BE0};
	bool busy;
	u32 val;
	int ret;

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, busy, !busy,
				       1, 3800, false,
				       rtwdev, addr_is_idle[rf_path], BIT(29));
"[%s] HWSI is busy\n", __func__); 1067 return false; 1068 } 1069 1070 val = u32_encode_bits(addr, B_HWSI_DATA_ADDR) | 1071 u32_encode_bits(data, B_HWSI_DATA_VAL); 1072 1073 rtw89_phy_write32(rtwdev, addr_ofst[rf_path], val); 1074 1075 return true; 1076 } 1077 1078 static 1079 bool rtw89_phy_write_rf_a_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path, 1080 u32 addr, u32 mask, u32 data) 1081 { 1082 u32 val; 1083 1084 if (mask == RFREG_MASK) { 1085 val = data; 1086 } else { 1087 val = rtw89_phy_read_full_rf_v2_a(rtwdev, rf_path, addr); 1088 val &= ~mask; 1089 val |= (data << __ffs(mask)) & mask; 1090 } 1091 1092 return rtw89_phy_write_full_rf_v2_a(rtwdev, rf_path, addr, val); 1093 } 1094 1095 bool rtw89_phy_write_rf_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path, 1096 u32 addr, u32 mask, u32 data) 1097 { 1098 bool ad_sel = u32_get_bits(addr, RTW89_RF_ADDR_ADSEL_MASK); 1099 1100 if (rf_path >= rtwdev->chip->rf_path_num) { 1101 rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path); 1102 return INV_RF_DATA; 1103 } 1104 1105 if (ad_sel) 1106 return rtw89_phy_write_rf(rtwdev, rf_path, addr, mask, data); 1107 else 1108 return rtw89_phy_write_rf_a_v2(rtwdev, rf_path, addr, mask, data); 1109 } 1110 EXPORT_SYMBOL(rtw89_phy_write_rf_v2); 1111 1112 static bool rtw89_chip_rf_v1(struct rtw89_dev *rtwdev) 1113 { 1114 return rtwdev->chip->ops->write_rf == rtw89_phy_write_rf_v1; 1115 } 1116 1117 static void __rtw89_phy_bb_reset(struct rtw89_dev *rtwdev, 1118 enum rtw89_phy_idx phy_idx) 1119 { 1120 const struct rtw89_chip_info *chip = rtwdev->chip; 1121 1122 chip->ops->bb_reset(rtwdev, phy_idx); 1123 } 1124 1125 static void rtw89_phy_bb_reset(struct rtw89_dev *rtwdev) 1126 { 1127 __rtw89_phy_bb_reset(rtwdev, RTW89_PHY_0); 1128 if (rtwdev->dbcc_en) 1129 __rtw89_phy_bb_reset(rtwdev, RTW89_PHY_1); 1130 } 1131 1132 static void rtw89_phy_config_bb_reg(struct rtw89_dev *rtwdev, 1133 const struct rtw89_reg2_def *reg, 1134 enum rtw89_rf_path rf_path, 1135 void *extra_data) 1136 { 1137 u32 addr; 1138 1139 if (reg->addr == 0xfe) { 1140 mdelay(50); 1141 } else if (reg->addr == 0xfd) { 1142 mdelay(5); 1143 } else if (reg->addr == 0xfc) { 1144 mdelay(1); 1145 } else if (reg->addr == 0xfb) { 1146 udelay(50); 1147 } else if (reg->addr == 0xfa) { 1148 udelay(5); 1149 } else if (reg->addr == 0xf9) { 1150 udelay(1); 1151 } else if (reg->data == BYPASS_CR_DATA) { 1152 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "Bypass CR 0x%x\n", reg->addr); 1153 } else { 1154 addr = reg->addr; 1155 1156 if ((uintptr_t)extra_data == RTW89_PHY_1) 1157 addr += rtw89_phy0_phy1_offset(rtwdev, reg->addr); 1158 1159 rtw89_phy_write32(rtwdev, addr, reg->data); 1160 } 1161 } 1162 1163 union rtw89_phy_bb_gain_arg { 1164 u32 addr; 1165 struct { 1166 union { 1167 u8 type; 1168 struct { 1169 u8 rxsc_start:4; 1170 u8 bw:4; 1171 }; 1172 }; 1173 u8 path; 1174 u8 gain_band; 1175 u8 cfg_type; 1176 }; 1177 } __packed; 1178 1179 static void 1180 rtw89_phy_cfg_bb_gain_error(struct rtw89_dev *rtwdev, 1181 union rtw89_phy_bb_gain_arg arg, u32 data) 1182 { 1183 struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax; 1184 u8 type = arg.type; 1185 u8 path = arg.path; 1186 u8 gband = arg.gain_band; 1187 int i; 1188 1189 switch (type) { 1190 case 0: 1191 for (i = 0; i < 4; i++, data >>= 8) 1192 gain->lna_gain[gband][path][i] = data & 0xff; 1193 break; 1194 case 1: 1195 for (i = 4; i < 7; i++, data >>= 8) 1196 gain->lna_gain[gband][path][i] = data & 0xff; 1197 break; 1198 case 2: 1199 for (i = 0; i < 2; i++, data >>= 8) 1200 
	case 2:
		for (i = 0; i < 2; i++, data >>= 8)
			gain->tia_gain[gband][path][i] = data & 0xff;
		break;
	default:
		rtw89_warn(rtwdev,
			   "bb gain error {0x%x:0x%x} with unknown type: %d\n",
			   arg.addr, data, type);
		break;
	}
}

enum rtw89_phy_bb_rxsc_start_idx {
	RTW89_BB_RXSC_START_IDX_FULL = 0,
	RTW89_BB_RXSC_START_IDX_20 = 1,
	RTW89_BB_RXSC_START_IDX_20_1 = 5,
	RTW89_BB_RXSC_START_IDX_40 = 9,
	RTW89_BB_RXSC_START_IDX_80 = 13,
};

static void
rtw89_phy_cfg_bb_rpl_ofst(struct rtw89_dev *rtwdev,
			  union rtw89_phy_bb_gain_arg arg, u32 data)
{
	struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
	u8 rxsc_start = arg.rxsc_start;
	u8 bw = arg.bw;
	u8 path = arg.path;
	u8 gband = arg.gain_band;
	u8 rxsc;
	s8 ofst;
	int i;

	switch (bw) {
	case RTW89_CHANNEL_WIDTH_20:
		gain->rpl_ofst_20[gband][path] = (s8)data;
		break;
	case RTW89_CHANNEL_WIDTH_40:
		if (rxsc_start == RTW89_BB_RXSC_START_IDX_FULL) {
			gain->rpl_ofst_40[gband][path][0] = (s8)data;
		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20) {
			for (i = 0; i < 2; i++, data >>= 8) {
				rxsc = RTW89_BB_RXSC_START_IDX_20 + i;
				ofst = (s8)(data & 0xff);
				gain->rpl_ofst_40[gband][path][rxsc] = ofst;
			}
		}
		break;
	case RTW89_CHANNEL_WIDTH_80:
		if (rxsc_start == RTW89_BB_RXSC_START_IDX_FULL) {
			gain->rpl_ofst_80[gband][path][0] = (s8)data;
		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20) {
			for (i = 0; i < 4; i++, data >>= 8) {
				rxsc = RTW89_BB_RXSC_START_IDX_20 + i;
				ofst = (s8)(data & 0xff);
				gain->rpl_ofst_80[gband][path][rxsc] = ofst;
			}
		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_40) {
			for (i = 0; i < 2; i++, data >>= 8) {
				rxsc = RTW89_BB_RXSC_START_IDX_40 + i;
				ofst = (s8)(data & 0xff);
				gain->rpl_ofst_80[gband][path][rxsc] = ofst;
			}
		}
		break;
	case RTW89_CHANNEL_WIDTH_160:
		if (rxsc_start == RTW89_BB_RXSC_START_IDX_FULL) {
			gain->rpl_ofst_160[gband][path][0] = (s8)data;
		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20) {
			for (i = 0; i < 4; i++, data >>= 8) {
				rxsc = RTW89_BB_RXSC_START_IDX_20 + i;
				ofst = (s8)(data & 0xff);
				gain->rpl_ofst_160[gband][path][rxsc] = ofst;
			}
		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20_1) {
			for (i = 0; i < 4; i++, data >>= 8) {
				rxsc = RTW89_BB_RXSC_START_IDX_20_1 + i;
				ofst = (s8)(data & 0xff);
				gain->rpl_ofst_160[gband][path][rxsc] = ofst;
			}
		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_40) {
			for (i = 0; i < 4; i++, data >>= 8) {
				rxsc = RTW89_BB_RXSC_START_IDX_40 + i;
				ofst = (s8)(data & 0xff);
				gain->rpl_ofst_160[gband][path][rxsc] = ofst;
			}
		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_80) {
			for (i = 0; i < 2; i++, data >>= 8) {
				rxsc = RTW89_BB_RXSC_START_IDX_80 + i;
				ofst = (s8)(data & 0xff);
				gain->rpl_ofst_160[gband][path][rxsc] = ofst;
			}
		}
		break;
	default:
		rtw89_warn(rtwdev,
			   "bb rpl ofst {0x%x:0x%x} with unknown bw: %d\n",
			   arg.addr, data, bw);
		break;
	}
}

static void
rtw89_phy_cfg_bb_gain_bypass(struct rtw89_dev *rtwdev,
			     union rtw89_phy_bb_gain_arg arg, u32 data)
{
	struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
	u8 type = arg.type;
	u8 path = arg.path;
	u8 gband = arg.gain_band;
	int i;

	switch (type) {
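	/* types 0 and 1 split the seven LNA bypass-gain bytes across two words */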
	case 0:
		for (i = 0; i < 4; i++, data >>= 8)
			gain->lna_gain_bypass[gband][path][i] = data & 0xff;
		break;
	case 1:
		for (i = 4; i < 7; i++, data >>= 8)
			gain->lna_gain_bypass[gband][path][i] = data & 0xff;
		break;
	default:
		rtw89_warn(rtwdev,
			   "bb gain bypass {0x%x:0x%x} with unknown type: %d\n",
			   arg.addr, data, type);
		break;
	}
}

static void
rtw89_phy_cfg_bb_gain_op1db(struct rtw89_dev *rtwdev,
			    union rtw89_phy_bb_gain_arg arg, u32 data)
{
	struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
	u8 type = arg.type;
	u8 path = arg.path;
	u8 gband = arg.gain_band;
	int i;

	switch (type) {
	case 0:
		for (i = 0; i < 4; i++, data >>= 8)
			gain->lna_op1db[gband][path][i] = data & 0xff;
		break;
	case 1:
		for (i = 4; i < 7; i++, data >>= 8)
			gain->lna_op1db[gband][path][i] = data & 0xff;
		break;
	case 2:
		for (i = 0; i < 4; i++, data >>= 8)
			gain->tia_lna_op1db[gband][path][i] = data & 0xff;
		break;
	case 3:
		for (i = 4; i < 8; i++, data >>= 8)
			gain->tia_lna_op1db[gband][path][i] = data & 0xff;
		break;
	default:
		rtw89_warn(rtwdev,
			   "bb gain op1db {0x%x:0x%x} with unknown type: %d\n",
			   arg.addr, data, type);
		break;
	}
}

static void rtw89_phy_config_bb_gain_ax(struct rtw89_dev *rtwdev,
					const struct rtw89_reg2_def *reg,
					enum rtw89_rf_path rf_path,
					void *extra_data)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	union rtw89_phy_bb_gain_arg arg = { .addr = reg->addr };
	struct rtw89_efuse *efuse = &rtwdev->efuse;

	if (arg.gain_band >= RTW89_BB_GAIN_BAND_NR)
		return;

	if (arg.path >= chip->rf_path_num)
		return;

	if (arg.addr >= 0xf9 && arg.addr <= 0xfe) {
		rtw89_warn(rtwdev, "bb gain table with flow ctrl\n");
		return;
	}

	switch (arg.cfg_type) {
	case 0:
		rtw89_phy_cfg_bb_gain_error(rtwdev, arg, reg->data);
		break;
	case 1:
		rtw89_phy_cfg_bb_rpl_ofst(rtwdev, arg, reg->data);
		break;
	case 2:
		rtw89_phy_cfg_bb_gain_bypass(rtwdev, arg, reg->data);
		break;
	case 3:
		rtw89_phy_cfg_bb_gain_op1db(rtwdev, arg, reg->data);
		break;
	case 4:
		/* This cfg_type is only used by rfe_type >= 50 with eFEM */
		if (efuse->rfe_type < 50)
			break;
		fallthrough;
	default:
		rtw89_warn(rtwdev,
			   "bb gain {0x%x:0x%x} with unknown cfg type: %d\n",
			   arg.addr, reg->data, arg.cfg_type);
		break;
	}
}

static void
rtw89_phy_cofig_rf_reg_store(struct rtw89_dev *rtwdev,
			     const struct rtw89_reg2_def *reg,
			     enum rtw89_rf_path rf_path,
			     struct rtw89_fw_h2c_rf_reg_info *info)
{
	u16 idx = info->curr_idx % RTW89_H2C_RF_PAGE_SIZE;
	u8 page = info->curr_idx / RTW89_H2C_RF_PAGE_SIZE;
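
	/* RF register writes bound for firmware are batched into fixed-size
	 * H2C pages; entries beyond the last page are dropped with a warning.
	 */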
	if (page >= RTW89_H2C_RF_PAGE_NUM) {
		rtw89_warn(rtwdev, "RF parameters exceed size. path=%d, idx=%d",
			   rf_path, info->curr_idx);
		return;
	}

	info->rtw89_phy_config_rf_h2c[page][idx] =
		cpu_to_le32((reg->addr << 20) | reg->data);
	info->curr_idx++;
}

static int rtw89_phy_config_rf_reg_fw(struct rtw89_dev *rtwdev,
				      struct rtw89_fw_h2c_rf_reg_info *info)
{
	u16 remain = info->curr_idx;
	u16 len = 0;
	u8 i;
	int ret = 0;

	if (remain > RTW89_H2C_RF_PAGE_NUM * RTW89_H2C_RF_PAGE_SIZE) {
		rtw89_warn(rtwdev,
			   "rf reg h2c total len %d larger than %d\n",
			   remain, RTW89_H2C_RF_PAGE_NUM * RTW89_H2C_RF_PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	for (i = 0; i < RTW89_H2C_RF_PAGE_NUM && remain; i++, remain -= len) {
		len = remain > RTW89_H2C_RF_PAGE_SIZE ? RTW89_H2C_RF_PAGE_SIZE : remain;
		ret = rtw89_fw_h2c_rf_reg(rtwdev, info, len * 4, i);
		if (ret)
			goto out;
	}
out:
	info->curr_idx = 0;

	return ret;
}

static void rtw89_phy_config_rf_reg_noio(struct rtw89_dev *rtwdev,
					 const struct rtw89_reg2_def *reg,
					 enum rtw89_rf_path rf_path,
					 void *extra_data)
{
	u32 addr = reg->addr;

	if (addr == 0xfe || addr == 0xfd || addr == 0xfc || addr == 0xfb ||
	    addr == 0xfa || addr == 0xf9)
		return;

	if (rtw89_chip_rf_v1(rtwdev) && addr < 0x100)
		return;

	rtw89_phy_cofig_rf_reg_store(rtwdev, reg, rf_path,
				     (struct rtw89_fw_h2c_rf_reg_info *)extra_data);
}

static void rtw89_phy_config_rf_reg(struct rtw89_dev *rtwdev,
				    const struct rtw89_reg2_def *reg,
				    enum rtw89_rf_path rf_path,
				    void *extra_data)
{
	if (reg->addr == 0xfe) {
		mdelay(50);
	} else if (reg->addr == 0xfd) {
		mdelay(5);
	} else if (reg->addr == 0xfc) {
		mdelay(1);
	} else if (reg->addr == 0xfb) {
		udelay(50);
	} else if (reg->addr == 0xfa) {
		udelay(5);
	} else if (reg->addr == 0xf9) {
		udelay(1);
	} else {
		rtw89_write_rf(rtwdev, rf_path, reg->addr, 0xfffff, reg->data);
		rtw89_phy_cofig_rf_reg_store(rtwdev, reg, rf_path,
					     (struct rtw89_fw_h2c_rf_reg_info *)extra_data);
	}
}

void rtw89_phy_config_rf_reg_v1(struct rtw89_dev *rtwdev,
				const struct rtw89_reg2_def *reg,
				enum rtw89_rf_path rf_path,
				void *extra_data)
{
	rtw89_write_rf(rtwdev, rf_path, reg->addr, RFREG_MASK, reg->data);

	if (reg->addr < 0x100)
		return;

	rtw89_phy_cofig_rf_reg_store(rtwdev, reg, rf_path,
				     (struct rtw89_fw_h2c_rf_reg_info *)extra_data);
}
EXPORT_SYMBOL(rtw89_phy_config_rf_reg_v1);

static int rtw89_phy_sel_headline(struct rtw89_dev *rtwdev,
				  const struct rtw89_phy_table *table,
				  u32 *headline_size, u32 *headline_idx,
				  u8 rfe, u8 cv)
{
	const struct rtw89_reg2_def *reg;
	u32 headline;
	u32 compare, target;
	u8 rfe_para, cv_para;
	u8 cv_max = 0;
	bool case_matched = false;
	u32 i;

	for (i = 0; i < table->n_regs; i++) {
		reg = &table->regs[i];
		headline = get_phy_headline(reg->addr);
		if (headline != PHY_HEADLINE_VALID)
			break;
	}
	*headline_size = i;
	if (*headline_size == 0)
		return 0;

	/* case 1: RFE match, CV match */
	compare = get_phy_compare(rfe, cv);
	for (i = 0; i < *headline_size; i++) {
		reg = &table->regs[i];
		target = get_phy_target(reg->addr);
		if (target == compare) {
			*headline_idx = i;
			return 0;
		}
	}
	/* case 2: RFE match, CV don't care */
	compare = get_phy_compare(rfe, PHY_COND_DONT_CARE);
	for (i = 0; i < *headline_size; i++) {
		reg = &table->regs[i];
		target = get_phy_target(reg->addr);
		if (target == compare) {
			*headline_idx = i;
			return 0;
		}
	}

	/* case 3: RFE match, CV max in table */
	for (i = 0; i < *headline_size; i++) {
		reg = &table->regs[i];
		rfe_para = get_phy_cond_rfe(reg->addr);
		cv_para = get_phy_cond_cv(reg->addr);
		if (rfe_para == rfe) {
			if (cv_para >= cv_max) {
				cv_max = cv_para;
				*headline_idx = i;
				case_matched = true;
			}
		}
	}

	if (case_matched)
		return 0;

	/* case 4: RFE don't care, CV max in table */
	for (i = 0; i < *headline_size; i++) {
		reg = &table->regs[i];
		rfe_para = get_phy_cond_rfe(reg->addr);
		cv_para = get_phy_cond_cv(reg->addr);
		if (rfe_para == PHY_COND_DONT_CARE) {
			if (cv_para >= cv_max) {
				cv_max = cv_para;
				*headline_idx = i;
				case_matched = true;
			}
		}
	}

	if (case_matched)
		return 0;

	return -EINVAL;
}

static void rtw89_phy_init_reg(struct rtw89_dev *rtwdev,
			       const struct rtw89_phy_table *table,
			       void (*config)(struct rtw89_dev *rtwdev,
					      const struct rtw89_reg2_def *reg,
					      enum rtw89_rf_path rf_path,
					      void *data),
			       void *extra_data)
{
	const struct rtw89_reg2_def *reg;
	enum rtw89_rf_path rf_path = table->rf_path;
	u8 rfe = rtwdev->efuse.rfe_type;
	u8 cv = rtwdev->hal.cv;
	u32 i;
	u32 headline_size = 0, headline_idx = 0;
	u32 target = 0, cfg_target;
	u8 cond;
	bool is_matched = true;
	bool target_found = false;
	int ret;

	ret = rtw89_phy_sel_headline(rtwdev, table, &headline_size,
				     &headline_idx, rfe, cv);
	if (ret) {
		rtw89_err(rtwdev, "invalid PHY package: %d/%d\n", rfe, cv);
		return;
	}

	cfg_target = get_phy_target(table->regs[headline_idx].addr);
	for (i = headline_size; i < table->n_regs; i++) {
		reg = &table->regs[i];
		cond = get_phy_cond(reg->addr);
		switch (cond) {
		case PHY_COND_BRANCH_IF:
		case PHY_COND_BRANCH_ELIF:
			target = get_phy_target(reg->addr);
			break;
		case PHY_COND_BRANCH_ELSE:
			is_matched = false;
			if (!target_found) {
				rtw89_warn(rtwdev, "failed to load CR %x/%x\n",
					   reg->addr, reg->data);
				return;
			}
			break;
		case PHY_COND_BRANCH_END:
			is_matched = true;
			target_found = false;
			break;
		case PHY_COND_CHECK:
			if (target_found) {
				is_matched = false;
				break;
			}

			if (target == cfg_target) {
				is_matched = true;
				target_found = true;
			} else {
				is_matched = false;
				target_found = false;
			}
			break;
		default:
			if (is_matched)
				config(rtwdev, reg, rf_path, extra_data);
			break;
		}
	}
}
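
/* BB parameter tables may be provided by the loaded firmware file
 * (elm_info) and fall back to the tables built into the chip info.
 */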
void rtw89_phy_init_bb_reg(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_phy_table *bb_table;
	const struct rtw89_phy_table *bb_gain_table;

	bb_table = elm_info->bb_tbl ? elm_info->bb_tbl : chip->bb_table;
	rtw89_phy_init_reg(rtwdev, bb_table, rtw89_phy_config_bb_reg, NULL);
	if (rtwdev->dbcc_en)
		rtw89_phy_init_reg(rtwdev, bb_table, rtw89_phy_config_bb_reg,
				   (void *)RTW89_PHY_1);

	rtw89_chip_init_txpwr_unit(rtwdev);

	bb_gain_table = elm_info->bb_gain ? elm_info->bb_gain : chip->bb_gain_table;
	if (bb_gain_table)
		rtw89_phy_init_reg(rtwdev, bb_gain_table,
				   chip->phy_def->config_bb_gain, NULL);

	rtw89_phy_bb_reset(rtwdev);
}

static u32 rtw89_phy_nctl_poll(struct rtw89_dev *rtwdev)
{
	rtw89_phy_write32(rtwdev, 0x8080, 0x4);
	udelay(1);
	return rtw89_phy_read32(rtwdev, 0x8080);
}

void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev, bool noio)
{
	void (*config)(struct rtw89_dev *rtwdev, const struct rtw89_reg2_def *reg,
		       enum rtw89_rf_path rf_path, void *data);
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_phy_table *rf_table;
	struct rtw89_fw_h2c_rf_reg_info *rf_reg_info;
	u8 path;

	rf_reg_info = kzalloc(sizeof(*rf_reg_info), GFP_KERNEL);
	if (!rf_reg_info)
		return;

	for (path = RF_PATH_A; path < chip->rf_path_num; path++) {
		rf_table = elm_info->rf_radio[path] ?
			   elm_info->rf_radio[path] : chip->rf_table[path];
		rf_reg_info->rf_path = rf_table->rf_path;
		if (noio)
			config = rtw89_phy_config_rf_reg_noio;
		else
			config = rf_table->config ? rf_table->config :
				 rtw89_phy_config_rf_reg;
		rtw89_phy_init_reg(rtwdev, rf_table, config, (void *)rf_reg_info);
		if (rtw89_phy_config_rf_reg_fw(rtwdev, rf_reg_info))
			rtw89_warn(rtwdev, "rf path %d reg h2c config failed\n",
				   rf_reg_info->rf_path);
	}
	kfree(rf_reg_info);
}

static void rtw89_phy_preinit_rf_nctl_ax(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 val;
	int ret;

	/* IQK/DPK clock & reset */
	rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, 0x3);
	rtw89_phy_write32_set(rtwdev, R_GNT_BT_WGT_EN, 0x1);
	rtw89_phy_write32_set(rtwdev, R_P0_PATH_RST, 0x8000000);
	if (chip->chip_id != RTL8851B)
		rtw89_phy_write32_set(rtwdev, R_P1_PATH_RST, 0x8000000);
	if (chip->chip_id == RTL8852B || chip->chip_id == RTL8852BT)
		rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, 0x2);

	/* check 0x8080 */
	rtw89_phy_write32(rtwdev, R_NCTL_CFG, 0x8);

	ret = read_poll_timeout(rtw89_phy_nctl_poll, val, val == 0x4, 10,
				1000, false, rtwdev);
	if (ret)
		rtw89_err(rtwdev, "failed to poll nctl block\n");
}

static void rtw89_phy_init_rf_nctl(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_phy_table *nctl_table;

	rtw89_phy_preinit_rf_nctl(rtwdev);
	nctl_table = elm_info->rf_nctl ? elm_info->rf_nctl : chip->nctl_table;
	rtw89_phy_init_reg(rtwdev, nctl_table, rtw89_phy_config_bb_reg, NULL);

	if (chip->nctl_post_table)
		rtw89_rfk_parser(rtwdev, chip->nctl_post_table);
}

static u32 rtw89_phy0_phy1_offset_ax(struct rtw89_dev *rtwdev, u32 addr)
{
	u32 phy_page = addr >> 8;
	u32 ofst = 0;

	switch (phy_page) {
	case 0x6:
	case 0x7:
	case 0x8:
	case 0x9:
	case 0xa:
	case 0xb:
	case 0xc:
	case 0xd:
	case 0x19:
	case 0x1a:
	case 0x1b:
		ofst = 0x2000;
		break;
	default:
		/* warning case */
		ofst = 0;
		break;
	}

	if (phy_page >= 0x40 && phy_page <= 0x4f)
		ofst = 0x2000;

	return ofst;
}

void rtw89_phy_write32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
			   u32 data, enum rtw89_phy_idx phy_idx)
{
	if (rtwdev->dbcc_en && phy_idx == RTW89_PHY_1)
		addr += rtw89_phy0_phy1_offset(rtwdev, addr);
	rtw89_phy_write32_mask(rtwdev, addr, mask, data);
}
EXPORT_SYMBOL(rtw89_phy_write32_idx);

void rtw89_phy_write32_idx_set(struct rtw89_dev *rtwdev, u32 addr, u32 bits,
			       enum rtw89_phy_idx phy_idx)
{
	if (rtwdev->dbcc_en && phy_idx == RTW89_PHY_1)
		addr += rtw89_phy0_phy1_offset(rtwdev, addr);
	rtw89_phy_write32_set(rtwdev, addr, bits);
}
EXPORT_SYMBOL(rtw89_phy_write32_idx_set);

void rtw89_phy_write32_idx_clr(struct rtw89_dev *rtwdev, u32 addr, u32 bits,
			       enum rtw89_phy_idx phy_idx)
{
	if (rtwdev->dbcc_en && phy_idx == RTW89_PHY_1)
		addr += rtw89_phy0_phy1_offset(rtwdev, addr);
	rtw89_phy_write32_clr(rtwdev, addr, bits);
}
EXPORT_SYMBOL(rtw89_phy_write32_idx_clr);

u32 rtw89_phy_read32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
			 enum rtw89_phy_idx phy_idx)
{
	if (rtwdev->dbcc_en && phy_idx == RTW89_PHY_1)
		addr += rtw89_phy0_phy1_offset(rtwdev, addr);
	return rtw89_phy_read32_mask(rtwdev, addr, mask);
}
EXPORT_SYMBOL(rtw89_phy_read32_idx);

void rtw89_phy_set_phy_regs(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
			    u32 val)
{
	rtw89_phy_write32_idx(rtwdev, addr, mask, val, RTW89_PHY_0);

	if (!rtwdev->dbcc_en)
		return;

	rtw89_phy_write32_idx(rtwdev, addr, mask, val, RTW89_PHY_1);
}
EXPORT_SYMBOL(rtw89_phy_set_phy_regs);

void rtw89_phy_write_reg3_tbl(struct rtw89_dev *rtwdev,
			      const struct rtw89_phy_reg3_tbl *tbl)
{
	const struct rtw89_reg3_def *reg3;
	int i;

	for (i = 0; i < tbl->size; i++) {
		reg3 = &tbl->reg3[i];
		rtw89_phy_write32_mask(rtwdev, reg3->addr, reg3->mask, reg3->data);
	}
}
EXPORT_SYMBOL(rtw89_phy_write_reg3_tbl);

static const u8 rtw89_rs_idx_num_ax[] = {
	[RTW89_RS_CCK] = RTW89_RATE_CCK_NUM,
	[RTW89_RS_OFDM] = RTW89_RATE_OFDM_NUM,
	[RTW89_RS_MCS] = RTW89_RATE_MCS_NUM_AX,
	[RTW89_RS_HEDCM] = RTW89_RATE_HEDCM_NUM,
	[RTW89_RS_OFFSET] = RTW89_RATE_OFFSET_NUM_AX,
};

static const u8 rtw89_rs_nss_num_ax[] = {
	[RTW89_RS_CCK] = 1,
	[RTW89_RS_OFDM] = 1,
	[RTW89_RS_MCS] = RTW89_NSS_NUM,
	[RTW89_RS_HEDCM] = RTW89_NSS_HEDCM_NUM,
	[RTW89_RS_OFFSET] = 1,
};

s8 *rtw89_phy_raw_byr_seek(struct rtw89_dev *rtwdev,
			   struct rtw89_txpwr_byrate *head,
			   const struct rtw89_rate_desc *desc)
{
	switch (desc->rs) {
	case RTW89_RS_CCK:
		return &head->cck[desc->idx];
	case RTW89_RS_OFDM:
		return &head->ofdm[desc->idx];
	case RTW89_RS_MCS:
		return &head->mcs[desc->ofdma][desc->nss][desc->idx];
	case RTW89_RS_HEDCM:
		return &head->hedcm[desc->ofdma][desc->nss][desc->idx];
	case RTW89_RS_OFFSET:
		return &head->offset[desc->idx];
	default:
		rtw89_warn(rtwdev, "unrecognized byr rs: %d\n", desc->rs);
		return &head->trap;
	}
}

void rtw89_phy_load_txpwr_byrate(struct rtw89_dev *rtwdev,
				 const struct rtw89_txpwr_table *tbl)
{
	const struct rtw89_txpwr_byrate_cfg *cfg = tbl->data;
	const struct rtw89_txpwr_byrate_cfg *end = cfg + tbl->size;
	struct rtw89_txpwr_byrate *byr_head;
	struct rtw89_rate_desc desc = {};
	s8 *byr;
	u32 data;
	u8 i;

	for (; cfg < end; cfg++) {
		byr_head = &rtwdev->byr[cfg->band][0];
		desc.rs = cfg->rs;
		desc.nss = cfg->nss;
		data = cfg->data;

		for (i = 0; i < cfg->len; i++, data >>= 8) {
			desc.idx = cfg->shf + i;
			byr = rtw89_phy_raw_byr_seek(rtwdev, byr_head, &desc);
			*byr = data & 0xff;
		}
	}
}
EXPORT_SYMBOL(rtw89_phy_load_txpwr_byrate);

static s8 rtw89_phy_txpwr_rf_to_mac(struct rtw89_dev *rtwdev, s8 txpwr_rf)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;

	return txpwr_rf >> (chip->txpwr_factor_rf - chip->txpwr_factor_mac);
}

static s8 rtw89_phy_txpwr_dbm_to_mac(struct rtw89_dev *rtwdev, s8 dbm)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;

	return clamp_t(s16, dbm << chip->txpwr_factor_mac, -64, 63);
}

static s8 rtw89_phy_txpwr_dbm_without_tolerance(s8 dbm)
{
	const u8 tssi_deviation_point = 0;
	const u8 tssi_max_deviation = 2;

	if (dbm <= tssi_deviation_point)
		dbm -= tssi_max_deviation;

	return dbm;
}

static s8 rtw89_phy_get_tpe_constraint(struct rtw89_dev *rtwdev, u8 band)
{
	struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
	const struct rtw89_reg_6ghz_tpe *tpe = &regulatory->reg_6ghz_tpe;
	s8 cstr = S8_MAX;

	if (band == RTW89_BAND_6G && tpe->valid)
		cstr = rtw89_phy_txpwr_dbm_without_tolerance(tpe->constraint);

	return rtw89_phy_txpwr_dbm_to_mac(rtwdev, cstr);
}

s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev, u8 band, u8 bw,
			       const struct rtw89_rate_desc *rate_desc)
{
	struct rtw89_txpwr_byrate *byr_head;
	s8 *byr;

	if (rate_desc->rs == RTW89_RS_CCK)
		band = RTW89_BAND_2G;

	byr_head = &rtwdev->byr[band][bw];
	byr = rtw89_phy_raw_byr_seek(rtwdev, byr_head, rate_desc);

	return rtw89_phy_txpwr_rf_to_mac(rtwdev, *byr);
}

static u8 rtw89_channel_6g_to_idx(struct rtw89_dev *rtwdev, u8 channel_6g)
{
	switch (channel_6g) {
	case 1 ... 29:
		return (channel_6g - 1) / 2;
	case 33 ... 61:
		return (channel_6g - 3) / 2;
	case 65 ... 93:
		return (channel_6g - 5) / 2;
	case 97 ... 125:
		return (channel_6g - 7) / 2;
	case 129 ... 157:
		return (channel_6g - 9) / 2;
	case 161 ... 189:
		return (channel_6g - 11) / 2;
	case 193 ... 221:
		return (channel_6g - 13) / 2;
253: 1990 return (channel_6g - 15) / 2; 1991 default: 1992 rtw89_warn(rtwdev, "unknown 6g channel: %d\n", channel_6g); 1993 return 0; 1994 } 1995 } 1996 1997 static u8 rtw89_channel_to_idx(struct rtw89_dev *rtwdev, u8 band, u8 channel) 1998 { 1999 if (band == RTW89_BAND_6G) 2000 return rtw89_channel_6g_to_idx(rtwdev, channel); 2001 2002 switch (channel) { 2003 case 1 ... 14: 2004 return channel - 1; 2005 case 36 ... 64: 2006 return (channel - 36) / 2; 2007 case 100 ... 144: 2008 return ((channel - 100) / 2) + 15; 2009 case 149 ... 177: 2010 return ((channel - 149) / 2) + 38; 2011 default: 2012 rtw89_warn(rtwdev, "unknown channel: %d\n", channel); 2013 return 0; 2014 } 2015 } 2016 2017 s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band, 2018 u8 bw, u8 ntx, u8 rs, u8 bf, u8 ch) 2019 { 2020 const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms; 2021 const struct rtw89_txpwr_rule_2ghz *rule_2ghz = &rfe_parms->rule_2ghz; 2022 const struct rtw89_txpwr_rule_5ghz *rule_5ghz = &rfe_parms->rule_5ghz; 2023 const struct rtw89_txpwr_rule_6ghz *rule_6ghz = &rfe_parms->rule_6ghz; 2024 struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory; 2025 enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band); 2026 u32 freq = ieee80211_channel_to_frequency(ch, nl_band); 2027 u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch); 2028 u8 regd = rtw89_regd_get(rtwdev, band); 2029 u8 reg6 = regulatory->reg_6ghz_power; 2030 s8 lmt = 0, sar; 2031 s8 cstr; 2032 2033 switch (band) { 2034 case RTW89_BAND_2G: 2035 lmt = (*rule_2ghz->lmt)[bw][ntx][rs][bf][regd][ch_idx]; 2036 if (lmt) 2037 break; 2038 2039 lmt = (*rule_2ghz->lmt)[bw][ntx][rs][bf][RTW89_WW][ch_idx]; 2040 break; 2041 case RTW89_BAND_5G: 2042 lmt = (*rule_5ghz->lmt)[bw][ntx][rs][bf][regd][ch_idx]; 2043 if (lmt) 2044 break; 2045 2046 lmt = (*rule_5ghz->lmt)[bw][ntx][rs][bf][RTW89_WW][ch_idx]; 2047 break; 2048 case RTW89_BAND_6G: 2049 lmt = (*rule_6ghz->lmt)[bw][ntx][rs][bf][regd][reg6][ch_idx]; 2050 if (lmt) 2051 break; 2052 2053 lmt = (*rule_6ghz->lmt)[bw][ntx][rs][bf][RTW89_WW] 2054 [RTW89_REG_6GHZ_POWER_DFLT] 2055 [ch_idx]; 2056 break; 2057 default: 2058 rtw89_warn(rtwdev, "unknown band type: %d\n", band); 2059 return 0; 2060 } 2061 2062 lmt = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt); 2063 sar = rtw89_query_sar(rtwdev, freq); 2064 cstr = rtw89_phy_get_tpe_constraint(rtwdev, band); 2065 2066 return min3(lmt, sar, cstr); 2067 } 2068 EXPORT_SYMBOL(rtw89_phy_read_txpwr_limit); 2069 2070 #define __fill_txpwr_limit_nonbf_bf(ptr, band, bw, ntx, rs, ch) \ 2071 do { \ 2072 u8 __i; \ 2073 for (__i = 0; __i < RTW89_BF_NUM; __i++) \ 2074 ptr[__i] = rtw89_phy_read_txpwr_limit(rtwdev, \ 2075 band, \ 2076 bw, ntx, \ 2077 rs, __i, \ 2078 (ch)); \ 2079 } while (0) 2080 2081 static void rtw89_phy_fill_txpwr_limit_20m_ax(struct rtw89_dev *rtwdev, 2082 struct rtw89_txpwr_limit_ax *lmt, 2083 u8 band, u8 ntx, u8 ch) 2084 { 2085 __fill_txpwr_limit_nonbf_bf(lmt->cck_20m, band, RTW89_CHANNEL_WIDTH_20, 2086 ntx, RTW89_RS_CCK, ch); 2087 __fill_txpwr_limit_nonbf_bf(lmt->cck_40m, band, RTW89_CHANNEL_WIDTH_40, 2088 ntx, RTW89_RS_CCK, ch); 2089 __fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20, 2090 ntx, RTW89_RS_OFDM, ch); 2091 __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band, 2092 RTW89_CHANNEL_WIDTH_20, 2093 ntx, RTW89_RS_MCS, ch); 2094 } 2095 2096 static void rtw89_phy_fill_txpwr_limit_40m_ax(struct rtw89_dev *rtwdev, 2097 struct rtw89_txpwr_limit_ax *lmt, 2098 u8 band, u8 ntx, u8 ch, u8 pri_ch) 2099 { 2100 
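/* Descriptive note (not in the original source): for a 40 MHz channel
 * centered at @ch, the two 20 MHz halves are centered at ch - 2 and
 * ch + 2. The code below samples the 20 MHz CCK limit on the lower
 * half, the OFDM limit on the primary channel, the per-half MCS limits
 * on both halves, and the 40 MHz limits on the center itself; e.g. a
 * channel centered at 38 samples its halves on channels 36 and 40.
 */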
__fill_txpwr_limit_nonbf_bf(lmt->cck_20m, band, RTW89_CHANNEL_WIDTH_20, 2101 ntx, RTW89_RS_CCK, ch - 2); 2102 __fill_txpwr_limit_nonbf_bf(lmt->cck_40m, band, RTW89_CHANNEL_WIDTH_40, 2103 ntx, RTW89_RS_CCK, ch); 2104 __fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20, 2105 ntx, RTW89_RS_OFDM, pri_ch); 2106 __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band, 2107 RTW89_CHANNEL_WIDTH_20, 2108 ntx, RTW89_RS_MCS, ch - 2); 2109 __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], band, 2110 RTW89_CHANNEL_WIDTH_20, 2111 ntx, RTW89_RS_MCS, ch + 2); 2112 __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], band, 2113 RTW89_CHANNEL_WIDTH_40, 2114 ntx, RTW89_RS_MCS, ch); 2115 } 2116 2117 static void rtw89_phy_fill_txpwr_limit_80m_ax(struct rtw89_dev *rtwdev, 2118 struct rtw89_txpwr_limit_ax *lmt, 2119 u8 band, u8 ntx, u8 ch, u8 pri_ch) 2120 { 2121 s8 val_0p5_n[RTW89_BF_NUM]; 2122 s8 val_0p5_p[RTW89_BF_NUM]; 2123 u8 i; 2124 2125 __fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20, 2126 ntx, RTW89_RS_OFDM, pri_ch); 2127 __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band, 2128 RTW89_CHANNEL_WIDTH_20, 2129 ntx, RTW89_RS_MCS, ch - 6); 2130 __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], band, 2131 RTW89_CHANNEL_WIDTH_20, 2132 ntx, RTW89_RS_MCS, ch - 2); 2133 __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[2], band, 2134 RTW89_CHANNEL_WIDTH_20, 2135 ntx, RTW89_RS_MCS, ch + 2); 2136 __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[3], band, 2137 RTW89_CHANNEL_WIDTH_20, 2138 ntx, RTW89_RS_MCS, ch + 6); 2139 __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], band, 2140 RTW89_CHANNEL_WIDTH_40, 2141 ntx, RTW89_RS_MCS, ch - 4); 2142 __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[1], band, 2143 RTW89_CHANNEL_WIDTH_40, 2144 ntx, RTW89_RS_MCS, ch + 4); 2145 __fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[0], band, 2146 RTW89_CHANNEL_WIDTH_80, 2147 ntx, RTW89_RS_MCS, ch); 2148 2149 __fill_txpwr_limit_nonbf_bf(val_0p5_n, band, RTW89_CHANNEL_WIDTH_40, 2150 ntx, RTW89_RS_MCS, ch - 4); 2151 __fill_txpwr_limit_nonbf_bf(val_0p5_p, band, RTW89_CHANNEL_WIDTH_40, 2152 ntx, RTW89_RS_MCS, ch + 4); 2153 2154 for (i = 0; i < RTW89_BF_NUM; i++) 2155 lmt->mcs_40m_0p5[i] = min_t(s8, val_0p5_n[i], val_0p5_p[i]); 2156 } 2157 2158 static void rtw89_phy_fill_txpwr_limit_160m_ax(struct rtw89_dev *rtwdev, 2159 struct rtw89_txpwr_limit_ax *lmt, 2160 u8 band, u8 ntx, u8 ch, u8 pri_ch) 2161 { 2162 s8 val_0p5_n[RTW89_BF_NUM]; 2163 s8 val_0p5_p[RTW89_BF_NUM]; 2164 s8 val_2p5_n[RTW89_BF_NUM]; 2165 s8 val_2p5_p[RTW89_BF_NUM]; 2166 u8 i; 2167 2168 /* fill ofdm section */ 2169 __fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20, 2170 ntx, RTW89_RS_OFDM, pri_ch); 2171 2172 /* fill mcs 20m section */ 2173 __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band, 2174 RTW89_CHANNEL_WIDTH_20, 2175 ntx, RTW89_RS_MCS, ch - 14); 2176 __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], band, 2177 RTW89_CHANNEL_WIDTH_20, 2178 ntx, RTW89_RS_MCS, ch - 10); 2179 __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[2], band, 2180 RTW89_CHANNEL_WIDTH_20, 2181 ntx, RTW89_RS_MCS, ch - 6); 2182 __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[3], band, 2183 RTW89_CHANNEL_WIDTH_20, 2184 ntx, RTW89_RS_MCS, ch - 2); 2185 __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[4], band, 2186 RTW89_CHANNEL_WIDTH_20, 2187 ntx, RTW89_RS_MCS, ch + 2); 2188 __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[5], band, 2189 RTW89_CHANNEL_WIDTH_20, 2190 ntx, RTW89_RS_MCS, ch + 6); 2191 __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[6], band, 2192 RTW89_CHANNEL_WIDTH_20, 2193 ntx, RTW89_RS_MCS, ch + 10); 2194 
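/* the eighth and last 20 MHz subchannel of the 160 MHz span sits at
 * ch + 14, completing the ch - 14 ... ch + 14 sequence above
 */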
__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[7], band, 2195 RTW89_CHANNEL_WIDTH_20, 2196 ntx, RTW89_RS_MCS, ch + 14); 2197 2198 /* fill mcs 40m section */ 2199 __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], band, 2200 RTW89_CHANNEL_WIDTH_40, 2201 ntx, RTW89_RS_MCS, ch - 12); 2202 __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[1], band, 2203 RTW89_CHANNEL_WIDTH_40, 2204 ntx, RTW89_RS_MCS, ch - 4); 2205 __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[2], band, 2206 RTW89_CHANNEL_WIDTH_40, 2207 ntx, RTW89_RS_MCS, ch + 4); 2208 __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[3], band, 2209 RTW89_CHANNEL_WIDTH_40, 2210 ntx, RTW89_RS_MCS, ch + 12); 2211 2212 /* fill mcs 80m section */ 2213 __fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[0], band, 2214 RTW89_CHANNEL_WIDTH_80, 2215 ntx, RTW89_RS_MCS, ch - 8); 2216 __fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[1], band, 2217 RTW89_CHANNEL_WIDTH_80, 2218 ntx, RTW89_RS_MCS, ch + 8); 2219 2220 /* fill mcs 160m section */ 2221 __fill_txpwr_limit_nonbf_bf(lmt->mcs_160m, band, 2222 RTW89_CHANNEL_WIDTH_160, 2223 ntx, RTW89_RS_MCS, ch); 2224 2225 /* fill mcs 40m 0p5 section */ 2226 __fill_txpwr_limit_nonbf_bf(val_0p5_n, band, RTW89_CHANNEL_WIDTH_40, 2227 ntx, RTW89_RS_MCS, ch - 4); 2228 __fill_txpwr_limit_nonbf_bf(val_0p5_p, band, RTW89_CHANNEL_WIDTH_40, 2229 ntx, RTW89_RS_MCS, ch + 4); 2230 2231 for (i = 0; i < RTW89_BF_NUM; i++) 2232 lmt->mcs_40m_0p5[i] = min_t(s8, val_0p5_n[i], val_0p5_p[i]); 2233 2234 /* fill mcs 40m 2p5 section */ 2235 __fill_txpwr_limit_nonbf_bf(val_2p5_n, band, RTW89_CHANNEL_WIDTH_40, 2236 ntx, RTW89_RS_MCS, ch - 8); 2237 __fill_txpwr_limit_nonbf_bf(val_2p5_p, band, RTW89_CHANNEL_WIDTH_40, 2238 ntx, RTW89_RS_MCS, ch + 8); 2239 2240 for (i = 0; i < RTW89_BF_NUM; i++) 2241 lmt->mcs_40m_2p5[i] = min_t(s8, val_2p5_n[i], val_2p5_p[i]); 2242 } 2243 2244 static 2245 void rtw89_phy_fill_txpwr_limit_ax(struct rtw89_dev *rtwdev, 2246 const struct rtw89_chan *chan, 2247 struct rtw89_txpwr_limit_ax *lmt, 2248 u8 ntx) 2249 { 2250 u8 band = chan->band_type; 2251 u8 pri_ch = chan->primary_channel; 2252 u8 ch = chan->channel; 2253 u8 bw = chan->band_width; 2254 2255 memset(lmt, 0, sizeof(*lmt)); 2256 2257 switch (bw) { 2258 case RTW89_CHANNEL_WIDTH_20: 2259 rtw89_phy_fill_txpwr_limit_20m_ax(rtwdev, lmt, band, ntx, ch); 2260 break; 2261 case RTW89_CHANNEL_WIDTH_40: 2262 rtw89_phy_fill_txpwr_limit_40m_ax(rtwdev, lmt, band, ntx, ch, 2263 pri_ch); 2264 break; 2265 case RTW89_CHANNEL_WIDTH_80: 2266 rtw89_phy_fill_txpwr_limit_80m_ax(rtwdev, lmt, band, ntx, ch, 2267 pri_ch); 2268 break; 2269 case RTW89_CHANNEL_WIDTH_160: 2270 rtw89_phy_fill_txpwr_limit_160m_ax(rtwdev, lmt, band, ntx, ch, 2271 pri_ch); 2272 break; 2273 } 2274 } 2275 2276 s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band, 2277 u8 ru, u8 ntx, u8 ch) 2278 { 2279 const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms; 2280 const struct rtw89_txpwr_rule_2ghz *rule_2ghz = &rfe_parms->rule_2ghz; 2281 const struct rtw89_txpwr_rule_5ghz *rule_5ghz = &rfe_parms->rule_5ghz; 2282 const struct rtw89_txpwr_rule_6ghz *rule_6ghz = &rfe_parms->rule_6ghz; 2283 struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory; 2284 enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band); 2285 u32 freq = ieee80211_channel_to_frequency(ch, nl_band); 2286 u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch); 2287 u8 regd = rtw89_regd_get(rtwdev, band); 2288 u8 reg6 = regulatory->reg_6ghz_power; 2289 s8 lmt_ru = 0, sar; 2290 s8 cstr; 2291 2292 switch (band) { 2293 case RTW89_BAND_2G: 2294 lmt_ru = 
(*rule_2ghz->lmt_ru)[ru][ntx][regd][ch_idx]; 2295 if (lmt_ru) 2296 break; 2297 2298 lmt_ru = (*rule_2ghz->lmt_ru)[ru][ntx][RTW89_WW][ch_idx]; 2299 break; 2300 case RTW89_BAND_5G: 2301 lmt_ru = (*rule_5ghz->lmt_ru)[ru][ntx][regd][ch_idx]; 2302 if (lmt_ru) 2303 break; 2304 2305 lmt_ru = (*rule_5ghz->lmt_ru)[ru][ntx][RTW89_WW][ch_idx]; 2306 break; 2307 case RTW89_BAND_6G: 2308 lmt_ru = (*rule_6ghz->lmt_ru)[ru][ntx][regd][reg6][ch_idx]; 2309 if (lmt_ru) 2310 break; 2311 2312 lmt_ru = (*rule_6ghz->lmt_ru)[ru][ntx][RTW89_WW] 2313 [RTW89_REG_6GHZ_POWER_DFLT] 2314 [ch_idx]; 2315 break; 2316 default: 2317 rtw89_warn(rtwdev, "unknown band type: %d\n", band); 2318 return 0; 2319 } 2320 2321 lmt_ru = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt_ru); 2322 sar = rtw89_query_sar(rtwdev, freq); 2323 cstr = rtw89_phy_get_tpe_constraint(rtwdev, band); 2324 2325 return min3(lmt_ru, sar, cstr); 2326 } 2327 2328 static void 2329 rtw89_phy_fill_txpwr_limit_ru_20m_ax(struct rtw89_dev *rtwdev, 2330 struct rtw89_txpwr_limit_ru_ax *lmt_ru, 2331 u8 band, u8 ntx, u8 ch) 2332 { 2333 lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2334 RTW89_RU26, 2335 ntx, ch); 2336 lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2337 RTW89_RU52, 2338 ntx, ch); 2339 lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2340 RTW89_RU106, 2341 ntx, ch); 2342 } 2343 2344 static void 2345 rtw89_phy_fill_txpwr_limit_ru_40m_ax(struct rtw89_dev *rtwdev, 2346 struct rtw89_txpwr_limit_ru_ax *lmt_ru, 2347 u8 band, u8 ntx, u8 ch) 2348 { 2349 lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2350 RTW89_RU26, 2351 ntx, ch - 2); 2352 lmt_ru->ru26[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2353 RTW89_RU26, 2354 ntx, ch + 2); 2355 lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2356 RTW89_RU52, 2357 ntx, ch - 2); 2358 lmt_ru->ru52[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2359 RTW89_RU52, 2360 ntx, ch + 2); 2361 lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2362 RTW89_RU106, 2363 ntx, ch - 2); 2364 lmt_ru->ru106[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2365 RTW89_RU106, 2366 ntx, ch + 2); 2367 } 2368 2369 static void 2370 rtw89_phy_fill_txpwr_limit_ru_80m_ax(struct rtw89_dev *rtwdev, 2371 struct rtw89_txpwr_limit_ru_ax *lmt_ru, 2372 u8 band, u8 ntx, u8 ch) 2373 { 2374 lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2375 RTW89_RU26, 2376 ntx, ch - 6); 2377 lmt_ru->ru26[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2378 RTW89_RU26, 2379 ntx, ch - 2); 2380 lmt_ru->ru26[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2381 RTW89_RU26, 2382 ntx, ch + 2); 2383 lmt_ru->ru26[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2384 RTW89_RU26, 2385 ntx, ch + 6); 2386 lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2387 RTW89_RU52, 2388 ntx, ch - 6); 2389 lmt_ru->ru52[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2390 RTW89_RU52, 2391 ntx, ch - 2); 2392 lmt_ru->ru52[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2393 RTW89_RU52, 2394 ntx, ch + 2); 2395 lmt_ru->ru52[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2396 RTW89_RU52, 2397 ntx, ch + 6); 2398 lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2399 RTW89_RU106, 2400 ntx, ch - 6); 2401 lmt_ru->ru106[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2402 RTW89_RU106, 2403 ntx, ch - 2); 2404 lmt_ru->ru106[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2405 RTW89_RU106, 2406 ntx, ch + 2); 2407 lmt_ru->ru106[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2408 
RTW89_RU106, 2409 ntx, ch + 6); 2410 } 2411 2412 static void 2413 rtw89_phy_fill_txpwr_limit_ru_160m_ax(struct rtw89_dev *rtwdev, 2414 struct rtw89_txpwr_limit_ru_ax *lmt_ru, 2415 u8 band, u8 ntx, u8 ch) 2416 { 2417 static const int ofst[] = { -14, -10, -6, -2, 2, 6, 10, 14 }; 2418 int i; 2419 2420 static_assert(ARRAY_SIZE(ofst) == RTW89_RU_SEC_NUM_AX); 2421 for (i = 0; i < RTW89_RU_SEC_NUM_AX; i++) { 2422 lmt_ru->ru26[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2423 RTW89_RU26, 2424 ntx, 2425 ch + ofst[i]); 2426 lmt_ru->ru52[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2427 RTW89_RU52, 2428 ntx, 2429 ch + ofst[i]); 2430 lmt_ru->ru106[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2431 RTW89_RU106, 2432 ntx, 2433 ch + ofst[i]); 2434 } 2435 } 2436 2437 static 2438 void rtw89_phy_fill_txpwr_limit_ru_ax(struct rtw89_dev *rtwdev, 2439 const struct rtw89_chan *chan, 2440 struct rtw89_txpwr_limit_ru_ax *lmt_ru, 2441 u8 ntx) 2442 { 2443 u8 band = chan->band_type; 2444 u8 ch = chan->channel; 2445 u8 bw = chan->band_width; 2446 2447 memset(lmt_ru, 0, sizeof(*lmt_ru)); 2448 2449 switch (bw) { 2450 case RTW89_CHANNEL_WIDTH_20: 2451 rtw89_phy_fill_txpwr_limit_ru_20m_ax(rtwdev, lmt_ru, band, ntx, 2452 ch); 2453 break; 2454 case RTW89_CHANNEL_WIDTH_40: 2455 rtw89_phy_fill_txpwr_limit_ru_40m_ax(rtwdev, lmt_ru, band, ntx, 2456 ch); 2457 break; 2458 case RTW89_CHANNEL_WIDTH_80: 2459 rtw89_phy_fill_txpwr_limit_ru_80m_ax(rtwdev, lmt_ru, band, ntx, 2460 ch); 2461 break; 2462 case RTW89_CHANNEL_WIDTH_160: 2463 rtw89_phy_fill_txpwr_limit_ru_160m_ax(rtwdev, lmt_ru, band, ntx, 2464 ch); 2465 break; 2466 } 2467 } 2468 2469 static void rtw89_phy_set_txpwr_byrate_ax(struct rtw89_dev *rtwdev, 2470 const struct rtw89_chan *chan, 2471 enum rtw89_phy_idx phy_idx) 2472 { 2473 u8 max_nss_num = rtwdev->chip->rf_path_num; 2474 static const u8 rs[] = { 2475 RTW89_RS_CCK, 2476 RTW89_RS_OFDM, 2477 RTW89_RS_MCS, 2478 RTW89_RS_HEDCM, 2479 }; 2480 struct rtw89_rate_desc cur = {}; 2481 u8 band = chan->band_type; 2482 u8 ch = chan->channel; 2483 u32 addr, val; 2484 s8 v[4] = {}; 2485 u8 i; 2486 2487 rtw89_debug(rtwdev, RTW89_DBG_TXPWR, 2488 "[TXPWR] set txpwr byrate with ch=%d\n", ch); 2489 2490 BUILD_BUG_ON(rtw89_rs_idx_num_ax[RTW89_RS_CCK] % 4); 2491 BUILD_BUG_ON(rtw89_rs_idx_num_ax[RTW89_RS_OFDM] % 4); 2492 BUILD_BUG_ON(rtw89_rs_idx_num_ax[RTW89_RS_MCS] % 4); 2493 BUILD_BUG_ON(rtw89_rs_idx_num_ax[RTW89_RS_HEDCM] % 4); 2494 2495 addr = R_AX_PWR_BY_RATE; 2496 for (cur.nss = 0; cur.nss < max_nss_num; cur.nss++) { 2497 for (i = 0; i < ARRAY_SIZE(rs); i++) { 2498 if (cur.nss >= rtw89_rs_nss_num_ax[rs[i]]) 2499 continue; 2500 2501 cur.rs = rs[i]; 2502 for (cur.idx = 0; cur.idx < rtw89_rs_idx_num_ax[rs[i]]; 2503 cur.idx++) { 2504 v[cur.idx % 4] = 2505 rtw89_phy_read_txpwr_byrate(rtwdev, 2506 band, 0, 2507 &cur); 2508 2509 if ((cur.idx + 1) % 4) 2510 continue; 2511 2512 val = FIELD_PREP(GENMASK(7, 0), v[0]) | 2513 FIELD_PREP(GENMASK(15, 8), v[1]) | 2514 FIELD_PREP(GENMASK(23, 16), v[2]) | 2515 FIELD_PREP(GENMASK(31, 24), v[3]); 2516 2517 rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, 2518 val); 2519 addr += 4; 2520 } 2521 } 2522 } 2523 } 2524 2525 static 2526 void rtw89_phy_set_txpwr_offset_ax(struct rtw89_dev *rtwdev, 2527 const struct rtw89_chan *chan, 2528 enum rtw89_phy_idx phy_idx) 2529 { 2530 struct rtw89_rate_desc desc = { 2531 .nss = RTW89_NSS_1, 2532 .rs = RTW89_RS_OFFSET, 2533 }; 2534 u8 band = chan->band_type; 2535 s8 v[RTW89_RATE_OFFSET_NUM_AX] = {}; 2536 u32 val; 2537 2538 rtw89_debug(rtwdev, RTW89_DBG_TXPWR, 
"[TXPWR] set txpwr offset\n"); 2539 2540 for (desc.idx = 0; desc.idx < RTW89_RATE_OFFSET_NUM_AX; desc.idx++) 2541 v[desc.idx] = rtw89_phy_read_txpwr_byrate(rtwdev, band, 0, &desc); 2542 2543 BUILD_BUG_ON(RTW89_RATE_OFFSET_NUM_AX != 5); 2544 val = FIELD_PREP(GENMASK(3, 0), v[0]) | 2545 FIELD_PREP(GENMASK(7, 4), v[1]) | 2546 FIELD_PREP(GENMASK(11, 8), v[2]) | 2547 FIELD_PREP(GENMASK(15, 12), v[3]) | 2548 FIELD_PREP(GENMASK(19, 16), v[4]); 2549 2550 rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_AX_PWR_RATE_OFST_CTRL, 2551 GENMASK(19, 0), val); 2552 } 2553 2554 static void rtw89_phy_set_txpwr_limit_ax(struct rtw89_dev *rtwdev, 2555 const struct rtw89_chan *chan, 2556 enum rtw89_phy_idx phy_idx) 2557 { 2558 u8 max_ntx_num = rtwdev->chip->rf_path_num; 2559 struct rtw89_txpwr_limit_ax lmt; 2560 u8 ch = chan->channel; 2561 u8 bw = chan->band_width; 2562 const s8 *ptr; 2563 u32 addr, val; 2564 u8 i, j; 2565 2566 rtw89_debug(rtwdev, RTW89_DBG_TXPWR, 2567 "[TXPWR] set txpwr limit with ch=%d bw=%d\n", ch, bw); 2568 2569 BUILD_BUG_ON(sizeof(struct rtw89_txpwr_limit_ax) != 2570 RTW89_TXPWR_LMT_PAGE_SIZE_AX); 2571 2572 addr = R_AX_PWR_LMT; 2573 for (i = 0; i < max_ntx_num; i++) { 2574 rtw89_phy_fill_txpwr_limit_ax(rtwdev, chan, &lmt, i); 2575 2576 ptr = (s8 *)&lmt; 2577 for (j = 0; j < RTW89_TXPWR_LMT_PAGE_SIZE_AX; 2578 j += 4, addr += 4, ptr += 4) { 2579 val = FIELD_PREP(GENMASK(7, 0), ptr[0]) | 2580 FIELD_PREP(GENMASK(15, 8), ptr[1]) | 2581 FIELD_PREP(GENMASK(23, 16), ptr[2]) | 2582 FIELD_PREP(GENMASK(31, 24), ptr[3]); 2583 2584 rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val); 2585 } 2586 } 2587 } 2588 2589 static void rtw89_phy_set_txpwr_limit_ru_ax(struct rtw89_dev *rtwdev, 2590 const struct rtw89_chan *chan, 2591 enum rtw89_phy_idx phy_idx) 2592 { 2593 u8 max_ntx_num = rtwdev->chip->rf_path_num; 2594 struct rtw89_txpwr_limit_ru_ax lmt_ru; 2595 u8 ch = chan->channel; 2596 u8 bw = chan->band_width; 2597 const s8 *ptr; 2598 u32 addr, val; 2599 u8 i, j; 2600 2601 rtw89_debug(rtwdev, RTW89_DBG_TXPWR, 2602 "[TXPWR] set txpwr limit ru with ch=%d bw=%d\n", ch, bw); 2603 2604 BUILD_BUG_ON(sizeof(struct rtw89_txpwr_limit_ru_ax) != 2605 RTW89_TXPWR_LMT_RU_PAGE_SIZE_AX); 2606 2607 addr = R_AX_PWR_RU_LMT; 2608 for (i = 0; i < max_ntx_num; i++) { 2609 rtw89_phy_fill_txpwr_limit_ru_ax(rtwdev, chan, &lmt_ru, i); 2610 2611 ptr = (s8 *)&lmt_ru; 2612 for (j = 0; j < RTW89_TXPWR_LMT_RU_PAGE_SIZE_AX; 2613 j += 4, addr += 4, ptr += 4) { 2614 val = FIELD_PREP(GENMASK(7, 0), ptr[0]) | 2615 FIELD_PREP(GENMASK(15, 8), ptr[1]) | 2616 FIELD_PREP(GENMASK(23, 16), ptr[2]) | 2617 FIELD_PREP(GENMASK(31, 24), ptr[3]); 2618 2619 rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val); 2620 } 2621 } 2622 } 2623 2624 struct rtw89_phy_iter_ra_data { 2625 struct rtw89_dev *rtwdev; 2626 struct sk_buff *c2h; 2627 }; 2628 2629 static void __rtw89_phy_c2h_ra_rpt_iter(struct rtw89_sta_link *rtwsta_link, 2630 struct ieee80211_link_sta *link_sta, 2631 struct rtw89_phy_iter_ra_data *ra_data) 2632 { 2633 struct rtw89_dev *rtwdev = ra_data->rtwdev; 2634 const struct rtw89_c2h_ra_rpt *c2h = 2635 (const struct rtw89_c2h_ra_rpt *)ra_data->c2h->data; 2636 struct rtw89_ra_report *ra_report = &rtwsta_link->ra_report; 2637 const struct rtw89_chip_info *chip = rtwdev->chip; 2638 bool format_v1 = chip->chip_gen == RTW89_CHIP_BE; 2639 u8 mode, rate, bw, giltf, mac_id; 2640 u16 legacy_bitrate; 2641 bool valid; 2642 u8 mcs = 0; 2643 u8 t; 2644 2645 mac_id = le32_get_bits(c2h->w2, RTW89_C2H_RA_RPT_W2_MACID); 2646 if (mac_id != rtwsta_link->mac_id) 2647 
return; 2648 2649 rate = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_MCSNSS); 2650 bw = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_BW); 2651 giltf = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_GILTF); 2652 mode = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_MD_SEL); 2653 2654 if (format_v1) { 2655 t = le32_get_bits(c2h->w2, RTW89_C2H_RA_RPT_W2_MCSNSS_B7); 2656 rate |= u8_encode_bits(t, BIT(7)); 2657 t = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_BW_B2); 2658 bw |= u8_encode_bits(t, BIT(2)); 2659 t = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_MD_SEL_B2); 2660 mode |= u8_encode_bits(t, BIT(2)); 2661 } 2662 2663 if (mode == RTW89_RA_RPT_MODE_LEGACY) { 2664 valid = rtw89_ra_report_to_bitrate(rtwdev, rate, &legacy_bitrate); 2665 if (!valid) 2666 return; 2667 } 2668 2669 memset(&ra_report->txrate, 0, sizeof(ra_report->txrate)); 2670 2671 switch (mode) { 2672 case RTW89_RA_RPT_MODE_LEGACY: 2673 ra_report->txrate.legacy = legacy_bitrate; 2674 break; 2675 case RTW89_RA_RPT_MODE_HT: 2676 ra_report->txrate.flags |= RATE_INFO_FLAGS_MCS; 2677 if (RTW89_CHK_FW_FEATURE(OLD_HT_RA_FORMAT, &rtwdev->fw)) 2678 rate = RTW89_MK_HT_RATE(FIELD_GET(RTW89_RA_RATE_MASK_NSS, rate), 2679 FIELD_GET(RTW89_RA_RATE_MASK_MCS, rate)); 2680 else 2681 rate = FIELD_GET(RTW89_RA_RATE_MASK_HT_MCS, rate); 2682 ra_report->txrate.mcs = rate; 2683 if (giltf) 2684 ra_report->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; 2685 mcs = ra_report->txrate.mcs & 0x07; 2686 break; 2687 case RTW89_RA_RPT_MODE_VHT: 2688 ra_report->txrate.flags |= RATE_INFO_FLAGS_VHT_MCS; 2689 ra_report->txrate.mcs = format_v1 ? 2690 u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS_V1) : 2691 u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS); 2692 ra_report->txrate.nss = format_v1 ? 2693 u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS_V1) + 1 : 2694 u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS) + 1; 2695 if (giltf) 2696 ra_report->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; 2697 mcs = ra_report->txrate.mcs; 2698 break; 2699 case RTW89_RA_RPT_MODE_HE: 2700 ra_report->txrate.flags |= RATE_INFO_FLAGS_HE_MCS; 2701 ra_report->txrate.mcs = format_v1 ? 2702 u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS_V1) : 2703 u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS); 2704 ra_report->txrate.nss = format_v1 ? 2705 u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS_V1) + 1 : 2706 u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS) + 1; 2707 if (giltf == RTW89_GILTF_2XHE08 || giltf == RTW89_GILTF_1XHE08) 2708 ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_0_8; 2709 else if (giltf == RTW89_GILTF_2XHE16 || giltf == RTW89_GILTF_1XHE16) 2710 ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_1_6; 2711 else 2712 ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_3_2; 2713 mcs = ra_report->txrate.mcs; 2714 break; 2715 case RTW89_RA_RPT_MODE_EHT: 2716 ra_report->txrate.flags |= RATE_INFO_FLAGS_EHT_MCS; 2717 ra_report->txrate.mcs = u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS_V1); 2718 ra_report->txrate.nss = u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS_V1) + 1; 2719 if (giltf == RTW89_GILTF_2XHE08 || giltf == RTW89_GILTF_1XHE08) 2720 ra_report->txrate.eht_gi = NL80211_RATE_INFO_EHT_GI_0_8; 2721 else if (giltf == RTW89_GILTF_2XHE16 || giltf == RTW89_GILTF_1XHE16) 2722 ra_report->txrate.eht_gi = NL80211_RATE_INFO_EHT_GI_1_6; 2723 else 2724 ra_report->txrate.eht_gi = NL80211_RATE_INFO_EHT_GI_3_2; 2725 mcs = ra_report->txrate.mcs; 2726 break; 2727 } 2728 2729 ra_report->txrate.bw = rtw89_hw_to_rate_info_bw(bw); 2730 ra_report->bit_rate = cfg80211_calculate_bitrate(&ra_report->txrate); 2731 ra_report->hw_rate = format_v1 ? 
2732 u16_encode_bits(mode, RTW89_HW_RATE_V1_MASK_MOD) | 2733 u16_encode_bits(rate, RTW89_HW_RATE_V1_MASK_VAL) : 2734 u16_encode_bits(mode, RTW89_HW_RATE_MASK_MOD) | 2735 u16_encode_bits(rate, RTW89_HW_RATE_MASK_VAL); 2736 ra_report->might_fallback_legacy = mcs <= 2; 2737 link_sta->agg.max_rc_amsdu_len = get_max_amsdu_len(rtwdev, ra_report); 2738 rtwsta_link->max_agg_wait = link_sta->agg.max_rc_amsdu_len / 1500 - 1; 2739 } 2740 2741 static void rtw89_phy_c2h_ra_rpt_iter(void *data, struct ieee80211_sta *sta) 2742 { 2743 struct rtw89_phy_iter_ra_data *ra_data = (struct rtw89_phy_iter_ra_data *)data; 2744 struct rtw89_sta *rtwsta = sta_to_rtwsta(sta); 2745 struct rtw89_sta_link *rtwsta_link; 2746 struct ieee80211_link_sta *link_sta; 2747 unsigned int link_id; 2748 2749 rcu_read_lock(); 2750 2751 rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id) { 2752 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, false); 2753 __rtw89_phy_c2h_ra_rpt_iter(rtwsta_link, link_sta, ra_data); 2754 } 2755 2756 rcu_read_unlock(); 2757 } 2758 2759 static void 2760 rtw89_phy_c2h_ra_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 2761 { 2762 struct rtw89_phy_iter_ra_data ra_data; 2763 2764 ra_data.rtwdev = rtwdev; 2765 ra_data.c2h = c2h; 2766 ieee80211_iterate_stations_atomic(rtwdev->hw, 2767 rtw89_phy_c2h_ra_rpt_iter, 2768 &ra_data); 2769 } 2770 2771 static 2772 void (* const rtw89_phy_c2h_ra_handler[])(struct rtw89_dev *rtwdev, 2773 struct sk_buff *c2h, u32 len) = { 2774 [RTW89_PHY_C2H_FUNC_STS_RPT] = rtw89_phy_c2h_ra_rpt, 2775 [RTW89_PHY_C2H_FUNC_MU_GPTBL_RPT] = NULL, 2776 [RTW89_PHY_C2H_FUNC_TXSTS] = NULL, 2777 }; 2778 2779 static void rtw89_phy_c2h_rfk_rpt_log(struct rtw89_dev *rtwdev, 2780 enum rtw89_phy_c2h_rfk_log_func func, 2781 void *content, u16 len) 2782 { 2783 struct rtw89_c2h_rf_txgapk_rpt_log *txgapk; 2784 struct rtw89_c2h_rf_rxdck_rpt_log *rxdck; 2785 struct rtw89_c2h_rf_dack_rpt_log *dack; 2786 struct rtw89_c2h_rf_tssi_rpt_log *tssi; 2787 struct rtw89_c2h_rf_dpk_rpt_log *dpk; 2788 struct rtw89_c2h_rf_iqk_rpt_log *iqk; 2789 int i, j, k; 2790 2791 switch (func) { 2792 case RTW89_PHY_C2H_RFK_LOG_FUNC_IQK: 2793 if (len != sizeof(*iqk)) 2794 goto out; 2795 2796 iqk = content; 2797 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2798 "[IQK] iqk->is_iqk_init = %x\n", iqk->is_iqk_init); 2799 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2800 "[IQK] iqk->is_reload = %x\n", iqk->is_reload); 2801 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2802 "[IQK] iqk->is_nbiqk = %x\n", iqk->is_nbiqk); 2803 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2804 "[IQK] iqk->txiqk_en = %x\n", iqk->txiqk_en); 2805 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2806 "[IQK] iqk->rxiqk_en = %x\n", iqk->rxiqk_en); 2807 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2808 "[IQK] iqk->lok_en = %x\n", iqk->lok_en); 2809 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2810 "[IQK] iqk->iqk_xym_en = %x\n", iqk->iqk_xym_en); 2811 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2812 "[IQK] iqk->iqk_sram_en = %x\n", iqk->iqk_sram_en); 2813 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2814 "[IQK] iqk->iqk_fft_en = %x\n", iqk->iqk_fft_en); 2815 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2816 "[IQK] iqk->is_fw_iqk = %x\n", iqk->is_fw_iqk); 2817 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2818 "[IQK] iqk->is_iqk_enable = %x\n", iqk->is_iqk_enable); 2819 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2820 "[IQK] iqk->iqk_cfir_en = %x\n", iqk->iqk_cfir_en); 2821 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2822 "[IQK] iqk->thermal_rek_en = %x\n", iqk->thermal_rek_en); 2823 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2824 "[IQK] iqk->version = %x\n", iqk->version); 2825 
rtw89_debug(rtwdev, RTW89_DBG_RFK, 2826 "[IQK] iqk->phy = %x\n", iqk->phy); 2827 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2828 "[IQK] iqk->fwk_status = %x\n", iqk->fwk_status); 2829 2830 for (i = 0; i < 2; i++) { 2831 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2832 "[IQK] ======== Path %x ========\n", i); 2833 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->iqk_band[%d] = %x\n", 2834 i, iqk->iqk_band[i]); 2835 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->iqk_ch[%d] = %x\n", 2836 i, iqk->iqk_ch[i]); 2837 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->iqk_bw[%d] = %x\n", 2838 i, iqk->iqk_bw[i]); 2839 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->lok_idac[%d] = %x\n", 2840 i, le32_to_cpu(iqk->lok_idac[i])); 2841 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->lok_vbuf[%d] = %x\n", 2842 i, le32_to_cpu(iqk->lok_vbuf[i])); 2843 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->iqk_tx_fail[%d] = %x\n", 2844 i, iqk->iqk_tx_fail[i]); 2845 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->iqk_rx_fail[%d] = %x\n", 2846 i, iqk->iqk_rx_fail[i]); 2847 for (j = 0; j < 4; j++) 2848 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2849 "[IQK] iqk->rftxgain[%d][%d] = %x\n", 2850 i, j, le32_to_cpu(iqk->rftxgain[i][j])); 2851 for (j = 0; j < 4; j++) 2852 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2853 "[IQK] iqk->tx_xym[%d][%d] = %x\n", 2854 i, j, le32_to_cpu(iqk->tx_xym[i][j])); 2855 for (j = 0; j < 4; j++) 2856 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2857 "[IQK] iqk->rfrxgain[%d][%d] = %x\n", 2858 i, j, le32_to_cpu(iqk->rfrxgain[i][j])); 2859 for (j = 0; j < 4; j++) 2860 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2861 "[IQK] iqk->rx_xym[%d][%d] = %x\n", 2862 i, j, le32_to_cpu(iqk->rx_xym[i][j])); 2863 } 2864 return; 2865 case RTW89_PHY_C2H_RFK_LOG_FUNC_DPK: 2866 if (len != sizeof(*dpk)) 2867 goto out; 2868 2869 dpk = content; 2870 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2871 "DPK ver:%d idx:%2ph band:%2ph bw:%2ph ch:%2ph path:%2ph\n", 2872 dpk->ver, dpk->idx, dpk->band, dpk->bw, dpk->ch, dpk->path_ok); 2873 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2874 "DPK txagc:%2ph ther:%2ph gs:%2ph dc_i:%4ph dc_q:%4ph\n", 2875 dpk->txagc, dpk->ther, dpk->gs, dpk->dc_i, dpk->dc_q); 2876 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2877 "DPK corr_v:%2ph corr_i:%2ph to:%2ph ov:%2ph\n", 2878 dpk->corr_val, dpk->corr_idx, dpk->is_timeout, dpk->rxbb_ov); 2879 return; 2880 case RTW89_PHY_C2H_RFK_LOG_FUNC_DACK: 2881 if (len != sizeof(*dack)) 2882 goto out; 2883 2884 dack = content; 2885 2886 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]FWDACK SUMMARY!!!!!\n"); 2887 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2888 "[DACK]FWDACK ver = 0x%x, FWDACK rpt_ver = 0x%x, driver rpt_ver = 0x%x\n", 2889 dack->fwdack_ver, dack->fwdack_info_ver, 0x2); 2890 2891 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2892 "[DACK]timeout code = [0x%x 0x%x 0x%x 0x%x 0x%x]\n", 2893 dack->addck_timeout, dack->cdack_timeout, dack->dadck_timeout, 2894 dack->adgaink_timeout, dack->msbk_timeout); 2895 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2896 "[DACK]DACK fail = 0x%x\n", dack->dack_fail); 2897 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2898 "[DACK]S0 WBADCK = [0x%x]\n", dack->wbdck_d[0]); 2899 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2900 "[DACK]S1 WBADCK = [0x%x]\n", dack->wbdck_d[1]); 2901 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2902 "[DACK]DRCK = [0x%x]\n", dack->rck_d); 2903 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 CDACK ic = [0x%x, 0x%x]\n", 2904 dack->cdack_d[0][0][0], dack->cdack_d[0][0][1]); 2905 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 CDACK qc = [0x%x, 0x%x]\n", 2906 dack->cdack_d[0][1][0], dack->cdack_d[0][1][1]); 2907 rtw89_debug(rtwdev, RTW89_DBG_RFK, 
"[DACK]S1 CDACK ic = [0x%x, 0x%x]\n", 2908 dack->cdack_d[1][0][0], dack->cdack_d[1][0][1]); 2909 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 CDACK qc = [0x%x, 0x%x]\n", 2910 dack->cdack_d[1][1][0], dack->cdack_d[1][1][1]); 2911 2912 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADC_DCK ic = [0x%x, 0x%x]\n", 2913 ((u32)dack->addck2_hd[0][0][0] << 8) | dack->addck2_ld[0][0][0], 2914 ((u32)dack->addck2_hd[0][0][1] << 8) | dack->addck2_ld[0][0][1]); 2915 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADC_DCK qc = [0x%x, 0x%x]\n", 2916 ((u32)dack->addck2_hd[0][1][0] << 8) | dack->addck2_ld[0][1][0], 2917 ((u32)dack->addck2_hd[0][1][1] << 8) | dack->addck2_ld[0][1][1]); 2918 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADC_DCK ic = [0x%x, 0x%x]\n", 2919 ((u32)dack->addck2_hd[1][0][0] << 8) | dack->addck2_ld[1][0][0], 2920 ((u32)dack->addck2_hd[1][0][1] << 8) | dack->addck2_ld[1][0][1]); 2921 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADC_DCK qc = [0x%x, 0x%x]\n", 2922 ((u32)dack->addck2_hd[1][1][0] << 8) | dack->addck2_ld[1][1][0], 2923 ((u32)dack->addck2_hd[1][1][1] << 8) | dack->addck2_ld[1][1][1]); 2924 2925 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADC_GAINK ic = 0x%x, qc = 0x%x\n", 2926 dack->adgaink_d[0][0], dack->adgaink_d[0][1]); 2927 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADC_GAINK ic = 0x%x, qc = 0x%x\n", 2928 dack->adgaink_d[1][0], dack->adgaink_d[1][1]); 2929 2930 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n", 2931 dack->dadck_d[0][0], dack->dadck_d[0][1]); 2932 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 DAC_DCK ic = 0x%x, qc = 0x%x\n", 2933 dack->dadck_d[1][0], dack->dadck_d[1][1]); 2934 2935 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 biask iqc = 0x%x\n", 2936 ((u32)dack->biask_hd[0][0] << 8) | dack->biask_ld[0][0]); 2937 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 biask iqc = 0x%x\n", 2938 ((u32)dack->biask_hd[1][0] << 8) | dack->biask_ld[1][0]); 2939 2940 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK ic:\n"); 2941 for (i = 0; i < 0x10; i++) 2942 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", 2943 dack->msbk_d[0][0][i]); 2944 2945 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK qc:\n"); 2946 for (i = 0; i < 0x10; i++) 2947 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", 2948 dack->msbk_d[0][1][i]); 2949 2950 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK ic:\n"); 2951 for (i = 0; i < 0x10; i++) 2952 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", 2953 dack->msbk_d[1][0][i]); 2954 2955 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK qc:\n"); 2956 for (i = 0; i < 0x10; i++) 2957 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", 2958 dack->msbk_d[1][1][i]); 2959 return; 2960 case RTW89_PHY_C2H_RFK_LOG_FUNC_RXDCK: 2961 if (len != sizeof(*rxdck)) 2962 goto out; 2963 2964 rxdck = content; 2965 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2966 "RXDCK ver:%d band:%2ph bw:%2ph ch:%2ph to:%2ph\n", 2967 rxdck->ver, rxdck->band, rxdck->bw, rxdck->ch, 2968 rxdck->timeout); 2969 return; 2970 case RTW89_PHY_C2H_RFK_LOG_FUNC_TSSI: 2971 if (len != sizeof(*tssi)) 2972 goto out; 2973 2974 tssi = content; 2975 for (i = 0; i < 2; i++) { 2976 for (j = 0; j < 2; j++) { 2977 for (k = 0; k < 4; k++) { 2978 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2979 "[TSSI] alignment_power_cw_h[%d][%d][%d]=%d\n", 2980 i, j, k, tssi->alignment_power_cw_h[i][j][k]); 2981 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2982 "[TSSI] alignment_power_cw_l[%d][%d][%d]=%d\n", 2983 i, j, k, tssi->alignment_power_cw_l[i][j][k]); 2984 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2985 "[TSSI] 
alignment_power[%d][%d][%d]=%d\n", 2986 i, j, k, tssi->alignment_power[i][j][k]); 2987 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2988 "[TSSI] alignment_power_cw[%d][%d][%d]=%d\n", 2989 i, j, k, 2990 (tssi->alignment_power_cw_h[i][j][k] << 8) + 2991 tssi->alignment_power_cw_l[i][j][k]); 2992 } 2993 2994 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2995 "[TSSI] tssi_alimk_state[%d][%d]=%d\n", 2996 i, j, tssi->tssi_alimk_state[i][j]); 2997 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2998 "[TSSI] default_txagc_offset[%d]=%d\n", 2999 j, tssi->default_txagc_offset[0][j]); 3000 } 3001 } 3002 return; 3003 case RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK: 3004 if (len != sizeof(*txgapk)) 3005 goto out; 3006 3007 txgapk = content; 3008 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3009 "[TXGAPK]rpt r0x8010[0]=0x%x, r0x8010[1]=0x%x\n", 3010 le32_to_cpu(txgapk->r0x8010[0]), 3011 le32_to_cpu(txgapk->r0x8010[1])); 3012 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt chk_id = %d\n", 3013 txgapk->chk_id); 3014 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt chk_cnt = %d\n", 3015 le32_to_cpu(txgapk->chk_cnt)); 3016 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt ver = 0x%x\n", 3017 txgapk->ver); 3018 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt rsv1 = %d\n", 3019 txgapk->rsv1); 3020 3021 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt track_d[0] = %*ph\n", 3022 (int)sizeof(txgapk->track_d[0]), txgapk->track_d[0]); 3023 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt power_d[0] = %*ph\n", 3024 (int)sizeof(txgapk->power_d[0]), txgapk->power_d[0]); 3025 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt track_d[1] = %*ph\n", 3026 (int)sizeof(txgapk->track_d[1]), txgapk->track_d[1]); 3027 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt power_d[1] = %*ph\n", 3028 (int)sizeof(txgapk->power_d[1]), txgapk->power_d[1]); 3029 return; 3030 default: 3031 break; 3032 } 3033 3034 out: 3035 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3036 "unexpected RFK func %d report log with length %d\n", func, len); 3037 } 3038 3039 static bool rtw89_phy_c2h_rfk_run_log(struct rtw89_dev *rtwdev, 3040 enum rtw89_phy_c2h_rfk_log_func func, 3041 void *content, u16 len) 3042 { 3043 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; 3044 const struct rtw89_c2h_rf_run_log *log = content; 3045 const struct rtw89_fw_element_hdr *elm; 3046 u32 fmt_idx; 3047 u16 offset; 3048 3049 if (sizeof(*log) != len) 3050 return false; 3051 3052 if (!elm_info->rfk_log_fmt) 3053 return false; 3054 3055 elm = elm_info->rfk_log_fmt->elm[func]; 3056 fmt_idx = le32_to_cpu(log->fmt_idx); 3057 if (!elm || fmt_idx >= elm->u.rfk_log_fmt.nr) 3058 return false; 3059 3060 offset = le16_to_cpu(elm->u.rfk_log_fmt.offset[fmt_idx]); 3061 if (offset == 0) 3062 return false; 3063 3064 rtw89_debug(rtwdev, RTW89_DBG_RFK, &elm->u.common.contents[offset], 3065 le32_to_cpu(log->arg[0]), le32_to_cpu(log->arg[1]), 3066 le32_to_cpu(log->arg[2]), le32_to_cpu(log->arg[3])); 3067 3068 return true; 3069 } 3070 3071 static void rtw89_phy_c2h_rfk_log(struct rtw89_dev *rtwdev, struct sk_buff *c2h, 3072 u32 len, enum rtw89_phy_c2h_rfk_log_func func, 3073 const char *rfk_name) 3074 { 3075 struct rtw89_c2h_hdr *c2h_hdr = (struct rtw89_c2h_hdr *)c2h->data; 3076 struct rtw89_c2h_rf_log_hdr *log_hdr; 3077 void *log_ptr = c2h_hdr; 3078 u16 content_len; 3079 u16 chunk_len; 3080 bool handled; 3081 3082 if (!rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK)) 3083 return; 3084 3085 log_ptr += sizeof(*c2h_hdr); 3086 len -= sizeof(*c2h_hdr); 3087 3088 while (len > sizeof(*log_hdr)) { 3089 log_hdr = log_ptr; 3090 content_len = le16_to_cpu(log_hdr->len); 
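/* each chunk is a log_hdr immediately followed by content_len bytes of
 * payload, so the walk below advances by the full header-plus-payload
 * stride
 */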
3091 chunk_len = content_len + sizeof(*log_hdr); 3092 3093 if (chunk_len > len) 3094 break; 3095 3096 switch (log_hdr->type) { 3097 case RTW89_RF_RUN_LOG: 3098 handled = rtw89_phy_c2h_rfk_run_log(rtwdev, func, 3099 log_hdr->content, content_len); 3100 if (handled) 3101 break; 3102 3103 rtw89_debug(rtwdev, RTW89_DBG_RFK, "%s run: %*ph\n", 3104 rfk_name, content_len, log_hdr->content); 3105 break; 3106 case RTW89_RF_RPT_LOG: 3107 rtw89_phy_c2h_rfk_rpt_log(rtwdev, func, 3108 log_hdr->content, content_len); 3109 break; 3110 default: 3111 return; 3112 } 3113 3114 log_ptr += chunk_len; 3115 len -= chunk_len; 3116 } 3117 } 3118 3119 static void 3120 rtw89_phy_c2h_rfk_log_iqk(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 3121 { 3122 rtw89_phy_c2h_rfk_log(rtwdev, c2h, len, 3123 RTW89_PHY_C2H_RFK_LOG_FUNC_IQK, "IQK"); 3124 } 3125 3126 static void 3127 rtw89_phy_c2h_rfk_log_dpk(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 3128 { 3129 rtw89_phy_c2h_rfk_log(rtwdev, c2h, len, 3130 RTW89_PHY_C2H_RFK_LOG_FUNC_DPK, "DPK"); 3131 } 3132 3133 static void 3134 rtw89_phy_c2h_rfk_log_dack(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 3135 { 3136 rtw89_phy_c2h_rfk_log(rtwdev, c2h, len, 3137 RTW89_PHY_C2H_RFK_LOG_FUNC_DACK, "DACK"); 3138 } 3139 3140 static void 3141 rtw89_phy_c2h_rfk_log_rxdck(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 3142 { 3143 rtw89_phy_c2h_rfk_log(rtwdev, c2h, len, 3144 RTW89_PHY_C2H_RFK_LOG_FUNC_RXDCK, "RX_DCK"); 3145 } 3146 3147 static void 3148 rtw89_phy_c2h_rfk_log_tssi(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 3149 { 3150 rtw89_phy_c2h_rfk_log(rtwdev, c2h, len, 3151 RTW89_PHY_C2H_RFK_LOG_FUNC_TSSI, "TSSI"); 3152 } 3153 3154 static void 3155 rtw89_phy_c2h_rfk_log_txgapk(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 3156 { 3157 rtw89_phy_c2h_rfk_log(rtwdev, c2h, len, 3158 RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK, "TXGAPK"); 3159 } 3160 3161 static 3162 void (* const rtw89_phy_c2h_rfk_log_handler[])(struct rtw89_dev *rtwdev, 3163 struct sk_buff *c2h, u32 len) = { 3164 [RTW89_PHY_C2H_RFK_LOG_FUNC_IQK] = rtw89_phy_c2h_rfk_log_iqk, 3165 [RTW89_PHY_C2H_RFK_LOG_FUNC_DPK] = rtw89_phy_c2h_rfk_log_dpk, 3166 [RTW89_PHY_C2H_RFK_LOG_FUNC_DACK] = rtw89_phy_c2h_rfk_log_dack, 3167 [RTW89_PHY_C2H_RFK_LOG_FUNC_RXDCK] = rtw89_phy_c2h_rfk_log_rxdck, 3168 [RTW89_PHY_C2H_RFK_LOG_FUNC_TSSI] = rtw89_phy_c2h_rfk_log_tssi, 3169 [RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK] = rtw89_phy_c2h_rfk_log_txgapk, 3170 }; 3171 3172 static 3173 void rtw89_phy_rfk_report_prep(struct rtw89_dev *rtwdev) 3174 { 3175 struct rtw89_rfk_wait_info *wait = &rtwdev->rfk_wait; 3176 3177 wait->state = RTW89_RFK_STATE_START; 3178 wait->start_time = ktime_get(); 3179 reinit_completion(&wait->completion); 3180 } 3181 3182 static 3183 int rtw89_phy_rfk_report_wait(struct rtw89_dev *rtwdev, const char *rfk_name, 3184 unsigned int ms) 3185 { 3186 struct rtw89_rfk_wait_info *wait = &rtwdev->rfk_wait; 3187 unsigned long time_left; 3188 3189 /* Since we can't receive C2H event during SER, use a fixed delay. 
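Sleeping for half of the nominal timeout serves as a best-effort approximation of how long the calibration itself takes.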
*/
3190 if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags)) {
3191 fsleep(1000 * ms / 2);
3192 goto out;
3193 }
3194
3195 time_left = wait_for_completion_timeout(&wait->completion,
3196 msecs_to_jiffies(ms));
3197 if (time_left == 0) {
3198 rtw89_warn(rtwdev, "failed to wait for RF %s\n", rfk_name);
3199 return -ETIMEDOUT;
3200 } else if (wait->state != RTW89_RFK_STATE_OK) {
3201 rtw89_warn(rtwdev, "failed to do RF %s, result state %d\n",
3202 rfk_name, wait->state);
3203 return -EFAULT;
3204 }
3205
3206 out:
3207 rtw89_debug(rtwdev, RTW89_DBG_RFK, "RF %s takes %lld ms to complete\n",
3208 rfk_name, ktime_ms_delta(ktime_get(), wait->start_time));
3209
3210 return 0;
3211 }
3212
3213 static void
3214 rtw89_phy_c2h_rfk_report_state(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
3215 {
3216 const struct rtw89_c2h_rfk_report *report =
3217 (const struct rtw89_c2h_rfk_report *)c2h->data;
3218 struct rtw89_rfk_wait_info *wait = &rtwdev->rfk_wait;
3219
3220 wait->state = report->state;
3221 wait->version = report->version;
3222
3223 complete(&wait->completion);
3224
3225 rtw89_debug(rtwdev, RTW89_DBG_RFK,
3226 "RFK report state %d with version %d (%*ph)\n",
3227 wait->state, wait->version,
3228 (int)(len - sizeof(report->hdr)), &report->state);
3229 }
3230
3231 static
3232 void (* const rtw89_phy_c2h_rfk_report_handler[])(struct rtw89_dev *rtwdev,
3233 struct sk_buff *c2h, u32 len) = {
3234 [RTW89_PHY_C2H_RFK_REPORT_FUNC_STATE] = rtw89_phy_c2h_rfk_report_state,
3235 };
3236
3237 bool rtw89_phy_c2h_chk_atomic(struct rtw89_dev *rtwdev, u8 class, u8 func)
3238 {
3239 switch (class) {
3240 case RTW89_PHY_C2H_RFK_LOG:
3241 switch (func) {
3242 case RTW89_PHY_C2H_RFK_LOG_FUNC_IQK:
3243 case RTW89_PHY_C2H_RFK_LOG_FUNC_DPK:
3244 case RTW89_PHY_C2H_RFK_LOG_FUNC_DACK:
3245 case RTW89_PHY_C2H_RFK_LOG_FUNC_RXDCK:
3246 case RTW89_PHY_C2H_RFK_LOG_FUNC_TSSI:
3247 case RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK:
3248 return true;
3249 default:
3250 return false;
3251 }
3252 case RTW89_PHY_C2H_RFK_REPORT:
3253 switch (func) {
3254 case RTW89_PHY_C2H_RFK_REPORT_FUNC_STATE:
3255 return true;
3256 default:
3257 return false;
3258 }
3259 default:
3260 return false;
3261 }
3262 }
3263
3264 void rtw89_phy_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
3265 u32 len, u8 class, u8 func)
3266 {
3267 void (*handler)(struct rtw89_dev *rtwdev,
3268 struct sk_buff *c2h, u32 len) = NULL;
3269
3270 switch (class) {
3271 case RTW89_PHY_C2H_CLASS_RA:
3272 if (func < RTW89_PHY_C2H_FUNC_RA_MAX)
3273 handler = rtw89_phy_c2h_ra_handler[func];
3274 break;
3275 case RTW89_PHY_C2H_RFK_LOG:
3276 if (func < ARRAY_SIZE(rtw89_phy_c2h_rfk_log_handler))
3277 handler = rtw89_phy_c2h_rfk_log_handler[func];
3278 break;
3279 case RTW89_PHY_C2H_RFK_REPORT:
3280 if (func < ARRAY_SIZE(rtw89_phy_c2h_rfk_report_handler))
3281 handler = rtw89_phy_c2h_rfk_report_handler[func];
3282 break;
3283 case RTW89_PHY_C2H_CLASS_DM:
3284 if (func == RTW89_PHY_C2H_DM_FUNC_LOWRT_RTY)
3285 return;
3286 fallthrough;
3287 default:
3288 rtw89_info(rtwdev, "c2h class %d not supported\n", class);
3289 return;
3290 }
3291 if (!handler) {
3292 rtw89_info(rtwdev, "c2h class %d func %d not supported\n", class,
3293 func);
3294 return;
3295 }
3296 handler(rtwdev, skb, len);
3297 }
3298
3299 int rtw89_phy_rfk_pre_ntfy_and_wait(struct rtw89_dev *rtwdev,
3300 enum rtw89_phy_idx phy_idx,
3301 unsigned int ms)
3302 {
3303 int ret;
3304
3305 rtw89_phy_rfk_report_prep(rtwdev);
3306
3307 ret = rtw89_fw_h2c_rf_pre_ntfy(rtwdev, phy_idx);
3308 if (ret)
3309 return ret;
3310
3311
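/* same prepare -> trigger-H2C -> wait-for-report pattern as the
 * TSSI/IQK/DPK/TXGAPK/DACK/RX_DCK helpers that follow
 */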
return rtw89_phy_rfk_report_wait(rtwdev, "PRE_NTFY", ms); 3312 } 3313 EXPORT_SYMBOL(rtw89_phy_rfk_pre_ntfy_and_wait); 3314 3315 int rtw89_phy_rfk_tssi_and_wait(struct rtw89_dev *rtwdev, 3316 enum rtw89_phy_idx phy_idx, 3317 const struct rtw89_chan *chan, 3318 enum rtw89_tssi_mode tssi_mode, 3319 unsigned int ms) 3320 { 3321 int ret; 3322 3323 rtw89_phy_rfk_report_prep(rtwdev); 3324 3325 ret = rtw89_fw_h2c_rf_tssi(rtwdev, phy_idx, chan, tssi_mode); 3326 if (ret) 3327 return ret; 3328 3329 return rtw89_phy_rfk_report_wait(rtwdev, "TSSI", ms); 3330 } 3331 EXPORT_SYMBOL(rtw89_phy_rfk_tssi_and_wait); 3332 3333 int rtw89_phy_rfk_iqk_and_wait(struct rtw89_dev *rtwdev, 3334 enum rtw89_phy_idx phy_idx, 3335 const struct rtw89_chan *chan, 3336 unsigned int ms) 3337 { 3338 int ret; 3339 3340 rtw89_phy_rfk_report_prep(rtwdev); 3341 3342 ret = rtw89_fw_h2c_rf_iqk(rtwdev, phy_idx, chan); 3343 if (ret) 3344 return ret; 3345 3346 return rtw89_phy_rfk_report_wait(rtwdev, "IQK", ms); 3347 } 3348 EXPORT_SYMBOL(rtw89_phy_rfk_iqk_and_wait); 3349 3350 int rtw89_phy_rfk_dpk_and_wait(struct rtw89_dev *rtwdev, 3351 enum rtw89_phy_idx phy_idx, 3352 const struct rtw89_chan *chan, 3353 unsigned int ms) 3354 { 3355 int ret; 3356 3357 rtw89_phy_rfk_report_prep(rtwdev); 3358 3359 ret = rtw89_fw_h2c_rf_dpk(rtwdev, phy_idx, chan); 3360 if (ret) 3361 return ret; 3362 3363 return rtw89_phy_rfk_report_wait(rtwdev, "DPK", ms); 3364 } 3365 EXPORT_SYMBOL(rtw89_phy_rfk_dpk_and_wait); 3366 3367 int rtw89_phy_rfk_txgapk_and_wait(struct rtw89_dev *rtwdev, 3368 enum rtw89_phy_idx phy_idx, 3369 const struct rtw89_chan *chan, 3370 unsigned int ms) 3371 { 3372 int ret; 3373 3374 rtw89_phy_rfk_report_prep(rtwdev); 3375 3376 ret = rtw89_fw_h2c_rf_txgapk(rtwdev, phy_idx, chan); 3377 if (ret) 3378 return ret; 3379 3380 return rtw89_phy_rfk_report_wait(rtwdev, "TXGAPK", ms); 3381 } 3382 EXPORT_SYMBOL(rtw89_phy_rfk_txgapk_and_wait); 3383 3384 int rtw89_phy_rfk_dack_and_wait(struct rtw89_dev *rtwdev, 3385 enum rtw89_phy_idx phy_idx, 3386 const struct rtw89_chan *chan, 3387 unsigned int ms) 3388 { 3389 int ret; 3390 3391 rtw89_phy_rfk_report_prep(rtwdev); 3392 3393 ret = rtw89_fw_h2c_rf_dack(rtwdev, phy_idx, chan); 3394 if (ret) 3395 return ret; 3396 3397 return rtw89_phy_rfk_report_wait(rtwdev, "DACK", ms); 3398 } 3399 EXPORT_SYMBOL(rtw89_phy_rfk_dack_and_wait); 3400 3401 int rtw89_phy_rfk_rxdck_and_wait(struct rtw89_dev *rtwdev, 3402 enum rtw89_phy_idx phy_idx, 3403 const struct rtw89_chan *chan, 3404 bool is_chl_k, unsigned int ms) 3405 { 3406 int ret; 3407 3408 rtw89_phy_rfk_report_prep(rtwdev); 3409 3410 ret = rtw89_fw_h2c_rf_rxdck(rtwdev, phy_idx, chan, is_chl_k); 3411 if (ret) 3412 return ret; 3413 3414 return rtw89_phy_rfk_report_wait(rtwdev, "RX_DCK", ms); 3415 } 3416 EXPORT_SYMBOL(rtw89_phy_rfk_rxdck_and_wait); 3417 3418 static u32 phy_tssi_get_cck_group(u8 ch) 3419 { 3420 switch (ch) { 3421 case 1 ... 2: 3422 return 0; 3423 case 3 ... 5: 3424 return 1; 3425 case 6 ... 8: 3426 return 2; 3427 case 9 ... 11: 3428 return 3; 3429 case 12 ... 
13: 3430 return 4; 3431 case 14: 3432 return 5; 3433 } 3434 3435 return 0; 3436 } 3437 3438 #define PHY_TSSI_EXTRA_GROUP_BIT BIT(31) 3439 #define PHY_TSSI_EXTRA_GROUP(idx) (PHY_TSSI_EXTRA_GROUP_BIT | (idx)) 3440 #define PHY_IS_TSSI_EXTRA_GROUP(group) ((group) & PHY_TSSI_EXTRA_GROUP_BIT) 3441 #define PHY_TSSI_EXTRA_GET_GROUP_IDX1(group) \ 3442 ((group) & ~PHY_TSSI_EXTRA_GROUP_BIT) 3443 #define PHY_TSSI_EXTRA_GET_GROUP_IDX2(group) \ 3444 (PHY_TSSI_EXTRA_GET_GROUP_IDX1(group) + 1) 3445 3446 static u32 phy_tssi_get_ofdm_group(u8 ch) 3447 { 3448 switch (ch) { 3449 case 1 ... 2: 3450 return 0; 3451 case 3 ... 5: 3452 return 1; 3453 case 6 ... 8: 3454 return 2; 3455 case 9 ... 11: 3456 return 3; 3457 case 12 ... 14: 3458 return 4; 3459 case 36 ... 40: 3460 return 5; 3461 case 41 ... 43: 3462 return PHY_TSSI_EXTRA_GROUP(5); 3463 case 44 ... 48: 3464 return 6; 3465 case 49 ... 51: 3466 return PHY_TSSI_EXTRA_GROUP(6); 3467 case 52 ... 56: 3468 return 7; 3469 case 57 ... 59: 3470 return PHY_TSSI_EXTRA_GROUP(7); 3471 case 60 ... 64: 3472 return 8; 3473 case 100 ... 104: 3474 return 9; 3475 case 105 ... 107: 3476 return PHY_TSSI_EXTRA_GROUP(9); 3477 case 108 ... 112: 3478 return 10; 3479 case 113 ... 115: 3480 return PHY_TSSI_EXTRA_GROUP(10); 3481 case 116 ... 120: 3482 return 11; 3483 case 121 ... 123: 3484 return PHY_TSSI_EXTRA_GROUP(11); 3485 case 124 ... 128: 3486 return 12; 3487 case 129 ... 131: 3488 return PHY_TSSI_EXTRA_GROUP(12); 3489 case 132 ... 136: 3490 return 13; 3491 case 137 ... 139: 3492 return PHY_TSSI_EXTRA_GROUP(13); 3493 case 140 ... 144: 3494 return 14; 3495 case 149 ... 153: 3496 return 15; 3497 case 154 ... 156: 3498 return PHY_TSSI_EXTRA_GROUP(15); 3499 case 157 ... 161: 3500 return 16; 3501 case 162 ... 164: 3502 return PHY_TSSI_EXTRA_GROUP(16); 3503 case 165 ... 169: 3504 return 17; 3505 case 170 ... 172: 3506 return PHY_TSSI_EXTRA_GROUP(17); 3507 case 173 ... 177: 3508 return 18; 3509 } 3510 3511 return 0; 3512 } 3513 3514 static u32 phy_tssi_get_6g_ofdm_group(u8 ch) 3515 { 3516 switch (ch) { 3517 case 1 ... 5: 3518 return 0; 3519 case 6 ... 8: 3520 return PHY_TSSI_EXTRA_GROUP(0); 3521 case 9 ... 13: 3522 return 1; 3523 case 14 ... 16: 3524 return PHY_TSSI_EXTRA_GROUP(1); 3525 case 17 ... 21: 3526 return 2; 3527 case 22 ... 24: 3528 return PHY_TSSI_EXTRA_GROUP(2); 3529 case 25 ... 29: 3530 return 3; 3531 case 33 ... 37: 3532 return 4; 3533 case 38 ... 40: 3534 return PHY_TSSI_EXTRA_GROUP(4); 3535 case 41 ... 45: 3536 return 5; 3537 case 46 ... 48: 3538 return PHY_TSSI_EXTRA_GROUP(5); 3539 case 49 ... 53: 3540 return 6; 3541 case 54 ... 56: 3542 return PHY_TSSI_EXTRA_GROUP(6); 3543 case 57 ... 61: 3544 return 7; 3545 case 65 ... 69: 3546 return 8; 3547 case 70 ... 72: 3548 return PHY_TSSI_EXTRA_GROUP(8); 3549 case 73 ... 77: 3550 return 9; 3551 case 78 ... 80: 3552 return PHY_TSSI_EXTRA_GROUP(9); 3553 case 81 ... 85: 3554 return 10; 3555 case 86 ... 88: 3556 return PHY_TSSI_EXTRA_GROUP(10); 3557 case 89 ... 93: 3558 return 11; 3559 case 97 ... 101: 3560 return 12; 3561 case 102 ... 104: 3562 return PHY_TSSI_EXTRA_GROUP(12); 3563 case 105 ... 109: 3564 return 13; 3565 case 110 ... 112: 3566 return PHY_TSSI_EXTRA_GROUP(13); 3567 case 113 ... 117: 3568 return 14; 3569 case 118 ... 120: 3570 return PHY_TSSI_EXTRA_GROUP(14); 3571 case 121 ... 125: 3572 return 15; 3573 case 129 ... 133: 3574 return 16; 3575 case 134 ... 136: 3576 return PHY_TSSI_EXTRA_GROUP(16); 3577 case 137 ... 141: 3578 return 17; 3579 case 142 ... 144: 3580 return PHY_TSSI_EXTRA_GROUP(17); 3581 case 145 ... 
149: 3582 return 18; 3583 case 150 ... 152: 3584 return PHY_TSSI_EXTRA_GROUP(18); 3585 case 153 ... 157: 3586 return 19; 3587 case 161 ... 165: 3588 return 20; 3589 case 166 ... 168: 3590 return PHY_TSSI_EXTRA_GROUP(20); 3591 case 169 ... 173: 3592 return 21; 3593 case 174 ... 176: 3594 return PHY_TSSI_EXTRA_GROUP(21); 3595 case 177 ... 181: 3596 return 22; 3597 case 182 ... 184: 3598 return PHY_TSSI_EXTRA_GROUP(22); 3599 case 185 ... 189: 3600 return 23; 3601 case 193 ... 197: 3602 return 24; 3603 case 198 ... 200: 3604 return PHY_TSSI_EXTRA_GROUP(24); 3605 case 201 ... 205: 3606 return 25; 3607 case 206 ... 208: 3608 return PHY_TSSI_EXTRA_GROUP(25); 3609 case 209 ... 213: 3610 return 26; 3611 case 214 ... 216: 3612 return PHY_TSSI_EXTRA_GROUP(26); 3613 case 217 ... 221: 3614 return 27; 3615 case 225 ... 229: 3616 return 28; 3617 case 230 ... 232: 3618 return PHY_TSSI_EXTRA_GROUP(28); 3619 case 233 ... 237: 3620 return 29; 3621 case 238 ... 240: 3622 return PHY_TSSI_EXTRA_GROUP(29); 3623 case 241 ... 245: 3624 return 30; 3625 case 246 ... 248: 3626 return PHY_TSSI_EXTRA_GROUP(30); 3627 case 249 ... 253: 3628 return 31; 3629 } 3630 3631 return 0; 3632 } 3633 3634 static u32 phy_tssi_get_trim_group(u8 ch) 3635 { 3636 switch (ch) { 3637 case 1 ... 8: 3638 return 0; 3639 case 9 ... 14: 3640 return 1; 3641 case 36 ... 48: 3642 return 2; 3643 case 49 ... 51: 3644 return PHY_TSSI_EXTRA_GROUP(2); 3645 case 52 ... 64: 3646 return 3; 3647 case 100 ... 112: 3648 return 4; 3649 case 113 ... 115: 3650 return PHY_TSSI_EXTRA_GROUP(4); 3651 case 116 ... 128: 3652 return 5; 3653 case 132 ... 144: 3654 return 6; 3655 case 149 ... 177: 3656 return 7; 3657 } 3658 3659 return 0; 3660 } 3661 3662 static u32 phy_tssi_get_6g_trim_group(u8 ch) 3663 { 3664 switch (ch) { 3665 case 1 ... 13: 3666 return 0; 3667 case 14 ... 16: 3668 return PHY_TSSI_EXTRA_GROUP(0); 3669 case 17 ... 29: 3670 return 1; 3671 case 33 ... 45: 3672 return 2; 3673 case 46 ... 48: 3674 return PHY_TSSI_EXTRA_GROUP(2); 3675 case 49 ... 61: 3676 return 3; 3677 case 65 ... 77: 3678 return 4; 3679 case 78 ... 80: 3680 return PHY_TSSI_EXTRA_GROUP(4); 3681 case 81 ... 93: 3682 return 5; 3683 case 97 ... 109: 3684 return 6; 3685 case 110 ... 112: 3686 return PHY_TSSI_EXTRA_GROUP(6); 3687 case 113 ... 125: 3688 return 7; 3689 case 129 ... 141: 3690 return 8; 3691 case 142 ... 144: 3692 return PHY_TSSI_EXTRA_GROUP(8); 3693 case 145 ... 157: 3694 return 9; 3695 case 161 ... 173: 3696 return 10; 3697 case 174 ... 176: 3698 return PHY_TSSI_EXTRA_GROUP(10); 3699 case 177 ... 189: 3700 return 11; 3701 case 193 ... 205: 3702 return 12; 3703 case 206 ... 208: 3704 return PHY_TSSI_EXTRA_GROUP(12); 3705 case 209 ... 221: 3706 return 13; 3707 case 225 ... 237: 3708 return 14; 3709 case 238 ... 240: 3710 return PHY_TSSI_EXTRA_GROUP(14); 3711 case 241 ... 
253: 3712 return 15; 3713 } 3714 3715 return 0; 3716 } 3717 3718 static s8 phy_tssi_get_ofdm_de(struct rtw89_dev *rtwdev, 3719 enum rtw89_phy_idx phy, 3720 const struct rtw89_chan *chan, 3721 enum rtw89_rf_path path) 3722 { 3723 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; 3724 enum rtw89_band band = chan->band_type; 3725 u8 ch = chan->channel; 3726 u32 gidx_1st; 3727 u32 gidx_2nd; 3728 s8 de_1st; 3729 s8 de_2nd; 3730 u32 gidx; 3731 s8 val; 3732 3733 if (band == RTW89_BAND_6G) 3734 goto calc_6g; 3735 3736 gidx = phy_tssi_get_ofdm_group(ch); 3737 3738 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3739 "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n", 3740 path, gidx); 3741 3742 if (PHY_IS_TSSI_EXTRA_GROUP(gidx)) { 3743 gidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(gidx); 3744 gidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(gidx); 3745 de_1st = tssi_info->tssi_mcs[path][gidx_1st]; 3746 de_2nd = tssi_info->tssi_mcs[path][gidx_2nd]; 3747 val = (de_1st + de_2nd) / 2; 3748 3749 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3750 "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n", 3751 path, val, de_1st, de_2nd); 3752 } else { 3753 val = tssi_info->tssi_mcs[path][gidx]; 3754 3755 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3756 "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val); 3757 } 3758 3759 return val; 3760 3761 calc_6g: 3762 gidx = phy_tssi_get_6g_ofdm_group(ch); 3763 3764 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3765 "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n", 3766 path, gidx); 3767 3768 if (PHY_IS_TSSI_EXTRA_GROUP(gidx)) { 3769 gidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(gidx); 3770 gidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(gidx); 3771 de_1st = tssi_info->tssi_6g_mcs[path][gidx_1st]; 3772 de_2nd = tssi_info->tssi_6g_mcs[path][gidx_2nd]; 3773 val = (de_1st + de_2nd) / 2; 3774 3775 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3776 "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n", 3777 path, val, de_1st, de_2nd); 3778 } else { 3779 val = tssi_info->tssi_6g_mcs[path][gidx]; 3780 3781 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3782 "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val); 3783 } 3784 3785 return val; 3786 } 3787 3788 static s8 phy_tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, 3789 enum rtw89_phy_idx phy, 3790 const struct rtw89_chan *chan, 3791 enum rtw89_rf_path path) 3792 { 3793 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; 3794 enum rtw89_band band = chan->band_type; 3795 u8 ch = chan->channel; 3796 u32 tgidx_1st; 3797 u32 tgidx_2nd; 3798 s8 tde_1st; 3799 s8 tde_2nd; 3800 u32 tgidx; 3801 s8 val; 3802 3803 if (band == RTW89_BAND_6G) 3804 goto calc_6g; 3805 3806 tgidx = phy_tssi_get_trim_group(ch); 3807 3808 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3809 "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n", 3810 path, tgidx); 3811 3812 if (PHY_IS_TSSI_EXTRA_GROUP(tgidx)) { 3813 tgidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(tgidx); 3814 tgidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(tgidx); 3815 tde_1st = tssi_info->tssi_trim[path][tgidx_1st]; 3816 tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd]; 3817 val = (tde_1st + tde_2nd) / 2; 3818 3819 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3820 "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n", 3821 path, val, tde_1st, tde_2nd); 3822 } else { 3823 val = tssi_info->tssi_trim[path][tgidx]; 3824 3825 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3826 "[TSSI][TRIM]: path=%d mcs trim_de=%d\n", 3827 path, val); 3828 } 3829 3830 return val; 3831 3832 calc_6g: 3833 tgidx = phy_tssi_get_6g_trim_group(ch); 3834 3835 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3836 "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n", 3837 path, tgidx); 3838 3839 if 
	if (PHY_IS_TSSI_EXTRA_GROUP(tgidx)) {
		tgidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
		tgidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
		tde_1st = tssi_info->tssi_trim_6g[path][tgidx_1st];
		tde_2nd = tssi_info->tssi_trim_6g[path][tgidx_2nd];
		val = (tde_1st + tde_2nd) / 2;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
			    path, val, tde_1st, tde_2nd);
	} else {
		val = tssi_info->tssi_trim_6g[path][tgidx];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
			    path, val);
	}

	return val;
}

void rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(struct rtw89_dev *rtwdev,
					       enum rtw89_phy_idx phy,
					       const struct rtw89_chan *chan,
					       struct rtw89_h2c_rf_tssi *h2c)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u8 ch = chan->channel;
	s8 trim_de;
	s8 ofdm_de;
	s8 cck_de;
	u8 gidx;
	s8 val;
	int i;

	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n",
		    phy, ch);

	for (i = RF_PATH_A; i <= RF_PATH_B; i++) {
		trim_de = phy_tssi_get_ofdm_trim_de(rtwdev, phy, chan, i);
		h2c->curr_tssi_trim_de[i] = trim_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d trim_de=0x%x\n", i, trim_de);

		gidx = phy_tssi_get_cck_group(ch);
		cck_de = tssi_info->tssi_cck[i][gidx];
		val = u32_get_bits(cck_de + trim_de, 0xff);

		h2c->curr_tssi_cck_de[i] = 0x0;
		h2c->curr_tssi_cck_de_20m[i] = val;
		h2c->curr_tssi_cck_de_40m[i] = val;
		h2c->curr_tssi_efuse_cck_de[i] = cck_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d cck_de=0x%x\n", i, cck_de);

		ofdm_de = phy_tssi_get_ofdm_de(rtwdev, phy, chan, i);
		val = u32_get_bits(ofdm_de + trim_de, 0xff);

		h2c->curr_tssi_ofdm_de[i] = 0x0;
		h2c->curr_tssi_ofdm_de_20m[i] = val;
		h2c->curr_tssi_ofdm_de_40m[i] = val;
		h2c->curr_tssi_ofdm_de_80m[i] = val;
		h2c->curr_tssi_ofdm_de_160m[i] = val;
		h2c->curr_tssi_ofdm_de_320m[i] = val;
		h2c->curr_tssi_efuse_ofdm_de[i] = ofdm_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d ofdm_de=0x%x\n", i, ofdm_de);
	}
}

void rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(struct rtw89_dev *rtwdev,
					      enum rtw89_phy_idx phy,
					      const struct rtw89_chan *chan,
					      struct rtw89_h2c_rf_tssi *h2c)
{
	struct rtw89_fw_txpwr_track_cfg *trk = rtwdev->fw.elm_info.txpwr_trk;
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const s8 *thm_up[RF_PATH_B + 1] = {};
	const s8 *thm_down[RF_PATH_B + 1] = {};
	u8 subband = chan->subband_type;
	s8 thm_ofst[128] = {0};
	u8 thermal;
	u8 path;
	u8 i, j;

	switch (subband) {
	default:
	case RTW89_CH_2G:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_P][0];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_N][0];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_P][0];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_N][0];
		break;
	case RTW89_CH_5G_BAND_1:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][0];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][0];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][0];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][0];
		break;
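	/* Each subband (or pair of 6G subband indices) selects one column
	 * of the firmware tx-power tracking delta table.
	 */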
	case RTW89_CH_5G_BAND_3:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][1];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][1];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][1];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][1];
		break;
	case RTW89_CH_5G_BAND_4:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][2];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][2];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][2];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][2];
		break;
	case RTW89_CH_6G_BAND_IDX0:
	case RTW89_CH_6G_BAND_IDX1:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][0];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][0];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][0];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][0];
		break;
	case RTW89_CH_6G_BAND_IDX2:
	case RTW89_CH_6G_BAND_IDX3:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][1];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][1];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][1];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][1];
		break;
	case RTW89_CH_6G_BAND_IDX4:
	case RTW89_CH_6G_BAND_IDX5:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][2];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][2];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][2];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][2];
		break;
	case RTW89_CH_6G_BAND_IDX6:
	case RTW89_CH_6G_BAND_IDX7:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][3];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][3];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][3];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][3];
		break;
	}

	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
		    "[TSSI] tmeter tbl on subband: %u\n", subband);

	for (path = RF_PATH_A; path <= RF_PATH_B; path++) {
		thermal = tssi_info->thermal[path];
		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "path: %u, pg thermal: 0x%x\n", path, thermal);

		if (thermal == 0xff) {
			h2c->pg_thermal[path] = 0x38;
			memset(h2c->ftable[path], 0, sizeof(h2c->ftable[path]));
			continue;
		}

		h2c->pg_thermal[path] = thermal;

		i = 0;
		for (j = 0; j < 64; j++)
			thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
				      thm_up[path][i++] :
				      thm_up[path][DELTA_SWINGIDX_SIZE - 1];

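		/* Indices 0..63 above hold the warm-side (positive) offsets;
		 * indices 127..64 below hold the negated cold-side offsets,
		 * i.e. the table is indexed by a signed thermal delta.
		 */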
		i = 1;
		for (j = 127; j >= 64; j--)
			thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
				      -thm_down[path][i++] :
				      -thm_down[path][DELTA_SWINGIDX_SIZE - 1];

		for (i = 0; i < 128; i += 4) {
			h2c->ftable[path][i + 0] = thm_ofst[i + 3];
			h2c->ftable[path][i + 1] = thm_ofst[i + 2];
			h2c->ftable[path][i + 2] = thm_ofst[i + 1];
			h2c->ftable[path][i + 3] = thm_ofst[i + 0];

			rtw89_debug(rtwdev, RTW89_DBG_TSSI,
				    "thm ofst [%x]: %02x %02x %02x %02x\n",
				    i, thm_ofst[i], thm_ofst[i + 1],
				    thm_ofst[i + 2], thm_ofst[i + 3]);
		}
	}
}

static u8 rtw89_phy_cfo_get_xcap_reg(struct rtw89_dev *rtwdev, bool sc_xo)
{
	const struct rtw89_xtal_info *xtal = rtwdev->chip->xtal_info;
	u32 reg_mask;

	if (sc_xo)
		reg_mask = xtal->sc_xo_mask;
	else
		reg_mask = xtal->sc_xi_mask;

	return (u8)rtw89_read32_mask(rtwdev, xtal->xcap_reg, reg_mask);
}

static void rtw89_phy_cfo_set_xcap_reg(struct rtw89_dev *rtwdev, bool sc_xo,
				       u8 val)
{
	const struct rtw89_xtal_info *xtal = rtwdev->chip->xtal_info;
	u32 reg_mask;

	if (sc_xo)
		reg_mask = xtal->sc_xo_mask;
	else
		reg_mask = xtal->sc_xi_mask;

	rtw89_write32_mask(rtwdev, xtal->xcap_reg, reg_mask, val);
}

static void rtw89_phy_cfo_set_crystal_cap(struct rtw89_dev *rtwdev,
					  u8 crystal_cap, bool force)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u8 sc_xi_val, sc_xo_val;

	if (!force && cfo->crystal_cap == crystal_cap)
		return;
	crystal_cap = clamp_t(u8, crystal_cap, 0, 127);
	if (chip->chip_id == RTL8852A || chip->chip_id == RTL8851B) {
		rtw89_phy_cfo_set_xcap_reg(rtwdev, true, crystal_cap);
		rtw89_phy_cfo_set_xcap_reg(rtwdev, false, crystal_cap);
		sc_xo_val = rtw89_phy_cfo_get_xcap_reg(rtwdev, true);
		sc_xi_val = rtw89_phy_cfo_get_xcap_reg(rtwdev, false);
	} else {
		rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XO,
					crystal_cap, XTAL_SC_XO_MASK);
		rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XI,
					crystal_cap, XTAL_SC_XI_MASK);
		rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XO, &sc_xo_val);
		rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XI, &sc_xi_val);
	}
	cfo->crystal_cap = sc_xi_val;
	cfo->x_cap_ofst = (s8)((int)cfo->crystal_cap - cfo->def_x_cap);

	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Set sc_xi=0x%x\n", sc_xi_val);
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Set sc_xo=0x%x\n", sc_xo_val);
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Get xcap_ofst=%d\n",
		    cfo->x_cap_ofst);
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Set xcap OK\n");
}

static void rtw89_phy_cfo_reset(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	u8 cap;

	cfo->def_x_cap = cfo->crystal_cap_default & B_AX_XTAL_SC_MASK;
	cfo->is_adjust = false;
	if (cfo->crystal_cap == cfo->def_x_cap)
		return;
	cap = cfo->crystal_cap;
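	/* Nudge the cap one code toward the default per reset invocation
	 * instead of jumping straight back.
	 */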
	cap += (cap > cfo->def_x_cap ? -1 : 1);
	rtw89_phy_cfo_set_crystal_cap(rtwdev, cap, false);
	rtw89_debug(rtwdev, RTW89_DBG_CFO,
		    "(0x%x) approach to dflt_val=(0x%x)\n", cfo->crystal_cap,
		    cfo->def_x_cap);
}

static void rtw89_dcfo_comp(struct rtw89_dev *rtwdev, s32 curr_cfo)
{
	const struct rtw89_reg_def *dcfo_comp = rtwdev->chip->dcfo_comp;
	bool is_linked = rtwdev->total_sta_assoc > 0;
	s32 cfo_avg_312;
	s32 dcfo_comp_val;
	int sign;

	if (rtwdev->chip->chip_id == RTL8922A)
		return;

	if (!is_linked) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "DCFO: is_linked=%d\n",
			    is_linked);
		return;
	}
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "DCFO: curr_cfo=%d\n", curr_cfo);
	if (curr_cfo == 0)
		return;
	dcfo_comp_val = rtw89_phy_read32_mask(rtwdev, R_DCFO, B_DCFO);
	sign = curr_cfo > 0 ? 1 : -1;
	cfo_avg_312 = curr_cfo / 625 + sign * dcfo_comp_val;
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "avg_cfo_312=%d step\n", cfo_avg_312);
	if (rtwdev->chip->chip_id == RTL8852A && rtwdev->hal.cv == CHIP_CBV)
		cfo_avg_312 = -cfo_avg_312;
	rtw89_phy_set_phy_regs(rtwdev, dcfo_comp->addr, dcfo_comp->mask,
			       cfo_avg_312);
}

static void rtw89_dcfo_comp_init(struct rtw89_dev *rtwdev)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_cfo_regs *cfo = phy->cfo;

	rtw89_phy_set_phy_regs(rtwdev, cfo->comp_seg0, cfo->valid_0_mask, 1);
	rtw89_phy_set_phy_regs(rtwdev, cfo->comp, cfo->weighting_mask, 8);

	if (chip->chip_gen == RTW89_CHIP_AX) {
		if (chip->cfo_hw_comp) {
			rtw89_write32_mask(rtwdev, R_AX_PWR_UL_CTRL2,
					   B_AX_PWR_UL_CFO_MASK, 0x6);
		} else {
			rtw89_phy_set_phy_regs(rtwdev, R_DCFO, B_DCFO, 1);
			rtw89_write32_clr(rtwdev, R_AX_PWR_UL_CTRL2,
					  B_AX_PWR_UL_CFO_MASK);
		}
	}
}

static void rtw89_phy_cfo_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	struct rtw89_efuse *efuse = &rtwdev->efuse;

	cfo->crystal_cap_default = efuse->xtal_cap & B_AX_XTAL_SC_MASK;
	cfo->crystal_cap = cfo->crystal_cap_default;
	cfo->def_x_cap = cfo->crystal_cap;
	cfo->x_cap_ub = min_t(int, cfo->def_x_cap + CFO_BOUND, 0x7f);
	cfo->x_cap_lb = max_t(int, cfo->def_x_cap - CFO_BOUND, 0x1);
	cfo->is_adjust = false;
	cfo->divergence_lock_en = false;
	cfo->x_cap_ofst = 0;
	cfo->lock_cnt = 0;
	cfo->rtw89_multi_cfo_mode = RTW89_TP_BASED_AVG_MODE;
	cfo->apply_compensation = false;
	cfo->residual_cfo_acc = 0;
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Default xcap=%0x\n",
		    cfo->crystal_cap_default);
	rtw89_phy_cfo_set_crystal_cap(rtwdev, cfo->crystal_cap_default, true);
	rtw89_dcfo_comp_init(rtwdev);
	cfo->cfo_timer_ms = 2000;
	cfo->cfo_trig_by_timer_en = false;
	cfo->phy_cfo_trk_cnt = 0;
	cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL;
	cfo->cfo_ul_ofdma_acc_mode = RTW89_CFO_UL_OFDMA_ACC_ENABLE;
}

static void rtw89_phy_cfo_crystal_cap_adjust(struct rtw89_dev *rtwdev,
					     s32 curr_cfo)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	s8 crystal_cap = cfo->crystal_cap;
	s32 cfo_abs = abs(curr_cfo);
	int sign;

	if (curr_cfo == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "curr_cfo=0\n");
		return;
	}
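	/* Hysteresis: start adjusting only once |CFO| exceeds the enable
	 * threshold, and keep adjusting until it falls below the (lower)
	 * stop threshold.
	 */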
	if (!cfo->is_adjust) {
		if (cfo_abs > CFO_TRK_ENABLE_TH)
			cfo->is_adjust = true;
	} else {
		if (cfo_abs <= CFO_TRK_STOP_TH)
			cfo->is_adjust = false;
	}
	if (!cfo->is_adjust) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Stop CFO tracking\n");
		return;
	}
	sign = curr_cfo > 0 ? 1 : -1;
	if (cfo_abs > CFO_TRK_STOP_TH_4)
		crystal_cap += 7 * sign;
	else if (cfo_abs > CFO_TRK_STOP_TH_3)
		crystal_cap += 5 * sign;
	else if (cfo_abs > CFO_TRK_STOP_TH_2)
		crystal_cap += 3 * sign;
	else if (cfo_abs > CFO_TRK_STOP_TH_1)
		crystal_cap += 1 * sign;
	else
		return;
	rtw89_phy_cfo_set_crystal_cap(rtwdev, (u8)crystal_cap, false);
	rtw89_debug(rtwdev, RTW89_DBG_CFO,
		    "X_cap{Curr,Default}={0x%x,0x%x}\n",
		    cfo->crystal_cap, cfo->def_x_cap);
}

static s32 rtw89_phy_average_cfo_calc(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	s32 cfo_khz_all = 0;
	s32 cfo_cnt_all = 0;
	s32 cfo_all_avg = 0;
	u8 i;

	if (rtwdev->total_sta_assoc != 1)
		return 0;
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "one_entry_only\n");
	for (i = 0; i < CFO_TRACK_MAX_USER; i++) {
		if (cfo->cfo_cnt[i] == 0)
			continue;
		cfo_khz_all += cfo->cfo_tail[i];
		cfo_cnt_all += cfo->cfo_cnt[i];
		cfo_all_avg = phy_div(cfo_khz_all, cfo_cnt_all);
		cfo->pre_cfo_avg[i] = cfo->cfo_avg[i];
		cfo->dcfo_avg = phy_div(cfo_khz_all << chip->dcfo_comp_sft,
					cfo_cnt_all);
	}
	rtw89_debug(rtwdev, RTW89_DBG_CFO,
		    "CFO track for macid = %d\n", i);
	rtw89_debug(rtwdev, RTW89_DBG_CFO,
		    "Total cfo=%dK, pkt_cnt=%d, avg_cfo=%dK\n",
		    cfo_khz_all, cfo_cnt_all, cfo_all_avg);
	return cfo_all_avg;
}

static s32 rtw89_phy_multi_sta_cfo_calc(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	struct rtw89_traffic_stats *stats = &rtwdev->stats;
	s32 target_cfo = 0;
	s32 cfo_khz_all = 0;
	s32 cfo_khz_all_tp_wgt = 0;
	s32 cfo_avg = 0;
	s32 max_cfo_lb = BIT(31);
	s32 min_cfo_ub = GENMASK(30, 0);
	u16 cfo_cnt_all = 0;
	u8 active_entry_cnt = 0;
	u8 sta_cnt = 0;
	u32 tp_all = 0;
	u8 i;
	u8 cfo_tol = 0;

	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Multi entry cfo_trk\n");
	if (cfo->rtw89_multi_cfo_mode == RTW89_PKT_BASED_AVG_MODE) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Pkt based avg mode\n");
		for (i = 0; i < CFO_TRACK_MAX_USER; i++) {
			if (cfo->cfo_cnt[i] == 0)
				continue;
			cfo_khz_all += cfo->cfo_tail[i];
			cfo_cnt_all += cfo->cfo_cnt[i];
			cfo_avg = phy_div(cfo_khz_all, (s32)cfo_cnt_all);
			rtw89_debug(rtwdev, RTW89_DBG_CFO,
				    "Msta cfo=%d, pkt_cnt=%d, avg_cfo=%d\n",
				    cfo_khz_all, cfo_cnt_all, cfo_avg);
			target_cfo = cfo_avg;
		}
	} else if (cfo->rtw89_multi_cfo_mode == RTW89_ENTRY_BASED_AVG_MODE) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Entry based avg mode\n");
		for (i = 0; i < CFO_TRACK_MAX_USER; i++) {
			if (cfo->cfo_cnt[i] == 0)
				continue;
			cfo->cfo_avg[i] = phy_div(cfo->cfo_tail[i],
						  (s32)cfo->cfo_cnt[i]);
			cfo_khz_all += cfo->cfo_avg[i];
			rtw89_debug(rtwdev, RTW89_DBG_CFO,
				    "Macid=%d, cfo_avg=%d\n", i,
				    cfo->cfo_avg[i]);
		}
		sta_cnt = rtwdev->total_sta_assoc;
		cfo_avg = phy_div(cfo_khz_all, (s32)sta_cnt);
		rtw89_debug(rtwdev, RTW89_DBG_CFO,
			    "Msta cfo_acc=%d, ent_cnt=%d, avg_cfo=%d\n",
			    cfo_khz_all, sta_cnt, cfo_avg);
		target_cfo = cfo_avg;
	} else if (cfo->rtw89_multi_cfo_mode == RTW89_TP_BASED_AVG_MODE) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "TP based avg mode\n");
		cfo_tol = cfo->sta_cfo_tolerance;
		for (i = 0; i < CFO_TRACK_MAX_USER; i++) {
			sta_cnt++;
			if (cfo->cfo_cnt[i] != 0) {
				cfo->cfo_avg[i] = phy_div(cfo->cfo_tail[i],
							  (s32)cfo->cfo_cnt[i]);
				active_entry_cnt++;
			} else {
				cfo->cfo_avg[i] = cfo->pre_cfo_avg[i];
			}
			max_cfo_lb = max(cfo->cfo_avg[i] - cfo_tol, max_cfo_lb);
			min_cfo_ub = min(cfo->cfo_avg[i] + cfo_tol, min_cfo_ub);
			cfo_khz_all += cfo->cfo_avg[i];
			/* need tp for each entry */
			rtw89_debug(rtwdev, RTW89_DBG_CFO,
				    "[%d] cfo_avg=%d, tp=tbd\n",
				    i, cfo->cfo_avg[i]);
			if (sta_cnt >= rtwdev->total_sta_assoc)
				break;
		}
		tp_all = stats->rx_throughput; /* need tp for each entry */
		cfo_avg = phy_div(cfo_khz_all_tp_wgt, (s32)tp_all);

		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Assoc sta cnt=%d\n",
			    sta_cnt);
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Active sta cnt=%d\n",
			    active_entry_cnt);
		rtw89_debug(rtwdev, RTW89_DBG_CFO,
			    "Msta cfo with tp_wgt=%d, avg_cfo=%d\n",
			    cfo_khz_all_tp_wgt, cfo_avg);
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "cfo_lb=%d,cfo_ub=%d\n",
			    max_cfo_lb, min_cfo_ub);
		if (max_cfo_lb <= min_cfo_ub) {
			rtw89_debug(rtwdev, RTW89_DBG_CFO,
				    "cfo win_size=%d\n",
				    min_cfo_ub - max_cfo_lb);
			target_cfo = clamp(cfo_avg, max_cfo_lb, min_cfo_ub);
		} else {
			rtw89_debug(rtwdev, RTW89_DBG_CFO,
				    "No intersection of cfo tolerance windows\n");
			target_cfo = phy_div(cfo_khz_all, (s32)sta_cnt);
		}
		for (i = 0; i < CFO_TRACK_MAX_USER; i++)
			cfo->pre_cfo_avg[i] = cfo->cfo_avg[i];
	}
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Target cfo=%d\n", target_cfo);
	return target_cfo;
}

static void rtw89_phy_cfo_statistics_reset(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;

	memset(&cfo->cfo_tail, 0, sizeof(cfo->cfo_tail));
	memset(&cfo->cfo_cnt, 0, sizeof(cfo->cfo_cnt));
	cfo->packet_count = 0;
	cfo->packet_count_pre = 0;
	cfo->cfo_avg_pre = 0;
}

static void rtw89_phy_cfo_dm(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	s32 new_cfo = 0;
	bool x_cap_update = false;
	u8 pre_x_cap = cfo->crystal_cap;
	u8 dcfo_comp_sft = rtwdev->chip->dcfo_comp_sft;

	cfo->dcfo_avg = 0;
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "CFO:total_sta_assoc=%d\n",
		    rtwdev->total_sta_assoc);
	if (rtwdev->total_sta_assoc == 0) {
		rtw89_phy_cfo_reset(rtwdev);
		return;
	}
	if (cfo->packet_count == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Pkt cnt = 0\n");
		return;
	}
	if (cfo->packet_count == cfo->packet_count_pre) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Pkt cnt doesn't change\n");
		return;
	}
	if (rtwdev->total_sta_assoc == 1)
		new_cfo = rtw89_phy_average_cfo_calc(rtwdev);
	else
		new_cfo = rtw89_phy_multi_sta_cfo_calc(rtwdev);
	if (cfo->divergence_lock_en) {
		cfo->lock_cnt++;
		if (cfo->lock_cnt > CFO_PERIOD_CNT) {
			cfo->divergence_lock_en = false;
			cfo->lock_cnt = 0;
		} else {
			rtw89_phy_cfo_reset(rtwdev);
		}
		return;
	}
	if (cfo->crystal_cap >= cfo->x_cap_ub ||
	    cfo->crystal_cap <= cfo->x_cap_lb) {
		cfo->divergence_lock_en = true;
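		/* The crystal cap hit a configured bound: lock tracking for
		 * CFO_PERIOD_CNT rounds and fall back to the default cap.
		 */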
		rtw89_phy_cfo_reset(rtwdev);
		return;
	}

	rtw89_phy_cfo_crystal_cap_adjust(rtwdev, new_cfo);
	cfo->cfo_avg_pre = new_cfo;
	cfo->dcfo_avg_pre = cfo->dcfo_avg;
	x_cap_update = cfo->crystal_cap != pre_x_cap;
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Xcap_up=%d\n", x_cap_update);
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Xcap: D:%x C:%x->%x, ofst=%d\n",
		    cfo->def_x_cap, pre_x_cap, cfo->crystal_cap,
		    cfo->x_cap_ofst);
	if (x_cap_update) {
		if (cfo->dcfo_avg > 0)
			cfo->dcfo_avg -= CFO_SW_COMP_FINE_TUNE << dcfo_comp_sft;
		else
			cfo->dcfo_avg += CFO_SW_COMP_FINE_TUNE << dcfo_comp_sft;
	}
	rtw89_dcfo_comp(rtwdev, cfo->dcfo_avg);
	rtw89_phy_cfo_statistics_reset(rtwdev);
}

void rtw89_phy_cfo_track_work(struct work_struct *work)
{
	struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
						cfo_track_work.work);
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;

	mutex_lock(&rtwdev->mutex);
	if (!cfo->cfo_trig_by_timer_en)
		goto out;
	rtw89_leave_ps_mode(rtwdev);
	rtw89_phy_cfo_dm(rtwdev);
	ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->cfo_track_work,
				     msecs_to_jiffies(cfo->cfo_timer_ms));
out:
	mutex_unlock(&rtwdev->mutex);
}

static void rtw89_phy_cfo_start_work(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;

	ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->cfo_track_work,
				     msecs_to_jiffies(cfo->cfo_timer_ms));
}

void rtw89_phy_cfo_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	struct rtw89_traffic_stats *stats = &rtwdev->stats;
	bool is_ul_ofdma = false, ofdma_acc_en = false;

	if (stats->rx_tf_periodic > CFO_TF_CNT_TH)
		is_ul_ofdma = true;
	if (cfo->cfo_ul_ofdma_acc_mode == RTW89_CFO_UL_OFDMA_ACC_ENABLE &&
	    is_ul_ofdma)
		ofdma_acc_en = true;

	switch (cfo->phy_cfo_status) {
	case RTW89_PHY_DCFO_STATE_NORMAL:
		if (stats->tx_throughput >= CFO_TP_UPPER) {
			cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_ENHANCE;
			cfo->cfo_trig_by_timer_en = true;
			cfo->cfo_timer_ms = CFO_COMP_PERIOD;
			rtw89_phy_cfo_start_work(rtwdev);
		}
		break;
	case RTW89_PHY_DCFO_STATE_ENHANCE:
		if (stats->tx_throughput <= CFO_TP_LOWER)
			cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL;
		else if (ofdma_acc_en &&
			 cfo->phy_cfo_trk_cnt >= CFO_PERIOD_CNT)
			cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_HOLD;
		else
			cfo->phy_cfo_trk_cnt++;

		if (cfo->phy_cfo_status == RTW89_PHY_DCFO_STATE_NORMAL) {
			cfo->phy_cfo_trk_cnt = 0;
			cfo->cfo_trig_by_timer_en = false;
		}
		break;
	case RTW89_PHY_DCFO_STATE_HOLD:
		if (stats->tx_throughput <= CFO_TP_LOWER) {
			cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL;
			cfo->phy_cfo_trk_cnt = 0;
			cfo->cfo_trig_by_timer_en = false;
		} else {
			cfo->phy_cfo_trk_cnt++;
		}
		break;
	default:
		cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL;
		cfo->phy_cfo_trk_cnt = 0;
		break;
	}
	rtw89_debug(rtwdev, RTW89_DBG_CFO,
		    "[CFO]WatchDog tp=%d,state=%d,timer_en=%d,trk_cnt=%d,thermal=%ld\n",
		    stats->tx_throughput, cfo->phy_cfo_status,
		    cfo->cfo_trig_by_timer_en, cfo->phy_cfo_trk_cnt,
		    ewma_thermal_read(&rtwdev->phystat.avg_thermal[0]));
	if (cfo->cfo_trig_by_timer_en)
		return;
	rtw89_phy_cfo_dm(rtwdev);
}

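/* Per-PPDU CFO samples are accumulated per MAC ID here and consumed by
 * the periodic tracking above, which averages and then clears them.
 */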
void rtw89_phy_cfo_parse(struct rtw89_dev *rtwdev, s16 cfo_val,
			 struct rtw89_rx_phy_ppdu *phy_ppdu)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	u8 macid = phy_ppdu->mac_id;

	if (macid >= CFO_TRACK_MAX_USER) {
		rtw89_warn(rtwdev, "mac_id %d is out of range\n", macid);
		return;
	}

	cfo->cfo_tail[macid] += cfo_val;
	cfo->cfo_cnt[macid]++;
	cfo->packet_count++;
}

void rtw89_phy_ul_tb_assoc(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
						       rtwvif_link->chanctx_idx);
	struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info;

	if (!chip->ul_tb_waveform_ctrl)
		return;

	rtwvif_link->def_tri_idx =
		rtw89_phy_read32_mask(rtwdev, R_DCFO_OPT, B_TXSHAPE_TRIANGULAR_CFG);

	if (chip->chip_id == RTL8852B && rtwdev->hal.cv > CHIP_CBV)
		rtwvif_link->dyn_tb_bedge_en = false;
	else if (chan->band_type >= RTW89_BAND_5G &&
		 chan->band_width >= RTW89_CHANNEL_WIDTH_40)
		rtwvif_link->dyn_tb_bedge_en = true;
	else
		rtwvif_link->dyn_tb_bedge_en = false;

	rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
		    "[ULTB] def_if_bandedge=%d, def_tri_idx=%d\n",
		    ul_tb_info->def_if_bandedge, rtwvif_link->def_tri_idx);
	rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
		    "[ULTB] dyn_tb_begde_en=%d, dyn_tb_tri_en=%d\n",
		    rtwvif_link->dyn_tb_bedge_en, ul_tb_info->dyn_tb_tri_en);
}

struct rtw89_phy_ul_tb_check_data {
	bool valid;
	bool high_tf_client;
	bool low_tf_client;
	bool dyn_tb_bedge_en;
	u8 def_tri_idx;
};

struct rtw89_phy_power_diff {
	u32 q_00;
	u32 q_11;
	u32 q_matrix_en;
	u32 ultb_1t_norm_160;
	u32 ultb_2t_norm_160;
	u32 com1_norm_1sts;
	u32 com2_resp_1sts_path;
};

static void rtw89_phy_ofdma_power_diff(struct rtw89_dev *rtwdev,
				       struct rtw89_vif_link *rtwvif_link)
{
	static const struct rtw89_phy_power_diff table[2] = {
		{0x0, 0x0, 0x0, 0x0, 0xf4, 0x3, 0x3},
		{0xb50, 0xb50, 0x1, 0xc, 0x0, 0x1, 0x1},
	};
	const struct rtw89_phy_power_diff *param;
	u32 reg;

	if (!rtwdev->chip->ul_tb_pwr_diff)
		return;

	if (rtwvif_link->pwr_diff_en == rtwvif_link->pre_pwr_diff_en) {
		rtwvif_link->pwr_diff_en = false;
		return;
	}

	rtwvif_link->pre_pwr_diff_en = rtwvif_link->pwr_diff_en;
	param = &table[rtwvif_link->pwr_diff_en];

	rtw89_phy_write32_mask(rtwdev, R_Q_MATRIX_00, B_Q_MATRIX_00_REAL,
			       param->q_00);
	rtw89_phy_write32_mask(rtwdev, R_Q_MATRIX_11, B_Q_MATRIX_11_REAL,
			       param->q_11);
	rtw89_phy_write32_mask(rtwdev, R_CUSTOMIZE_Q_MATRIX,
			       B_CUSTOMIZE_Q_MATRIX_EN, param->q_matrix_en);

	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_1T, rtwvif_link->mac_idx);
	rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_1T_NORM_BW160,
			   param->ultb_1t_norm_160);

	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_2T, rtwvif_link->mac_idx);
	rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_2T_NORM_BW160,
			   param->ultb_2t_norm_160);

	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PATH_COM1, rtwvif_link->mac_idx);
	rtw89_write32_mask(rtwdev, reg, B_AX_PATH_COM1_NORM_1STS,
			   param->com1_norm_1sts);

	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PATH_COM2, rtwvif_link->mac_idx);
	rtw89_write32_mask(rtwdev, reg, B_AX_PATH_COM2_RESP_1STS_PATH,
			   param->com2_resp_1sts_path);
}

static
void rtw89_phy_ul_tb_ctrl_check(struct rtw89_dev *rtwdev,
				struct rtw89_vif_link *rtwvif_link,
				struct rtw89_phy_ul_tb_check_data *ul_tb_data)
{
	struct rtw89_traffic_stats *stats = &rtwdev->stats;
	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);

	if (rtwvif_link->wifi_role != RTW89_WIFI_ROLE_STATION)
		return;

	if (!vif->cfg.assoc)
		return;

	if (rtwdev->chip->ul_tb_waveform_ctrl) {
		if (stats->rx_tf_periodic > UL_TB_TF_CNT_L2H_TH)
			ul_tb_data->high_tf_client = true;
		else if (stats->rx_tf_periodic < UL_TB_TF_CNT_H2L_TH)
			ul_tb_data->low_tf_client = true;

		ul_tb_data->valid = true;
		ul_tb_data->def_tri_idx = rtwvif_link->def_tri_idx;
		ul_tb_data->dyn_tb_bedge_en = rtwvif_link->dyn_tb_bedge_en;
	}

	rtw89_phy_ofdma_power_diff(rtwdev, rtwvif_link);
}

static void rtw89_phy_ul_tb_waveform_ctrl(struct rtw89_dev *rtwdev,
					  struct rtw89_phy_ul_tb_check_data *ul_tb_data)
{
	struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info;

	if (!rtwdev->chip->ul_tb_waveform_ctrl)
		return;

	if (ul_tb_data->dyn_tb_bedge_en) {
		if (ul_tb_data->high_tf_client) {
			rtw89_phy_write32_mask(rtwdev, R_BANDEDGE, B_BANDEDGE_EN, 0);
			rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
				    "[ULTB] Turn off if_bandedge\n");
		} else if (ul_tb_data->low_tf_client) {
			rtw89_phy_write32_mask(rtwdev, R_BANDEDGE, B_BANDEDGE_EN,
					       ul_tb_info->def_if_bandedge);
			rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
				    "[ULTB] Set to default if_bandedge = %d\n",
				    ul_tb_info->def_if_bandedge);
		}
	}

	if (ul_tb_info->dyn_tb_tri_en) {
		if (ul_tb_data->high_tf_client) {
			rtw89_phy_write32_mask(rtwdev, R_DCFO_OPT,
					       B_TXSHAPE_TRIANGULAR_CFG, 0);
			rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
				    "[ULTB] Turn off Tx triangle\n");
		} else if (ul_tb_data->low_tf_client) {
			rtw89_phy_write32_mask(rtwdev, R_DCFO_OPT,
					       B_TXSHAPE_TRIANGULAR_CFG,
					       ul_tb_data->def_tri_idx);
			rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
				    "[ULTB] Set to default tx_shap_idx = %d\n",
				    ul_tb_data->def_tri_idx);
		}
	}
}

void rtw89_phy_ul_tb_ctrl_track(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_phy_ul_tb_check_data ul_tb_data = {};
	struct rtw89_vif_link *rtwvif_link;
	struct rtw89_vif *rtwvif;
	unsigned int link_id;

	if (!chip->ul_tb_waveform_ctrl && !chip->ul_tb_pwr_diff)
		return;

	if (rtwdev->total_sta_assoc != 1)
		return;

	rtw89_for_each_rtwvif(rtwdev, rtwvif)
		rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
			rtw89_phy_ul_tb_ctrl_check(rtwdev, rtwvif_link, &ul_tb_data);

	if (!ul_tb_data.valid)
		return;

	rtw89_phy_ul_tb_waveform_ctrl(rtwdev, &ul_tb_data);
}

static void rtw89_phy_ul_tb_info_init(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info;

	if (!chip->ul_tb_waveform_ctrl)
		return;

	ul_tb_info->dyn_tb_tri_en = true;
	ul_tb_info->def_if_bandedge =
		rtw89_phy_read32_mask(rtwdev, R_BANDEDGE, B_BANDEDGE_EN);
}

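/* Antenna diversity keeps three RSSI/EVM statistics instances: a rolling
 * "target" set, plus main/aux sets that are filled per antenna while
 * get_stats is set.
 */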
static
void rtw89_phy_antdiv_sts_instance_reset(struct rtw89_antdiv_stats *antdiv_sts)
{
	ewma_rssi_init(&antdiv_sts->cck_rssi_avg);
	ewma_rssi_init(&antdiv_sts->ofdm_rssi_avg);
	ewma_rssi_init(&antdiv_sts->non_legacy_rssi_avg);
	antdiv_sts->pkt_cnt_cck = 0;
	antdiv_sts->pkt_cnt_ofdm = 0;
	antdiv_sts->pkt_cnt_non_legacy = 0;
	antdiv_sts->evm = 0;
}

static void rtw89_phy_antdiv_sts_instance_add(struct rtw89_dev *rtwdev,
					      struct rtw89_rx_phy_ppdu *phy_ppdu,
					      struct rtw89_antdiv_stats *stats)
{
	if (rtw89_get_data_rate_mode(rtwdev, phy_ppdu->rate) == DATA_RATE_MODE_NON_HT) {
		if (phy_ppdu->rate < RTW89_HW_RATE_OFDM6) {
			ewma_rssi_add(&stats->cck_rssi_avg, phy_ppdu->rssi_avg);
			stats->pkt_cnt_cck++;
		} else {
			ewma_rssi_add(&stats->ofdm_rssi_avg, phy_ppdu->rssi_avg);
			stats->pkt_cnt_ofdm++;
			stats->evm += phy_ppdu->ofdm.evm_min;
		}
	} else {
		ewma_rssi_add(&stats->non_legacy_rssi_avg, phy_ppdu->rssi_avg);
		stats->pkt_cnt_non_legacy++;
		stats->evm += phy_ppdu->ofdm.evm_min;
	}
}

static u8 rtw89_phy_antdiv_sts_instance_get_rssi(struct rtw89_antdiv_stats *stats)
{
	if (stats->pkt_cnt_non_legacy >= stats->pkt_cnt_cck &&
	    stats->pkt_cnt_non_legacy >= stats->pkt_cnt_ofdm)
		return ewma_rssi_read(&stats->non_legacy_rssi_avg);
	else if (stats->pkt_cnt_ofdm >= stats->pkt_cnt_cck &&
		 stats->pkt_cnt_ofdm >= stats->pkt_cnt_non_legacy)
		return ewma_rssi_read(&stats->ofdm_rssi_avg);
	else
		return ewma_rssi_read(&stats->cck_rssi_avg);
}

static u8 rtw89_phy_antdiv_sts_instance_get_evm(struct rtw89_antdiv_stats *stats)
{
	return phy_div(stats->evm, stats->pkt_cnt_non_legacy + stats->pkt_cnt_ofdm);
}

void rtw89_phy_antdiv_parse(struct rtw89_dev *rtwdev,
			    struct rtw89_rx_phy_ppdu *phy_ppdu)
{
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
	struct rtw89_hal *hal = &rtwdev->hal;

	if (!hal->ant_diversity || hal->ant_diversity_fixed)
		return;

	rtw89_phy_antdiv_sts_instance_add(rtwdev, phy_ppdu, &antdiv->target_stats);

	if (!antdiv->get_stats)
		return;

	if (hal->antenna_rx == RF_A)
		rtw89_phy_antdiv_sts_instance_add(rtwdev, phy_ppdu, &antdiv->main_stats);
	else if (hal->antenna_rx == RF_B)
		rtw89_phy_antdiv_sts_instance_add(rtwdev, phy_ppdu, &antdiv->aux_stats);
}

static void rtw89_phy_antdiv_reg_init(struct rtw89_dev *rtwdev)
{
	rtw89_phy_write32_idx(rtwdev, R_P0_TRSW, B_P0_ANT_TRAIN_EN,
			      0x0, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_TRSW, B_P0_TX_ANT_SEL,
			      0x0, RTW89_PHY_0);

	rtw89_phy_write32_idx(rtwdev, R_P0_ANT_SW, B_P0_TRSW_TX_EXTEND,
			      0x0, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANT_SW, B_P0_HW_ANTSW_DIS_BY_GNT_BT,
			      0x0, RTW89_PHY_0);

	rtw89_phy_write32_idx(rtwdev, R_P0_TRSW, B_P0_BT_FORCE_ANTIDX_EN,
			      0x0, RTW89_PHY_0);

	rtw89_phy_write32_idx(rtwdev, R_RFSW_CTRL_ANT0_BASE, B_RFSW_CTRL_ANT_MAPPING,
			      0x0100, RTW89_PHY_0);

	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_BTG_TRX,
			      0x1, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_HW_CTRL,
			      0x0, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_SW_2G,
			      0x0, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_SW_5G,
			      0x0, RTW89_PHY_0);
}

static void rtw89_phy_antdiv_sts_reset(struct rtw89_dev *rtwdev)
{
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;

	rtw89_phy_antdiv_sts_instance_reset(&antdiv->target_stats);
	rtw89_phy_antdiv_sts_instance_reset(&antdiv->main_stats);
	rtw89_phy_antdiv_sts_instance_reset(&antdiv->aux_stats);
}

static void rtw89_phy_antdiv_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
	struct rtw89_hal *hal = &rtwdev->hal;

	if (!hal->ant_diversity)
		return;

	antdiv->get_stats = false;
	antdiv->rssi_pre = 0;
	rtw89_phy_antdiv_sts_reset(rtwdev);
	rtw89_phy_antdiv_reg_init(rtwdev);
}

static void rtw89_phy_thermal_protect(struct rtw89_dev *rtwdev)
{
	struct rtw89_phy_stat *phystat = &rtwdev->phystat;
	struct rtw89_hal *hal = &rtwdev->hal;
	u8 th_max = phystat->last_thermal_max;
	u8 lv = hal->thermal_prot_lv;

	if (!hal->thermal_prot_th ||
	    (hal->disabled_dm_bitmap & BIT(RTW89_DM_THERMAL_PROTECT)))
		return;

	if (th_max > hal->thermal_prot_th && lv < RTW89_THERMAL_PROT_LV_MAX)
		lv++;
	else if (th_max < hal->thermal_prot_th - 2 && lv > 0)
		lv--;
	else
		return;

	hal->thermal_prot_lv = lv;

	rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, "thermal protection lv=%d\n", lv);

	rtw89_fw_h2c_tx_duty(rtwdev, hal->thermal_prot_lv);
}

static void rtw89_phy_stat_thermal_update(struct rtw89_dev *rtwdev)
{
	struct rtw89_phy_stat *phystat = &rtwdev->phystat;
	u8 th, th_max = 0;
	int i;

	for (i = 0; i < rtwdev->chip->rf_path_num; i++) {
		th = rtw89_chip_get_thermal(rtwdev, i);
		if (th)
			ewma_thermal_add(&phystat->avg_thermal[i], th);

		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "path(%d) thermal cur=%u avg=%ld", i, th,
			    ewma_thermal_read(&phystat->avg_thermal[i]));

		th_max = max(th_max, th);
	}

	phystat->last_thermal_max = th_max;
}

struct rtw89_phy_iter_rssi_data {
	struct rtw89_dev *rtwdev;
	struct rtw89_phy_ch_info *ch_info;
	bool rssi_changed;
};

static
void __rtw89_phy_stat_rssi_update_iter(struct rtw89_sta_link *rtwsta_link,
				       struct rtw89_phy_iter_rssi_data *rssi_data)
{
	struct rtw89_phy_ch_info *ch_info = rssi_data->ch_info;
	unsigned long rssi_curr;

	rssi_curr = ewma_rssi_read(&rtwsta_link->avg_rssi);

	if (rssi_curr < ch_info->rssi_min) {
		ch_info->rssi_min = rssi_curr;
		ch_info->rssi_min_macid = rtwsta_link->mac_id;
	}

	if (rtwsta_link->prev_rssi == 0) {
		rtwsta_link->prev_rssi = rssi_curr;
	} else if (abs((int)rtwsta_link->prev_rssi - (int)rssi_curr) >
		   (3 << RSSI_FACTOR)) {
		rtwsta_link->prev_rssi = rssi_curr;
		rssi_data->rssi_changed = true;
	}
}

static void rtw89_phy_stat_rssi_update_iter(void *data,
					    struct ieee80211_sta *sta)
{
	struct rtw89_phy_iter_rssi_data *rssi_data =
		(struct rtw89_phy_iter_rssi_data *)data;
	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
	struct rtw89_sta_link *rtwsta_link;
	unsigned int link_id;

	rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id)
		__rtw89_phy_stat_rssi_update_iter(rtwsta_link, rssi_data);
}

static void rtw89_phy_stat_rssi_update(struct rtw89_dev *rtwdev)
{
	struct rtw89_phy_iter_rssi_data rssi_data = {0};

	rssi_data.rtwdev = rtwdev;
	rssi_data.ch_info = &rtwdev->ch_info;
	rssi_data.ch_info->rssi_min = U8_MAX;
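	/* Notify BTC below only when some station's averaged RSSI moved by
	 * more than 3 (in RSSI_FACTOR-scaled units) since the last report.
	 */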
	ieee80211_iterate_stations_atomic(rtwdev->hw,
					  rtw89_phy_stat_rssi_update_iter,
					  &rssi_data);
	if (rssi_data.rssi_changed)
		rtw89_btc_ntfy_wl_sta(rtwdev);
}

static void rtw89_phy_stat_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_phy_stat *phystat = &rtwdev->phystat;
	int i;

	for (i = 0; i < rtwdev->chip->rf_path_num; i++)
		ewma_thermal_init(&phystat->avg_thermal[i]);

	rtw89_phy_stat_thermal_update(rtwdev);

	memset(&phystat->cur_pkt_stat, 0, sizeof(phystat->cur_pkt_stat));
	memset(&phystat->last_pkt_stat, 0, sizeof(phystat->last_pkt_stat));

	ewma_rssi_init(&phystat->bcn_rssi);

	rtwdev->hal.thermal_prot_lv = 0;
}

void rtw89_phy_stat_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_phy_stat *phystat = &rtwdev->phystat;

	rtw89_phy_stat_thermal_update(rtwdev);
	rtw89_phy_thermal_protect(rtwdev);
	rtw89_phy_stat_rssi_update(rtwdev);

	phystat->last_pkt_stat = phystat->cur_pkt_stat;
	memset(&phystat->cur_pkt_stat, 0, sizeof(phystat->cur_pkt_stat));
}

static u16 rtw89_phy_ccx_us_to_idx(struct rtw89_dev *rtwdev, u32 time_us)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;

	return time_us >> (ilog2(CCX_US_BASE_RATIO) + env->ccx_unit_idx);
}

static u32 rtw89_phy_ccx_idx_to_us(struct rtw89_dev *rtwdev, u16 idx)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;

	return idx << (ilog2(CCX_US_BASE_RATIO) + env->ccx_unit_idx);
}

static void rtw89_phy_ccx_top_setting_init(struct rtw89_dev *rtwdev)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;

	env->ccx_manual_ctrl = false;
	env->ccx_ongoing = false;
	env->ccx_rac_lv = RTW89_RAC_RELEASE;
	env->ccx_period = 0;
	env->ccx_unit_idx = RTW89_CCX_32_US;

	rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->en_mask, 1);
	rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->trig_opt_mask, 1);
	rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 1);
	rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->edcca_opt_mask,
			       RTW89_CCX_EDCCA_BW20_0);
}

static u16 rtw89_phy_ccx_get_report(struct rtw89_dev *rtwdev, u16 report,
				    u16 score)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	u32 numer = 0;
	u16 ret = 0;

	numer = report * score + (env->ccx_period >> 1);
	if (env->ccx_period)
		ret = numer / env->ccx_period;

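	/* Round to nearest (half-period added above) and clamp to
	 * score - 1 so a saturated counter cannot report a full score.
	 */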
	return ret >= score ? score - 1 : ret;
}

static void rtw89_phy_ccx_ms_to_period_unit(struct rtw89_dev *rtwdev,
					    u16 time_ms, u32 *period,
					    u32 *unit_idx)
{
	u32 idx;
	u8 quotient;

	if (time_ms >= CCX_MAX_PERIOD)
		time_ms = CCX_MAX_PERIOD;

	quotient = CCX_MAX_PERIOD_UNIT * time_ms / CCX_MAX_PERIOD;

	if (quotient < 4)
		idx = RTW89_CCX_4_US;
	else if (quotient < 8)
		idx = RTW89_CCX_8_US;
	else if (quotient < 16)
		idx = RTW89_CCX_16_US;
	else
		idx = RTW89_CCX_32_US;

	*unit_idx = idx;
	*period = (time_ms * MS_TO_4US_RATIO) >> idx;

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "[Trigger Time] period:%d, unit_idx:%d\n",
		    *period, *unit_idx);
}

static void rtw89_phy_ccx_racing_release(struct rtw89_dev *rtwdev)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "lv:(%d)->(0)\n", env->ccx_rac_lv);

	env->ccx_ongoing = false;
	env->ccx_rac_lv = RTW89_RAC_RELEASE;
	env->ifs_clm_app = RTW89_IFS_CLM_BACKGROUND;
}

static bool rtw89_phy_ifs_clm_th_update_check(struct rtw89_dev *rtwdev,
					      struct rtw89_ccx_para_info *para)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	bool is_update = env->ifs_clm_app != para->ifs_clm_app;
	u8 i = 0;
	u16 *ifs_th_l = env->ifs_clm_th_l;
	u16 *ifs_th_h = env->ifs_clm_th_h;
	u32 ifs_th0_us = 0, ifs_th_times = 0;
	u32 ifs_th_h_us[RTW89_IFS_CLM_NUM] = {0};

	if (!is_update)
		goto ifs_update_finished;

	switch (para->ifs_clm_app) {
	case RTW89_IFS_CLM_INIT:
	case RTW89_IFS_CLM_BACKGROUND:
	case RTW89_IFS_CLM_ACS:
	case RTW89_IFS_CLM_DBG:
	case RTW89_IFS_CLM_DIG:
	case RTW89_IFS_CLM_TDMA_DIG:
		ifs_th0_us = IFS_CLM_TH0_UPPER;
		ifs_th_times = IFS_CLM_TH_MUL;
		break;
	case RTW89_IFS_CLM_DBG_MANUAL:
		ifs_th0_us = para->ifs_clm_manual_th0;
		ifs_th_times = para->ifs_clm_manual_th_times;
		break;
	default:
		break;
	}

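	/* The four upper thresholds grow geometrically:
	 * high[i] (in us) = ifs_th0_us * ifs_th_times^i.
	 */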
	/* Set sampling threshold for 4 different regions, unit in idx_cnt.
	 * low[i] = high[i-1] + 1
	 * high[i] = high[i-1] * ifs_th_times
	 */
	ifs_th_l[IFS_CLM_TH_START_IDX] = 0;
	ifs_th_h_us[IFS_CLM_TH_START_IDX] = ifs_th0_us;
	ifs_th_h[IFS_CLM_TH_START_IDX] = rtw89_phy_ccx_us_to_idx(rtwdev,
								 ifs_th0_us);
	for (i = 1; i < RTW89_IFS_CLM_NUM; i++) {
		ifs_th_l[i] = ifs_th_h[i - 1] + 1;
		ifs_th_h_us[i] = ifs_th_h_us[i - 1] * ifs_th_times;
		ifs_th_h[i] = rtw89_phy_ccx_us_to_idx(rtwdev, ifs_th_h_us[i]);
	}

ifs_update_finished:
	if (!is_update)
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "No need to update IFS_TH\n");

	return is_update;
}

static void rtw89_phy_ifs_clm_set_th_reg(struct rtw89_dev *rtwdev)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;
	u8 i = 0;

	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_th_l_mask,
			       env->ifs_clm_th_l[0]);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_th_l_mask,
			       env->ifs_clm_th_l[1]);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_th_l_mask,
			       env->ifs_clm_th_l[2]);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_th_l_mask,
			       env->ifs_clm_th_l[3]);

	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_th_h_mask,
			       env->ifs_clm_th_h[0]);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_th_h_mask,
			       env->ifs_clm_th_h[1]);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_th_h_mask,
			       env->ifs_clm_th_h[2]);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_th_h_mask,
			       env->ifs_clm_th_h[3]);

	for (i = 0; i < RTW89_IFS_CLM_NUM; i++)
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "Update IFS_T%d_th{low, high} : {%d, %d}\n",
			    i + 1, env->ifs_clm_th_l[i], env->ifs_clm_th_h[i]);
}

static void rtw89_phy_ifs_clm_setting_init(struct rtw89_dev *rtwdev)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;
	struct rtw89_ccx_para_info para = {0};

	env->ifs_clm_app = RTW89_IFS_CLM_BACKGROUND;
	env->ifs_clm_mntr_time = 0;

	para.ifs_clm_app = RTW89_IFS_CLM_INIT;
	if (rtw89_phy_ifs_clm_th_update_check(rtwdev, &para))
		rtw89_phy_ifs_clm_set_th_reg(rtwdev);

	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_collect_en_mask, true);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_en_mask, true);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_en_mask, true);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_en_mask, true);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_en_mask, true);
}

static int rtw89_phy_ccx_racing_ctrl(struct rtw89_dev *rtwdev,
				     enum rtw89_env_racing_lv level)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	int ret = 0;

	if (level >= RTW89_RAC_MAX_NUM) {
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "[WARNING] Wrong LV=%d\n", level);
		return -EINVAL;
	}

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "ccx_ongoing=%d, level:(%d)->(%d)\n", env->ccx_ongoing,
		    env->ccx_rac_lv, level);

	if (env->ccx_ongoing) {
		if (level <= env->ccx_rac_lv)
			ret = -EINVAL;
		else
			env->ccx_ongoing = false;
	}

	if (ret == 0)
		env->ccx_rac_lv = level;

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "ccx racing success=%d\n",
		    !ret);

	return ret;
}

static void rtw89_phy_ccx_trigger(struct rtw89_dev *rtwdev)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;

	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_clm_cnt_clear_mask, 0);
	rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 0);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_clm_cnt_clear_mask, 1);
	rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 1);

	env->ccx_ongoing = true;
}

static void rtw89_phy_ifs_clm_get_utility(struct rtw89_dev *rtwdev)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	u8 i = 0;
	u32 res = 0;

	env->ifs_clm_tx_ratio =
		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_tx, PERCENT);
	env->ifs_clm_edcca_excl_cca_ratio =
		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_edcca_excl_cca,
					 PERCENT);
	env->ifs_clm_cck_fa_ratio =
		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_cckfa, PERCENT);
	env->ifs_clm_ofdm_fa_ratio =
		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_ofdmfa, PERCENT);
	env->ifs_clm_cck_cca_excl_fa_ratio =
		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_cckcca_excl_fa,
					 PERCENT);
	env->ifs_clm_ofdm_cca_excl_fa_ratio =
		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_ofdmcca_excl_fa,
					 PERCENT);
	env->ifs_clm_cck_fa_permil =
		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_cckfa, PERMIL);
	env->ifs_clm_ofdm_fa_permil =
		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_ofdmfa, PERMIL);

	for (i = 0; i < RTW89_IFS_CLM_NUM; i++) {
		if (env->ifs_clm_his[i] > ENV_MNTR_IFSCLM_HIS_MAX) {
			env->ifs_clm_ifs_avg[i] = ENV_MNTR_FAIL_DWORD;
		} else {
			env->ifs_clm_ifs_avg[i] =
				rtw89_phy_ccx_idx_to_us(rtwdev,
							env->ifs_clm_avg[i]);
		}

		res = rtw89_phy_ccx_idx_to_us(rtwdev, env->ifs_clm_cca[i]);
		res += env->ifs_clm_his[i] >> 1;
		if (env->ifs_clm_his[i])
			res /= env->ifs_clm_his[i];
		else
			res = 0;
		env->ifs_clm_cca_avg[i] = res;
	}

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM ratio {Tx, EDCCA_exclu_cca} = {%d, %d}\n",
		    env->ifs_clm_tx_ratio, env->ifs_clm_edcca_excl_cca_ratio);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM FA ratio {CCK, OFDM} = {%d, %d}\n",
		    env->ifs_clm_cck_fa_ratio, env->ifs_clm_ofdm_fa_ratio);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM FA permil {CCK, OFDM} = {%d, %d}\n",
		    env->ifs_clm_cck_fa_permil, env->ifs_clm_ofdm_fa_permil);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM CCA_exclu_FA ratio {CCK, OFDM} = {%d, %d}\n",
		    env->ifs_clm_cck_cca_excl_fa_ratio,
		    env->ifs_clm_ofdm_cca_excl_fa_ratio);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "Time:[his, ifs_avg(us), cca_avg(us)]\n");
	for (i = 0; i < RTW89_IFS_CLM_NUM; i++)
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "T%d:[%d, %d, %d]\n",
			    i + 1, env->ifs_clm_his[i], env->ifs_clm_ifs_avg[i],
			    env->ifs_clm_cca_avg[i]);
}

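/* Harvest one finished IFS-CLM measurement round from hardware into env;
 * returns false if the counting period has not completed yet.
 */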
static bool rtw89_phy_ifs_clm_get_result(struct rtw89_dev *rtwdev)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;
	u8 i = 0;

	if (rtw89_phy_read32_mask(rtwdev, ccx->ifs_total_addr,
				  ccx->ifs_cnt_done_mask) == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "Get IFS_CLM report Fail\n");
		return false;
	}

	env->ifs_clm_tx =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_tx_cnt_addr,
				      ccx->ifs_clm_tx_cnt_msk);
	env->ifs_clm_edcca_excl_cca =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_tx_cnt_addr,
				      ccx->ifs_clm_edcca_excl_cca_fa_mask);
	env->ifs_clm_cckcca_excl_fa =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_cca_addr,
				      ccx->ifs_clm_cckcca_excl_fa_mask);
	env->ifs_clm_ofdmcca_excl_fa =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_cca_addr,
				      ccx->ifs_clm_ofdmcca_excl_fa_mask);
	env->ifs_clm_cckfa =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_fa_addr,
				      ccx->ifs_clm_cck_fa_mask);
	env->ifs_clm_ofdmfa =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_fa_addr,
				      ccx->ifs_clm_ofdm_fa_mask);

	env->ifs_clm_his[0] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr,
				      ccx->ifs_t1_his_mask);
	env->ifs_clm_his[1] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr,
				      ccx->ifs_t2_his_mask);
	env->ifs_clm_his[2] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr,
				      ccx->ifs_t3_his_mask);
	env->ifs_clm_his[3] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr,
				      ccx->ifs_t4_his_mask);

	env->ifs_clm_avg[0] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_l_addr,
				      ccx->ifs_t1_avg_mask);
	env->ifs_clm_avg[1] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_l_addr,
				      ccx->ifs_t2_avg_mask);
	env->ifs_clm_avg[2] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_h_addr,
				      ccx->ifs_t3_avg_mask);
	env->ifs_clm_avg[3] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_h_addr,
				      ccx->ifs_t4_avg_mask);

	env->ifs_clm_cca[0] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_l_addr,
				      ccx->ifs_t1_cca_mask);
	env->ifs_clm_cca[1] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_l_addr,
				      ccx->ifs_t2_cca_mask);
	env->ifs_clm_cca[2] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_h_addr,
				      ccx->ifs_t3_cca_mask);
	env->ifs_clm_cca[3] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_h_addr,
				      ccx->ifs_t4_cca_mask);

	env->ifs_clm_total_ifs =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_total_addr,
				      ccx->ifs_total_mask);

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "IFS-CLM total_ifs = %d\n",
		    env->ifs_clm_total_ifs);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "{Tx, EDCCA_exclu_cca} = {%d, %d}\n",
		    env->ifs_clm_tx, env->ifs_clm_edcca_excl_cca);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM FA{CCK, OFDM} = {%d, %d}\n",
		    env->ifs_clm_cckfa, env->ifs_clm_ofdmfa);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM CCA_exclu_FA{CCK, OFDM} = {%d, %d}\n",
		    env->ifs_clm_cckcca_excl_fa, env->ifs_clm_ofdmcca_excl_fa);

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "Time:[his, avg, cca]\n");
	for (i = 0; i < RTW89_IFS_CLM_NUM; i++)
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "T%d:[%d, %d, %d]\n", i + 1, env->ifs_clm_his[i],
			    env->ifs_clm_avg[i], env->ifs_clm_cca[i]);

	rtw89_phy_ifs_clm_get_utility(rtwdev);

	return true;
}

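/* Program a new IFS-CLM monitor window; fails with -EINVAL when the
 * racing level is refused or mntr_time is zero.
 */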
static int rtw89_phy_ifs_clm_set(struct rtw89_dev *rtwdev,
				 struct rtw89_ccx_para_info *para)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;
	u32 period = 0;
	u32 unit_idx = 0;

	if (para->mntr_time == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "[WARN] MNTR_TIME is 0\n");
		return -EINVAL;
	}

	if (rtw89_phy_ccx_racing_ctrl(rtwdev, para->rac_lv))
		return -EINVAL;

	if (para->mntr_time != env->ifs_clm_mntr_time) {
		rtw89_phy_ccx_ms_to_period_unit(rtwdev, para->mntr_time,
						&period, &unit_idx);
		rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr,
				       ccx->ifs_clm_period_mask, period);
		rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr,
				       ccx->ifs_clm_cnt_unit_mask,
				       unit_idx);

		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "Update IFS-CLM time ((%d)) -> ((%d))\n",
			    env->ifs_clm_mntr_time, para->mntr_time);

		env->ifs_clm_mntr_time = para->mntr_time;
		env->ccx_period = (u16)period;
		env->ccx_unit_idx = (u8)unit_idx;
	}

	if (rtw89_phy_ifs_clm_th_update_check(rtwdev, para)) {
		env->ifs_clm_app = para->ifs_clm_app;
		rtw89_phy_ifs_clm_set_th_reg(rtwdev);
	}

	return 0;
}

void rtw89_phy_env_monitor_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	struct rtw89_ccx_para_info para = {0};
	u8 chk_result = RTW89_PHY_ENV_MON_CCX_FAIL;

	env->ccx_watchdog_result = RTW89_PHY_ENV_MON_CCX_FAIL;
	if (env->ccx_manual_ctrl) {
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "CCX in manual ctrl\n");
		return;
	}

	/* only ifs_clm for now */
	if (rtw89_phy_ifs_clm_get_result(rtwdev))
		env->ccx_watchdog_result |= RTW89_PHY_ENV_MON_IFS_CLM;

	rtw89_phy_ccx_racing_release(rtwdev);
	para.mntr_time = 1900;
	para.rac_lv = RTW89_RAC_LV_1;
	para.ifs_clm_app = RTW89_IFS_CLM_BACKGROUND;

	if (rtw89_phy_ifs_clm_set(rtwdev, &para) == 0)
		chk_result |= RTW89_PHY_ENV_MON_IFS_CLM;
	if (chk_result)
		rtw89_phy_ccx_trigger(rtwdev);

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "get_result=0x%x, chk_result:0x%x\n",
		    env->ccx_watchdog_result, chk_result);
}

static bool rtw89_physts_ie_page_valid(enum rtw89_phy_status_bitmap *ie_page)
{
	if (*ie_page >= RTW89_PHYSTS_BITMAP_NUM ||
	    *ie_page == RTW89_RSVD_9)
		return false;
	else if (*ie_page > RTW89_RSVD_9)
		*ie_page -= 1;

	return true;
}

static u32 rtw89_phy_get_ie_bitmap_addr(enum rtw89_phy_status_bitmap ie_page)
{
	static const u8 ie_page_shift = 2;

	return R_PHY_STS_BITMAP_ADDR_START + (ie_page << ie_page_shift);
}

static u32 rtw89_physts_get_ie_bitmap(struct rtw89_dev *rtwdev,
				      enum rtw89_phy_status_bitmap ie_page,
				      enum rtw89_phy_idx phy_idx)
{
	u32 addr;

	if (!rtw89_physts_ie_page_valid(&ie_page))
		return 0;

	addr = rtw89_phy_get_ie_bitmap_addr(ie_page);

	return rtw89_phy_read32_idx(rtwdev, addr, MASKDWORD, phy_idx);
}

static void rtw89_physts_set_ie_bitmap(struct rtw89_dev *rtwdev,
				       enum rtw89_phy_status_bitmap ie_page,
				       u32 val, enum rtw89_phy_idx phy_idx)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 addr;

	if (!rtw89_physts_ie_page_valid(&ie_page))
		return;

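	/* 8852A appears to implement only a subset of the bitmap bits;
	 * mask off the rest before writing.
	 */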
static u32 rtw89_phy_get_ie_bitmap_addr(enum rtw89_phy_status_bitmap ie_page)
{
	static const u8 ie_page_shift = 2;

	return R_PHY_STS_BITMAP_ADDR_START + (ie_page << ie_page_shift);
}

static u32 rtw89_physts_get_ie_bitmap(struct rtw89_dev *rtwdev,
				      enum rtw89_phy_status_bitmap ie_page,
				      enum rtw89_phy_idx phy_idx)
{
	u32 addr;

	if (!rtw89_physts_ie_page_valid(&ie_page))
		return 0;

	addr = rtw89_phy_get_ie_bitmap_addr(ie_page);

	return rtw89_phy_read32_idx(rtwdev, addr, MASKDWORD, phy_idx);
}

static void rtw89_physts_set_ie_bitmap(struct rtw89_dev *rtwdev,
				       enum rtw89_phy_status_bitmap ie_page,
				       u32 val, enum rtw89_phy_idx phy_idx)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 addr;

	if (!rtw89_physts_ie_page_valid(&ie_page))
		return;

	if (chip->chip_id == RTL8852A)
		val &= B_PHY_STS_BITMAP_MSK_52A;

	addr = rtw89_phy_get_ie_bitmap_addr(ie_page);
	rtw89_phy_write32_idx(rtwdev, addr, MASKDWORD, val, phy_idx);
}

static void rtw89_physts_enable_ie_bitmap(struct rtw89_dev *rtwdev,
					  enum rtw89_phy_status_bitmap bitmap,
					  enum rtw89_phy_status_ie_type ie,
					  bool enable, enum rtw89_phy_idx phy_idx)
{
	u32 val = rtw89_physts_get_ie_bitmap(rtwdev, bitmap, phy_idx);

	if (enable)
		val |= BIT(ie);
	else
		val &= ~BIT(ie);

	rtw89_physts_set_ie_bitmap(rtwdev, bitmap, val, phy_idx);
}

static void rtw89_physts_enable_fail_report(struct rtw89_dev *rtwdev,
					    bool enable,
					    enum rtw89_phy_idx phy_idx)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	const struct rtw89_physts_regs *physts = phy->physts;

	if (enable) {
		rtw89_phy_write32_idx_clr(rtwdev, physts->setting_addr,
					  physts->dis_trigger_fail_mask, phy_idx);
		rtw89_phy_write32_idx_clr(rtwdev, physts->setting_addr,
					  physts->dis_trigger_brk_mask, phy_idx);
	} else {
		rtw89_phy_write32_idx_set(rtwdev, physts->setting_addr,
					  physts->dis_trigger_fail_mask, phy_idx);
		rtw89_phy_write32_idx_set(rtwdev, physts->setting_addr,
					  physts->dis_trigger_brk_mask, phy_idx);
	}
}

static void __rtw89_physts_parsing_init(struct rtw89_dev *rtwdev,
					enum rtw89_phy_idx phy_idx)
{
	u8 i;

	rtw89_physts_enable_fail_report(rtwdev, false, phy_idx);

	for (i = 0; i < RTW89_PHYSTS_BITMAP_NUM; i++) {
		if (i >= RTW89_CCK_PKT)
			rtw89_physts_enable_ie_bitmap(rtwdev, i,
						      RTW89_PHYSTS_IE09_FTR_0,
						      true, phy_idx);
		if ((i >= RTW89_CCK_BRK && i <= RTW89_VHT_MU) ||
		    (i >= RTW89_RSVD_9 && i <= RTW89_CCK_PKT))
			continue;
		rtw89_physts_enable_ie_bitmap(rtwdev, i,
					      RTW89_PHYSTS_IE24_OFDM_TD_PATH_A,
					      true, phy_idx);
	}
	rtw89_physts_enable_ie_bitmap(rtwdev, RTW89_VHT_PKT,
				      RTW89_PHYSTS_IE13_DL_MU_DEF, true, phy_idx);
	rtw89_physts_enable_ie_bitmap(rtwdev, RTW89_HE_PKT,
				      RTW89_PHYSTS_IE13_DL_MU_DEF, true, phy_idx);

	/* force IE01 for channel index, only channel field is valid */
	rtw89_physts_enable_ie_bitmap(rtwdev, RTW89_CCK_PKT,
				      RTW89_PHYSTS_IE01_CMN_OFDM, true, phy_idx);
}

static void rtw89_physts_parsing_init(struct rtw89_dev *rtwdev)
{
	__rtw89_physts_parsing_init(rtwdev, RTW89_PHY_0);
	if (rtwdev->dbcc_en)
		__rtw89_physts_parsing_init(rtwdev, RTW89_PHY_1);
}

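/* Read one LNA/TIA gain table from hardware into the DIG state. Each table
 * entry is a 4-bit signed offset (hence sign_extend32() with U4_MAX_BIT)
 * applied on top of a per-index base gain that advances by DIG_GAIN per step.
 */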
static void rtw89_phy_dig_read_gain_table(struct rtw89_dev *rtwdev, int type)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_dig_info *dig = &rtwdev->dig;
	const struct rtw89_phy_dig_gain_cfg *cfg;
	const char *msg;
	u8 i;
	s8 gain_base;
	s8 *gain_arr;
	u32 tmp;

	switch (type) {
	case RTW89_DIG_GAIN_LNA_G:
		gain_arr = dig->lna_gain_g;
		gain_base = LNA0_GAIN;
		cfg = chip->dig_table->cfg_lna_g;
		msg = "lna_gain_g";
		break;
	case RTW89_DIG_GAIN_TIA_G:
		gain_arr = dig->tia_gain_g;
		gain_base = TIA0_GAIN_G;
		cfg = chip->dig_table->cfg_tia_g;
		msg = "tia_gain_g";
		break;
	case RTW89_DIG_GAIN_LNA_A:
		gain_arr = dig->lna_gain_a;
		gain_base = LNA0_GAIN;
		cfg = chip->dig_table->cfg_lna_a;
		msg = "lna_gain_a";
		break;
	case RTW89_DIG_GAIN_TIA_A:
		gain_arr = dig->tia_gain_a;
		gain_base = TIA0_GAIN_A;
		cfg = chip->dig_table->cfg_tia_a;
		msg = "tia_gain_a";
		break;
	default:
		return;
	}

	for (i = 0; i < cfg->size; i++) {
		tmp = rtw89_phy_read32_mask(rtwdev, cfg->table[i].addr,
					    cfg->table[i].mask);
		tmp >>= DIG_GAIN_SHIFT;
		gain_arr[i] = sign_extend32(tmp, U4_MAX_BIT) + gain_base;
		gain_base += DIG_GAIN;

		rtw89_debug(rtwdev, RTW89_DBG_DIG, "%s[%d]=%d\n",
			    msg, i, gain_arr[i]);
	}
}

static void rtw89_phy_dig_update_gain_para(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	u32 tmp;
	u8 i;

	if (!rtwdev->hal.support_igi)
		return;

	tmp = rtw89_phy_read32_mask(rtwdev, R_PATH0_IB_PKPW,
				    B_PATH0_IB_PKPW_MSK);
	dig->ib_pkpwr = sign_extend32(tmp >> DIG_GAIN_SHIFT, U8_MAX_BIT);
	dig->ib_pbk = rtw89_phy_read32_mask(rtwdev, R_PATH0_IB_PBK,
					    B_PATH0_IB_PBK_MSK);
	rtw89_debug(rtwdev, RTW89_DBG_DIG, "ib_pkpwr=%d, ib_pbk=%d\n",
		    dig->ib_pkpwr, dig->ib_pbk);

	for (i = RTW89_DIG_GAIN_LNA_G; i < RTW89_DIG_GAIN_MAX; i++)
		rtw89_phy_dig_read_gain_table(rtwdev, i);
}

static const u8 rssi_nolink = 22;
static const u8 igi_rssi_th[IGI_RSSI_TH_NUM] = {68, 84, 90, 98, 104};
static const u16 fa_th_2g[FA_TH_NUM] = {22, 44, 66, 88};
static const u16 fa_th_5g[FA_TH_NUM] = {4, 8, 12, 16};
static const u16 fa_th_nolink[FA_TH_NUM] = {196, 352, 440, 528};

static void rtw89_phy_dig_update_rssi_info(struct rtw89_dev *rtwdev)
{
	struct rtw89_phy_ch_info *ch_info = &rtwdev->ch_info;
	struct rtw89_dig_info *dig = &rtwdev->dig;
	bool is_linked = rtwdev->total_sta_assoc > 0;

	if (is_linked) {
		dig->igi_rssi = ch_info->rssi_min >> 1;
	} else {
		rtw89_debug(rtwdev, RTW89_DBG_DIG, "RSSI update : NO Link\n");
		dig->igi_rssi = rssi_nolink;
	}
}

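/* Pick the band-specific gain tables and false-alarm thresholds. The 2 GHz
 * band tolerates far more false alarms per watchdog period than 5 GHz, and
 * both fall back to the looser no-link thresholds until a station associates.
 */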
static void rtw89_phy_dig_update_para(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
	bool is_linked = rtwdev->total_sta_assoc > 0;
	const u16 *fa_th_src = NULL;

	switch (chan->band_type) {
	case RTW89_BAND_2G:
		dig->lna_gain = dig->lna_gain_g;
		dig->tia_gain = dig->tia_gain_g;
		fa_th_src = is_linked ? fa_th_2g : fa_th_nolink;
		dig->force_gaincode_idx_en = false;
		dig->dyn_pd_th_en = true;
		break;
	case RTW89_BAND_5G:
	default:
		dig->lna_gain = dig->lna_gain_a;
		dig->tia_gain = dig->tia_gain_a;
		fa_th_src = is_linked ? fa_th_5g : fa_th_nolink;
		dig->force_gaincode_idx_en = true;
		dig->dyn_pd_th_en = true;
		break;
	}
	memcpy(dig->fa_th, fa_th_src, sizeof(dig->fa_th));
	memcpy(dig->igi_rssi_th, igi_rssi_th, sizeof(dig->igi_rssi_th));
}

static const u8 pd_low_th_offset = 16, dynamic_igi_min = 0x20;
static const u8 igi_max_performance_mode = 0x5a;
static const u8 dynamic_pd_threshold_max;

static void rtw89_phy_dig_para_reset(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;

	dig->cur_gaincode.lna_idx = LNA_IDX_MAX;
	dig->cur_gaincode.tia_idx = TIA_IDX_MAX;
	dig->cur_gaincode.rxb_idx = RXB_IDX_MAX;
	dig->force_gaincode.lna_idx = LNA_IDX_MAX;
	dig->force_gaincode.tia_idx = TIA_IDX_MAX;
	dig->force_gaincode.rxb_idx = RXB_IDX_MAX;

	dig->dyn_igi_max = igi_max_performance_mode;
	dig->dyn_igi_min = dynamic_igi_min;
	dig->dyn_pd_th_max = dynamic_pd_threshold_max;
	dig->pd_low_th_ofst = pd_low_th_offset;
	dig->is_linked_pre = false;
}

static void rtw89_phy_dig_init(struct rtw89_dev *rtwdev)
{
	rtw89_phy_dig_update_gain_para(rtwdev);
	rtw89_phy_dig_reset(rtwdev);
}

static u8 rtw89_phy_dig_lna_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	u8 lna_idx;

	if (rssi < dig->igi_rssi_th[0])
		lna_idx = RTW89_DIG_GAIN_LNA_IDX6;
	else if (rssi < dig->igi_rssi_th[1])
		lna_idx = RTW89_DIG_GAIN_LNA_IDX5;
	else if (rssi < dig->igi_rssi_th[2])
		lna_idx = RTW89_DIG_GAIN_LNA_IDX4;
	else if (rssi < dig->igi_rssi_th[3])
		lna_idx = RTW89_DIG_GAIN_LNA_IDX3;
	else if (rssi < dig->igi_rssi_th[4])
		lna_idx = RTW89_DIG_GAIN_LNA_IDX2;
	else
		lna_idx = RTW89_DIG_GAIN_LNA_IDX1;

	return lna_idx;
}

static u8 rtw89_phy_dig_tia_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	u8 tia_idx;

	if (rssi < dig->igi_rssi_th[0])
		tia_idx = RTW89_DIG_GAIN_TIA_IDX1;
	else
		tia_idx = RTW89_DIG_GAIN_TIA_IDX0;

	return tia_idx;
}

#define IB_PBK_BASE 110
#define WB_RSSI_BASE 10
static u8 rtw89_phy_dig_rxb_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi,
					struct rtw89_agc_gaincode_set *set)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	s8 lna_gain = dig->lna_gain[set->lna_idx];
	s8 tia_gain = dig->tia_gain[set->tia_idx];
	s32 wb_rssi = rssi + lna_gain + tia_gain;
	s32 rxb_idx_tmp = IB_PBK_BASE + WB_RSSI_BASE;
	u8 rxb_idx;

	rxb_idx_tmp += dig->ib_pkpwr - dig->ib_pbk - wb_rssi;
	rxb_idx = clamp_t(s32, rxb_idx_tmp, RXB_IDX_MIN, RXB_IDX_MAX);

	rtw89_debug(rtwdev, RTW89_DBG_DIG, "wb_rssi=%03d, rxb_idx_tmp=%03d\n",
		    wb_rssi, rxb_idx_tmp);

	return rxb_idx;
}

static void rtw89_phy_dig_gaincode_by_rssi(struct rtw89_dev *rtwdev, u8 rssi,
					   struct rtw89_agc_gaincode_set *set)
{
	set->lna_idx = rtw89_phy_dig_lna_idx_by_rssi(rtwdev, rssi);
	set->tia_idx = rtw89_phy_dig_tia_idx_by_rssi(rtwdev, rssi);
	set->rxb_idx = rtw89_phy_dig_rxb_idx_by_rssi(rtwdev, rssi, set);

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "final_rssi=%03d, (lna,tia,rab)=(%d,%d,%02d)\n",
		    rssi, set->lna_idx, set->tia_idx, set->rxb_idx);
}

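/* Classify the environment from the IFS-CLM false-alarm ratio (CCK + OFDM,
 * in permille) into a noisy level, then bump the IGI offset by
 * IGI_OFFSET_MUL per level, capped at IGI_OFFSET_MAX. For example, with the
 * linked 2 GHz thresholds {22, 44, 66, 88}, a ratio of 50 lands in level 2
 * and adds a 4 dB offset.
 */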
#define IGI_OFFSET_MAX 25
#define IGI_OFFSET_MUL 2
static void rtw89_phy_dig_igi_offset_by_env(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	enum rtw89_dig_noisy_level noisy_lv;
	u8 igi_offset = dig->fa_rssi_ofst;
	u16 fa_ratio = 0;

	fa_ratio = env->ifs_clm_cck_fa_permil + env->ifs_clm_ofdm_fa_permil;

	if (fa_ratio < dig->fa_th[0])
		noisy_lv = RTW89_DIG_NOISY_LEVEL0;
	else if (fa_ratio < dig->fa_th[1])
		noisy_lv = RTW89_DIG_NOISY_LEVEL1;
	else if (fa_ratio < dig->fa_th[2])
		noisy_lv = RTW89_DIG_NOISY_LEVEL2;
	else if (fa_ratio < dig->fa_th[3])
		noisy_lv = RTW89_DIG_NOISY_LEVEL3;
	else
		noisy_lv = RTW89_DIG_NOISY_LEVEL_MAX;

	if (noisy_lv == RTW89_DIG_NOISY_LEVEL0 && igi_offset < 2)
		igi_offset = 0;
	else
		igi_offset += noisy_lv * IGI_OFFSET_MUL;

	igi_offset = min_t(u8, igi_offset, IGI_OFFSET_MAX);
	dig->fa_rssi_ofst = igi_offset;

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "fa_th: [+6 (%d) +4 (%d) +2 (%d) 0 (%d) -2 ]\n",
		    dig->fa_th[3], dig->fa_th[2], dig->fa_th[1], dig->fa_th[0]);

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "fa(CCK,OFDM,ALL)=(%d,%d,%d)%%, noisy_lv=%d, ofst=%d\n",
		    env->ifs_clm_cck_fa_permil, env->ifs_clm_ofdm_fa_permil,
		    env->ifs_clm_cck_fa_permil + env->ifs_clm_ofdm_fa_permil,
		    noisy_lv, igi_offset);
}

static void rtw89_phy_dig_set_lna_idx(struct rtw89_dev *rtwdev, u8 lna_idx)
{
	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;

	rtw89_phy_write32_mask(rtwdev, dig_regs->p0_lna_init.addr,
			       dig_regs->p0_lna_init.mask, lna_idx);
	rtw89_phy_write32_mask(rtwdev, dig_regs->p1_lna_init.addr,
			       dig_regs->p1_lna_init.mask, lna_idx);
}

static void rtw89_phy_dig_set_tia_idx(struct rtw89_dev *rtwdev, u8 tia_idx)
{
	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;

	rtw89_phy_write32_mask(rtwdev, dig_regs->p0_tia_init.addr,
			       dig_regs->p0_tia_init.mask, tia_idx);
	rtw89_phy_write32_mask(rtwdev, dig_regs->p1_tia_init.addr,
			       dig_regs->p1_tia_init.mask, tia_idx);
}

static void rtw89_phy_dig_set_rxb_idx(struct rtw89_dev *rtwdev, u8 rxb_idx)
{
	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;

	rtw89_phy_write32_mask(rtwdev, dig_regs->p0_rxb_init.addr,
			       dig_regs->p0_rxb_init.mask, rxb_idx);
	rtw89_phy_write32_mask(rtwdev, dig_regs->p1_rxb_init.addr,
			       dig_regs->p1_rxb_init.mask, rxb_idx);
}

static void rtw89_phy_dig_set_igi_cr(struct rtw89_dev *rtwdev,
				     const struct rtw89_agc_gaincode_set set)
{
	if (!rtwdev->hal.support_igi)
		return;

	rtw89_phy_dig_set_lna_idx(rtwdev, set.lna_idx);
	rtw89_phy_dig_set_tia_idx(rtwdev, set.tia_idx);
	rtw89_phy_dig_set_rxb_idx(rtwdev, set.rxb_idx);

	rtw89_debug(rtwdev, RTW89_DBG_DIG, "Set (lna,tia,rxb)=((%d,%d,%02d))\n",
		    set.lna_idx, set.tia_idx, set.rxb_idx);
}

static void rtw89_phy_dig_sdagc_follow_pagc_config(struct rtw89_dev *rtwdev,
						   bool enable)
{
	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;

	rtw89_phy_write32_mask(rtwdev, dig_regs->p0_p20_pagcugc_en.addr,
			       dig_regs->p0_p20_pagcugc_en.mask, enable);
	rtw89_phy_write32_mask(rtwdev, dig_regs->p0_s20_pagcugc_en.addr,
			       dig_regs->p0_s20_pagcugc_en.mask, enable);
	rtw89_phy_write32_mask(rtwdev, dig_regs->p1_p20_pagcugc_en.addr,
			       dig_regs->p1_p20_pagcugc_en.mask, enable);
	rtw89_phy_write32_mask(rtwdev, dig_regs->p1_s20_pagcugc_en.addr,
			       dig_regs->p1_s20_pagcugc_en.mask, enable);

	rtw89_debug(rtwdev, RTW89_DBG_DIG, "sdagc_follow_pagc=%d\n", enable);
}

static void rtw89_phy_dig_config_igi(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;

	if (!rtwdev->hal.support_igi)
		return;

	if (dig->force_gaincode_idx_en) {
		rtw89_phy_dig_set_igi_cr(rtwdev, dig->force_gaincode);
		rtw89_debug(rtwdev, RTW89_DBG_DIG,
			    "Force gaincode index enabled.\n");
	} else {
		rtw89_phy_dig_gaincode_by_rssi(rtwdev, dig->igi_fa_rssi,
					       &dig->cur_gaincode);
		rtw89_phy_dig_set_igi_cr(rtwdev, dig->cur_gaincode);
	}
}

static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev, u8 rssi,
				    bool enable)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
	enum rtw89_bandwidth cbw = chan->band_width;
	struct rtw89_dig_info *dig = &rtwdev->dig;
	u8 final_rssi = 0, under_region = dig->pd_low_th_ofst;
	u8 ofdm_cca_th;
	s8 cck_cca_th;
	u32 pd_val = 0;

	if (rtwdev->chip->chip_gen == RTW89_CHIP_AX)
		under_region += PD_TH_SB_FLTR_CMP_VAL;

	switch (cbw) {
	case RTW89_CHANNEL_WIDTH_40:
		under_region += PD_TH_BW40_CMP_VAL;
		break;
	case RTW89_CHANNEL_WIDTH_80:
		under_region += PD_TH_BW80_CMP_VAL;
		break;
	case RTW89_CHANNEL_WIDTH_160:
		under_region += PD_TH_BW160_CMP_VAL;
		break;
	case RTW89_CHANNEL_WIDTH_20:
		fallthrough;
	default:
		under_region += PD_TH_BW20_CMP_VAL;
		break;
	}

	dig->dyn_pd_th_max = dig->igi_rssi;

	final_rssi = min_t(u8, rssi, dig->igi_rssi);
	ofdm_cca_th = clamp_t(u8, final_rssi, PD_TH_MIN_RSSI + under_region,
			      PD_TH_MAX_RSSI + under_region);

	if (enable) {
		pd_val = (ofdm_cca_th - under_region - PD_TH_MIN_RSSI) >> 1;
		rtw89_debug(rtwdev, RTW89_DBG_DIG,
			    "igi=%d, ofdm_ccaTH=%d, backoff=%d, PD_low=%d\n",
			    final_rssi, ofdm_cca_th, under_region, pd_val);
	} else {
		rtw89_debug(rtwdev, RTW89_DBG_DIG,
			    "Dynamic PD th disabled, Set PD_low_bd=0\n");
	}

	rtw89_phy_write32_mask(rtwdev, dig_regs->seg0_pd_reg,
			       dig_regs->pd_lower_bound_mask, pd_val);
	rtw89_phy_write32_mask(rtwdev, dig_regs->seg0_pd_reg,
			       dig_regs->pd_spatial_reuse_en, enable);

	if (!rtwdev->hal.support_cckpd)
		return;

	cck_cca_th = max_t(s8, final_rssi - under_region, CCKPD_TH_MIN_RSSI);
	pd_val = (u32)(cck_cca_th - IGI_RSSI_MAX);

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "igi=%d, cck_ccaTH=%d, backoff=%d, cck_PD_low=((%d))dB\n",
		    final_rssi, cck_cca_th, under_region, pd_val);

	rtw89_phy_write32_mask(rtwdev, dig_regs->bmode_pd_reg,
			       dig_regs->bmode_cca_rssi_limit_en, enable);
	rtw89_phy_write32_mask(rtwdev, dig_regs->bmode_pd_lower_bound_reg,
			       dig_regs->bmode_rssi_nocca_low_th_mask, pd_val);
}

void rtw89_phy_dig_reset(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;

	dig->bypass_dig = false;
	rtw89_phy_dig_para_reset(rtwdev);
	rtw89_phy_dig_set_igi_cr(rtwdev, dig->force_gaincode);
	rtw89_phy_dig_dyn_pd_th(rtwdev, rssi_nolink, false);
	rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, false);
	rtw89_phy_dig_update_para(rtwdev);
}

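/* DIG watchdog entry. The dynamic IGI window is anchored at
 * igi_min = max(igi_rssi - IGI_RSSI_MIN, 0); e.g. igi_rssi = 0x3c gives
 * igi_min = 0x32, dyn_igi_max = min(0x32 + 25, 0x5a) = 0x4b and
 * dyn_igi_min = max(0x32, ABS_IGI_MIN) = 0x32, inside which the
 * false-alarm offset is clamped.
 */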
#define IGI_RSSI_MIN 10
#define ABS_IGI_MIN 0xc
void rtw89_phy_dig(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	bool is_linked = rtwdev->total_sta_assoc > 0;
	u8 igi_min;

	if (unlikely(dig->bypass_dig)) {
		dig->bypass_dig = false;
		return;
	}

	rtw89_phy_dig_update_rssi_info(rtwdev);

	if (!dig->is_linked_pre && is_linked) {
		rtw89_debug(rtwdev, RTW89_DBG_DIG, "First connected\n");
		rtw89_phy_dig_update_para(rtwdev);
		dig->igi_fa_rssi = dig->igi_rssi;
	} else if (dig->is_linked_pre && !is_linked) {
		rtw89_debug(rtwdev, RTW89_DBG_DIG, "First disconnected\n");
		rtw89_phy_dig_update_para(rtwdev);
		dig->igi_fa_rssi = dig->igi_rssi;
	}
	dig->is_linked_pre = is_linked;

	rtw89_phy_dig_igi_offset_by_env(rtwdev);

	igi_min = max_t(int, dig->igi_rssi - IGI_RSSI_MIN, 0);
	dig->dyn_igi_max = min(igi_min + IGI_OFFSET_MAX, igi_max_performance_mode);
	dig->dyn_igi_min = max(igi_min, ABS_IGI_MIN);

	if (dig->dyn_igi_max >= dig->dyn_igi_min) {
		dig->igi_fa_rssi += dig->fa_rssi_ofst;
		dig->igi_fa_rssi = clamp(dig->igi_fa_rssi, dig->dyn_igi_min,
					 dig->dyn_igi_max);
	} else {
		dig->igi_fa_rssi = dig->dyn_igi_max;
	}

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "rssi=%03d, dyn_joint(max,min)=(%d,%d), final_rssi=%d\n",
		    dig->igi_rssi, dig->dyn_igi_max, dig->dyn_igi_min,
		    dig->igi_fa_rssi);

	rtw89_phy_dig_config_igi(rtwdev);

	rtw89_phy_dig_dyn_pd_th(rtwdev, dig->igi_fa_rssi, dig->dyn_pd_th_en);

	if (dig->dyn_pd_th_en && dig->igi_fa_rssi > dig->dyn_pd_th_max)
		rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, true);
	else
		rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, false);
}

static void __rtw89_phy_tx_path_div_sta_iter(struct rtw89_dev *rtwdev,
					     struct rtw89_sta_link *rtwsta_link)
{
	struct rtw89_hal *hal = &rtwdev->hal;
	u8 rssi_a, rssi_b;
	u32 candidate;

	rssi_a = ewma_rssi_read(&rtwsta_link->rssi[RF_PATH_A]);
	rssi_b = ewma_rssi_read(&rtwsta_link->rssi[RF_PATH_B]);

	if (rssi_a > rssi_b + RTW89_TX_DIV_RSSI_RAW_TH)
		candidate = RF_A;
	else if (rssi_b > rssi_a + RTW89_TX_DIV_RSSI_RAW_TH)
		candidate = RF_B;
	else
		return;

	if (hal->antenna_tx == candidate)
		return;

	hal->antenna_tx = candidate;
	rtw89_fw_h2c_txpath_cmac_tbl(rtwdev, rtwsta_link);

	if (hal->antenna_tx == RF_A) {
		rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE, B_P0_RFMODE_MUX, 0x12);
		rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE, B_P1_RFMODE_MUX, 0x11);
	} else if (hal->antenna_tx == RF_B) {
		rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE, B_P0_RFMODE_MUX, 0x11);
		rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE, B_P1_RFMODE_MUX, 0x12);
	}
}

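/* mac80211 station iterator for TX path diversity: acts on the first
 * non-TDLS station-role link found, then stops via the *done flag. MLD
 * interfaces are rejected outright since mixing path_div with multi-link
 * is not supported here.
 */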
static void rtw89_phy_tx_path_div_sta_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
	struct rtw89_dev *rtwdev = rtwsta->rtwdev;
	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
	struct rtw89_vif_link *rtwvif_link;
	struct rtw89_sta_link *rtwsta_link;
	unsigned int link_id;
	bool *done = data;

	if (WARN(ieee80211_vif_is_mld(vif), "MLD mix path_div\n"))
		return;

	if (sta->tdls)
		return;

	if (*done)
		return;

	rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id) {
		rtwvif_link = rtwsta_link->rtwvif_link;
		if (rtwvif_link->wifi_role != RTW89_WIFI_ROLE_STATION)
			continue;

		*done = true;
		__rtw89_phy_tx_path_div_sta_iter(rtwdev, rtwsta_link);
		return;
	}
}

void rtw89_phy_tx_path_div_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_hal *hal = &rtwdev->hal;
	bool done = false;

	if (!hal->tx_path_diversity)
		return;

	ieee80211_iterate_stations_atomic(rtwdev->hw,
					  rtw89_phy_tx_path_div_sta_iter,
					  &done);
}

#define ANTDIV_MAIN 0
#define ANTDIV_AUX 1

static void rtw89_phy_antdiv_set_ant(struct rtw89_dev *rtwdev)
{
	struct rtw89_hal *hal = &rtwdev->hal;
	u8 default_ant, optional_ant;

	if (!hal->ant_diversity || hal->antenna_tx == 0)
		return;

	if (hal->antenna_tx == RF_B) {
		default_ant = ANTDIV_AUX;
		optional_ant = ANTDIV_MAIN;
	} else {
		default_ant = ANTDIV_MAIN;
		optional_ant = ANTDIV_AUX;
	}

	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_CGCS_CTRL,
			      default_ant, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_RX_ORI,
			      default_ant, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_RX_ALT,
			      optional_ant, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_TX_ORI,
			      default_ant, RTW89_PHY_0);
}

static void rtw89_phy_swap_hal_antenna(struct rtw89_dev *rtwdev)
{
	struct rtw89_hal *hal = &rtwdev->hal;

	hal->antenna_rx = hal->antenna_rx == RF_A ? RF_B : RF_A;
	hal->antenna_tx = hal->antenna_rx;
}

static void rtw89_phy_antdiv_decision_state(struct rtw89_dev *rtwdev)
{
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
	struct rtw89_hal *hal = &rtwdev->hal;
	bool no_change = false;
	u8 main_rssi, aux_rssi;
	u8 main_evm, aux_evm;
	u32 candidate;

	antdiv->get_stats = false;
	antdiv->training_count = 0;

	main_rssi = rtw89_phy_antdiv_sts_instance_get_rssi(&antdiv->main_stats);
	main_evm = rtw89_phy_antdiv_sts_instance_get_evm(&antdiv->main_stats);
	aux_rssi = rtw89_phy_antdiv_sts_instance_get_rssi(&antdiv->aux_stats);
	aux_evm = rtw89_phy_antdiv_sts_instance_get_evm(&antdiv->aux_stats);

	if (main_evm > aux_evm + ANTDIV_EVM_DIFF_TH)
		candidate = RF_A;
	else if (aux_evm > main_evm + ANTDIV_EVM_DIFF_TH)
		candidate = RF_B;
	else if (main_rssi > aux_rssi + RTW89_TX_DIV_RSSI_RAW_TH)
		candidate = RF_A;
	else if (aux_rssi > main_rssi + RTW89_TX_DIV_RSSI_RAW_TH)
		candidate = RF_B;
	else
		no_change = true;

	if (no_change) {
		/* swap back from training antenna to original */
		rtw89_phy_swap_hal_antenna(rtwdev);
		return;
	}

	hal->antenna_tx = candidate;
	hal->antenna_rx = candidate;
}

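/* One training round alternates two phases: on even counts start (or keep)
 * collecting per-antenna stats for ANTDIV_TRAINNING_INTVL ms, on odd counts
 * pause collection, swap to the other antenna and let it settle for
 * ANTDIV_DELAY ms before the next sampling window.
 */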
static void rtw89_phy_antdiv_training_state(struct rtw89_dev *rtwdev)
{
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
	u64 state_period;

	if (antdiv->training_count % 2 == 0) {
		if (antdiv->training_count == 0)
			rtw89_phy_antdiv_sts_reset(rtwdev);

		antdiv->get_stats = true;
		state_period = msecs_to_jiffies(ANTDIV_TRAINNING_INTVL);
	} else {
		antdiv->get_stats = false;
		state_period = msecs_to_jiffies(ANTDIV_DELAY);

		rtw89_phy_swap_hal_antenna(rtwdev);
		rtw89_phy_antdiv_set_ant(rtwdev);
	}

	antdiv->training_count++;
	ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->antdiv_work,
				     state_period);
}

void rtw89_phy_antdiv_work(struct work_struct *work)
{
	struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
						antdiv_work.work);
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;

	mutex_lock(&rtwdev->mutex);

	if (antdiv->training_count <= ANTDIV_TRAINNING_CNT) {
		rtw89_phy_antdiv_training_state(rtwdev);
	} else {
		rtw89_phy_antdiv_decision_state(rtwdev);
		rtw89_phy_antdiv_set_ant(rtwdev);
	}

	mutex_unlock(&rtwdev->mutex);
}

void rtw89_phy_antdiv_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
	struct rtw89_hal *hal = &rtwdev->hal;
	u8 rssi, rssi_pre;

	if (!hal->ant_diversity || hal->ant_diversity_fixed)
		return;

	rssi = rtw89_phy_antdiv_sts_instance_get_rssi(&antdiv->target_stats);
	rssi_pre = antdiv->rssi_pre;
	antdiv->rssi_pre = rssi;
	rtw89_phy_antdiv_sts_instance_reset(&antdiv->target_stats);

	if (abs((int)rssi - (int)rssi_pre) < ANTDIV_RSSI_DIFF_TH)
		return;

	antdiv->training_count = 0;
	ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->antdiv_work, 0);
}

static void rtw89_phy_env_monitor_init(struct rtw89_dev *rtwdev)
{
	rtw89_phy_ccx_top_setting_init(rtwdev);
	rtw89_phy_ifs_clm_setting_init(rtwdev);
}

static void rtw89_phy_edcca_init(struct rtw89_dev *rtwdev)
{
	const struct rtw89_edcca_regs *edcca_regs = rtwdev->chip->edcca_regs;
	struct rtw89_edcca_bak *edcca_bak = &rtwdev->hal.edcca_bak;

	memset(edcca_bak, 0, sizeof(*edcca_bak));

	if (rtwdev->chip->chip_id == RTL8922A && rtwdev->hal.cv == CHIP_CAV) {
		rtw89_phy_set_phy_regs(rtwdev, R_TXGATING, B_TXGATING_EN, 0);
		rtw89_phy_set_phy_regs(rtwdev, R_CTLTOP, B_CTLTOP_VAL, 2);
		rtw89_phy_set_phy_regs(rtwdev, R_CTLTOP, B_CTLTOP_ON, 1);
		rtw89_phy_set_phy_regs(rtwdev, R_SPOOF_CG, B_SPOOF_CG_EN, 0);
		rtw89_phy_set_phy_regs(rtwdev, R_DFS_FFT_CG, B_DFS_CG_EN, 0);
		rtw89_phy_set_phy_regs(rtwdev, R_DFS_FFT_CG, B_DFS_FFT_EN, 0);
		rtw89_phy_set_phy_regs(rtwdev, R_SEGSND, B_SEGSND_EN, 0);
		rtw89_phy_set_phy_regs(rtwdev, R_SEGSND, B_SEGSND_EN, 1);
		rtw89_phy_set_phy_regs(rtwdev, R_DFS_FFT_CG, B_DFS_FFT_EN, 1);
	}

	rtw89_phy_write32_mask(rtwdev, edcca_regs->tx_collision_t2r_st,
			       edcca_regs->tx_collision_t2r_st_mask, 0x29);
}

void rtw89_phy_dm_init(struct rtw89_dev *rtwdev)
{
	rtw89_phy_stat_init(rtwdev);

	rtw89_chip_bb_sethw(rtwdev);

	rtw89_phy_env_monitor_init(rtwdev);
	rtw89_physts_parsing_init(rtwdev);
	rtw89_phy_dig_init(rtwdev);
	rtw89_phy_cfo_init(rtwdev);
	rtw89_phy_bb_wrap_init(rtwdev);
	rtw89_phy_edcca_init(rtwdev);
	rtw89_phy_ch_info_init(rtwdev);
	rtw89_phy_ul_tb_info_init(rtwdev);
	rtw89_phy_antdiv_init(rtwdev);
	rtw89_chip_rfe_gpio(rtwdev);
	rtw89_phy_antdiv_set_ant(rtwdev);

	rtw89_chip_rfk_hw_init(rtwdev);
	rtw89_phy_init_rf_nctl(rtwdev);
	rtw89_chip_rfk_init(rtwdev);
	rtw89_chip_set_txpwr_ctrl(rtwdev);
	rtw89_chip_power_trim(rtwdev);
	rtw89_chip_cfg_txrx_path(rtwdev);
}

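/* Program the HE BSS color and our AID into the BSS color filter once the
 * vif is associated; the color is fetched under RCU from the link's
 * bss_conf before the register writes.
 */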
void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev,
			     struct rtw89_vif_link *rtwvif_link)
{
	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_reg_def *bss_clr_vld = &chip->bss_clr_vld;
	enum rtw89_phy_idx phy_idx = rtwvif_link->phy_idx;
	struct ieee80211_bss_conf *bss_conf;
	u8 bss_color;

	rcu_read_lock();

	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
	if (!bss_conf->he_support || !vif->cfg.assoc) {
		rcu_read_unlock();
		return;
	}

	bss_color = bss_conf->he_bss_color.color;

	rcu_read_unlock();

	rtw89_phy_write32_idx(rtwdev, bss_clr_vld->addr, bss_clr_vld->mask, 0x1,
			      phy_idx);
	rtw89_phy_write32_idx(rtwdev, chip->bss_clr_map_reg, B_BSS_CLR_MAP_TGT,
			      bss_color, phy_idx);
	rtw89_phy_write32_idx(rtwdev, chip->bss_clr_map_reg, B_BSS_CLR_MAP_STAID,
			      vif->cfg.aid, phy_idx);
}

static bool rfk_chan_validate_desc(const struct rtw89_rfk_chan_desc *desc)
{
	return desc->ch != 0;
}

static bool rfk_chan_is_equivalent(const struct rtw89_rfk_chan_desc *desc,
				   const struct rtw89_chan *chan)
{
	if (!rfk_chan_validate_desc(desc))
		return false;

	if (desc->ch != chan->channel)
		return false;

	if (desc->has_band && desc->band != chan->band_type)
		return false;

	if (desc->has_bw && desc->bw != chan->band_width)
		return false;

	return true;
}

struct rfk_chan_iter_data {
	const struct rtw89_rfk_chan_desc desc;
	unsigned int found;
};

static int rfk_chan_iter_search(const struct rtw89_chan *chan, void *data)
{
	struct rfk_chan_iter_data *iter_data = data;

	if (rfk_chan_is_equivalent(&iter_data->desc, chan))
		iter_data->found++;

	return 0;
}

u8 rtw89_rfk_chan_lookup(struct rtw89_dev *rtwdev,
			 const struct rtw89_rfk_chan_desc *desc, u8 desc_nr,
			 const struct rtw89_chan *target_chan)
{
	int sel = -1;
	u8 i;

	for (i = 0; i < desc_nr; i++) {
		struct rfk_chan_iter_data iter_data = {
			.desc = desc[i],
		};

		if (rfk_chan_is_equivalent(&desc[i], target_chan))
			return i;

		rtw89_iterate_entity_chan(rtwdev, rfk_chan_iter_search, &iter_data);
		if (!iter_data.found && sel == -1)
			sel = i;
	}

	if (sel == -1) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "no idle rfk entry; force replace the first\n");
		sel = 0;
	}

	return sel;
}
EXPORT_SYMBOL(rtw89_rfk_chan_lookup);

static void
_rfk_write_rf(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	rtw89_write_rf(rtwdev, def->path, def->addr, def->mask, def->data);
}

static void
_rfk_write32_mask(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	rtw89_phy_write32_mask(rtwdev, def->addr, def->mask, def->data);
}

static void
_rfk_write32_set(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	rtw89_phy_write32_set(rtwdev, def->addr, def->mask);
}

static void
_rfk_write32_clr(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	rtw89_phy_write32_clr(rtwdev, def->addr, def->mask);
}

static void
_rfk_delay(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	udelay(def->data);
}

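/* Dispatch table for RFK table entries: each entry's flag selects the
 * operation (RF write, BB mask/set/clear write, or delay). The
 * static_assert below keeps the table in lockstep with RTW89_RFK_F_NUM.
 */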
static void
(*_rfk_handler[])(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def) = {
	[RTW89_RFK_F_WRF] = _rfk_write_rf,
	[RTW89_RFK_F_WM] = _rfk_write32_mask,
	[RTW89_RFK_F_WS] = _rfk_write32_set,
	[RTW89_RFK_F_WC] = _rfk_write32_clr,
	[RTW89_RFK_F_DELAY] = _rfk_delay,
};

static_assert(ARRAY_SIZE(_rfk_handler) == RTW89_RFK_F_NUM);

void
rtw89_rfk_parser(struct rtw89_dev *rtwdev, const struct rtw89_rfk_tbl *tbl)
{
	const struct rtw89_reg5_def *p = tbl->defs;
	const struct rtw89_reg5_def *end = tbl->defs + tbl->size;

	for (; p < end; p++)
		_rfk_handler[p->flag](rtwdev, p);
}
EXPORT_SYMBOL(rtw89_rfk_parser);

#define RTW89_TSSI_FAST_MODE_NUM 4

static const struct rtw89_reg_def rtw89_tssi_fastmode_regs_flat[RTW89_TSSI_FAST_MODE_NUM] = {
	{0xD934, 0xff0000},
	{0xD934, 0xff000000},
	{0xD938, 0xff},
	{0xD934, 0xff00},
};

static const struct rtw89_reg_def rtw89_tssi_fastmode_regs_level[RTW89_TSSI_FAST_MODE_NUM] = {
	{0xD930, 0xff0000},
	{0xD930, 0xff000000},
	{0xD934, 0xff},
	{0xD930, 0xff00},
};

static
void rtw89_phy_tssi_ctrl_set_fast_mode_cfg(struct rtw89_dev *rtwdev,
					   enum rtw89_mac_idx mac_idx,
					   enum rtw89_tssi_bandedge_cfg bandedge_cfg,
					   u32 val)
{
	const struct rtw89_reg_def *regs;
	u32 reg;
	int i;

	if (bandedge_cfg == RTW89_TSSI_BANDEDGE_FLAT)
		regs = rtw89_tssi_fastmode_regs_flat;
	else
		regs = rtw89_tssi_fastmode_regs_level;

	for (i = 0; i < RTW89_TSSI_FAST_MODE_NUM; i++) {
		reg = rtw89_mac_reg_by_idx(rtwdev, regs[i].addr, mac_idx);
		rtw89_write32_mask(rtwdev, reg, regs[i].mask, val);
	}
}

static const struct rtw89_reg_def rtw89_tssi_bandedge_regs_flat[RTW89_TSSI_SBW_NUM] = {
	{0xD91C, 0xff000000},
	{0xD920, 0xff},
	{0xD920, 0xff00},
	{0xD920, 0xff0000},
	{0xD920, 0xff000000},
	{0xD924, 0xff},
	{0xD924, 0xff00},
	{0xD914, 0xff000000},
	{0xD918, 0xff},
	{0xD918, 0xff00},
	{0xD918, 0xff0000},
	{0xD918, 0xff000000},
	{0xD91C, 0xff},
	{0xD91C, 0xff00},
	{0xD91C, 0xff0000},
};

static const struct rtw89_reg_def rtw89_tssi_bandedge_regs_level[RTW89_TSSI_SBW_NUM] = {
	{0xD910, 0xff},
	{0xD910, 0xff00},
	{0xD910, 0xff0000},
	{0xD910, 0xff000000},
	{0xD914, 0xff},
	{0xD914, 0xff00},
	{0xD914, 0xff0000},
	{0xD908, 0xff},
	{0xD908, 0xff00},
	{0xD908, 0xff0000},
	{0xD908, 0xff000000},
	{0xD90C, 0xff},
	{0xD90C, 0xff00},
	{0xD90C, 0xff0000},
	{0xD90C, 0xff000000},
};

void rtw89_phy_tssi_ctrl_set_bandedge_cfg(struct rtw89_dev *rtwdev,
					  enum rtw89_mac_idx mac_idx,
					  enum rtw89_tssi_bandedge_cfg bandedge_cfg)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_reg_def *regs;
	const u32 *data;
	u32 reg;
	int i;

	if (bandedge_cfg >= RTW89_TSSI_CFG_NUM)
		return;

	if (bandedge_cfg == RTW89_TSSI_BANDEDGE_FLAT)
		regs = rtw89_tssi_bandedge_regs_flat;
	else
		regs = rtw89_tssi_bandedge_regs_level;

	data = chip->tssi_dbw_table->data[bandedge_cfg];

	for (i = 0; i < RTW89_TSSI_SBW_NUM; i++) {
		reg = rtw89_mac_reg_by_idx(rtwdev, regs[i].addr, mac_idx);
		rtw89_write32_mask(rtwdev, reg, regs[i].mask, data[i]);
	}

	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_BANDEDGE_CFG, mac_idx);
	rtw89_write32_mask(rtwdev, reg, B_AX_BANDEDGE_CFG_IDX_MASK, bandedge_cfg);

	rtw89_phy_tssi_ctrl_set_fast_mode_cfg(rtwdev, mac_idx, bandedge_cfg,
					      data[RTW89_TSSI_SBW20]);
}
EXPORT_SYMBOL(rtw89_phy_tssi_ctrl_set_bandedge_cfg);

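/* Channel index encoding: the high nibble selects a base-channel entry in
 * rtw89_ch_base_table and the low nibble stores the offset from that base
 * in 2-channel steps (2 GHz stores the channel number directly). For
 * example, 5 GHz channel 149 matches base index 5 (149) with offset 0,
 * encoding to 0x50.
 */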
static
const u8 rtw89_ch_base_table[16] = {1, 0xff,
				    36, 100, 132, 149, 0xff,
				    1, 33, 65, 97, 129, 161, 193, 225, 0xff};
#define RTW89_CH_BASE_IDX_2G 0
#define RTW89_CH_BASE_IDX_5G_FIRST 2
#define RTW89_CH_BASE_IDX_5G_LAST 5
#define RTW89_CH_BASE_IDX_6G_FIRST 7
#define RTW89_CH_BASE_IDX_6G_LAST 14

#define RTW89_CH_BASE_IDX_MASK GENMASK(7, 4)
#define RTW89_CH_OFFSET_MASK GENMASK(3, 0)

u8 rtw89_encode_chan_idx(struct rtw89_dev *rtwdev, u8 central_ch, u8 band)
{
	u8 chan_idx;
	u8 last, first;
	u8 idx;

	switch (band) {
	case RTW89_BAND_2G:
		chan_idx = FIELD_PREP(RTW89_CH_BASE_IDX_MASK, RTW89_CH_BASE_IDX_2G) |
			   FIELD_PREP(RTW89_CH_OFFSET_MASK, central_ch);
		return chan_idx;
	case RTW89_BAND_5G:
		first = RTW89_CH_BASE_IDX_5G_FIRST;
		last = RTW89_CH_BASE_IDX_5G_LAST;
		break;
	case RTW89_BAND_6G:
		first = RTW89_CH_BASE_IDX_6G_FIRST;
		last = RTW89_CH_BASE_IDX_6G_LAST;
		break;
	default:
		rtw89_warn(rtwdev, "Unsupported band %d\n", band);
		return 0;
	}

	for (idx = last; idx >= first; idx--)
		if (central_ch >= rtw89_ch_base_table[idx])
			break;

	if (idx < first) {
		rtw89_warn(rtwdev, "Unknown band %d channel %d\n", band, central_ch);
		return 0;
	}

	chan_idx = FIELD_PREP(RTW89_CH_BASE_IDX_MASK, idx) |
		   FIELD_PREP(RTW89_CH_OFFSET_MASK,
			      (central_ch - rtw89_ch_base_table[idx]) >> 1);
	return chan_idx;
}
EXPORT_SYMBOL(rtw89_encode_chan_idx);

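/* Inverse of rtw89_encode_chan_idx(): recover the band from the base index
 * and rebuild the central channel as base + 2 * offset (2 GHz returns the
 * offset as-is).
 */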
void rtw89_decode_chan_idx(struct rtw89_dev *rtwdev, u8 chan_idx,
			   u8 *ch, enum nl80211_band *band)
{
	u8 idx, offset;

	idx = FIELD_GET(RTW89_CH_BASE_IDX_MASK, chan_idx);
	offset = FIELD_GET(RTW89_CH_OFFSET_MASK, chan_idx);

	if (idx == RTW89_CH_BASE_IDX_2G) {
		*band = NL80211_BAND_2GHZ;
		*ch = offset;
		return;
	}

	*band = idx <= RTW89_CH_BASE_IDX_5G_LAST ?
		NL80211_BAND_5GHZ : NL80211_BAND_6GHZ;
	*ch = rtw89_ch_base_table[idx] + (offset << 1);
}
EXPORT_SYMBOL(rtw89_decode_chan_idx);

void rtw89_phy_config_edcca(struct rtw89_dev *rtwdev, bool scan)
{
	const struct rtw89_edcca_regs *edcca_regs = rtwdev->chip->edcca_regs;
	struct rtw89_edcca_bak *edcca_bak = &rtwdev->hal.edcca_bak;

	if (scan) {
		edcca_bak->a =
			rtw89_phy_read32_mask(rtwdev, edcca_regs->edcca_level,
					      edcca_regs->edcca_mask);
		edcca_bak->p =
			rtw89_phy_read32_mask(rtwdev, edcca_regs->edcca_level,
					      edcca_regs->edcca_p_mask);
		edcca_bak->ppdu =
			rtw89_phy_read32_mask(rtwdev, edcca_regs->ppdu_level,
					      edcca_regs->ppdu_mask);

		rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level,
				       edcca_regs->edcca_mask, EDCCA_MAX);
		rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level,
				       edcca_regs->edcca_p_mask, EDCCA_MAX);
		rtw89_phy_write32_mask(rtwdev, edcca_regs->ppdu_level,
				       edcca_regs->ppdu_mask, EDCCA_MAX);
	} else {
		rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level,
				       edcca_regs->edcca_mask,
				       edcca_bak->a);
		rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level,
				       edcca_regs->edcca_p_mask,
				       edcca_bak->p);
		rtw89_phy_write32_mask(rtwdev, edcca_regs->ppdu_level,
				       edcca_regs->ppdu_mask,
				       edcca_bak->ppdu);
	}
}

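/* Dump the EDCCA report registers when EDCCA debugging is enabled: the
 * per-20MHz power readings (pwdb[0..7]), the per-segment flags, and the
 * FB/p20/s20/s40/s80 power values, paging through them via the
 * report-select mux (RTL8922A exposes the per-20MHz set via rpt_sel_be).
 */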
static void rtw89_phy_edcca_log(struct rtw89_dev *rtwdev)
{
	const struct rtw89_edcca_regs *edcca_regs = rtwdev->chip->edcca_regs;
	bool flag_fb, flag_p20, flag_s20, flag_s40, flag_s80;
	s8 pwdb_fb, pwdb_p20, pwdb_s20, pwdb_s40, pwdb_s80;
	u8 path, per20_bitmap;
	u8 pwdb[8];
	u32 tmp;

	if (!rtw89_debug_is_enabled(rtwdev, RTW89_DBG_EDCCA))
		return;

	if (rtwdev->chip->chip_id == RTL8922A)
		rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel_be,
				       edcca_regs->rpt_sel_be_mask, 0);

	rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel,
			       edcca_regs->rpt_sel_mask, 0);
	tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_b);
	path = u32_get_bits(tmp, B_EDCCA_RPT_B_PATH_MASK);
	flag_s80 = u32_get_bits(tmp, B_EDCCA_RPT_B_S80);
	flag_s40 = u32_get_bits(tmp, B_EDCCA_RPT_B_S40);
	flag_s20 = u32_get_bits(tmp, B_EDCCA_RPT_B_S20);
	flag_p20 = u32_get_bits(tmp, B_EDCCA_RPT_B_P20);
	flag_fb = u32_get_bits(tmp, B_EDCCA_RPT_B_FB);
	pwdb_s20 = u32_get_bits(tmp, MASKBYTE1);
	pwdb_p20 = u32_get_bits(tmp, MASKBYTE2);
	pwdb_fb = u32_get_bits(tmp, MASKBYTE3);

	rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel,
			       edcca_regs->rpt_sel_mask, 4);
	tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_b);
	pwdb_s80 = u32_get_bits(tmp, MASKBYTE1);
	pwdb_s40 = u32_get_bits(tmp, MASKBYTE2);

	per20_bitmap = rtw89_phy_read32_mask(rtwdev, edcca_regs->rpt_a,
					     MASKBYTE0);

	if (rtwdev->chip->chip_id == RTL8922A) {
		rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel_be,
				       edcca_regs->rpt_sel_be_mask, 4);
		tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_b);
		pwdb[0] = u32_get_bits(tmp, MASKBYTE3);
		pwdb[1] = u32_get_bits(tmp, MASKBYTE2);
		pwdb[2] = u32_get_bits(tmp, MASKBYTE1);
		pwdb[3] = u32_get_bits(tmp, MASKBYTE0);

		rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel_be,
				       edcca_regs->rpt_sel_be_mask, 5);
		tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_b);
		pwdb[4] = u32_get_bits(tmp, MASKBYTE3);
		pwdb[5] = u32_get_bits(tmp, MASKBYTE2);
		pwdb[6] = u32_get_bits(tmp, MASKBYTE1);
		pwdb[7] = u32_get_bits(tmp, MASKBYTE0);
	} else {
		rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel,
				       edcca_regs->rpt_sel_mask, 0);
		tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_a);
		pwdb[0] = u32_get_bits(tmp, MASKBYTE3);
		pwdb[1] = u32_get_bits(tmp, MASKBYTE2);

		rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel,
				       edcca_regs->rpt_sel_mask, 1);
		tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_a);
		pwdb[2] = u32_get_bits(tmp, MASKBYTE3);
		pwdb[3] = u32_get_bits(tmp, MASKBYTE2);

		rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel,
				       edcca_regs->rpt_sel_mask, 2);
		tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_a);
		pwdb[4] = u32_get_bits(tmp, MASKBYTE3);
		pwdb[5] = u32_get_bits(tmp, MASKBYTE2);

		rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel,
				       edcca_regs->rpt_sel_mask, 3);
		tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_a);
		pwdb[6] = u32_get_bits(tmp, MASKBYTE3);
		pwdb[7] = u32_get_bits(tmp, MASKBYTE2);
	}

	rtw89_debug(rtwdev, RTW89_DBG_EDCCA,
		    "[EDCCA]: edcca_bitmap = %04x\n", per20_bitmap);

	rtw89_debug(rtwdev, RTW89_DBG_EDCCA,
		    "[EDCCA]: pwdb per20{0,1,2,3,4,5,6,7} = {%d,%d,%d,%d,%d,%d,%d,%d}(dBm)\n",
		    pwdb[0], pwdb[1], pwdb[2], pwdb[3], pwdb[4], pwdb[5],
		    pwdb[6], pwdb[7]);

	rtw89_debug(rtwdev, RTW89_DBG_EDCCA,
		    "[EDCCA]: path=%d, flag {FB,p20,s20,s40,s80} = {%d,%d,%d,%d,%d}\n",
		    path, flag_fb, flag_p20, flag_s20, flag_s40, flag_s80);

	rtw89_debug(rtwdev, RTW89_DBG_EDCCA,
		    "[EDCCA]: pwdb {FB,p20,s20,s40,s80} = {%d,%d,%d,%d,%d}(dBm)\n",
		    pwdb_fb, pwdb_p20, pwdb_s20, pwdb_s40, pwdb_s80);
}

static u8 rtw89_phy_edcca_get_thre_by_rssi(struct rtw89_dev *rtwdev)
{
	struct rtw89_phy_ch_info *ch_info = &rtwdev->ch_info;
	bool is_linked = rtwdev->total_sta_assoc > 0;
	u8 rssi_min = ch_info->rssi_min >> 1;
	u8 edcca_thre;

	if (!is_linked) {
		edcca_thre = EDCCA_MAX;
	} else {
		edcca_thre = rssi_min - RSSI_UNIT_CONVER + EDCCA_UNIT_CONVER -
			     EDCCA_TH_REF;
		edcca_thre = max_t(u8, edcca_thre, EDCCA_TH_L2H_LB);
	}

	return edcca_thre;
}

void rtw89_phy_edcca_thre_calc(struct rtw89_dev *rtwdev)
{
	const struct rtw89_edcca_regs *edcca_regs = rtwdev->chip->edcca_regs;
	struct rtw89_edcca_bak *edcca_bak = &rtwdev->hal.edcca_bak;
	u8 th;

	th = rtw89_phy_edcca_get_thre_by_rssi(rtwdev);
	if (th == edcca_bak->th_old)
		return;

	edcca_bak->th_old = th;

	rtw89_debug(rtwdev, RTW89_DBG_EDCCA,
		    "[EDCCA]: Normal Mode, EDCCA_th = %d\n", th);

	rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level,
			       edcca_regs->edcca_mask, th);
	rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level,
			       edcca_regs->edcca_p_mask, th);
	rtw89_phy_write32_mask(rtwdev, edcca_regs->ppdu_level,
			       edcca_regs->ppdu_mask, th);
}

void rtw89_phy_edcca_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_hal *hal = &rtwdev->hal;

	if (hal->disabled_dm_bitmap & BIT(RTW89_DM_DYNAMIC_EDCCA))
		return;

	rtw89_phy_edcca_thre_calc(rtwdev);
	rtw89_phy_edcca_log(rtwdev);
}

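/* Map the current MLO/DBCC mode and PHY index to the RF path(s) a
 * calibration should cover, e.g. 1+1 with one RF per PHY calibrates a
 * single path while 2+0 modes cover both A and B from either PHY.
 */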
enum rtw89_rf_path_bit rtw89_phy_get_kpath(struct rtw89_dev *rtwdev,
					   enum rtw89_phy_idx phy_idx)
{
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[RFK] kpath dbcc_en: 0x%x, mode=0x%x, PHY%d\n",
		    rtwdev->dbcc_en, rtwdev->mlo_dbcc_mode, phy_idx);

	switch (rtwdev->mlo_dbcc_mode) {
	case MLO_1_PLUS_1_1RF:
		if (phy_idx == RTW89_PHY_0)
			return RF_A;
		else
			return RF_B;
	case MLO_1_PLUS_1_2RF:
		if (phy_idx == RTW89_PHY_0)
			return RF_A;
		else
			return RF_D;
	case MLO_0_PLUS_2_1RF:
	case MLO_2_PLUS_0_1RF:
		/* for both PHY 0/1 */
		return RF_AB;
	case MLO_0_PLUS_2_2RF:
	case MLO_2_PLUS_0_2RF:
	case MLO_2_PLUS_2_2RF:
	default:
		if (phy_idx == RTW89_PHY_0)
			return RF_AB;
		else
			return RF_CD;
	}
}
EXPORT_SYMBOL(rtw89_phy_get_kpath);

enum rtw89_rf_path rtw89_phy_get_syn_sel(struct rtw89_dev *rtwdev,
					 enum rtw89_phy_idx phy_idx)
{
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[RFK] kpath dbcc_en: 0x%x, mode=0x%x, PHY%d\n",
		    rtwdev->dbcc_en, rtwdev->mlo_dbcc_mode, phy_idx);

	switch (rtwdev->mlo_dbcc_mode) {
	case MLO_1_PLUS_1_1RF:
		if (phy_idx == RTW89_PHY_0)
			return RF_PATH_A;
		else
			return RF_PATH_B;
	case MLO_1_PLUS_1_2RF:
		if (phy_idx == RTW89_PHY_0)
			return RF_PATH_A;
		else
			return RF_PATH_D;
	case MLO_0_PLUS_2_1RF:
	case MLO_2_PLUS_0_1RF:
		if (phy_idx == RTW89_PHY_0)
			return RF_PATH_A;
		else
			return RF_PATH_B;
	case MLO_0_PLUS_2_2RF:
	case MLO_2_PLUS_0_2RF:
	case MLO_2_PLUS_2_2RF:
	default:
		if (phy_idx == RTW89_PHY_0)
			return RF_PATH_A;
		else
			return RF_PATH_C;
	}
}
EXPORT_SYMBOL(rtw89_phy_get_syn_sel);

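/* AX-generation register maps consumed through rtwdev->chip->phy_def:
 * CCX/IFS-CLM counters, PHY status parsing control and CFO compensation.
 */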
static const struct rtw89_ccx_regs rtw89_ccx_regs_ax = {
	.setting_addr = R_CCX,
	.edcca_opt_mask = B_CCX_EDCCA_OPT_MSK,
	.measurement_trig_mask = B_MEASUREMENT_TRIG_MSK,
	.trig_opt_mask = B_CCX_TRIG_OPT_MSK,
	.en_mask = B_CCX_EN_MSK,
	.ifs_cnt_addr = R_IFS_COUNTER,
	.ifs_clm_period_mask = B_IFS_CLM_PERIOD_MSK,
	.ifs_clm_cnt_unit_mask = B_IFS_CLM_COUNTER_UNIT_MSK,
	.ifs_clm_cnt_clear_mask = B_IFS_COUNTER_CLR_MSK,
	.ifs_collect_en_mask = B_IFS_COLLECT_EN,
	.ifs_t1_addr = R_IFS_T1,
	.ifs_t1_th_h_mask = B_IFS_T1_TH_HIGH_MSK,
	.ifs_t1_en_mask = B_IFS_T1_EN_MSK,
	.ifs_t1_th_l_mask = B_IFS_T1_TH_LOW_MSK,
	.ifs_t2_addr = R_IFS_T2,
	.ifs_t2_th_h_mask = B_IFS_T2_TH_HIGH_MSK,
	.ifs_t2_en_mask = B_IFS_T2_EN_MSK,
	.ifs_t2_th_l_mask = B_IFS_T2_TH_LOW_MSK,
	.ifs_t3_addr = R_IFS_T3,
	.ifs_t3_th_h_mask = B_IFS_T3_TH_HIGH_MSK,
	.ifs_t3_en_mask = B_IFS_T3_EN_MSK,
	.ifs_t3_th_l_mask = B_IFS_T3_TH_LOW_MSK,
	.ifs_t4_addr = R_IFS_T4,
	.ifs_t4_th_h_mask = B_IFS_T4_TH_HIGH_MSK,
	.ifs_t4_en_mask = B_IFS_T4_EN_MSK,
	.ifs_t4_th_l_mask = B_IFS_T4_TH_LOW_MSK,
	.ifs_clm_tx_cnt_addr = R_IFS_CLM_TX_CNT,
	.ifs_clm_edcca_excl_cca_fa_mask = B_IFS_CLM_EDCCA_EXCLUDE_CCA_FA_MSK,
	.ifs_clm_tx_cnt_msk = B_IFS_CLM_TX_CNT_MSK,
	.ifs_clm_cca_addr = R_IFS_CLM_CCA,
	.ifs_clm_ofdmcca_excl_fa_mask = B_IFS_CLM_OFDMCCA_EXCLUDE_FA_MSK,
	.ifs_clm_cckcca_excl_fa_mask = B_IFS_CLM_CCKCCA_EXCLUDE_FA_MSK,
	.ifs_clm_fa_addr = R_IFS_CLM_FA,
	.ifs_clm_ofdm_fa_mask = B_IFS_CLM_OFDM_FA_MSK,
	.ifs_clm_cck_fa_mask = B_IFS_CLM_CCK_FA_MSK,
	.ifs_his_addr = R_IFS_HIS,
	.ifs_t4_his_mask = B_IFS_T4_HIS_MSK,
	.ifs_t3_his_mask = B_IFS_T3_HIS_MSK,
	.ifs_t2_his_mask = B_IFS_T2_HIS_MSK,
	.ifs_t1_his_mask = B_IFS_T1_HIS_MSK,
	.ifs_avg_l_addr = R_IFS_AVG_L,
	.ifs_t2_avg_mask = B_IFS_T2_AVG_MSK,
	.ifs_t1_avg_mask = B_IFS_T1_AVG_MSK,
	.ifs_avg_h_addr = R_IFS_AVG_H,
	.ifs_t4_avg_mask = B_IFS_T4_AVG_MSK,
	.ifs_t3_avg_mask = B_IFS_T3_AVG_MSK,
	.ifs_cca_l_addr = R_IFS_CCA_L,
	.ifs_t2_cca_mask = B_IFS_T2_CCA_MSK,
	.ifs_t1_cca_mask = B_IFS_T1_CCA_MSK,
	.ifs_cca_h_addr = R_IFS_CCA_H,
	.ifs_t4_cca_mask = B_IFS_T4_CCA_MSK,
	.ifs_t3_cca_mask = B_IFS_T3_CCA_MSK,
	.ifs_total_addr = R_IFSCNT,
	.ifs_cnt_done_mask = B_IFSCNT_DONE_MSK,
	.ifs_total_mask = B_IFSCNT_TOTAL_CNT_MSK,
};

static const struct rtw89_physts_regs rtw89_physts_regs_ax = {
	.setting_addr = R_PLCP_HISTOGRAM,
	.dis_trigger_fail_mask = B_STS_DIS_TRIG_BY_FAIL,
	.dis_trigger_brk_mask = B_STS_DIS_TRIG_BY_BRK,
};

static const struct rtw89_cfo_regs rtw89_cfo_regs_ax = {
	.comp = R_DCFO_WEIGHT,
	.weighting_mask = B_DCFO_WEIGHT_MSK,
	.comp_seg0 = R_DCFO_OPT,
	.valid_0_mask = B_DCFO_OPT_EN,
};

const struct rtw89_phy_gen_def rtw89_phy_gen_ax = {
	.cr_base = 0x10000,
	.ccx = &rtw89_ccx_regs_ax,
	.physts = &rtw89_physts_regs_ax,
	.cfo = &rtw89_cfo_regs_ax,
	.phy0_phy1_offset = rtw89_phy0_phy1_offset_ax,
	.config_bb_gain = rtw89_phy_config_bb_gain_ax,
	.preinit_rf_nctl = rtw89_phy_preinit_rf_nctl_ax,
	.bb_wrap_init = NULL,
	.ch_info_init = NULL,

	.set_txpwr_byrate = rtw89_phy_set_txpwr_byrate_ax,
	.set_txpwr_offset = rtw89_phy_set_txpwr_offset_ax,
	.set_txpwr_limit = rtw89_phy_set_txpwr_limit_ax,
	.set_txpwr_limit_ru = rtw89_phy_set_txpwr_limit_ru_ax,
};
EXPORT_SYMBOL(rtw89_phy_gen_ax);