1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* Copyright(c) 2019-2020 Realtek Corporation 3 */ 4 5 #include "coex.h" 6 #include "debug.h" 7 #include "fw.h" 8 #include "mac.h" 9 #include "phy.h" 10 #include "ps.h" 11 #include "reg.h" 12 #include "sar.h" 13 #include "txrx.h" 14 #include "util.h" 15 16 static u32 rtw89_phy0_phy1_offset(struct rtw89_dev *rtwdev, u32 addr) 17 { 18 const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; 19 20 return phy->phy0_phy1_offset(rtwdev, addr); 21 } 22 23 static u16 get_max_amsdu_len(struct rtw89_dev *rtwdev, 24 const struct rtw89_ra_report *report) 25 { 26 u32 bit_rate = report->bit_rate; 27 28 /* lower than ofdm, do not aggregate */ 29 if (bit_rate < 550) 30 return 1; 31 32 /* avoid AMSDU for legacy rate */ 33 if (report->might_fallback_legacy) 34 return 1; 35 36 /* lower than 20M vht 2ss mcs8, make it small */ 37 if (bit_rate < 1800) 38 return 1200; 39 40 /* lower than 40M vht 2ss mcs9, make it medium */ 41 if (bit_rate < 4000) 42 return 2600; 43 44 /* not yet 80M vht 2ss mcs8/9, make it twice regular packet size */ 45 if (bit_rate < 7000) 46 return 3500; 47 48 return rtwdev->chip->max_amsdu_limit; 49 } 50 51 static u64 get_mcs_ra_mask(u16 mcs_map, u8 highest_mcs, u8 gap) 52 { 53 u64 ra_mask = 0; 54 u8 mcs_cap; 55 int i, nss; 56 57 for (i = 0, nss = 12; i < 4; i++, mcs_map >>= 2, nss += 12) { 58 mcs_cap = mcs_map & 0x3; 59 switch (mcs_cap) { 60 case 2: 61 ra_mask |= GENMASK_ULL(highest_mcs, 0) << nss; 62 break; 63 case 1: 64 ra_mask |= GENMASK_ULL(highest_mcs - gap, 0) << nss; 65 break; 66 case 0: 67 ra_mask |= GENMASK_ULL(highest_mcs - gap * 2, 0) << nss; 68 break; 69 default: 70 break; 71 } 72 } 73 74 return ra_mask; 75 } 76 77 static u64 get_he_ra_mask(struct ieee80211_sta *sta) 78 { 79 struct ieee80211_sta_he_cap cap = sta->deflink.he_cap; 80 u16 mcs_map; 81 82 switch (sta->deflink.bandwidth) { 83 case IEEE80211_STA_RX_BW_160: 84 if (cap.he_cap_elem.phy_cap_info[0] & 85 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) 86 mcs_map = le16_to_cpu(cap.he_mcs_nss_supp.rx_mcs_80p80); 87 else 88 mcs_map = le16_to_cpu(cap.he_mcs_nss_supp.rx_mcs_160); 89 break; 90 default: 91 mcs_map = le16_to_cpu(cap.he_mcs_nss_supp.rx_mcs_80); 92 } 93 94 /* MCS11, MCS9, MCS7 */ 95 return get_mcs_ra_mask(mcs_map, 11, 2); 96 } 97 98 static u64 get_eht_mcs_ra_mask(u8 *max_nss, u8 start_mcs, u8 n_nss) 99 { 100 u64 nss_mcs_shift; 101 u64 nss_mcs_val; 102 u64 mask = 0; 103 int i, j; 104 u8 nss; 105 106 for (i = 0; i < n_nss; i++) { 107 nss = u8_get_bits(max_nss[i], IEEE80211_EHT_MCS_NSS_RX); 108 if (!nss) 109 continue; 110 111 nss_mcs_val = GENMASK_ULL(start_mcs + i * 2, 0); 112 113 for (j = 0, nss_mcs_shift = 12; j < nss; j++, nss_mcs_shift += 16) 114 mask |= nss_mcs_val << nss_mcs_shift; 115 } 116 117 return mask; 118 } 119 120 static u64 get_eht_ra_mask(struct ieee80211_sta *sta) 121 { 122 struct ieee80211_sta_eht_cap *eht_cap = &sta->deflink.eht_cap; 123 struct ieee80211_eht_mcs_nss_supp_20mhz_only *mcs_nss_20mhz; 124 struct ieee80211_eht_mcs_nss_supp_bw *mcs_nss; 125 126 switch (sta->deflink.bandwidth) { 127 case IEEE80211_STA_RX_BW_320: 128 mcs_nss = &eht_cap->eht_mcs_nss_supp.bw._320; 129 /* MCS 9, 11, 13 */ 130 return get_eht_mcs_ra_mask(mcs_nss->rx_tx_max_nss, 9, 3); 131 case IEEE80211_STA_RX_BW_160: 132 mcs_nss = &eht_cap->eht_mcs_nss_supp.bw._160; 133 /* MCS 9, 11, 13 */ 134 return get_eht_mcs_ra_mask(mcs_nss->rx_tx_max_nss, 9, 3); 135 case IEEE80211_STA_RX_BW_80: 136 default: 137 mcs_nss = &eht_cap->eht_mcs_nss_supp.bw._80; 138 /* MCS 9, 11, 13 */ 139 return get_eht_mcs_ra_mask(mcs_nss->rx_tx_max_nss, 9, 3); 140 case IEEE80211_STA_RX_BW_20: 141 mcs_nss_20mhz = &eht_cap->eht_mcs_nss_supp.only_20mhz; 142 /* MCS 7, 9, 11, 13 */ 143 return get_eht_mcs_ra_mask(mcs_nss_20mhz->rx_tx_max_nss, 7, 4); 144 } 145 } 146 147 #define RA_FLOOR_TABLE_SIZE 7 148 #define RA_FLOOR_UP_GAP 3 149 static u64 rtw89_phy_ra_mask_rssi(struct rtw89_dev *rtwdev, u8 rssi, 150 u8 ratr_state) 151 { 152 u8 rssi_lv_t[RA_FLOOR_TABLE_SIZE] = {30, 44, 48, 52, 56, 60, 100}; 153 u8 rssi_lv = 0; 154 u8 i; 155 156 rssi >>= 1; 157 for (i = 0; i < RA_FLOOR_TABLE_SIZE; i++) { 158 if (i >= ratr_state) 159 rssi_lv_t[i] += RA_FLOOR_UP_GAP; 160 if (rssi < rssi_lv_t[i]) { 161 rssi_lv = i; 162 break; 163 } 164 } 165 if (rssi_lv == 0) 166 return 0xffffffffffffffffULL; 167 else if (rssi_lv == 1) 168 return 0xfffffffffffffff0ULL; 169 else if (rssi_lv == 2) 170 return 0xffffffffffffefe0ULL; 171 else if (rssi_lv == 3) 172 return 0xffffffffffffcfc0ULL; 173 else if (rssi_lv == 4) 174 return 0xffffffffffff8f80ULL; 175 else if (rssi_lv >= 5) 176 return 0xffffffffffff0f00ULL; 177 178 return 0xffffffffffffffffULL; 179 } 180 181 static u64 rtw89_phy_ra_mask_recover(u64 ra_mask, u64 ra_mask_bak) 182 { 183 if ((ra_mask & ~(RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES)) == 0) 184 ra_mask |= (ra_mask_bak & ~(RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES)); 185 186 if (ra_mask == 0) 187 ra_mask |= (ra_mask_bak & (RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES)); 188 189 return ra_mask; 190 } 191 192 static u64 rtw89_phy_ra_mask_cfg(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta, 193 const struct rtw89_chan *chan) 194 { 195 struct ieee80211_sta *sta = rtwsta_to_sta(rtwsta); 196 struct cfg80211_bitrate_mask *mask = &rtwsta->mask; 197 enum nl80211_band band; 198 u64 cfg_mask; 199 200 if (!rtwsta->use_cfg_mask) 201 return -1; 202 203 switch (chan->band_type) { 204 case RTW89_BAND_2G: 205 band = NL80211_BAND_2GHZ; 206 cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_2GHZ].legacy, 207 RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES); 208 break; 209 case RTW89_BAND_5G: 210 band = NL80211_BAND_5GHZ; 211 cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_5GHZ].legacy, 212 RA_MASK_OFDM_RATES); 213 break; 214 case RTW89_BAND_6G: 215 band = NL80211_BAND_6GHZ; 216 cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_6GHZ].legacy, 217 RA_MASK_OFDM_RATES); 218 break; 219 default: 220 rtw89_warn(rtwdev, "unhandled band type %d\n", chan->band_type); 221 return -1; 222 } 223 224 if (sta->deflink.he_cap.has_he) { 225 cfg_mask |= u64_encode_bits(mask->control[band].he_mcs[0], 226 RA_MASK_HE_1SS_RATES); 227 cfg_mask |= u64_encode_bits(mask->control[band].he_mcs[1], 228 RA_MASK_HE_2SS_RATES); 229 } else if (sta->deflink.vht_cap.vht_supported) { 230 cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[0], 231 RA_MASK_VHT_1SS_RATES); 232 cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[1], 233 RA_MASK_VHT_2SS_RATES); 234 } else if (sta->deflink.ht_cap.ht_supported) { 235 cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[0], 236 RA_MASK_HT_1SS_RATES); 237 cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[1], 238 RA_MASK_HT_2SS_RATES); 239 } 240 241 return cfg_mask; 242 } 243 244 static const u64 245 rtw89_ra_mask_ht_rates[4] = {RA_MASK_HT_1SS_RATES, RA_MASK_HT_2SS_RATES, 246 RA_MASK_HT_3SS_RATES, RA_MASK_HT_4SS_RATES}; 247 static const u64 248 rtw89_ra_mask_vht_rates[4] = {RA_MASK_VHT_1SS_RATES, RA_MASK_VHT_2SS_RATES, 249 RA_MASK_VHT_3SS_RATES, RA_MASK_VHT_4SS_RATES}; 250 static const u64 251 rtw89_ra_mask_he_rates[4] = {RA_MASK_HE_1SS_RATES, RA_MASK_HE_2SS_RATES, 252 RA_MASK_HE_3SS_RATES, RA_MASK_HE_4SS_RATES}; 253 static const u64 254 rtw89_ra_mask_eht_rates[4] = {RA_MASK_EHT_1SS_RATES, RA_MASK_EHT_2SS_RATES, 255 RA_MASK_EHT_3SS_RATES, RA_MASK_EHT_4SS_RATES}; 256 257 static void rtw89_phy_ra_gi_ltf(struct rtw89_dev *rtwdev, 258 struct rtw89_sta *rtwsta, 259 const struct rtw89_chan *chan, 260 bool *fix_giltf_en, u8 *fix_giltf) 261 { 262 struct cfg80211_bitrate_mask *mask = &rtwsta->mask; 263 u8 band = chan->band_type; 264 enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band); 265 u8 he_gi = mask->control[nl_band].he_gi; 266 u8 he_ltf = mask->control[nl_band].he_ltf; 267 268 if (!rtwsta->use_cfg_mask) 269 return; 270 271 if (he_ltf == 2 && he_gi == 2) { 272 *fix_giltf = RTW89_GILTF_LGI_4XHE32; 273 } else if (he_ltf == 2 && he_gi == 0) { 274 *fix_giltf = RTW89_GILTF_SGI_4XHE08; 275 } else if (he_ltf == 1 && he_gi == 1) { 276 *fix_giltf = RTW89_GILTF_2XHE16; 277 } else if (he_ltf == 1 && he_gi == 0) { 278 *fix_giltf = RTW89_GILTF_2XHE08; 279 } else if (he_ltf == 0 && he_gi == 1) { 280 *fix_giltf = RTW89_GILTF_1XHE16; 281 } else if (he_ltf == 0 && he_gi == 0) { 282 *fix_giltf = RTW89_GILTF_1XHE08; 283 } else { 284 *fix_giltf_en = false; 285 return; 286 } 287 288 *fix_giltf_en = true; 289 } 290 291 static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev, 292 struct ieee80211_sta *sta, bool csi) 293 { 294 struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; 295 struct rtw89_vif *rtwvif = rtwsta->rtwvif; 296 struct rtw89_phy_rate_pattern *rate_pattern = &rtwvif->rate_pattern; 297 struct rtw89_ra_info *ra = &rtwsta->ra; 298 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 299 rtwvif->sub_entity_idx); 300 struct ieee80211_vif *vif = rtwvif_to_vif(rtwsta->rtwvif); 301 const u64 *high_rate_masks = rtw89_ra_mask_ht_rates; 302 u8 rssi = ewma_rssi_read(&rtwsta->avg_rssi); 303 u64 ra_mask = 0; 304 u64 ra_mask_bak; 305 u8 mode = 0; 306 u8 csi_mode = RTW89_RA_RPT_MODE_LEGACY; 307 u8 bw_mode = 0; 308 u8 stbc_en = 0; 309 u8 ldpc_en = 0; 310 u8 fix_giltf = 0; 311 u8 i; 312 bool sgi = false; 313 bool fix_giltf_en = false; 314 315 memset(ra, 0, sizeof(*ra)); 316 /* Set the ra mask from sta's capability */ 317 if (sta->deflink.eht_cap.has_eht) { 318 mode |= RTW89_RA_MODE_EHT; 319 ra_mask |= get_eht_ra_mask(sta); 320 high_rate_masks = rtw89_ra_mask_eht_rates; 321 } else if (sta->deflink.he_cap.has_he) { 322 mode |= RTW89_RA_MODE_HE; 323 csi_mode = RTW89_RA_RPT_MODE_HE; 324 ra_mask |= get_he_ra_mask(sta); 325 high_rate_masks = rtw89_ra_mask_he_rates; 326 if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[2] & 327 IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ) 328 stbc_en = 1; 329 if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[1] & 330 IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD) 331 ldpc_en = 1; 332 rtw89_phy_ra_gi_ltf(rtwdev, rtwsta, chan, &fix_giltf_en, &fix_giltf); 333 } else if (sta->deflink.vht_cap.vht_supported) { 334 u16 mcs_map = le16_to_cpu(sta->deflink.vht_cap.vht_mcs.rx_mcs_map); 335 336 mode |= RTW89_RA_MODE_VHT; 337 csi_mode = RTW89_RA_RPT_MODE_VHT; 338 /* MCS9, MCS8, MCS7 */ 339 ra_mask |= get_mcs_ra_mask(mcs_map, 9, 1); 340 high_rate_masks = rtw89_ra_mask_vht_rates; 341 if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK) 342 stbc_en = 1; 343 if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC) 344 ldpc_en = 1; 345 } else if (sta->deflink.ht_cap.ht_supported) { 346 mode |= RTW89_RA_MODE_HT; 347 csi_mode = RTW89_RA_RPT_MODE_HT; 348 ra_mask |= ((u64)sta->deflink.ht_cap.mcs.rx_mask[3] << 48) | 349 ((u64)sta->deflink.ht_cap.mcs.rx_mask[2] << 36) | 350 (sta->deflink.ht_cap.mcs.rx_mask[1] << 24) | 351 (sta->deflink.ht_cap.mcs.rx_mask[0] << 12); 352 high_rate_masks = rtw89_ra_mask_ht_rates; 353 if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_RX_STBC) 354 stbc_en = 1; 355 if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING) 356 ldpc_en = 1; 357 } 358 359 switch (chan->band_type) { 360 case RTW89_BAND_2G: 361 ra_mask |= sta->deflink.supp_rates[NL80211_BAND_2GHZ]; 362 if (sta->deflink.supp_rates[NL80211_BAND_2GHZ] & 0xf) 363 mode |= RTW89_RA_MODE_CCK; 364 if (sta->deflink.supp_rates[NL80211_BAND_2GHZ] & 0xff0) 365 mode |= RTW89_RA_MODE_OFDM; 366 break; 367 case RTW89_BAND_5G: 368 ra_mask |= (u64)sta->deflink.supp_rates[NL80211_BAND_5GHZ] << 4; 369 mode |= RTW89_RA_MODE_OFDM; 370 break; 371 case RTW89_BAND_6G: 372 ra_mask |= (u64)sta->deflink.supp_rates[NL80211_BAND_6GHZ] << 4; 373 mode |= RTW89_RA_MODE_OFDM; 374 break; 375 default: 376 rtw89_err(rtwdev, "Unknown band type\n"); 377 break; 378 } 379 380 ra_mask_bak = ra_mask; 381 382 if (mode >= RTW89_RA_MODE_HT) { 383 u64 mask = 0; 384 for (i = 0; i < rtwdev->hal.tx_nss; i++) 385 mask |= high_rate_masks[i]; 386 if (mode & RTW89_RA_MODE_OFDM) 387 mask |= RA_MASK_SUBOFDM_RATES; 388 if (mode & RTW89_RA_MODE_CCK) 389 mask |= RA_MASK_SUBCCK_RATES; 390 ra_mask &= mask; 391 } else if (mode & RTW89_RA_MODE_OFDM) { 392 ra_mask &= (RA_MASK_OFDM_RATES | RA_MASK_SUBCCK_RATES); 393 } 394 395 if (mode != RTW89_RA_MODE_CCK) 396 ra_mask &= rtw89_phy_ra_mask_rssi(rtwdev, rssi, 0); 397 398 ra_mask = rtw89_phy_ra_mask_recover(ra_mask, ra_mask_bak); 399 ra_mask &= rtw89_phy_ra_mask_cfg(rtwdev, rtwsta, chan); 400 401 switch (sta->deflink.bandwidth) { 402 case IEEE80211_STA_RX_BW_160: 403 bw_mode = RTW89_CHANNEL_WIDTH_160; 404 sgi = sta->deflink.vht_cap.vht_supported && 405 (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160); 406 break; 407 case IEEE80211_STA_RX_BW_80: 408 bw_mode = RTW89_CHANNEL_WIDTH_80; 409 sgi = sta->deflink.vht_cap.vht_supported && 410 (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80); 411 break; 412 case IEEE80211_STA_RX_BW_40: 413 bw_mode = RTW89_CHANNEL_WIDTH_40; 414 sgi = sta->deflink.ht_cap.ht_supported && 415 (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40); 416 break; 417 default: 418 bw_mode = RTW89_CHANNEL_WIDTH_20; 419 sgi = sta->deflink.ht_cap.ht_supported && 420 (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20); 421 break; 422 } 423 424 if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[3] & 425 IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM) 426 ra->dcm_cap = 1; 427 428 if (rate_pattern->enable && !vif->p2p) { 429 ra_mask = rtw89_phy_ra_mask_cfg(rtwdev, rtwsta, chan); 430 ra_mask &= rate_pattern->ra_mask; 431 mode = rate_pattern->ra_mode; 432 } 433 434 ra->bw_cap = bw_mode; 435 ra->er_cap = rtwsta->er_cap; 436 ra->mode_ctrl = mode; 437 ra->macid = rtwsta->mac_id; 438 ra->stbc_cap = stbc_en; 439 ra->ldpc_cap = ldpc_en; 440 ra->ss_num = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1; 441 ra->en_sgi = sgi; 442 ra->ra_mask = ra_mask; 443 ra->fix_giltf_en = fix_giltf_en; 444 ra->fix_giltf = fix_giltf; 445 446 if (!csi) 447 return; 448 449 ra->fixed_csi_rate_en = false; 450 ra->ra_csi_rate_en = true; 451 ra->cr_tbl_sel = false; 452 ra->band_num = rtwvif->phy_idx; 453 ra->csi_bw = bw_mode; 454 ra->csi_gi_ltf = RTW89_GILTF_LGI_4XHE32; 455 ra->csi_mcs_ss_idx = 5; 456 ra->csi_mode = csi_mode; 457 } 458 459 void rtw89_phy_ra_updata_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta, 460 u32 changed) 461 { 462 struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; 463 struct rtw89_ra_info *ra = &rtwsta->ra; 464 465 rtw89_phy_ra_sta_update(rtwdev, sta, false); 466 467 if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) 468 ra->upd_mask = 1; 469 if (changed & (IEEE80211_RC_BW_CHANGED | IEEE80211_RC_NSS_CHANGED)) 470 ra->upd_bw_nss_mask = 1; 471 472 rtw89_debug(rtwdev, RTW89_DBG_RA, 473 "ra updat: macid = %d, bw = %d, nss = %d, gi = %d %d", 474 ra->macid, 475 ra->bw_cap, 476 ra->ss_num, 477 ra->en_sgi, 478 ra->giltf); 479 480 rtw89_fw_h2c_ra(rtwdev, ra, false); 481 } 482 483 static bool __check_rate_pattern(struct rtw89_phy_rate_pattern *next, 484 u16 rate_base, u64 ra_mask, u8 ra_mode, 485 u32 rate_ctrl, u32 ctrl_skip, bool force) 486 { 487 u8 n, c; 488 489 if (rate_ctrl == ctrl_skip) 490 return true; 491 492 n = hweight32(rate_ctrl); 493 if (n == 0) 494 return true; 495 496 if (force && n != 1) 497 return false; 498 499 if (next->enable) 500 return false; 501 502 c = __fls(rate_ctrl); 503 next->rate = rate_base + c; 504 next->ra_mode = ra_mode; 505 next->ra_mask = ra_mask; 506 next->enable = true; 507 508 return true; 509 } 510 511 #define RTW89_HW_RATE_BY_CHIP_GEN(rate) \ 512 { \ 513 [RTW89_CHIP_AX] = RTW89_HW_RATE_ ## rate, \ 514 [RTW89_CHIP_BE] = RTW89_HW_RATE_V1_ ## rate, \ 515 } 516 517 void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev, 518 struct ieee80211_vif *vif, 519 const struct cfg80211_bitrate_mask *mask) 520 { 521 struct ieee80211_supported_band *sband; 522 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 523 struct rtw89_phy_rate_pattern next_pattern = {0}; 524 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 525 rtwvif->sub_entity_idx); 526 static const u16 hw_rate_he[][RTW89_CHIP_GEN_NUM] = { 527 RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS1_MCS0), 528 RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS2_MCS0), 529 RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS3_MCS0), 530 RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS4_MCS0), 531 }; 532 static const u16 hw_rate_vht[][RTW89_CHIP_GEN_NUM] = { 533 RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS1_MCS0), 534 RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS2_MCS0), 535 RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS3_MCS0), 536 RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS4_MCS0), 537 }; 538 static const u16 hw_rate_ht[][RTW89_CHIP_GEN_NUM] = { 539 RTW89_HW_RATE_BY_CHIP_GEN(MCS0), 540 RTW89_HW_RATE_BY_CHIP_GEN(MCS8), 541 RTW89_HW_RATE_BY_CHIP_GEN(MCS16), 542 RTW89_HW_RATE_BY_CHIP_GEN(MCS24), 543 }; 544 u8 band = chan->band_type; 545 enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band); 546 enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen; 547 u8 tx_nss = rtwdev->hal.tx_nss; 548 u8 i; 549 550 for (i = 0; i < tx_nss; i++) 551 if (!__check_rate_pattern(&next_pattern, hw_rate_he[i][chip_gen], 552 RA_MASK_HE_RATES, RTW89_RA_MODE_HE, 553 mask->control[nl_band].he_mcs[i], 554 0, true)) 555 goto out; 556 557 for (i = 0; i < tx_nss; i++) 558 if (!__check_rate_pattern(&next_pattern, hw_rate_vht[i][chip_gen], 559 RA_MASK_VHT_RATES, RTW89_RA_MODE_VHT, 560 mask->control[nl_band].vht_mcs[i], 561 0, true)) 562 goto out; 563 564 for (i = 0; i < tx_nss; i++) 565 if (!__check_rate_pattern(&next_pattern, hw_rate_ht[i][chip_gen], 566 RA_MASK_HT_RATES, RTW89_RA_MODE_HT, 567 mask->control[nl_band].ht_mcs[i], 568 0, true)) 569 goto out; 570 571 /* lagacy cannot be empty for nl80211_parse_tx_bitrate_mask, and 572 * require at least one basic rate for ieee80211_set_bitrate_mask, 573 * so the decision just depends on if all bitrates are set or not. 574 */ 575 sband = rtwdev->hw->wiphy->bands[nl_band]; 576 if (band == RTW89_BAND_2G) { 577 if (!__check_rate_pattern(&next_pattern, RTW89_HW_RATE_CCK1, 578 RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES, 579 RTW89_RA_MODE_CCK | RTW89_RA_MODE_OFDM, 580 mask->control[nl_band].legacy, 581 BIT(sband->n_bitrates) - 1, false)) 582 goto out; 583 } else { 584 if (!__check_rate_pattern(&next_pattern, RTW89_HW_RATE_OFDM6, 585 RA_MASK_OFDM_RATES, RTW89_RA_MODE_OFDM, 586 mask->control[nl_band].legacy, 587 BIT(sband->n_bitrates) - 1, false)) 588 goto out; 589 } 590 591 if (!next_pattern.enable) 592 goto out; 593 594 rtwvif->rate_pattern = next_pattern; 595 rtw89_debug(rtwdev, RTW89_DBG_RA, 596 "configure pattern: rate 0x%x, mask 0x%llx, mode 0x%x\n", 597 next_pattern.rate, 598 next_pattern.ra_mask, 599 next_pattern.ra_mode); 600 return; 601 602 out: 603 rtwvif->rate_pattern.enable = false; 604 rtw89_debug(rtwdev, RTW89_DBG_RA, "unset rate pattern\n"); 605 } 606 607 static void rtw89_phy_ra_updata_sta_iter(void *data, struct ieee80211_sta *sta) 608 { 609 struct rtw89_dev *rtwdev = (struct rtw89_dev *)data; 610 611 rtw89_phy_ra_updata_sta(rtwdev, sta, IEEE80211_RC_SUPP_RATES_CHANGED); 612 } 613 614 void rtw89_phy_ra_update(struct rtw89_dev *rtwdev) 615 { 616 ieee80211_iterate_stations_atomic(rtwdev->hw, 617 rtw89_phy_ra_updata_sta_iter, 618 rtwdev); 619 } 620 621 void rtw89_phy_ra_assoc(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta) 622 { 623 struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; 624 struct rtw89_ra_info *ra = &rtwsta->ra; 625 u8 rssi = ewma_rssi_read(&rtwsta->avg_rssi) >> RSSI_FACTOR; 626 bool csi = rtw89_sta_has_beamformer_cap(sta); 627 628 rtw89_phy_ra_sta_update(rtwdev, sta, csi); 629 630 if (rssi > 40) 631 ra->init_rate_lv = 1; 632 else if (rssi > 20) 633 ra->init_rate_lv = 2; 634 else if (rssi > 1) 635 ra->init_rate_lv = 3; 636 else 637 ra->init_rate_lv = 0; 638 ra->upd_all = 1; 639 rtw89_debug(rtwdev, RTW89_DBG_RA, 640 "ra assoc: macid = %d, mode = %d, bw = %d, nss = %d, lv = %d", 641 ra->macid, 642 ra->mode_ctrl, 643 ra->bw_cap, 644 ra->ss_num, 645 ra->init_rate_lv); 646 rtw89_debug(rtwdev, RTW89_DBG_RA, 647 "ra assoc: dcm = %d, er = %d, ldpc = %d, stbc = %d, gi = %d %d", 648 ra->dcm_cap, 649 ra->er_cap, 650 ra->ldpc_cap, 651 ra->stbc_cap, 652 ra->en_sgi, 653 ra->giltf); 654 655 rtw89_fw_h2c_ra(rtwdev, ra, csi); 656 } 657 658 u8 rtw89_phy_get_txsc(struct rtw89_dev *rtwdev, 659 const struct rtw89_chan *chan, 660 enum rtw89_bandwidth dbw) 661 { 662 enum rtw89_bandwidth cbw = chan->band_width; 663 u8 pri_ch = chan->primary_channel; 664 u8 central_ch = chan->channel; 665 u8 txsc_idx = 0; 666 u8 tmp = 0; 667 668 if (cbw == dbw || cbw == RTW89_CHANNEL_WIDTH_20) 669 return txsc_idx; 670 671 switch (cbw) { 672 case RTW89_CHANNEL_WIDTH_40: 673 txsc_idx = pri_ch > central_ch ? 1 : 2; 674 break; 675 case RTW89_CHANNEL_WIDTH_80: 676 if (dbw == RTW89_CHANNEL_WIDTH_20) { 677 if (pri_ch > central_ch) 678 txsc_idx = (pri_ch - central_ch) >> 1; 679 else 680 txsc_idx = ((central_ch - pri_ch) >> 1) + 1; 681 } else { 682 txsc_idx = pri_ch > central_ch ? 9 : 10; 683 } 684 break; 685 case RTW89_CHANNEL_WIDTH_160: 686 if (pri_ch > central_ch) 687 tmp = (pri_ch - central_ch) >> 1; 688 else 689 tmp = ((central_ch - pri_ch) >> 1) + 1; 690 691 if (dbw == RTW89_CHANNEL_WIDTH_20) { 692 txsc_idx = tmp; 693 } else if (dbw == RTW89_CHANNEL_WIDTH_40) { 694 if (tmp == 1 || tmp == 3) 695 txsc_idx = 9; 696 else if (tmp == 5 || tmp == 7) 697 txsc_idx = 11; 698 else if (tmp == 2 || tmp == 4) 699 txsc_idx = 10; 700 else if (tmp == 6 || tmp == 8) 701 txsc_idx = 12; 702 else 703 return 0xff; 704 } else { 705 txsc_idx = pri_ch > central_ch ? 13 : 14; 706 } 707 break; 708 case RTW89_CHANNEL_WIDTH_80_80: 709 if (dbw == RTW89_CHANNEL_WIDTH_20) { 710 if (pri_ch > central_ch) 711 txsc_idx = (10 - (pri_ch - central_ch)) >> 1; 712 else 713 txsc_idx = ((central_ch - pri_ch) >> 1) + 5; 714 } else if (dbw == RTW89_CHANNEL_WIDTH_40) { 715 txsc_idx = pri_ch > central_ch ? 10 : 12; 716 } else { 717 txsc_idx = 14; 718 } 719 break; 720 default: 721 break; 722 } 723 724 return txsc_idx; 725 } 726 EXPORT_SYMBOL(rtw89_phy_get_txsc); 727 728 u8 rtw89_phy_get_txsb(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan, 729 enum rtw89_bandwidth dbw) 730 { 731 enum rtw89_bandwidth cbw = chan->band_width; 732 u8 pri_ch = chan->primary_channel; 733 u8 central_ch = chan->channel; 734 u8 txsb_idx = 0; 735 736 if (cbw == dbw || cbw == RTW89_CHANNEL_WIDTH_20) 737 return txsb_idx; 738 739 switch (cbw) { 740 case RTW89_CHANNEL_WIDTH_40: 741 txsb_idx = pri_ch > central_ch ? 1 : 0; 742 break; 743 case RTW89_CHANNEL_WIDTH_80: 744 if (dbw == RTW89_CHANNEL_WIDTH_20) 745 txsb_idx = (pri_ch - central_ch + 6) / 4; 746 else 747 txsb_idx = pri_ch > central_ch ? 1 : 0; 748 break; 749 case RTW89_CHANNEL_WIDTH_160: 750 if (dbw == RTW89_CHANNEL_WIDTH_20) 751 txsb_idx = (pri_ch - central_ch + 14) / 4; 752 else if (dbw == RTW89_CHANNEL_WIDTH_40) 753 txsb_idx = (pri_ch - central_ch + 12) / 8; 754 else 755 txsb_idx = pri_ch > central_ch ? 1 : 0; 756 break; 757 case RTW89_CHANNEL_WIDTH_320: 758 if (dbw == RTW89_CHANNEL_WIDTH_20) 759 txsb_idx = (pri_ch - central_ch + 30) / 4; 760 else if (dbw == RTW89_CHANNEL_WIDTH_40) 761 txsb_idx = (pri_ch - central_ch + 28) / 8; 762 else if (dbw == RTW89_CHANNEL_WIDTH_80) 763 txsb_idx = (pri_ch - central_ch + 24) / 16; 764 else 765 txsb_idx = pri_ch > central_ch ? 1 : 0; 766 break; 767 default: 768 break; 769 } 770 771 return txsb_idx; 772 } 773 EXPORT_SYMBOL(rtw89_phy_get_txsb); 774 775 static bool rtw89_phy_check_swsi_busy(struct rtw89_dev *rtwdev) 776 { 777 return !!rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, B_SWSI_W_BUSY_V1) || 778 !!rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, B_SWSI_R_BUSY_V1); 779 } 780 781 u32 rtw89_phy_read_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path, 782 u32 addr, u32 mask) 783 { 784 const struct rtw89_chip_info *chip = rtwdev->chip; 785 const u32 *base_addr = chip->rf_base_addr; 786 u32 val, direct_addr; 787 788 if (rf_path >= rtwdev->chip->rf_path_num) { 789 rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path); 790 return INV_RF_DATA; 791 } 792 793 addr &= 0xff; 794 direct_addr = base_addr[rf_path] + (addr << 2); 795 mask &= RFREG_MASK; 796 797 val = rtw89_phy_read32_mask(rtwdev, direct_addr, mask); 798 799 return val; 800 } 801 EXPORT_SYMBOL(rtw89_phy_read_rf); 802 803 static u32 rtw89_phy_read_rf_a(struct rtw89_dev *rtwdev, 804 enum rtw89_rf_path rf_path, u32 addr, u32 mask) 805 { 806 bool busy; 807 bool done; 808 u32 val; 809 int ret; 810 811 ret = read_poll_timeout_atomic(rtw89_phy_check_swsi_busy, busy, !busy, 812 1, 30, false, rtwdev); 813 if (ret) { 814 rtw89_err(rtwdev, "read rf busy swsi\n"); 815 return INV_RF_DATA; 816 } 817 818 mask &= RFREG_MASK; 819 820 val = FIELD_PREP(B_SWSI_READ_ADDR_PATH_V1, rf_path) | 821 FIELD_PREP(B_SWSI_READ_ADDR_ADDR_V1, addr); 822 rtw89_phy_write32_mask(rtwdev, R_SWSI_READ_ADDR_V1, B_SWSI_READ_ADDR_V1, val); 823 udelay(2); 824 825 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, done, done, 1, 826 30, false, rtwdev, R_SWSI_V1, 827 B_SWSI_R_DATA_DONE_V1); 828 if (ret) { 829 rtw89_err(rtwdev, "read swsi busy\n"); 830 return INV_RF_DATA; 831 } 832 833 return rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, mask); 834 } 835 836 u32 rtw89_phy_read_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path, 837 u32 addr, u32 mask) 838 { 839 bool ad_sel = FIELD_GET(RTW89_RF_ADDR_ADSEL_MASK, addr); 840 841 if (rf_path >= rtwdev->chip->rf_path_num) { 842 rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path); 843 return INV_RF_DATA; 844 } 845 846 if (ad_sel) 847 return rtw89_phy_read_rf(rtwdev, rf_path, addr, mask); 848 else 849 return rtw89_phy_read_rf_a(rtwdev, rf_path, addr, mask); 850 } 851 EXPORT_SYMBOL(rtw89_phy_read_rf_v1); 852 853 static u32 rtw89_phy_read_full_rf_v2_a(struct rtw89_dev *rtwdev, 854 enum rtw89_rf_path rf_path, u32 addr) 855 { 856 static const u16 r_addr_ofst[2] = {0x2C24, 0x2D24}; 857 static const u16 addr_ofst[2] = {0x2ADC, 0x2BDC}; 858 bool busy, done; 859 int ret; 860 u32 val; 861 862 rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_CTL_MASK, 0x1); 863 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, busy, !busy, 864 1, 3800, false, 865 rtwdev, r_addr_ofst[rf_path], B_HWSI_VAL_BUSY); 866 if (ret) { 867 rtw89_warn(rtwdev, "poll HWSI is busy\n"); 868 return INV_RF_DATA; 869 } 870 871 rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_MASK, addr); 872 rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_RD, 0x1); 873 udelay(2); 874 875 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, done, done, 876 1, 3800, false, 877 rtwdev, r_addr_ofst[rf_path], B_HWSI_VAL_RDONE); 878 if (ret) { 879 rtw89_warn(rtwdev, "read HWSI is busy\n"); 880 val = INV_RF_DATA; 881 goto out; 882 } 883 884 val = rtw89_phy_read32_mask(rtwdev, r_addr_ofst[rf_path], RFREG_MASK); 885 out: 886 rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_POLL_MASK, 0); 887 888 return val; 889 } 890 891 static u32 rtw89_phy_read_rf_v2_a(struct rtw89_dev *rtwdev, 892 enum rtw89_rf_path rf_path, u32 addr, u32 mask) 893 { 894 u32 val; 895 896 val = rtw89_phy_read_full_rf_v2_a(rtwdev, rf_path, addr); 897 898 return (val & mask) >> __ffs(mask); 899 } 900 901 u32 rtw89_phy_read_rf_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path, 902 u32 addr, u32 mask) 903 { 904 bool ad_sel = u32_get_bits(addr, RTW89_RF_ADDR_ADSEL_MASK); 905 906 if (rf_path >= rtwdev->chip->rf_path_num) { 907 rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path); 908 return INV_RF_DATA; 909 } 910 911 if (ad_sel) 912 return rtw89_phy_read_rf(rtwdev, rf_path, addr, mask); 913 else 914 return rtw89_phy_read_rf_v2_a(rtwdev, rf_path, addr, mask); 915 } 916 EXPORT_SYMBOL(rtw89_phy_read_rf_v2); 917 918 bool rtw89_phy_write_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path, 919 u32 addr, u32 mask, u32 data) 920 { 921 const struct rtw89_chip_info *chip = rtwdev->chip; 922 const u32 *base_addr = chip->rf_base_addr; 923 u32 direct_addr; 924 925 if (rf_path >= rtwdev->chip->rf_path_num) { 926 rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path); 927 return false; 928 } 929 930 addr &= 0xff; 931 direct_addr = base_addr[rf_path] + (addr << 2); 932 mask &= RFREG_MASK; 933 934 rtw89_phy_write32_mask(rtwdev, direct_addr, mask, data); 935 936 /* delay to ensure writing properly */ 937 udelay(1); 938 939 return true; 940 } 941 EXPORT_SYMBOL(rtw89_phy_write_rf); 942 943 static bool rtw89_phy_write_rf_a(struct rtw89_dev *rtwdev, 944 enum rtw89_rf_path rf_path, u32 addr, u32 mask, 945 u32 data) 946 { 947 u8 bit_shift; 948 u32 val; 949 bool busy, b_msk_en = false; 950 int ret; 951 952 ret = read_poll_timeout_atomic(rtw89_phy_check_swsi_busy, busy, !busy, 953 1, 30, false, rtwdev); 954 if (ret) { 955 rtw89_err(rtwdev, "write rf busy swsi\n"); 956 return false; 957 } 958 959 data &= RFREG_MASK; 960 mask &= RFREG_MASK; 961 962 if (mask != RFREG_MASK) { 963 b_msk_en = true; 964 rtw89_phy_write32_mask(rtwdev, R_SWSI_BIT_MASK_V1, RFREG_MASK, 965 mask); 966 bit_shift = __ffs(mask); 967 data = (data << bit_shift) & RFREG_MASK; 968 } 969 970 val = FIELD_PREP(B_SWSI_DATA_BIT_MASK_EN_V1, b_msk_en) | 971 FIELD_PREP(B_SWSI_DATA_PATH_V1, rf_path) | 972 FIELD_PREP(B_SWSI_DATA_ADDR_V1, addr) | 973 FIELD_PREP(B_SWSI_DATA_VAL_V1, data); 974 975 rtw89_phy_write32_mask(rtwdev, R_SWSI_DATA_V1, MASKDWORD, val); 976 977 return true; 978 } 979 980 bool rtw89_phy_write_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path, 981 u32 addr, u32 mask, u32 data) 982 { 983 bool ad_sel = FIELD_GET(RTW89_RF_ADDR_ADSEL_MASK, addr); 984 985 if (rf_path >= rtwdev->chip->rf_path_num) { 986 rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path); 987 return false; 988 } 989 990 if (ad_sel) 991 return rtw89_phy_write_rf(rtwdev, rf_path, addr, mask, data); 992 else 993 return rtw89_phy_write_rf_a(rtwdev, rf_path, addr, mask, data); 994 } 995 EXPORT_SYMBOL(rtw89_phy_write_rf_v1); 996 997 static 998 bool rtw89_phy_write_full_rf_v2_a(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path, 999 u32 addr, u32 data) 1000 { 1001 static const u32 addr_is_idle[2] = {0x2C24, 0x2D24}; 1002 static const u32 addr_ofst[2] = {0x2AE0, 0x2BE0}; 1003 bool busy; 1004 u32 val; 1005 int ret; 1006 1007 ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, busy, !busy, 1008 1, 3800, false, 1009 rtwdev, addr_is_idle[rf_path], BIT(29)); 1010 if (ret) { 1011 rtw89_warn(rtwdev, "[%s] HWSI is busy\n", __func__); 1012 return false; 1013 } 1014 1015 val = u32_encode_bits(addr, B_HWSI_DATA_ADDR) | 1016 u32_encode_bits(data, B_HWSI_DATA_VAL); 1017 1018 rtw89_phy_write32(rtwdev, addr_ofst[rf_path], val); 1019 1020 return true; 1021 } 1022 1023 static 1024 bool rtw89_phy_write_rf_a_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path, 1025 u32 addr, u32 mask, u32 data) 1026 { 1027 u32 val; 1028 1029 if (mask == RFREG_MASK) { 1030 val = data; 1031 } else { 1032 val = rtw89_phy_read_full_rf_v2_a(rtwdev, rf_path, addr); 1033 val &= ~mask; 1034 val |= (data << __ffs(mask)) & mask; 1035 } 1036 1037 return rtw89_phy_write_full_rf_v2_a(rtwdev, rf_path, addr, val); 1038 } 1039 1040 bool rtw89_phy_write_rf_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path, 1041 u32 addr, u32 mask, u32 data) 1042 { 1043 bool ad_sel = u32_get_bits(addr, RTW89_RF_ADDR_ADSEL_MASK); 1044 1045 if (rf_path >= rtwdev->chip->rf_path_num) { 1046 rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path); 1047 return INV_RF_DATA; 1048 } 1049 1050 if (ad_sel) 1051 return rtw89_phy_write_rf(rtwdev, rf_path, addr, mask, data); 1052 else 1053 return rtw89_phy_write_rf_a_v2(rtwdev, rf_path, addr, mask, data); 1054 } 1055 EXPORT_SYMBOL(rtw89_phy_write_rf_v2); 1056 1057 static bool rtw89_chip_rf_v1(struct rtw89_dev *rtwdev) 1058 { 1059 return rtwdev->chip->ops->write_rf == rtw89_phy_write_rf_v1; 1060 } 1061 1062 static void rtw89_phy_bb_reset(struct rtw89_dev *rtwdev, 1063 enum rtw89_phy_idx phy_idx) 1064 { 1065 const struct rtw89_chip_info *chip = rtwdev->chip; 1066 1067 chip->ops->bb_reset(rtwdev, phy_idx); 1068 } 1069 1070 static void rtw89_phy_config_bb_reg(struct rtw89_dev *rtwdev, 1071 const struct rtw89_reg2_def *reg, 1072 enum rtw89_rf_path rf_path, 1073 void *extra_data) 1074 { 1075 u32 addr; 1076 1077 if (reg->addr == 0xfe) { 1078 mdelay(50); 1079 } else if (reg->addr == 0xfd) { 1080 mdelay(5); 1081 } else if (reg->addr == 0xfc) { 1082 mdelay(1); 1083 } else if (reg->addr == 0xfb) { 1084 udelay(50); 1085 } else if (reg->addr == 0xfa) { 1086 udelay(5); 1087 } else if (reg->addr == 0xf9) { 1088 udelay(1); 1089 } else if (reg->data == BYPASS_CR_DATA) { 1090 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "Bypass CR 0x%x\n", reg->addr); 1091 } else { 1092 addr = reg->addr; 1093 1094 if ((uintptr_t)extra_data == RTW89_PHY_1) 1095 addr += rtw89_phy0_phy1_offset(rtwdev, reg->addr); 1096 1097 rtw89_phy_write32(rtwdev, addr, reg->data); 1098 } 1099 } 1100 1101 union rtw89_phy_bb_gain_arg { 1102 u32 addr; 1103 struct { 1104 union { 1105 u8 type; 1106 struct { 1107 u8 rxsc_start:4; 1108 u8 bw:4; 1109 }; 1110 }; 1111 u8 path; 1112 u8 gain_band; 1113 u8 cfg_type; 1114 }; 1115 } __packed; 1116 1117 static void 1118 rtw89_phy_cfg_bb_gain_error(struct rtw89_dev *rtwdev, 1119 union rtw89_phy_bb_gain_arg arg, u32 data) 1120 { 1121 struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax; 1122 u8 type = arg.type; 1123 u8 path = arg.path; 1124 u8 gband = arg.gain_band; 1125 int i; 1126 1127 switch (type) { 1128 case 0: 1129 for (i = 0; i < 4; i++, data >>= 8) 1130 gain->lna_gain[gband][path][i] = data & 0xff; 1131 break; 1132 case 1: 1133 for (i = 4; i < 7; i++, data >>= 8) 1134 gain->lna_gain[gband][path][i] = data & 0xff; 1135 break; 1136 case 2: 1137 for (i = 0; i < 2; i++, data >>= 8) 1138 gain->tia_gain[gband][path][i] = data & 0xff; 1139 break; 1140 default: 1141 rtw89_warn(rtwdev, 1142 "bb gain error {0x%x:0x%x} with unknown type: %d\n", 1143 arg.addr, data, type); 1144 break; 1145 } 1146 } 1147 1148 enum rtw89_phy_bb_rxsc_start_idx { 1149 RTW89_BB_RXSC_START_IDX_FULL = 0, 1150 RTW89_BB_RXSC_START_IDX_20 = 1, 1151 RTW89_BB_RXSC_START_IDX_20_1 = 5, 1152 RTW89_BB_RXSC_START_IDX_40 = 9, 1153 RTW89_BB_RXSC_START_IDX_80 = 13, 1154 }; 1155 1156 static void 1157 rtw89_phy_cfg_bb_rpl_ofst(struct rtw89_dev *rtwdev, 1158 union rtw89_phy_bb_gain_arg arg, u32 data) 1159 { 1160 struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax; 1161 u8 rxsc_start = arg.rxsc_start; 1162 u8 bw = arg.bw; 1163 u8 path = arg.path; 1164 u8 gband = arg.gain_band; 1165 u8 rxsc; 1166 s8 ofst; 1167 int i; 1168 1169 switch (bw) { 1170 case RTW89_CHANNEL_WIDTH_20: 1171 gain->rpl_ofst_20[gband][path] = (s8)data; 1172 break; 1173 case RTW89_CHANNEL_WIDTH_40: 1174 if (rxsc_start == RTW89_BB_RXSC_START_IDX_FULL) { 1175 gain->rpl_ofst_40[gband][path][0] = (s8)data; 1176 } else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20) { 1177 for (i = 0; i < 2; i++, data >>= 8) { 1178 rxsc = RTW89_BB_RXSC_START_IDX_20 + i; 1179 ofst = (s8)(data & 0xff); 1180 gain->rpl_ofst_40[gband][path][rxsc] = ofst; 1181 } 1182 } 1183 break; 1184 case RTW89_CHANNEL_WIDTH_80: 1185 if (rxsc_start == RTW89_BB_RXSC_START_IDX_FULL) { 1186 gain->rpl_ofst_80[gband][path][0] = (s8)data; 1187 } else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20) { 1188 for (i = 0; i < 4; i++, data >>= 8) { 1189 rxsc = RTW89_BB_RXSC_START_IDX_20 + i; 1190 ofst = (s8)(data & 0xff); 1191 gain->rpl_ofst_80[gband][path][rxsc] = ofst; 1192 } 1193 } else if (rxsc_start == RTW89_BB_RXSC_START_IDX_40) { 1194 for (i = 0; i < 2; i++, data >>= 8) { 1195 rxsc = RTW89_BB_RXSC_START_IDX_40 + i; 1196 ofst = (s8)(data & 0xff); 1197 gain->rpl_ofst_80[gband][path][rxsc] = ofst; 1198 } 1199 } 1200 break; 1201 case RTW89_CHANNEL_WIDTH_160: 1202 if (rxsc_start == RTW89_BB_RXSC_START_IDX_FULL) { 1203 gain->rpl_ofst_160[gband][path][0] = (s8)data; 1204 } else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20) { 1205 for (i = 0; i < 4; i++, data >>= 8) { 1206 rxsc = RTW89_BB_RXSC_START_IDX_20 + i; 1207 ofst = (s8)(data & 0xff); 1208 gain->rpl_ofst_160[gband][path][rxsc] = ofst; 1209 } 1210 } else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20_1) { 1211 for (i = 0; i < 4; i++, data >>= 8) { 1212 rxsc = RTW89_BB_RXSC_START_IDX_20_1 + i; 1213 ofst = (s8)(data & 0xff); 1214 gain->rpl_ofst_160[gband][path][rxsc] = ofst; 1215 } 1216 } else if (rxsc_start == RTW89_BB_RXSC_START_IDX_40) { 1217 for (i = 0; i < 4; i++, data >>= 8) { 1218 rxsc = RTW89_BB_RXSC_START_IDX_40 + i; 1219 ofst = (s8)(data & 0xff); 1220 gain->rpl_ofst_160[gband][path][rxsc] = ofst; 1221 } 1222 } else if (rxsc_start == RTW89_BB_RXSC_START_IDX_80) { 1223 for (i = 0; i < 2; i++, data >>= 8) { 1224 rxsc = RTW89_BB_RXSC_START_IDX_80 + i; 1225 ofst = (s8)(data & 0xff); 1226 gain->rpl_ofst_160[gband][path][rxsc] = ofst; 1227 } 1228 } 1229 break; 1230 default: 1231 rtw89_warn(rtwdev, 1232 "bb rpl ofst {0x%x:0x%x} with unknown bw: %d\n", 1233 arg.addr, data, bw); 1234 break; 1235 } 1236 } 1237 1238 static void 1239 rtw89_phy_cfg_bb_gain_bypass(struct rtw89_dev *rtwdev, 1240 union rtw89_phy_bb_gain_arg arg, u32 data) 1241 { 1242 struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax; 1243 u8 type = arg.type; 1244 u8 path = arg.path; 1245 u8 gband = arg.gain_band; 1246 int i; 1247 1248 switch (type) { 1249 case 0: 1250 for (i = 0; i < 4; i++, data >>= 8) 1251 gain->lna_gain_bypass[gband][path][i] = data & 0xff; 1252 break; 1253 case 1: 1254 for (i = 4; i < 7; i++, data >>= 8) 1255 gain->lna_gain_bypass[gband][path][i] = data & 0xff; 1256 break; 1257 default: 1258 rtw89_warn(rtwdev, 1259 "bb gain bypass {0x%x:0x%x} with unknown type: %d\n", 1260 arg.addr, data, type); 1261 break; 1262 } 1263 } 1264 1265 static void 1266 rtw89_phy_cfg_bb_gain_op1db(struct rtw89_dev *rtwdev, 1267 union rtw89_phy_bb_gain_arg arg, u32 data) 1268 { 1269 struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax; 1270 u8 type = arg.type; 1271 u8 path = arg.path; 1272 u8 gband = arg.gain_band; 1273 int i; 1274 1275 switch (type) { 1276 case 0: 1277 for (i = 0; i < 4; i++, data >>= 8) 1278 gain->lna_op1db[gband][path][i] = data & 0xff; 1279 break; 1280 case 1: 1281 for (i = 4; i < 7; i++, data >>= 8) 1282 gain->lna_op1db[gband][path][i] = data & 0xff; 1283 break; 1284 case 2: 1285 for (i = 0; i < 4; i++, data >>= 8) 1286 gain->tia_lna_op1db[gband][path][i] = data & 0xff; 1287 break; 1288 case 3: 1289 for (i = 4; i < 8; i++, data >>= 8) 1290 gain->tia_lna_op1db[gband][path][i] = data & 0xff; 1291 break; 1292 default: 1293 rtw89_warn(rtwdev, 1294 "bb gain op1db {0x%x:0x%x} with unknown type: %d\n", 1295 arg.addr, data, type); 1296 break; 1297 } 1298 } 1299 1300 static void rtw89_phy_config_bb_gain_ax(struct rtw89_dev *rtwdev, 1301 const struct rtw89_reg2_def *reg, 1302 enum rtw89_rf_path rf_path, 1303 void *extra_data) 1304 { 1305 const struct rtw89_chip_info *chip = rtwdev->chip; 1306 union rtw89_phy_bb_gain_arg arg = { .addr = reg->addr }; 1307 struct rtw89_efuse *efuse = &rtwdev->efuse; 1308 1309 if (arg.gain_band >= RTW89_BB_GAIN_BAND_NR) 1310 return; 1311 1312 if (arg.path >= chip->rf_path_num) 1313 return; 1314 1315 if (arg.addr >= 0xf9 && arg.addr <= 0xfe) { 1316 rtw89_warn(rtwdev, "bb gain table with flow ctrl\n"); 1317 return; 1318 } 1319 1320 switch (arg.cfg_type) { 1321 case 0: 1322 rtw89_phy_cfg_bb_gain_error(rtwdev, arg, reg->data); 1323 break; 1324 case 1: 1325 rtw89_phy_cfg_bb_rpl_ofst(rtwdev, arg, reg->data); 1326 break; 1327 case 2: 1328 rtw89_phy_cfg_bb_gain_bypass(rtwdev, arg, reg->data); 1329 break; 1330 case 3: 1331 rtw89_phy_cfg_bb_gain_op1db(rtwdev, arg, reg->data); 1332 break; 1333 case 4: 1334 /* This cfg_type is only used by rfe_type >= 50 with eFEM */ 1335 if (efuse->rfe_type < 50) 1336 break; 1337 fallthrough; 1338 default: 1339 rtw89_warn(rtwdev, 1340 "bb gain {0x%x:0x%x} with unknown cfg type: %d\n", 1341 arg.addr, reg->data, arg.cfg_type); 1342 break; 1343 } 1344 } 1345 1346 static void 1347 rtw89_phy_cofig_rf_reg_store(struct rtw89_dev *rtwdev, 1348 const struct rtw89_reg2_def *reg, 1349 enum rtw89_rf_path rf_path, 1350 struct rtw89_fw_h2c_rf_reg_info *info) 1351 { 1352 u16 idx = info->curr_idx % RTW89_H2C_RF_PAGE_SIZE; 1353 u8 page = info->curr_idx / RTW89_H2C_RF_PAGE_SIZE; 1354 1355 if (page >= RTW89_H2C_RF_PAGE_NUM) { 1356 rtw89_warn(rtwdev, "RF parameters exceed size. path=%d, idx=%d", 1357 rf_path, info->curr_idx); 1358 return; 1359 } 1360 1361 info->rtw89_phy_config_rf_h2c[page][idx] = 1362 cpu_to_le32((reg->addr << 20) | reg->data); 1363 info->curr_idx++; 1364 } 1365 1366 static int rtw89_phy_config_rf_reg_fw(struct rtw89_dev *rtwdev, 1367 struct rtw89_fw_h2c_rf_reg_info *info) 1368 { 1369 u16 remain = info->curr_idx; 1370 u16 len = 0; 1371 u8 i; 1372 int ret = 0; 1373 1374 if (remain > RTW89_H2C_RF_PAGE_NUM * RTW89_H2C_RF_PAGE_SIZE) { 1375 rtw89_warn(rtwdev, 1376 "rf reg h2c total len %d larger than %d\n", 1377 remain, RTW89_H2C_RF_PAGE_NUM * RTW89_H2C_RF_PAGE_SIZE); 1378 ret = -EINVAL; 1379 goto out; 1380 } 1381 1382 for (i = 0; i < RTW89_H2C_RF_PAGE_NUM && remain; i++, remain -= len) { 1383 len = remain > RTW89_H2C_RF_PAGE_SIZE ? RTW89_H2C_RF_PAGE_SIZE : remain; 1384 ret = rtw89_fw_h2c_rf_reg(rtwdev, info, len * 4, i); 1385 if (ret) 1386 goto out; 1387 } 1388 out: 1389 info->curr_idx = 0; 1390 1391 return ret; 1392 } 1393 1394 static void rtw89_phy_config_rf_reg_noio(struct rtw89_dev *rtwdev, 1395 const struct rtw89_reg2_def *reg, 1396 enum rtw89_rf_path rf_path, 1397 void *extra_data) 1398 { 1399 u32 addr = reg->addr; 1400 1401 if (addr == 0xfe || addr == 0xfd || addr == 0xfc || addr == 0xfb || 1402 addr == 0xfa || addr == 0xf9) 1403 return; 1404 1405 if (rtw89_chip_rf_v1(rtwdev) && addr < 0x100) 1406 return; 1407 1408 rtw89_phy_cofig_rf_reg_store(rtwdev, reg, rf_path, 1409 (struct rtw89_fw_h2c_rf_reg_info *)extra_data); 1410 } 1411 1412 static void rtw89_phy_config_rf_reg(struct rtw89_dev *rtwdev, 1413 const struct rtw89_reg2_def *reg, 1414 enum rtw89_rf_path rf_path, 1415 void *extra_data) 1416 { 1417 if (reg->addr == 0xfe) { 1418 mdelay(50); 1419 } else if (reg->addr == 0xfd) { 1420 mdelay(5); 1421 } else if (reg->addr == 0xfc) { 1422 mdelay(1); 1423 } else if (reg->addr == 0xfb) { 1424 udelay(50); 1425 } else if (reg->addr == 0xfa) { 1426 udelay(5); 1427 } else if (reg->addr == 0xf9) { 1428 udelay(1); 1429 } else { 1430 rtw89_write_rf(rtwdev, rf_path, reg->addr, 0xfffff, reg->data); 1431 rtw89_phy_cofig_rf_reg_store(rtwdev, reg, rf_path, 1432 (struct rtw89_fw_h2c_rf_reg_info *)extra_data); 1433 } 1434 } 1435 1436 void rtw89_phy_config_rf_reg_v1(struct rtw89_dev *rtwdev, 1437 const struct rtw89_reg2_def *reg, 1438 enum rtw89_rf_path rf_path, 1439 void *extra_data) 1440 { 1441 rtw89_write_rf(rtwdev, rf_path, reg->addr, RFREG_MASK, reg->data); 1442 1443 if (reg->addr < 0x100) 1444 return; 1445 1446 rtw89_phy_cofig_rf_reg_store(rtwdev, reg, rf_path, 1447 (struct rtw89_fw_h2c_rf_reg_info *)extra_data); 1448 } 1449 EXPORT_SYMBOL(rtw89_phy_config_rf_reg_v1); 1450 1451 static int rtw89_phy_sel_headline(struct rtw89_dev *rtwdev, 1452 const struct rtw89_phy_table *table, 1453 u32 *headline_size, u32 *headline_idx, 1454 u8 rfe, u8 cv) 1455 { 1456 const struct rtw89_reg2_def *reg; 1457 u32 headline; 1458 u32 compare, target; 1459 u8 rfe_para, cv_para; 1460 u8 cv_max = 0; 1461 bool case_matched = false; 1462 u32 i; 1463 1464 for (i = 0; i < table->n_regs; i++) { 1465 reg = &table->regs[i]; 1466 headline = get_phy_headline(reg->addr); 1467 if (headline != PHY_HEADLINE_VALID) 1468 break; 1469 } 1470 *headline_size = i; 1471 if (*headline_size == 0) 1472 return 0; 1473 1474 /* case 1: RFE match, CV match */ 1475 compare = get_phy_compare(rfe, cv); 1476 for (i = 0; i < *headline_size; i++) { 1477 reg = &table->regs[i]; 1478 target = get_phy_target(reg->addr); 1479 if (target == compare) { 1480 *headline_idx = i; 1481 return 0; 1482 } 1483 } 1484 1485 /* case 2: RFE match, CV don't care */ 1486 compare = get_phy_compare(rfe, PHY_COND_DONT_CARE); 1487 for (i = 0; i < *headline_size; i++) { 1488 reg = &table->regs[i]; 1489 target = get_phy_target(reg->addr); 1490 if (target == compare) { 1491 *headline_idx = i; 1492 return 0; 1493 } 1494 } 1495 1496 /* case 3: RFE match, CV max in table */ 1497 for (i = 0; i < *headline_size; i++) { 1498 reg = &table->regs[i]; 1499 rfe_para = get_phy_cond_rfe(reg->addr); 1500 cv_para = get_phy_cond_cv(reg->addr); 1501 if (rfe_para == rfe) { 1502 if (cv_para >= cv_max) { 1503 cv_max = cv_para; 1504 *headline_idx = i; 1505 case_matched = true; 1506 } 1507 } 1508 } 1509 1510 if (case_matched) 1511 return 0; 1512 1513 /* case 4: RFE don't care, CV max in table */ 1514 for (i = 0; i < *headline_size; i++) { 1515 reg = &table->regs[i]; 1516 rfe_para = get_phy_cond_rfe(reg->addr); 1517 cv_para = get_phy_cond_cv(reg->addr); 1518 if (rfe_para == PHY_COND_DONT_CARE) { 1519 if (cv_para >= cv_max) { 1520 cv_max = cv_para; 1521 *headline_idx = i; 1522 case_matched = true; 1523 } 1524 } 1525 } 1526 1527 if (case_matched) 1528 return 0; 1529 1530 return -EINVAL; 1531 } 1532 1533 static void rtw89_phy_init_reg(struct rtw89_dev *rtwdev, 1534 const struct rtw89_phy_table *table, 1535 void (*config)(struct rtw89_dev *rtwdev, 1536 const struct rtw89_reg2_def *reg, 1537 enum rtw89_rf_path rf_path, 1538 void *data), 1539 void *extra_data) 1540 { 1541 const struct rtw89_reg2_def *reg; 1542 enum rtw89_rf_path rf_path = table->rf_path; 1543 u8 rfe = rtwdev->efuse.rfe_type; 1544 u8 cv = rtwdev->hal.cv; 1545 u32 i; 1546 u32 headline_size = 0, headline_idx = 0; 1547 u32 target = 0, cfg_target; 1548 u8 cond; 1549 bool is_matched = true; 1550 bool target_found = false; 1551 int ret; 1552 1553 ret = rtw89_phy_sel_headline(rtwdev, table, &headline_size, 1554 &headline_idx, rfe, cv); 1555 if (ret) { 1556 rtw89_err(rtwdev, "invalid PHY package: %d/%d\n", rfe, cv); 1557 return; 1558 } 1559 1560 cfg_target = get_phy_target(table->regs[headline_idx].addr); 1561 for (i = headline_size; i < table->n_regs; i++) { 1562 reg = &table->regs[i]; 1563 cond = get_phy_cond(reg->addr); 1564 switch (cond) { 1565 case PHY_COND_BRANCH_IF: 1566 case PHY_COND_BRANCH_ELIF: 1567 target = get_phy_target(reg->addr); 1568 break; 1569 case PHY_COND_BRANCH_ELSE: 1570 is_matched = false; 1571 if (!target_found) { 1572 rtw89_warn(rtwdev, "failed to load CR %x/%x\n", 1573 reg->addr, reg->data); 1574 return; 1575 } 1576 break; 1577 case PHY_COND_BRANCH_END: 1578 is_matched = true; 1579 target_found = false; 1580 break; 1581 case PHY_COND_CHECK: 1582 if (target_found) { 1583 is_matched = false; 1584 break; 1585 } 1586 1587 if (target == cfg_target) { 1588 is_matched = true; 1589 target_found = true; 1590 } else { 1591 is_matched = false; 1592 target_found = false; 1593 } 1594 break; 1595 default: 1596 if (is_matched) 1597 config(rtwdev, reg, rf_path, extra_data); 1598 break; 1599 } 1600 } 1601 } 1602 1603 void rtw89_phy_init_bb_reg(struct rtw89_dev *rtwdev) 1604 { 1605 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; 1606 const struct rtw89_chip_info *chip = rtwdev->chip; 1607 const struct rtw89_phy_table *bb_table; 1608 const struct rtw89_phy_table *bb_gain_table; 1609 1610 bb_table = elm_info->bb_tbl ? elm_info->bb_tbl : chip->bb_table; 1611 rtw89_phy_init_reg(rtwdev, bb_table, rtw89_phy_config_bb_reg, NULL); 1612 if (rtwdev->dbcc_en) 1613 rtw89_phy_init_reg(rtwdev, bb_table, rtw89_phy_config_bb_reg, 1614 (void *)RTW89_PHY_1); 1615 rtw89_chip_init_txpwr_unit(rtwdev, RTW89_PHY_0); 1616 1617 bb_gain_table = elm_info->bb_gain ? elm_info->bb_gain : chip->bb_gain_table; 1618 if (bb_gain_table) 1619 rtw89_phy_init_reg(rtwdev, bb_gain_table, 1620 chip->phy_def->config_bb_gain, NULL); 1621 rtw89_phy_bb_reset(rtwdev, RTW89_PHY_0); 1622 } 1623 1624 static u32 rtw89_phy_nctl_poll(struct rtw89_dev *rtwdev) 1625 { 1626 rtw89_phy_write32(rtwdev, 0x8080, 0x4); 1627 udelay(1); 1628 return rtw89_phy_read32(rtwdev, 0x8080); 1629 } 1630 1631 void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev, bool noio) 1632 { 1633 void (*config)(struct rtw89_dev *rtwdev, const struct rtw89_reg2_def *reg, 1634 enum rtw89_rf_path rf_path, void *data); 1635 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; 1636 const struct rtw89_chip_info *chip = rtwdev->chip; 1637 const struct rtw89_phy_table *rf_table; 1638 struct rtw89_fw_h2c_rf_reg_info *rf_reg_info; 1639 u8 path; 1640 1641 rf_reg_info = kzalloc(sizeof(*rf_reg_info), GFP_KERNEL); 1642 if (!rf_reg_info) 1643 return; 1644 1645 for (path = RF_PATH_A; path < chip->rf_path_num; path++) { 1646 rf_table = elm_info->rf_radio[path] ? 1647 elm_info->rf_radio[path] : chip->rf_table[path]; 1648 rf_reg_info->rf_path = rf_table->rf_path; 1649 if (noio) 1650 config = rtw89_phy_config_rf_reg_noio; 1651 else 1652 config = rf_table->config ? rf_table->config : 1653 rtw89_phy_config_rf_reg; 1654 rtw89_phy_init_reg(rtwdev, rf_table, config, (void *)rf_reg_info); 1655 if (rtw89_phy_config_rf_reg_fw(rtwdev, rf_reg_info)) 1656 rtw89_warn(rtwdev, "rf path %d reg h2c config failed\n", 1657 rf_reg_info->rf_path); 1658 } 1659 kfree(rf_reg_info); 1660 } 1661 1662 static void rtw89_phy_preinit_rf_nctl_ax(struct rtw89_dev *rtwdev) 1663 { 1664 const struct rtw89_chip_info *chip = rtwdev->chip; 1665 u32 val; 1666 int ret; 1667 1668 /* IQK/DPK clock & reset */ 1669 rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, 0x3); 1670 rtw89_phy_write32_set(rtwdev, R_GNT_BT_WGT_EN, 0x1); 1671 rtw89_phy_write32_set(rtwdev, R_P0_PATH_RST, 0x8000000); 1672 if (chip->chip_id != RTL8851B) 1673 rtw89_phy_write32_set(rtwdev, R_P1_PATH_RST, 0x8000000); 1674 if (chip->chip_id == RTL8852B) 1675 rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, 0x2); 1676 1677 /* check 0x8080 */ 1678 rtw89_phy_write32(rtwdev, R_NCTL_CFG, 0x8); 1679 1680 ret = read_poll_timeout(rtw89_phy_nctl_poll, val, val == 0x4, 10, 1681 1000, false, rtwdev); 1682 if (ret) 1683 rtw89_err(rtwdev, "failed to poll nctl block\n"); 1684 } 1685 1686 static void rtw89_phy_init_rf_nctl(struct rtw89_dev *rtwdev) 1687 { 1688 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; 1689 const struct rtw89_chip_info *chip = rtwdev->chip; 1690 const struct rtw89_phy_table *nctl_table; 1691 1692 rtw89_phy_preinit_rf_nctl(rtwdev); 1693 1694 nctl_table = elm_info->rf_nctl ? elm_info->rf_nctl : chip->nctl_table; 1695 rtw89_phy_init_reg(rtwdev, nctl_table, rtw89_phy_config_bb_reg, NULL); 1696 1697 if (chip->nctl_post_table) 1698 rtw89_rfk_parser(rtwdev, chip->nctl_post_table); 1699 } 1700 1701 static u32 rtw89_phy0_phy1_offset_ax(struct rtw89_dev *rtwdev, u32 addr) 1702 { 1703 u32 phy_page = addr >> 8; 1704 u32 ofst = 0; 1705 1706 switch (phy_page) { 1707 case 0x6: 1708 case 0x7: 1709 case 0x8: 1710 case 0x9: 1711 case 0xa: 1712 case 0xb: 1713 case 0xc: 1714 case 0xd: 1715 case 0x19: 1716 case 0x1a: 1717 case 0x1b: 1718 ofst = 0x2000; 1719 break; 1720 default: 1721 /* warning case */ 1722 ofst = 0; 1723 break; 1724 } 1725 1726 if (phy_page >= 0x40 && phy_page <= 0x4f) 1727 ofst = 0x2000; 1728 1729 return ofst; 1730 } 1731 1732 void rtw89_phy_write32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask, 1733 u32 data, enum rtw89_phy_idx phy_idx) 1734 { 1735 if (rtwdev->dbcc_en && phy_idx == RTW89_PHY_1) 1736 addr += rtw89_phy0_phy1_offset(rtwdev, addr); 1737 rtw89_phy_write32_mask(rtwdev, addr, mask, data); 1738 } 1739 EXPORT_SYMBOL(rtw89_phy_write32_idx); 1740 1741 u32 rtw89_phy_read32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask, 1742 enum rtw89_phy_idx phy_idx) 1743 { 1744 if (rtwdev->dbcc_en && phy_idx == RTW89_PHY_1) 1745 addr += rtw89_phy0_phy1_offset(rtwdev, addr); 1746 return rtw89_phy_read32_mask(rtwdev, addr, mask); 1747 } 1748 EXPORT_SYMBOL(rtw89_phy_read32_idx); 1749 1750 void rtw89_phy_set_phy_regs(struct rtw89_dev *rtwdev, u32 addr, u32 mask, 1751 u32 val) 1752 { 1753 rtw89_phy_write32_idx(rtwdev, addr, mask, val, RTW89_PHY_0); 1754 1755 if (!rtwdev->dbcc_en) 1756 return; 1757 1758 rtw89_phy_write32_idx(rtwdev, addr, mask, val, RTW89_PHY_1); 1759 } 1760 EXPORT_SYMBOL(rtw89_phy_set_phy_regs); 1761 1762 void rtw89_phy_write_reg3_tbl(struct rtw89_dev *rtwdev, 1763 const struct rtw89_phy_reg3_tbl *tbl) 1764 { 1765 const struct rtw89_reg3_def *reg3; 1766 int i; 1767 1768 for (i = 0; i < tbl->size; i++) { 1769 reg3 = &tbl->reg3[i]; 1770 rtw89_phy_write32_mask(rtwdev, reg3->addr, reg3->mask, reg3->data); 1771 } 1772 } 1773 EXPORT_SYMBOL(rtw89_phy_write_reg3_tbl); 1774 1775 static const u8 rtw89_rs_idx_num_ax[] = { 1776 [RTW89_RS_CCK] = RTW89_RATE_CCK_NUM, 1777 [RTW89_RS_OFDM] = RTW89_RATE_OFDM_NUM, 1778 [RTW89_RS_MCS] = RTW89_RATE_MCS_NUM_AX, 1779 [RTW89_RS_HEDCM] = RTW89_RATE_HEDCM_NUM, 1780 [RTW89_RS_OFFSET] = RTW89_RATE_OFFSET_NUM_AX, 1781 }; 1782 1783 static const u8 rtw89_rs_nss_num_ax[] = { 1784 [RTW89_RS_CCK] = 1, 1785 [RTW89_RS_OFDM] = 1, 1786 [RTW89_RS_MCS] = RTW89_NSS_NUM, 1787 [RTW89_RS_HEDCM] = RTW89_NSS_HEDCM_NUM, 1788 [RTW89_RS_OFFSET] = 1, 1789 }; 1790 1791 s8 *rtw89_phy_raw_byr_seek(struct rtw89_dev *rtwdev, 1792 struct rtw89_txpwr_byrate *head, 1793 const struct rtw89_rate_desc *desc) 1794 { 1795 switch (desc->rs) { 1796 case RTW89_RS_CCK: 1797 return &head->cck[desc->idx]; 1798 case RTW89_RS_OFDM: 1799 return &head->ofdm[desc->idx]; 1800 case RTW89_RS_MCS: 1801 return &head->mcs[desc->ofdma][desc->nss][desc->idx]; 1802 case RTW89_RS_HEDCM: 1803 return &head->hedcm[desc->ofdma][desc->nss][desc->idx]; 1804 case RTW89_RS_OFFSET: 1805 return &head->offset[desc->idx]; 1806 default: 1807 rtw89_warn(rtwdev, "unrecognized byr rs: %d\n", desc->rs); 1808 return &head->trap; 1809 } 1810 } 1811 1812 void rtw89_phy_load_txpwr_byrate(struct rtw89_dev *rtwdev, 1813 const struct rtw89_txpwr_table *tbl) 1814 { 1815 const struct rtw89_txpwr_byrate_cfg *cfg = tbl->data; 1816 const struct rtw89_txpwr_byrate_cfg *end = cfg + tbl->size; 1817 struct rtw89_txpwr_byrate *byr_head; 1818 struct rtw89_rate_desc desc = {}; 1819 s8 *byr; 1820 u32 data; 1821 u8 i; 1822 1823 for (; cfg < end; cfg++) { 1824 byr_head = &rtwdev->byr[cfg->band][0]; 1825 desc.rs = cfg->rs; 1826 desc.nss = cfg->nss; 1827 data = cfg->data; 1828 1829 for (i = 0; i < cfg->len; i++, data >>= 8) { 1830 desc.idx = cfg->shf + i; 1831 byr = rtw89_phy_raw_byr_seek(rtwdev, byr_head, &desc); 1832 *byr = data & 0xff; 1833 } 1834 } 1835 } 1836 EXPORT_SYMBOL(rtw89_phy_load_txpwr_byrate); 1837 1838 static s8 rtw89_phy_txpwr_rf_to_mac(struct rtw89_dev *rtwdev, s8 txpwr_rf) 1839 { 1840 const struct rtw89_chip_info *chip = rtwdev->chip; 1841 1842 return txpwr_rf >> (chip->txpwr_factor_rf - chip->txpwr_factor_mac); 1843 } 1844 1845 s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev, u8 band, u8 bw, 1846 const struct rtw89_rate_desc *rate_desc) 1847 { 1848 struct rtw89_txpwr_byrate *byr_head; 1849 s8 *byr; 1850 1851 if (rate_desc->rs == RTW89_RS_CCK) 1852 band = RTW89_BAND_2G; 1853 1854 byr_head = &rtwdev->byr[band][bw]; 1855 byr = rtw89_phy_raw_byr_seek(rtwdev, byr_head, rate_desc); 1856 1857 return rtw89_phy_txpwr_rf_to_mac(rtwdev, *byr); 1858 } 1859 1860 static u8 rtw89_channel_6g_to_idx(struct rtw89_dev *rtwdev, u8 channel_6g) 1861 { 1862 switch (channel_6g) { 1863 case 1 ... 29: 1864 return (channel_6g - 1) / 2; 1865 case 33 ... 61: 1866 return (channel_6g - 3) / 2; 1867 case 65 ... 93: 1868 return (channel_6g - 5) / 2; 1869 case 97 ... 125: 1870 return (channel_6g - 7) / 2; 1871 case 129 ... 157: 1872 return (channel_6g - 9) / 2; 1873 case 161 ... 189: 1874 return (channel_6g - 11) / 2; 1875 case 193 ... 221: 1876 return (channel_6g - 13) / 2; 1877 case 225 ... 253: 1878 return (channel_6g - 15) / 2; 1879 default: 1880 rtw89_warn(rtwdev, "unknown 6g channel: %d\n", channel_6g); 1881 return 0; 1882 } 1883 } 1884 1885 static u8 rtw89_channel_to_idx(struct rtw89_dev *rtwdev, u8 band, u8 channel) 1886 { 1887 if (band == RTW89_BAND_6G) 1888 return rtw89_channel_6g_to_idx(rtwdev, channel); 1889 1890 switch (channel) { 1891 case 1 ... 14: 1892 return channel - 1; 1893 case 36 ... 64: 1894 return (channel - 36) / 2; 1895 case 100 ... 144: 1896 return ((channel - 100) / 2) + 15; 1897 case 149 ... 177: 1898 return ((channel - 149) / 2) + 38; 1899 default: 1900 rtw89_warn(rtwdev, "unknown channel: %d\n", channel); 1901 return 0; 1902 } 1903 } 1904 1905 s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band, 1906 u8 bw, u8 ntx, u8 rs, u8 bf, u8 ch) 1907 { 1908 const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms; 1909 const struct rtw89_txpwr_rule_2ghz *rule_2ghz = &rfe_parms->rule_2ghz; 1910 const struct rtw89_txpwr_rule_5ghz *rule_5ghz = &rfe_parms->rule_5ghz; 1911 const struct rtw89_txpwr_rule_6ghz *rule_6ghz = &rfe_parms->rule_6ghz; 1912 struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory; 1913 enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band); 1914 u32 freq = ieee80211_channel_to_frequency(ch, nl_band); 1915 u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch); 1916 u8 regd = rtw89_regd_get(rtwdev, band); 1917 u8 reg6 = regulatory->reg_6ghz_power; 1918 s8 lmt = 0, sar; 1919 1920 switch (band) { 1921 case RTW89_BAND_2G: 1922 lmt = (*rule_2ghz->lmt)[bw][ntx][rs][bf][regd][ch_idx]; 1923 if (lmt) 1924 break; 1925 1926 lmt = (*rule_2ghz->lmt)[bw][ntx][rs][bf][RTW89_WW][ch_idx]; 1927 break; 1928 case RTW89_BAND_5G: 1929 lmt = (*rule_5ghz->lmt)[bw][ntx][rs][bf][regd][ch_idx]; 1930 if (lmt) 1931 break; 1932 1933 lmt = (*rule_5ghz->lmt)[bw][ntx][rs][bf][RTW89_WW][ch_idx]; 1934 break; 1935 case RTW89_BAND_6G: 1936 lmt = (*rule_6ghz->lmt)[bw][ntx][rs][bf][regd][reg6][ch_idx]; 1937 if (lmt) 1938 break; 1939 1940 lmt = (*rule_6ghz->lmt)[bw][ntx][rs][bf][RTW89_WW] 1941 [RTW89_REG_6GHZ_POWER_DFLT] 1942 [ch_idx]; 1943 break; 1944 default: 1945 rtw89_warn(rtwdev, "unknown band type: %d\n", band); 1946 return 0; 1947 } 1948 1949 lmt = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt); 1950 sar = rtw89_query_sar(rtwdev, freq); 1951 1952 return min(lmt, sar); 1953 } 1954 EXPORT_SYMBOL(rtw89_phy_read_txpwr_limit); 1955 1956 #define __fill_txpwr_limit_nonbf_bf(ptr, band, bw, ntx, rs, ch) \ 1957 do { \ 1958 u8 __i; \ 1959 for (__i = 0; __i < RTW89_BF_NUM; __i++) \ 1960 ptr[__i] = rtw89_phy_read_txpwr_limit(rtwdev, \ 1961 band, \ 1962 bw, ntx, \ 1963 rs, __i, \ 1964 (ch)); \ 1965 } while (0) 1966 1967 static void rtw89_phy_fill_txpwr_limit_20m_ax(struct rtw89_dev *rtwdev, 1968 struct rtw89_txpwr_limit_ax *lmt, 1969 u8 band, u8 ntx, u8 ch) 1970 { 1971 __fill_txpwr_limit_nonbf_bf(lmt->cck_20m, band, RTW89_CHANNEL_WIDTH_20, 1972 ntx, RTW89_RS_CCK, ch); 1973 __fill_txpwr_limit_nonbf_bf(lmt->cck_40m, band, RTW89_CHANNEL_WIDTH_40, 1974 ntx, RTW89_RS_CCK, ch); 1975 __fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20, 1976 ntx, RTW89_RS_OFDM, ch); 1977 __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band, 1978 RTW89_CHANNEL_WIDTH_20, 1979 ntx, RTW89_RS_MCS, ch); 1980 } 1981 1982 static void rtw89_phy_fill_txpwr_limit_40m_ax(struct rtw89_dev *rtwdev, 1983 struct rtw89_txpwr_limit_ax *lmt, 1984 u8 band, u8 ntx, u8 ch, u8 pri_ch) 1985 { 1986 __fill_txpwr_limit_nonbf_bf(lmt->cck_20m, band, RTW89_CHANNEL_WIDTH_20, 1987 ntx, RTW89_RS_CCK, ch - 2); 1988 __fill_txpwr_limit_nonbf_bf(lmt->cck_40m, band, RTW89_CHANNEL_WIDTH_40, 1989 ntx, RTW89_RS_CCK, ch); 1990 __fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20, 1991 ntx, RTW89_RS_OFDM, pri_ch); 1992 __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band, 1993 RTW89_CHANNEL_WIDTH_20, 1994 ntx, RTW89_RS_MCS, ch - 2); 1995 __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], band, 1996 RTW89_CHANNEL_WIDTH_20, 1997 ntx, RTW89_RS_MCS, ch + 2); 1998 __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], band, 1999 RTW89_CHANNEL_WIDTH_40, 2000 ntx, RTW89_RS_MCS, ch); 2001 } 2002 2003 static void rtw89_phy_fill_txpwr_limit_80m_ax(struct rtw89_dev *rtwdev, 2004 struct rtw89_txpwr_limit_ax *lmt, 2005 u8 band, u8 ntx, u8 ch, u8 pri_ch) 2006 { 2007 s8 val_0p5_n[RTW89_BF_NUM]; 2008 s8 val_0p5_p[RTW89_BF_NUM]; 2009 u8 i; 2010 2011 __fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20, 2012 ntx, RTW89_RS_OFDM, pri_ch); 2013 __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band, 2014 RTW89_CHANNEL_WIDTH_20, 2015 ntx, RTW89_RS_MCS, ch - 6); 2016 __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], band, 2017 RTW89_CHANNEL_WIDTH_20, 2018 ntx, RTW89_RS_MCS, ch - 2); 2019 __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[2], band, 2020 RTW89_CHANNEL_WIDTH_20, 2021 ntx, RTW89_RS_MCS, ch + 2); 2022 __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[3], band, 2023 RTW89_CHANNEL_WIDTH_20, 2024 ntx, RTW89_RS_MCS, ch + 6); 2025 __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], band, 2026 RTW89_CHANNEL_WIDTH_40, 2027 ntx, RTW89_RS_MCS, ch - 4); 2028 __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[1], band, 2029 RTW89_CHANNEL_WIDTH_40, 2030 ntx, RTW89_RS_MCS, ch + 4); 2031 __fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[0], band, 2032 RTW89_CHANNEL_WIDTH_80, 2033 ntx, RTW89_RS_MCS, ch); 2034 2035 __fill_txpwr_limit_nonbf_bf(val_0p5_n, band, RTW89_CHANNEL_WIDTH_40, 2036 ntx, RTW89_RS_MCS, ch - 4); 2037 __fill_txpwr_limit_nonbf_bf(val_0p5_p, band, RTW89_CHANNEL_WIDTH_40, 2038 ntx, RTW89_RS_MCS, ch + 4); 2039 2040 for (i = 0; i < RTW89_BF_NUM; i++) 2041 lmt->mcs_40m_0p5[i] = min_t(s8, val_0p5_n[i], val_0p5_p[i]); 2042 } 2043 2044 static void rtw89_phy_fill_txpwr_limit_160m_ax(struct rtw89_dev *rtwdev, 2045 struct rtw89_txpwr_limit_ax *lmt, 2046 u8 band, u8 ntx, u8 ch, u8 pri_ch) 2047 { 2048 s8 val_0p5_n[RTW89_BF_NUM]; 2049 s8 val_0p5_p[RTW89_BF_NUM]; 2050 s8 val_2p5_n[RTW89_BF_NUM]; 2051 s8 val_2p5_p[RTW89_BF_NUM]; 2052 u8 i; 2053 2054 /* fill ofdm section */ 2055 __fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20, 2056 ntx, RTW89_RS_OFDM, pri_ch); 2057 2058 /* fill mcs 20m section */ 2059 __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band, 2060 RTW89_CHANNEL_WIDTH_20, 2061 ntx, RTW89_RS_MCS, ch - 14); 2062 __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], band, 2063 RTW89_CHANNEL_WIDTH_20, 2064 ntx, RTW89_RS_MCS, ch - 10); 2065 __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[2], band, 2066 RTW89_CHANNEL_WIDTH_20, 2067 ntx, RTW89_RS_MCS, ch - 6); 2068 __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[3], band, 2069 RTW89_CHANNEL_WIDTH_20, 2070 ntx, RTW89_RS_MCS, ch - 2); 2071 __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[4], band, 2072 RTW89_CHANNEL_WIDTH_20, 2073 ntx, RTW89_RS_MCS, ch + 2); 2074 __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[5], band, 2075 RTW89_CHANNEL_WIDTH_20, 2076 ntx, RTW89_RS_MCS, ch + 6); 2077 __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[6], band, 2078 RTW89_CHANNEL_WIDTH_20, 2079 ntx, RTW89_RS_MCS, ch + 10); 2080 __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[7], band, 2081 RTW89_CHANNEL_WIDTH_20, 2082 ntx, RTW89_RS_MCS, ch + 14); 2083 2084 /* fill mcs 40m section */ 2085 __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], band, 2086 RTW89_CHANNEL_WIDTH_40, 2087 ntx, RTW89_RS_MCS, ch - 12); 2088 __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[1], band, 2089 RTW89_CHANNEL_WIDTH_40, 2090 ntx, RTW89_RS_MCS, ch - 4); 2091 __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[2], band, 2092 RTW89_CHANNEL_WIDTH_40, 2093 ntx, RTW89_RS_MCS, ch + 4); 2094 __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[3], band, 2095 RTW89_CHANNEL_WIDTH_40, 2096 ntx, RTW89_RS_MCS, ch + 12); 2097 2098 /* fill mcs 80m section */ 2099 __fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[0], band, 2100 RTW89_CHANNEL_WIDTH_80, 2101 ntx, RTW89_RS_MCS, ch - 8); 2102 __fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[1], band, 2103 RTW89_CHANNEL_WIDTH_80, 2104 ntx, RTW89_RS_MCS, ch + 8); 2105 2106 /* fill mcs 160m section */ 2107 __fill_txpwr_limit_nonbf_bf(lmt->mcs_160m, band, 2108 RTW89_CHANNEL_WIDTH_160, 2109 ntx, RTW89_RS_MCS, ch); 2110 2111 /* fill mcs 40m 0p5 section */ 2112 __fill_txpwr_limit_nonbf_bf(val_0p5_n, band, RTW89_CHANNEL_WIDTH_40, 2113 ntx, RTW89_RS_MCS, ch - 4); 2114 __fill_txpwr_limit_nonbf_bf(val_0p5_p, band, RTW89_CHANNEL_WIDTH_40, 2115 ntx, RTW89_RS_MCS, ch + 4); 2116 2117 for (i = 0; i < RTW89_BF_NUM; i++) 2118 lmt->mcs_40m_0p5[i] = min_t(s8, val_0p5_n[i], val_0p5_p[i]); 2119 2120 /* fill mcs 40m 2p5 section */ 2121 __fill_txpwr_limit_nonbf_bf(val_2p5_n, band, RTW89_CHANNEL_WIDTH_40, 2122 ntx, RTW89_RS_MCS, ch - 8); 2123 __fill_txpwr_limit_nonbf_bf(val_2p5_p, band, RTW89_CHANNEL_WIDTH_40, 2124 ntx, RTW89_RS_MCS, ch + 8); 2125 2126 for (i = 0; i < RTW89_BF_NUM; i++) 2127 lmt->mcs_40m_2p5[i] = min_t(s8, val_2p5_n[i], val_2p5_p[i]); 2128 } 2129 2130 static 2131 void rtw89_phy_fill_txpwr_limit_ax(struct rtw89_dev *rtwdev, 2132 const struct rtw89_chan *chan, 2133 struct rtw89_txpwr_limit_ax *lmt, 2134 u8 ntx) 2135 { 2136 u8 band = chan->band_type; 2137 u8 pri_ch = chan->primary_channel; 2138 u8 ch = chan->channel; 2139 u8 bw = chan->band_width; 2140 2141 memset(lmt, 0, sizeof(*lmt)); 2142 2143 switch (bw) { 2144 case RTW89_CHANNEL_WIDTH_20: 2145 rtw89_phy_fill_txpwr_limit_20m_ax(rtwdev, lmt, band, ntx, ch); 2146 break; 2147 case RTW89_CHANNEL_WIDTH_40: 2148 rtw89_phy_fill_txpwr_limit_40m_ax(rtwdev, lmt, band, ntx, ch, 2149 pri_ch); 2150 break; 2151 case RTW89_CHANNEL_WIDTH_80: 2152 rtw89_phy_fill_txpwr_limit_80m_ax(rtwdev, lmt, band, ntx, ch, 2153 pri_ch); 2154 break; 2155 case RTW89_CHANNEL_WIDTH_160: 2156 rtw89_phy_fill_txpwr_limit_160m_ax(rtwdev, lmt, band, ntx, ch, 2157 pri_ch); 2158 break; 2159 } 2160 } 2161 2162 s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band, 2163 u8 ru, u8 ntx, u8 ch) 2164 { 2165 const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms; 2166 const struct rtw89_txpwr_rule_2ghz *rule_2ghz = &rfe_parms->rule_2ghz; 2167 const struct rtw89_txpwr_rule_5ghz *rule_5ghz = &rfe_parms->rule_5ghz; 2168 const struct rtw89_txpwr_rule_6ghz *rule_6ghz = &rfe_parms->rule_6ghz; 2169 struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory; 2170 enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band); 2171 u32 freq = ieee80211_channel_to_frequency(ch, nl_band); 2172 u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch); 2173 u8 regd = rtw89_regd_get(rtwdev, band); 2174 u8 reg6 = regulatory->reg_6ghz_power; 2175 s8 lmt_ru = 0, sar; 2176 2177 switch (band) { 2178 case RTW89_BAND_2G: 2179 lmt_ru = (*rule_2ghz->lmt_ru)[ru][ntx][regd][ch_idx]; 2180 if (lmt_ru) 2181 break; 2182 2183 lmt_ru = (*rule_2ghz->lmt_ru)[ru][ntx][RTW89_WW][ch_idx]; 2184 break; 2185 case RTW89_BAND_5G: 2186 lmt_ru = (*rule_5ghz->lmt_ru)[ru][ntx][regd][ch_idx]; 2187 if (lmt_ru) 2188 break; 2189 2190 lmt_ru = (*rule_5ghz->lmt_ru)[ru][ntx][RTW89_WW][ch_idx]; 2191 break; 2192 case RTW89_BAND_6G: 2193 lmt_ru = (*rule_6ghz->lmt_ru)[ru][ntx][regd][reg6][ch_idx]; 2194 if (lmt_ru) 2195 break; 2196 2197 lmt_ru = (*rule_6ghz->lmt_ru)[ru][ntx][RTW89_WW] 2198 [RTW89_REG_6GHZ_POWER_DFLT] 2199 [ch_idx]; 2200 break; 2201 default: 2202 rtw89_warn(rtwdev, "unknown band type: %d\n", band); 2203 return 0; 2204 } 2205 2206 lmt_ru = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt_ru); 2207 sar = rtw89_query_sar(rtwdev, freq); 2208 2209 return min(lmt_ru, sar); 2210 } 2211 2212 static void 2213 rtw89_phy_fill_txpwr_limit_ru_20m_ax(struct rtw89_dev *rtwdev, 2214 struct rtw89_txpwr_limit_ru_ax *lmt_ru, 2215 u8 band, u8 ntx, u8 ch) 2216 { 2217 lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2218 RTW89_RU26, 2219 ntx, ch); 2220 lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2221 RTW89_RU52, 2222 ntx, ch); 2223 lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2224 RTW89_RU106, 2225 ntx, ch); 2226 } 2227 2228 static void 2229 rtw89_phy_fill_txpwr_limit_ru_40m_ax(struct rtw89_dev *rtwdev, 2230 struct rtw89_txpwr_limit_ru_ax *lmt_ru, 2231 u8 band, u8 ntx, u8 ch) 2232 { 2233 lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2234 RTW89_RU26, 2235 ntx, ch - 2); 2236 lmt_ru->ru26[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2237 RTW89_RU26, 2238 ntx, ch + 2); 2239 lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2240 RTW89_RU52, 2241 ntx, ch - 2); 2242 lmt_ru->ru52[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2243 RTW89_RU52, 2244 ntx, ch + 2); 2245 lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2246 RTW89_RU106, 2247 ntx, ch - 2); 2248 lmt_ru->ru106[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2249 RTW89_RU106, 2250 ntx, ch + 2); 2251 } 2252 2253 static void 2254 rtw89_phy_fill_txpwr_limit_ru_80m_ax(struct rtw89_dev *rtwdev, 2255 struct rtw89_txpwr_limit_ru_ax *lmt_ru, 2256 u8 band, u8 ntx, u8 ch) 2257 { 2258 lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2259 RTW89_RU26, 2260 ntx, ch - 6); 2261 lmt_ru->ru26[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2262 RTW89_RU26, 2263 ntx, ch - 2); 2264 lmt_ru->ru26[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2265 RTW89_RU26, 2266 ntx, ch + 2); 2267 lmt_ru->ru26[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2268 RTW89_RU26, 2269 ntx, ch + 6); 2270 lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2271 RTW89_RU52, 2272 ntx, ch - 6); 2273 lmt_ru->ru52[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2274 RTW89_RU52, 2275 ntx, ch - 2); 2276 lmt_ru->ru52[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2277 RTW89_RU52, 2278 ntx, ch + 2); 2279 lmt_ru->ru52[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2280 RTW89_RU52, 2281 ntx, ch + 6); 2282 lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2283 RTW89_RU106, 2284 ntx, ch - 6); 2285 lmt_ru->ru106[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2286 RTW89_RU106, 2287 ntx, ch - 2); 2288 lmt_ru->ru106[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2289 RTW89_RU106, 2290 ntx, ch + 2); 2291 lmt_ru->ru106[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2292 RTW89_RU106, 2293 ntx, ch + 6); 2294 } 2295 2296 static void 2297 rtw89_phy_fill_txpwr_limit_ru_160m_ax(struct rtw89_dev *rtwdev, 2298 struct rtw89_txpwr_limit_ru_ax *lmt_ru, 2299 u8 band, u8 ntx, u8 ch) 2300 { 2301 static const int ofst[] = { -14, -10, -6, -2, 2, 6, 10, 14 }; 2302 int i; 2303 2304 static_assert(ARRAY_SIZE(ofst) == RTW89_RU_SEC_NUM_AX); 2305 for (i = 0; i < RTW89_RU_SEC_NUM_AX; i++) { 2306 lmt_ru->ru26[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2307 RTW89_RU26, 2308 ntx, 2309 ch + ofst[i]); 2310 lmt_ru->ru52[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2311 RTW89_RU52, 2312 ntx, 2313 ch + ofst[i]); 2314 lmt_ru->ru106[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band, 2315 RTW89_RU106, 2316 ntx, 2317 ch + ofst[i]); 2318 } 2319 } 2320 2321 static 2322 void rtw89_phy_fill_txpwr_limit_ru_ax(struct rtw89_dev *rtwdev, 2323 const struct rtw89_chan *chan, 2324 struct rtw89_txpwr_limit_ru_ax *lmt_ru, 2325 u8 ntx) 2326 { 2327 u8 band = chan->band_type; 2328 u8 ch = chan->channel; 2329 u8 bw = chan->band_width; 2330 2331 memset(lmt_ru, 0, sizeof(*lmt_ru)); 2332 2333 switch (bw) { 2334 case RTW89_CHANNEL_WIDTH_20: 2335 rtw89_phy_fill_txpwr_limit_ru_20m_ax(rtwdev, lmt_ru, band, ntx, 2336 ch); 2337 break; 2338 case RTW89_CHANNEL_WIDTH_40: 2339 rtw89_phy_fill_txpwr_limit_ru_40m_ax(rtwdev, lmt_ru, band, ntx, 2340 ch); 2341 break; 2342 case RTW89_CHANNEL_WIDTH_80: 2343 rtw89_phy_fill_txpwr_limit_ru_80m_ax(rtwdev, lmt_ru, band, ntx, 2344 ch); 2345 break; 2346 case RTW89_CHANNEL_WIDTH_160: 2347 rtw89_phy_fill_txpwr_limit_ru_160m_ax(rtwdev, lmt_ru, band, ntx, 2348 ch); 2349 break; 2350 } 2351 } 2352 2353 static void rtw89_phy_set_txpwr_byrate_ax(struct rtw89_dev *rtwdev, 2354 const struct rtw89_chan *chan, 2355 enum rtw89_phy_idx phy_idx) 2356 { 2357 u8 max_nss_num = rtwdev->chip->rf_path_num; 2358 static const u8 rs[] = { 2359 RTW89_RS_CCK, 2360 RTW89_RS_OFDM, 2361 RTW89_RS_MCS, 2362 RTW89_RS_HEDCM, 2363 }; 2364 struct rtw89_rate_desc cur = {}; 2365 u8 band = chan->band_type; 2366 u8 ch = chan->channel; 2367 u32 addr, val; 2368 s8 v[4] = {}; 2369 u8 i; 2370 2371 rtw89_debug(rtwdev, RTW89_DBG_TXPWR, 2372 "[TXPWR] set txpwr byrate with ch=%d\n", ch); 2373 2374 BUILD_BUG_ON(rtw89_rs_idx_num_ax[RTW89_RS_CCK] % 4); 2375 BUILD_BUG_ON(rtw89_rs_idx_num_ax[RTW89_RS_OFDM] % 4); 2376 BUILD_BUG_ON(rtw89_rs_idx_num_ax[RTW89_RS_MCS] % 4); 2377 BUILD_BUG_ON(rtw89_rs_idx_num_ax[RTW89_RS_HEDCM] % 4); 2378 2379 addr = R_AX_PWR_BY_RATE; 2380 for (cur.nss = 0; cur.nss < max_nss_num; cur.nss++) { 2381 for (i = 0; i < ARRAY_SIZE(rs); i++) { 2382 if (cur.nss >= rtw89_rs_nss_num_ax[rs[i]]) 2383 continue; 2384 2385 cur.rs = rs[i]; 2386 for (cur.idx = 0; cur.idx < rtw89_rs_idx_num_ax[rs[i]]; 2387 cur.idx++) { 2388 v[cur.idx % 4] = 2389 rtw89_phy_read_txpwr_byrate(rtwdev, 2390 band, 0, 2391 &cur); 2392 2393 if ((cur.idx + 1) % 4) 2394 continue; 2395 2396 val = FIELD_PREP(GENMASK(7, 0), v[0]) | 2397 FIELD_PREP(GENMASK(15, 8), v[1]) | 2398 FIELD_PREP(GENMASK(23, 16), v[2]) | 2399 FIELD_PREP(GENMASK(31, 24), v[3]); 2400 2401 rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, 2402 val); 2403 addr += 4; 2404 } 2405 } 2406 } 2407 } 2408 2409 static 2410 void rtw89_phy_set_txpwr_offset_ax(struct rtw89_dev *rtwdev, 2411 const struct rtw89_chan *chan, 2412 enum rtw89_phy_idx phy_idx) 2413 { 2414 struct rtw89_rate_desc desc = { 2415 .nss = RTW89_NSS_1, 2416 .rs = RTW89_RS_OFFSET, 2417 }; 2418 u8 band = chan->band_type; 2419 s8 v[RTW89_RATE_OFFSET_NUM_AX] = {}; 2420 u32 val; 2421 2422 rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr offset\n"); 2423 2424 for (desc.idx = 0; desc.idx < RTW89_RATE_OFFSET_NUM_AX; desc.idx++) 2425 v[desc.idx] = rtw89_phy_read_txpwr_byrate(rtwdev, band, 0, &desc); 2426 2427 BUILD_BUG_ON(RTW89_RATE_OFFSET_NUM_AX != 5); 2428 val = FIELD_PREP(GENMASK(3, 0), v[0]) | 2429 FIELD_PREP(GENMASK(7, 4), v[1]) | 2430 FIELD_PREP(GENMASK(11, 8), v[2]) | 2431 FIELD_PREP(GENMASK(15, 12), v[3]) | 2432 FIELD_PREP(GENMASK(19, 16), v[4]); 2433 2434 rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_AX_PWR_RATE_OFST_CTRL, 2435 GENMASK(19, 0), val); 2436 } 2437 2438 static void rtw89_phy_set_txpwr_limit_ax(struct rtw89_dev *rtwdev, 2439 const struct rtw89_chan *chan, 2440 enum rtw89_phy_idx phy_idx) 2441 { 2442 u8 max_ntx_num = rtwdev->chip->rf_path_num; 2443 struct rtw89_txpwr_limit_ax lmt; 2444 u8 ch = chan->channel; 2445 u8 bw = chan->band_width; 2446 const s8 *ptr; 2447 u32 addr, val; 2448 u8 i, j; 2449 2450 rtw89_debug(rtwdev, RTW89_DBG_TXPWR, 2451 "[TXPWR] set txpwr limit with ch=%d bw=%d\n", ch, bw); 2452 2453 BUILD_BUG_ON(sizeof(struct rtw89_txpwr_limit_ax) != 2454 RTW89_TXPWR_LMT_PAGE_SIZE_AX); 2455 2456 addr = R_AX_PWR_LMT; 2457 for (i = 0; i < max_ntx_num; i++) { 2458 rtw89_phy_fill_txpwr_limit_ax(rtwdev, chan, &lmt, i); 2459 2460 ptr = (s8 *)&lmt; 2461 for (j = 0; j < RTW89_TXPWR_LMT_PAGE_SIZE_AX; 2462 j += 4, addr += 4, ptr += 4) { 2463 val = FIELD_PREP(GENMASK(7, 0), ptr[0]) | 2464 FIELD_PREP(GENMASK(15, 8), ptr[1]) | 2465 FIELD_PREP(GENMASK(23, 16), ptr[2]) | 2466 FIELD_PREP(GENMASK(31, 24), ptr[3]); 2467 2468 rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val); 2469 } 2470 } 2471 } 2472 2473 static void rtw89_phy_set_txpwr_limit_ru_ax(struct rtw89_dev *rtwdev, 2474 const struct rtw89_chan *chan, 2475 enum rtw89_phy_idx phy_idx) 2476 { 2477 u8 max_ntx_num = rtwdev->chip->rf_path_num; 2478 struct rtw89_txpwr_limit_ru_ax lmt_ru; 2479 u8 ch = chan->channel; 2480 u8 bw = chan->band_width; 2481 const s8 *ptr; 2482 u32 addr, val; 2483 u8 i, j; 2484 2485 rtw89_debug(rtwdev, RTW89_DBG_TXPWR, 2486 "[TXPWR] set txpwr limit ru with ch=%d bw=%d\n", ch, bw); 2487 2488 BUILD_BUG_ON(sizeof(struct rtw89_txpwr_limit_ru_ax) != 2489 RTW89_TXPWR_LMT_RU_PAGE_SIZE_AX); 2490 2491 addr = R_AX_PWR_RU_LMT; 2492 for (i = 0; i < max_ntx_num; i++) { 2493 rtw89_phy_fill_txpwr_limit_ru_ax(rtwdev, chan, &lmt_ru, i); 2494 2495 ptr = (s8 *)&lmt_ru; 2496 for (j = 0; j < RTW89_TXPWR_LMT_RU_PAGE_SIZE_AX; 2497 j += 4, addr += 4, ptr += 4) { 2498 val = FIELD_PREP(GENMASK(7, 0), ptr[0]) | 2499 FIELD_PREP(GENMASK(15, 8), ptr[1]) | 2500 FIELD_PREP(GENMASK(23, 16), ptr[2]) | 2501 FIELD_PREP(GENMASK(31, 24), ptr[3]); 2502 2503 rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val); 2504 } 2505 } 2506 } 2507 2508 struct rtw89_phy_iter_ra_data { 2509 struct rtw89_dev *rtwdev; 2510 struct sk_buff *c2h; 2511 }; 2512 2513 static void rtw89_phy_c2h_ra_rpt_iter(void *data, struct ieee80211_sta *sta) 2514 { 2515 struct rtw89_phy_iter_ra_data *ra_data = (struct rtw89_phy_iter_ra_data *)data; 2516 struct rtw89_dev *rtwdev = ra_data->rtwdev; 2517 struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; 2518 const struct rtw89_c2h_ra_rpt *c2h = 2519 (const struct rtw89_c2h_ra_rpt *)ra_data->c2h->data; 2520 struct rtw89_ra_report *ra_report = &rtwsta->ra_report; 2521 const struct rtw89_chip_info *chip = rtwdev->chip; 2522 bool format_v1 = chip->chip_gen == RTW89_CHIP_BE; 2523 u8 mode, rate, bw, giltf, mac_id; 2524 u16 legacy_bitrate; 2525 bool valid; 2526 u8 mcs = 0; 2527 u8 t; 2528 2529 mac_id = le32_get_bits(c2h->w2, RTW89_C2H_RA_RPT_W2_MACID); 2530 if (mac_id != rtwsta->mac_id) 2531 return; 2532 2533 rate = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_MCSNSS); 2534 bw = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_BW); 2535 giltf = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_GILTF); 2536 mode = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_MD_SEL); 2537 2538 if (format_v1) { 2539 t = le32_get_bits(c2h->w2, RTW89_C2H_RA_RPT_W2_MCSNSS_B7); 2540 rate |= u8_encode_bits(t, BIT(7)); 2541 t = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_BW_B2); 2542 bw |= u8_encode_bits(t, BIT(2)); 2543 t = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_MD_SEL_B2); 2544 mode |= u8_encode_bits(t, BIT(2)); 2545 } 2546 2547 if (mode == RTW89_RA_RPT_MODE_LEGACY) { 2548 valid = rtw89_ra_report_to_bitrate(rtwdev, rate, &legacy_bitrate); 2549 if (!valid) 2550 return; 2551 } 2552 2553 memset(&ra_report->txrate, 0, sizeof(ra_report->txrate)); 2554 2555 switch (mode) { 2556 case RTW89_RA_RPT_MODE_LEGACY: 2557 ra_report->txrate.legacy = legacy_bitrate; 2558 break; 2559 case RTW89_RA_RPT_MODE_HT: 2560 ra_report->txrate.flags |= RATE_INFO_FLAGS_MCS; 2561 if (RTW89_CHK_FW_FEATURE(OLD_HT_RA_FORMAT, &rtwdev->fw)) 2562 rate = RTW89_MK_HT_RATE(FIELD_GET(RTW89_RA_RATE_MASK_NSS, rate), 2563 FIELD_GET(RTW89_RA_RATE_MASK_MCS, rate)); 2564 else 2565 rate = FIELD_GET(RTW89_RA_RATE_MASK_HT_MCS, rate); 2566 ra_report->txrate.mcs = rate; 2567 if (giltf) 2568 ra_report->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; 2569 mcs = ra_report->txrate.mcs & 0x07; 2570 break; 2571 case RTW89_RA_RPT_MODE_VHT: 2572 ra_report->txrate.flags |= RATE_INFO_FLAGS_VHT_MCS; 2573 ra_report->txrate.mcs = format_v1 ? 2574 u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS_V1) : 2575 u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS); 2576 ra_report->txrate.nss = format_v1 ? 2577 u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS_V1) + 1 : 2578 u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS) + 1; 2579 if (giltf) 2580 ra_report->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; 2581 mcs = ra_report->txrate.mcs; 2582 break; 2583 case RTW89_RA_RPT_MODE_HE: 2584 ra_report->txrate.flags |= RATE_INFO_FLAGS_HE_MCS; 2585 ra_report->txrate.mcs = format_v1 ? 2586 u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS_V1) : 2587 u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS); 2588 ra_report->txrate.nss = format_v1 ? 2589 u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS_V1) + 1 : 2590 u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS) + 1; 2591 if (giltf == RTW89_GILTF_2XHE08 || giltf == RTW89_GILTF_1XHE08) 2592 ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_0_8; 2593 else if (giltf == RTW89_GILTF_2XHE16 || giltf == RTW89_GILTF_1XHE16) 2594 ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_1_6; 2595 else 2596 ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_3_2; 2597 mcs = ra_report->txrate.mcs; 2598 break; 2599 case RTW89_RA_RPT_MODE_EHT: 2600 ra_report->txrate.flags |= RATE_INFO_FLAGS_EHT_MCS; 2601 ra_report->txrate.mcs = u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS_V1); 2602 ra_report->txrate.nss = u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS_V1) + 1; 2603 if (giltf == RTW89_GILTF_2XHE08 || giltf == RTW89_GILTF_1XHE08) 2604 ra_report->txrate.eht_gi = NL80211_RATE_INFO_EHT_GI_0_8; 2605 else if (giltf == RTW89_GILTF_2XHE16 || giltf == RTW89_GILTF_1XHE16) 2606 ra_report->txrate.eht_gi = NL80211_RATE_INFO_EHT_GI_1_6; 2607 else 2608 ra_report->txrate.eht_gi = NL80211_RATE_INFO_EHT_GI_3_2; 2609 mcs = ra_report->txrate.mcs; 2610 break; 2611 } 2612 2613 ra_report->txrate.bw = rtw89_hw_to_rate_info_bw(bw); 2614 ra_report->bit_rate = cfg80211_calculate_bitrate(&ra_report->txrate); 2615 ra_report->hw_rate = format_v1 ? 2616 u16_encode_bits(mode, RTW89_HW_RATE_V1_MASK_MOD) | 2617 u16_encode_bits(rate, RTW89_HW_RATE_V1_MASK_VAL) : 2618 u16_encode_bits(mode, RTW89_HW_RATE_MASK_MOD) | 2619 u16_encode_bits(rate, RTW89_HW_RATE_MASK_VAL); 2620 ra_report->might_fallback_legacy = mcs <= 2; 2621 sta->deflink.agg.max_rc_amsdu_len = get_max_amsdu_len(rtwdev, ra_report); 2622 rtwsta->max_agg_wait = sta->deflink.agg.max_rc_amsdu_len / 1500 - 1; 2623 } 2624 2625 static void 2626 rtw89_phy_c2h_ra_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 2627 { 2628 struct rtw89_phy_iter_ra_data ra_data; 2629 2630 ra_data.rtwdev = rtwdev; 2631 ra_data.c2h = c2h; 2632 ieee80211_iterate_stations_atomic(rtwdev->hw, 2633 rtw89_phy_c2h_ra_rpt_iter, 2634 &ra_data); 2635 } 2636 2637 static 2638 void (* const rtw89_phy_c2h_ra_handler[])(struct rtw89_dev *rtwdev, 2639 struct sk_buff *c2h, u32 len) = { 2640 [RTW89_PHY_C2H_FUNC_STS_RPT] = rtw89_phy_c2h_ra_rpt, 2641 [RTW89_PHY_C2H_FUNC_MU_GPTBL_RPT] = NULL, 2642 [RTW89_PHY_C2H_FUNC_TXSTS] = NULL, 2643 }; 2644 2645 static void rtw89_phy_c2h_rfk_rpt_log(struct rtw89_dev *rtwdev, 2646 enum rtw89_phy_c2h_rfk_log_func func, 2647 void *content, u16 len) 2648 { 2649 struct rtw89_c2h_rf_txgapk_rpt_log *txgapk; 2650 struct rtw89_c2h_rf_rxdck_rpt_log *rxdck; 2651 struct rtw89_c2h_rf_dack_rpt_log *dack; 2652 struct rtw89_c2h_rf_dpk_rpt_log *dpk; 2653 2654 switch (func) { 2655 case RTW89_PHY_C2H_RFK_LOG_FUNC_DPK: 2656 if (len != sizeof(*dpk)) 2657 goto out; 2658 2659 dpk = content; 2660 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2661 "DPK ver:%d idx:%2ph band:%2ph bw:%2ph ch:%2ph path:%2ph\n", 2662 dpk->ver, dpk->idx, dpk->band, dpk->bw, dpk->ch, dpk->path_ok); 2663 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2664 "DPK txagc:%2ph ther:%2ph gs:%2ph dc_i:%4ph dc_q:%4ph\n", 2665 dpk->txagc, dpk->ther, dpk->gs, dpk->dc_i, dpk->dc_q); 2666 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2667 "DPK corr_v:%2ph corr_i:%2ph to:%2ph ov:%2ph\n", 2668 dpk->corr_val, dpk->corr_idx, dpk->is_timeout, dpk->rxbb_ov); 2669 return; 2670 case RTW89_PHY_C2H_RFK_LOG_FUNC_DACK: 2671 if (len != sizeof(*dack)) 2672 goto out; 2673 2674 dack = content; 2675 2676 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]ver=0x%x 0x%x\n", 2677 dack->fwdack_ver, dack->fwdack_rpt_ver); 2678 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 CDACK ic = [0x%x, 0x%x]\n", 2679 dack->cdack_d[0][0][0], dack->cdack_d[0][0][1]); 2680 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 CDACK qc = [0x%x, 0x%x]\n", 2681 dack->cdack_d[0][1][0], dack->cdack_d[0][1][1]); 2682 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 CDACK ic = [0x%x, 0x%x]\n", 2683 dack->cdack_d[1][0][0], dack->cdack_d[1][0][1]); 2684 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 CDACK qc = [0x%x, 0x%x]\n", 2685 dack->cdack_d[1][1][0], dack->cdack_d[1][1][1]); 2686 2687 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADC_DCK ic = [0x%x, 0x%x]\n", 2688 dack->addck2_d[0][0][0], dack->addck2_d[0][0][1]); 2689 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADC_DCK qc = [0x%x, 0x%x]\n", 2690 dack->addck2_d[0][1][0], dack->addck2_d[0][1][1]); 2691 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADC_DCK ic = [0x%x, 0x%x]\n", 2692 dack->addck2_d[1][0][0], dack->addck2_d[1][0][1]); 2693 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADC_DCK qc = [0x%x, 0x%x]\n", 2694 dack->addck2_d[1][1][0], dack->addck2_d[1][1][1]); 2695 2696 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADC_GAINK ic = 0x%x, qc = 0x%x\n", 2697 dack->adgaink_d[0][0], dack->adgaink_d[0][1]); 2698 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADC_GAINK ic = 0x%x, qc = 0x%x\n", 2699 dack->adgaink_d[1][0], dack->adgaink_d[1][1]); 2700 2701 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n", 2702 dack->dadck_d[0][0], dack->dadck_d[0][1]); 2703 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 DAC_DCK ic = 0x%x, qc = 0x%x\n", 2704 dack->dadck_d[1][0], dack->dadck_d[1][1]); 2705 2706 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 biask iqc = 0x%x\n", 2707 dack->biask_d[0][0]); 2708 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 biask iqc = 0x%x\n", 2709 dack->biask_d[1][0]); 2710 2711 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK ic: %*ph\n", 2712 (int)sizeof(dack->msbk_d[0][0]), dack->msbk_d[0][0]); 2713 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK qc: %*ph\n", 2714 (int)sizeof(dack->msbk_d[0][1]), dack->msbk_d[0][1]); 2715 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK ic: %*ph\n", 2716 (int)sizeof(dack->msbk_d[1][0]), dack->msbk_d[1][0]); 2717 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK qc: %*ph\n", 2718 (int)sizeof(dack->msbk_d[1][1]), dack->msbk_d[1][1]); 2719 return; 2720 case RTW89_PHY_C2H_RFK_LOG_FUNC_RXDCK: 2721 if (len != sizeof(*rxdck)) 2722 goto out; 2723 2724 rxdck = content; 2725 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2726 "RXDCK ver:%d band:%2ph bw:%2ph ch:%2ph to:%2ph\n", 2727 rxdck->ver, rxdck->band, rxdck->bw, rxdck->ch, 2728 rxdck->timeout); 2729 return; 2730 case RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK: 2731 if (len != sizeof(*txgapk)) 2732 goto out; 2733 2734 txgapk = content; 2735 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2736 "[TXGAPK]rpt r0x8010[0]=0x%x, r0x8010[1]=0x%x\n", 2737 le32_to_cpu(txgapk->r0x8010[0]), 2738 le32_to_cpu(txgapk->r0x8010[1])); 2739 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt chk_id = %d\n", 2740 txgapk->chk_id); 2741 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt chk_cnt = %d\n", 2742 le32_to_cpu(txgapk->chk_cnt)); 2743 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt ver = 0x%x\n", 2744 txgapk->ver); 2745 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt rsv1 = %d\n", 2746 txgapk->rsv1); 2747 2748 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt track_d[0] = %*ph\n", 2749 (int)sizeof(txgapk->track_d[0]), txgapk->track_d[0]); 2750 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt power_d[0] = %*ph\n", 2751 (int)sizeof(txgapk->power_d[0]), txgapk->power_d[0]); 2752 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt track_d[1] = %*ph\n", 2753 (int)sizeof(txgapk->track_d[1]), txgapk->track_d[1]); 2754 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt power_d[1] = %*ph\n", 2755 (int)sizeof(txgapk->power_d[1]), txgapk->power_d[1]); 2756 return; 2757 default: 2758 break; 2759 } 2760 2761 out: 2762 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2763 "unexpected RFK func %d report log with length %d\n", func, len); 2764 } 2765 2766 static bool rtw89_phy_c2h_rfk_run_log(struct rtw89_dev *rtwdev, 2767 enum rtw89_phy_c2h_rfk_log_func func, 2768 void *content, u16 len) 2769 { 2770 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; 2771 const struct rtw89_c2h_rf_run_log *log = content; 2772 const struct rtw89_fw_element_hdr *elm; 2773 u32 fmt_idx; 2774 u16 offset; 2775 2776 if (sizeof(*log) != len) 2777 return false; 2778 2779 if (!elm_info->rfk_log_fmt) 2780 return false; 2781 2782 elm = elm_info->rfk_log_fmt->elm[func]; 2783 fmt_idx = le32_to_cpu(log->fmt_idx); 2784 if (!elm || fmt_idx >= elm->u.rfk_log_fmt.nr) 2785 return false; 2786 2787 offset = le16_to_cpu(elm->u.rfk_log_fmt.offset[fmt_idx]); 2788 if (offset == 0) 2789 return false; 2790 2791 rtw89_debug(rtwdev, RTW89_DBG_RFK, &elm->u.common.contents[offset], 2792 le32_to_cpu(log->arg[0]), le32_to_cpu(log->arg[1]), 2793 le32_to_cpu(log->arg[2]), le32_to_cpu(log->arg[3])); 2794 2795 return true; 2796 } 2797 2798 static void rtw89_phy_c2h_rfk_log(struct rtw89_dev *rtwdev, struct sk_buff *c2h, 2799 u32 len, enum rtw89_phy_c2h_rfk_log_func func, 2800 const char *rfk_name) 2801 { 2802 struct rtw89_c2h_hdr *c2h_hdr = (struct rtw89_c2h_hdr *)c2h->data; 2803 struct rtw89_c2h_rf_log_hdr *log_hdr; 2804 void *log_ptr = c2h_hdr; 2805 u16 content_len; 2806 u16 chunk_len; 2807 bool handled; 2808 2809 if (!rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK)) 2810 return; 2811 2812 log_ptr += sizeof(*c2h_hdr); 2813 len -= sizeof(*c2h_hdr); 2814 2815 while (len > sizeof(*log_hdr)) { 2816 log_hdr = log_ptr; 2817 content_len = le16_to_cpu(log_hdr->len); 2818 chunk_len = content_len + sizeof(*log_hdr); 2819 2820 if (chunk_len > len) 2821 break; 2822 2823 switch (log_hdr->type) { 2824 case RTW89_RF_RUN_LOG: 2825 handled = rtw89_phy_c2h_rfk_run_log(rtwdev, func, 2826 log_hdr->content, content_len); 2827 if (handled) 2828 break; 2829 2830 rtw89_debug(rtwdev, RTW89_DBG_RFK, "%s run: %*ph\n", 2831 rfk_name, content_len, log_hdr->content); 2832 break; 2833 case RTW89_RF_RPT_LOG: 2834 rtw89_phy_c2h_rfk_rpt_log(rtwdev, func, 2835 log_hdr->content, content_len); 2836 break; 2837 default: 2838 return; 2839 } 2840 2841 log_ptr += chunk_len; 2842 len -= chunk_len; 2843 } 2844 } 2845 2846 static void 2847 rtw89_phy_c2h_rfk_log_iqk(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 2848 { 2849 rtw89_phy_c2h_rfk_log(rtwdev, c2h, len, 2850 RTW89_PHY_C2H_RFK_LOG_FUNC_IQK, "IQK"); 2851 } 2852 2853 static void 2854 rtw89_phy_c2h_rfk_log_dpk(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 2855 { 2856 rtw89_phy_c2h_rfk_log(rtwdev, c2h, len, 2857 RTW89_PHY_C2H_RFK_LOG_FUNC_DPK, "DPK"); 2858 } 2859 2860 static void 2861 rtw89_phy_c2h_rfk_log_dack(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 2862 { 2863 rtw89_phy_c2h_rfk_log(rtwdev, c2h, len, 2864 RTW89_PHY_C2H_RFK_LOG_FUNC_DACK, "DACK"); 2865 } 2866 2867 static void 2868 rtw89_phy_c2h_rfk_log_rxdck(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 2869 { 2870 rtw89_phy_c2h_rfk_log(rtwdev, c2h, len, 2871 RTW89_PHY_C2H_RFK_LOG_FUNC_RXDCK, "RX_DCK"); 2872 } 2873 2874 static void 2875 rtw89_phy_c2h_rfk_log_tssi(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 2876 { 2877 rtw89_phy_c2h_rfk_log(rtwdev, c2h, len, 2878 RTW89_PHY_C2H_RFK_LOG_FUNC_TSSI, "TSSI"); 2879 } 2880 2881 static void 2882 rtw89_phy_c2h_rfk_log_txgapk(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 2883 { 2884 rtw89_phy_c2h_rfk_log(rtwdev, c2h, len, 2885 RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK, "TXGAPK"); 2886 } 2887 2888 static 2889 void (* const rtw89_phy_c2h_rfk_log_handler[])(struct rtw89_dev *rtwdev, 2890 struct sk_buff *c2h, u32 len) = { 2891 [RTW89_PHY_C2H_RFK_LOG_FUNC_IQK] = rtw89_phy_c2h_rfk_log_iqk, 2892 [RTW89_PHY_C2H_RFK_LOG_FUNC_DPK] = rtw89_phy_c2h_rfk_log_dpk, 2893 [RTW89_PHY_C2H_RFK_LOG_FUNC_DACK] = rtw89_phy_c2h_rfk_log_dack, 2894 [RTW89_PHY_C2H_RFK_LOG_FUNC_RXDCK] = rtw89_phy_c2h_rfk_log_rxdck, 2895 [RTW89_PHY_C2H_RFK_LOG_FUNC_TSSI] = rtw89_phy_c2h_rfk_log_tssi, 2896 [RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK] = rtw89_phy_c2h_rfk_log_txgapk, 2897 }; 2898 2899 static 2900 void rtw89_phy_rfk_report_prep(struct rtw89_dev *rtwdev) 2901 { 2902 struct rtw89_rfk_wait_info *wait = &rtwdev->rfk_wait; 2903 2904 wait->state = RTW89_RFK_STATE_START; 2905 wait->start_time = ktime_get(); 2906 reinit_completion(&wait->completion); 2907 } 2908 2909 static 2910 int rtw89_phy_rfk_report_wait(struct rtw89_dev *rtwdev, const char *rfk_name, 2911 unsigned int ms) 2912 { 2913 struct rtw89_rfk_wait_info *wait = &rtwdev->rfk_wait; 2914 unsigned long time_left; 2915 2916 /* Since we can't receive C2H event during SER, use a fixed delay. */ 2917 if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags)) { 2918 fsleep(1000 * ms / 2); 2919 goto out; 2920 } 2921 2922 time_left = wait_for_completion_timeout(&wait->completion, 2923 msecs_to_jiffies(ms)); 2924 if (time_left == 0) { 2925 rtw89_warn(rtwdev, "failed to wait RF %s\n", rfk_name); 2926 return -ETIMEDOUT; 2927 } else if (wait->state != RTW89_RFK_STATE_OK) { 2928 rtw89_warn(rtwdev, "failed to do RF %s result from state %d\n", 2929 rfk_name, wait->state); 2930 return -EFAULT; 2931 } 2932 2933 out: 2934 rtw89_debug(rtwdev, RTW89_DBG_RFK, "RF %s takes %lld ms to complete\n", 2935 rfk_name, ktime_ms_delta(ktime_get(), wait->start_time)); 2936 2937 return 0; 2938 } 2939 2940 static void 2941 rtw89_phy_c2h_rfk_report_state(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 2942 { 2943 const struct rtw89_c2h_rfk_report *report = 2944 (const struct rtw89_c2h_rfk_report *)c2h->data; 2945 struct rtw89_rfk_wait_info *wait = &rtwdev->rfk_wait; 2946 2947 wait->state = report->state; 2948 wait->version = report->version; 2949 2950 complete(&wait->completion); 2951 2952 rtw89_debug(rtwdev, RTW89_DBG_RFK, 2953 "RFK report state %d with version %d (%*ph)\n", 2954 wait->state, wait->version, 2955 (int)(len - sizeof(report->hdr)), &report->state); 2956 } 2957 2958 static 2959 void (* const rtw89_phy_c2h_rfk_report_handler[])(struct rtw89_dev *rtwdev, 2960 struct sk_buff *c2h, u32 len) = { 2961 [RTW89_PHY_C2H_RFK_REPORT_FUNC_STATE] = rtw89_phy_c2h_rfk_report_state, 2962 }; 2963 2964 bool rtw89_phy_c2h_chk_atomic(struct rtw89_dev *rtwdev, u8 class, u8 func) 2965 { 2966 switch (class) { 2967 case RTW89_PHY_C2H_RFK_LOG: 2968 switch (func) { 2969 case RTW89_PHY_C2H_RFK_LOG_FUNC_IQK: 2970 case RTW89_PHY_C2H_RFK_LOG_FUNC_DPK: 2971 case RTW89_PHY_C2H_RFK_LOG_FUNC_DACK: 2972 case RTW89_PHY_C2H_RFK_LOG_FUNC_RXDCK: 2973 case RTW89_PHY_C2H_RFK_LOG_FUNC_TSSI: 2974 case RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK: 2975 return true; 2976 default: 2977 return false; 2978 } 2979 case RTW89_PHY_C2H_RFK_REPORT: 2980 switch (func) { 2981 case RTW89_PHY_C2H_RFK_REPORT_FUNC_STATE: 2982 return true; 2983 default: 2984 return false; 2985 } 2986 default: 2987 return false; 2988 } 2989 } 2990 2991 void rtw89_phy_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb, 2992 u32 len, u8 class, u8 func) 2993 { 2994 void (*handler)(struct rtw89_dev *rtwdev, 2995 struct sk_buff *c2h, u32 len) = NULL; 2996 2997 switch (class) { 2998 case RTW89_PHY_C2H_CLASS_RA: 2999 if (func < RTW89_PHY_C2H_FUNC_RA_MAX) 3000 handler = rtw89_phy_c2h_ra_handler[func]; 3001 break; 3002 case RTW89_PHY_C2H_RFK_LOG: 3003 if (func < ARRAY_SIZE(rtw89_phy_c2h_rfk_log_handler)) 3004 handler = rtw89_phy_c2h_rfk_log_handler[func]; 3005 break; 3006 case RTW89_PHY_C2H_RFK_REPORT: 3007 if (func < ARRAY_SIZE(rtw89_phy_c2h_rfk_report_handler)) 3008 handler = rtw89_phy_c2h_rfk_report_handler[func]; 3009 break; 3010 case RTW89_PHY_C2H_CLASS_DM: 3011 if (func == RTW89_PHY_C2H_DM_FUNC_LOWRT_RTY) 3012 return; 3013 fallthrough; 3014 default: 3015 rtw89_info(rtwdev, "c2h class %d not support\n", class); 3016 return; 3017 } 3018 if (!handler) { 3019 rtw89_info(rtwdev, "c2h class %d func %d not support\n", class, 3020 func); 3021 return; 3022 } 3023 handler(rtwdev, skb, len); 3024 } 3025 3026 int rtw89_phy_rfk_pre_ntfy_and_wait(struct rtw89_dev *rtwdev, 3027 enum rtw89_phy_idx phy_idx, 3028 unsigned int ms) 3029 { 3030 int ret; 3031 3032 rtw89_phy_rfk_report_prep(rtwdev); 3033 3034 ret = rtw89_fw_h2c_rf_pre_ntfy(rtwdev, phy_idx); 3035 if (ret) 3036 return ret; 3037 3038 return rtw89_phy_rfk_report_wait(rtwdev, "PRE_NTFY", ms); 3039 } 3040 EXPORT_SYMBOL(rtw89_phy_rfk_pre_ntfy_and_wait); 3041 3042 int rtw89_phy_rfk_tssi_and_wait(struct rtw89_dev *rtwdev, 3043 enum rtw89_phy_idx phy_idx, 3044 enum rtw89_tssi_mode tssi_mode, 3045 unsigned int ms) 3046 { 3047 int ret; 3048 3049 rtw89_phy_rfk_report_prep(rtwdev); 3050 3051 ret = rtw89_fw_h2c_rf_tssi(rtwdev, phy_idx, tssi_mode); 3052 if (ret) 3053 return ret; 3054 3055 return rtw89_phy_rfk_report_wait(rtwdev, "TSSI", ms); 3056 } 3057 EXPORT_SYMBOL(rtw89_phy_rfk_tssi_and_wait); 3058 3059 int rtw89_phy_rfk_iqk_and_wait(struct rtw89_dev *rtwdev, 3060 enum rtw89_phy_idx phy_idx, 3061 unsigned int ms) 3062 { 3063 int ret; 3064 3065 rtw89_phy_rfk_report_prep(rtwdev); 3066 3067 ret = rtw89_fw_h2c_rf_iqk(rtwdev, phy_idx); 3068 if (ret) 3069 return ret; 3070 3071 return rtw89_phy_rfk_report_wait(rtwdev, "IQK", ms); 3072 } 3073 EXPORT_SYMBOL(rtw89_phy_rfk_iqk_and_wait); 3074 3075 int rtw89_phy_rfk_dpk_and_wait(struct rtw89_dev *rtwdev, 3076 enum rtw89_phy_idx phy_idx, 3077 unsigned int ms) 3078 { 3079 int ret; 3080 3081 rtw89_phy_rfk_report_prep(rtwdev); 3082 3083 ret = rtw89_fw_h2c_rf_dpk(rtwdev, phy_idx); 3084 if (ret) 3085 return ret; 3086 3087 return rtw89_phy_rfk_report_wait(rtwdev, "DPK", ms); 3088 } 3089 EXPORT_SYMBOL(rtw89_phy_rfk_dpk_and_wait); 3090 3091 int rtw89_phy_rfk_txgapk_and_wait(struct rtw89_dev *rtwdev, 3092 enum rtw89_phy_idx phy_idx, 3093 unsigned int ms) 3094 { 3095 int ret; 3096 3097 rtw89_phy_rfk_report_prep(rtwdev); 3098 3099 ret = rtw89_fw_h2c_rf_txgapk(rtwdev, phy_idx); 3100 if (ret) 3101 return ret; 3102 3103 return rtw89_phy_rfk_report_wait(rtwdev, "TXGAPK", ms); 3104 } 3105 EXPORT_SYMBOL(rtw89_phy_rfk_txgapk_and_wait); 3106 3107 int rtw89_phy_rfk_dack_and_wait(struct rtw89_dev *rtwdev, 3108 enum rtw89_phy_idx phy_idx, 3109 unsigned int ms) 3110 { 3111 int ret; 3112 3113 rtw89_phy_rfk_report_prep(rtwdev); 3114 3115 ret = rtw89_fw_h2c_rf_dack(rtwdev, phy_idx); 3116 if (ret) 3117 return ret; 3118 3119 return rtw89_phy_rfk_report_wait(rtwdev, "DACK", ms); 3120 } 3121 EXPORT_SYMBOL(rtw89_phy_rfk_dack_and_wait); 3122 3123 int rtw89_phy_rfk_rxdck_and_wait(struct rtw89_dev *rtwdev, 3124 enum rtw89_phy_idx phy_idx, 3125 unsigned int ms) 3126 { 3127 int ret; 3128 3129 rtw89_phy_rfk_report_prep(rtwdev); 3130 3131 ret = rtw89_fw_h2c_rf_rxdck(rtwdev, phy_idx); 3132 if (ret) 3133 return ret; 3134 3135 return rtw89_phy_rfk_report_wait(rtwdev, "RX_DCK", ms); 3136 } 3137 EXPORT_SYMBOL(rtw89_phy_rfk_rxdck_and_wait); 3138 3139 static u32 phy_tssi_get_cck_group(u8 ch) 3140 { 3141 switch (ch) { 3142 case 1 ... 2: 3143 return 0; 3144 case 3 ... 5: 3145 return 1; 3146 case 6 ... 8: 3147 return 2; 3148 case 9 ... 11: 3149 return 3; 3150 case 12 ... 13: 3151 return 4; 3152 case 14: 3153 return 5; 3154 } 3155 3156 return 0; 3157 } 3158 3159 #define PHY_TSSI_EXTRA_GROUP_BIT BIT(31) 3160 #define PHY_TSSI_EXTRA_GROUP(idx) (PHY_TSSI_EXTRA_GROUP_BIT | (idx)) 3161 #define PHY_IS_TSSI_EXTRA_GROUP(group) ((group) & PHY_TSSI_EXTRA_GROUP_BIT) 3162 #define PHY_TSSI_EXTRA_GET_GROUP_IDX1(group) \ 3163 ((group) & ~PHY_TSSI_EXTRA_GROUP_BIT) 3164 #define PHY_TSSI_EXTRA_GET_GROUP_IDX2(group) \ 3165 (PHY_TSSI_EXTRA_GET_GROUP_IDX1(group) + 1) 3166 3167 static u32 phy_tssi_get_ofdm_group(u8 ch) 3168 { 3169 switch (ch) { 3170 case 1 ... 2: 3171 return 0; 3172 case 3 ... 5: 3173 return 1; 3174 case 6 ... 8: 3175 return 2; 3176 case 9 ... 11: 3177 return 3; 3178 case 12 ... 14: 3179 return 4; 3180 case 36 ... 40: 3181 return 5; 3182 case 41 ... 43: 3183 return PHY_TSSI_EXTRA_GROUP(5); 3184 case 44 ... 48: 3185 return 6; 3186 case 49 ... 51: 3187 return PHY_TSSI_EXTRA_GROUP(6); 3188 case 52 ... 56: 3189 return 7; 3190 case 57 ... 59: 3191 return PHY_TSSI_EXTRA_GROUP(7); 3192 case 60 ... 64: 3193 return 8; 3194 case 100 ... 104: 3195 return 9; 3196 case 105 ... 107: 3197 return PHY_TSSI_EXTRA_GROUP(9); 3198 case 108 ... 112: 3199 return 10; 3200 case 113 ... 115: 3201 return PHY_TSSI_EXTRA_GROUP(10); 3202 case 116 ... 120: 3203 return 11; 3204 case 121 ... 123: 3205 return PHY_TSSI_EXTRA_GROUP(11); 3206 case 124 ... 128: 3207 return 12; 3208 case 129 ... 131: 3209 return PHY_TSSI_EXTRA_GROUP(12); 3210 case 132 ... 136: 3211 return 13; 3212 case 137 ... 139: 3213 return PHY_TSSI_EXTRA_GROUP(13); 3214 case 140 ... 144: 3215 return 14; 3216 case 149 ... 153: 3217 return 15; 3218 case 154 ... 156: 3219 return PHY_TSSI_EXTRA_GROUP(15); 3220 case 157 ... 161: 3221 return 16; 3222 case 162 ... 164: 3223 return PHY_TSSI_EXTRA_GROUP(16); 3224 case 165 ... 169: 3225 return 17; 3226 case 170 ... 172: 3227 return PHY_TSSI_EXTRA_GROUP(17); 3228 case 173 ... 177: 3229 return 18; 3230 } 3231 3232 return 0; 3233 } 3234 3235 static u32 phy_tssi_get_6g_ofdm_group(u8 ch) 3236 { 3237 switch (ch) { 3238 case 1 ... 5: 3239 return 0; 3240 case 6 ... 8: 3241 return PHY_TSSI_EXTRA_GROUP(0); 3242 case 9 ... 13: 3243 return 1; 3244 case 14 ... 16: 3245 return PHY_TSSI_EXTRA_GROUP(1); 3246 case 17 ... 21: 3247 return 2; 3248 case 22 ... 24: 3249 return PHY_TSSI_EXTRA_GROUP(2); 3250 case 25 ... 29: 3251 return 3; 3252 case 33 ... 37: 3253 return 4; 3254 case 38 ... 40: 3255 return PHY_TSSI_EXTRA_GROUP(4); 3256 case 41 ... 45: 3257 return 5; 3258 case 46 ... 48: 3259 return PHY_TSSI_EXTRA_GROUP(5); 3260 case 49 ... 53: 3261 return 6; 3262 case 54 ... 56: 3263 return PHY_TSSI_EXTRA_GROUP(6); 3264 case 57 ... 61: 3265 return 7; 3266 case 65 ... 69: 3267 return 8; 3268 case 70 ... 72: 3269 return PHY_TSSI_EXTRA_GROUP(8); 3270 case 73 ... 77: 3271 return 9; 3272 case 78 ... 80: 3273 return PHY_TSSI_EXTRA_GROUP(9); 3274 case 81 ... 85: 3275 return 10; 3276 case 86 ... 88: 3277 return PHY_TSSI_EXTRA_GROUP(10); 3278 case 89 ... 93: 3279 return 11; 3280 case 97 ... 101: 3281 return 12; 3282 case 102 ... 104: 3283 return PHY_TSSI_EXTRA_GROUP(12); 3284 case 105 ... 109: 3285 return 13; 3286 case 110 ... 112: 3287 return PHY_TSSI_EXTRA_GROUP(13); 3288 case 113 ... 117: 3289 return 14; 3290 case 118 ... 120: 3291 return PHY_TSSI_EXTRA_GROUP(14); 3292 case 121 ... 125: 3293 return 15; 3294 case 129 ... 133: 3295 return 16; 3296 case 134 ... 136: 3297 return PHY_TSSI_EXTRA_GROUP(16); 3298 case 137 ... 141: 3299 return 17; 3300 case 142 ... 144: 3301 return PHY_TSSI_EXTRA_GROUP(17); 3302 case 145 ... 149: 3303 return 18; 3304 case 150 ... 152: 3305 return PHY_TSSI_EXTRA_GROUP(18); 3306 case 153 ... 157: 3307 return 19; 3308 case 161 ... 165: 3309 return 20; 3310 case 166 ... 168: 3311 return PHY_TSSI_EXTRA_GROUP(20); 3312 case 169 ... 173: 3313 return 21; 3314 case 174 ... 176: 3315 return PHY_TSSI_EXTRA_GROUP(21); 3316 case 177 ... 181: 3317 return 22; 3318 case 182 ... 184: 3319 return PHY_TSSI_EXTRA_GROUP(22); 3320 case 185 ... 189: 3321 return 23; 3322 case 193 ... 197: 3323 return 24; 3324 case 198 ... 200: 3325 return PHY_TSSI_EXTRA_GROUP(24); 3326 case 201 ... 205: 3327 return 25; 3328 case 206 ... 208: 3329 return PHY_TSSI_EXTRA_GROUP(25); 3330 case 209 ... 213: 3331 return 26; 3332 case 214 ... 216: 3333 return PHY_TSSI_EXTRA_GROUP(26); 3334 case 217 ... 221: 3335 return 27; 3336 case 225 ... 229: 3337 return 28; 3338 case 230 ... 232: 3339 return PHY_TSSI_EXTRA_GROUP(28); 3340 case 233 ... 237: 3341 return 29; 3342 case 238 ... 240: 3343 return PHY_TSSI_EXTRA_GROUP(29); 3344 case 241 ... 245: 3345 return 30; 3346 case 246 ... 248: 3347 return PHY_TSSI_EXTRA_GROUP(30); 3348 case 249 ... 253: 3349 return 31; 3350 } 3351 3352 return 0; 3353 } 3354 3355 static u32 phy_tssi_get_trim_group(u8 ch) 3356 { 3357 switch (ch) { 3358 case 1 ... 8: 3359 return 0; 3360 case 9 ... 14: 3361 return 1; 3362 case 36 ... 48: 3363 return 2; 3364 case 49 ... 51: 3365 return PHY_TSSI_EXTRA_GROUP(2); 3366 case 52 ... 64: 3367 return 3; 3368 case 100 ... 112: 3369 return 4; 3370 case 113 ... 115: 3371 return PHY_TSSI_EXTRA_GROUP(4); 3372 case 116 ... 128: 3373 return 5; 3374 case 132 ... 144: 3375 return 6; 3376 case 149 ... 177: 3377 return 7; 3378 } 3379 3380 return 0; 3381 } 3382 3383 static u32 phy_tssi_get_6g_trim_group(u8 ch) 3384 { 3385 switch (ch) { 3386 case 1 ... 13: 3387 return 0; 3388 case 14 ... 16: 3389 return PHY_TSSI_EXTRA_GROUP(0); 3390 case 17 ... 29: 3391 return 1; 3392 case 33 ... 45: 3393 return 2; 3394 case 46 ... 48: 3395 return PHY_TSSI_EXTRA_GROUP(2); 3396 case 49 ... 61: 3397 return 3; 3398 case 65 ... 77: 3399 return 4; 3400 case 78 ... 80: 3401 return PHY_TSSI_EXTRA_GROUP(4); 3402 case 81 ... 93: 3403 return 5; 3404 case 97 ... 109: 3405 return 6; 3406 case 110 ... 112: 3407 return PHY_TSSI_EXTRA_GROUP(6); 3408 case 113 ... 125: 3409 return 7; 3410 case 129 ... 141: 3411 return 8; 3412 case 142 ... 144: 3413 return PHY_TSSI_EXTRA_GROUP(8); 3414 case 145 ... 157: 3415 return 9; 3416 case 161 ... 173: 3417 return 10; 3418 case 174 ... 176: 3419 return PHY_TSSI_EXTRA_GROUP(10); 3420 case 177 ... 189: 3421 return 11; 3422 case 193 ... 205: 3423 return 12; 3424 case 206 ... 208: 3425 return PHY_TSSI_EXTRA_GROUP(12); 3426 case 209 ... 221: 3427 return 13; 3428 case 225 ... 237: 3429 return 14; 3430 case 238 ... 240: 3431 return PHY_TSSI_EXTRA_GROUP(14); 3432 case 241 ... 253: 3433 return 15; 3434 } 3435 3436 return 0; 3437 } 3438 3439 static s8 phy_tssi_get_ofdm_de(struct rtw89_dev *rtwdev, 3440 enum rtw89_phy_idx phy, 3441 const struct rtw89_chan *chan, 3442 enum rtw89_rf_path path) 3443 { 3444 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; 3445 enum rtw89_band band = chan->band_type; 3446 u8 ch = chan->channel; 3447 u32 gidx_1st; 3448 u32 gidx_2nd; 3449 s8 de_1st; 3450 s8 de_2nd; 3451 u32 gidx; 3452 s8 val; 3453 3454 if (band == RTW89_BAND_6G) 3455 goto calc_6g; 3456 3457 gidx = phy_tssi_get_ofdm_group(ch); 3458 3459 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3460 "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n", 3461 path, gidx); 3462 3463 if (PHY_IS_TSSI_EXTRA_GROUP(gidx)) { 3464 gidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(gidx); 3465 gidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(gidx); 3466 de_1st = tssi_info->tssi_mcs[path][gidx_1st]; 3467 de_2nd = tssi_info->tssi_mcs[path][gidx_2nd]; 3468 val = (de_1st + de_2nd) / 2; 3469 3470 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3471 "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n", 3472 path, val, de_1st, de_2nd); 3473 } else { 3474 val = tssi_info->tssi_mcs[path][gidx]; 3475 3476 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3477 "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val); 3478 } 3479 3480 return val; 3481 3482 calc_6g: 3483 gidx = phy_tssi_get_6g_ofdm_group(ch); 3484 3485 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3486 "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n", 3487 path, gidx); 3488 3489 if (PHY_IS_TSSI_EXTRA_GROUP(gidx)) { 3490 gidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(gidx); 3491 gidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(gidx); 3492 de_1st = tssi_info->tssi_6g_mcs[path][gidx_1st]; 3493 de_2nd = tssi_info->tssi_6g_mcs[path][gidx_2nd]; 3494 val = (de_1st + de_2nd) / 2; 3495 3496 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3497 "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n", 3498 path, val, de_1st, de_2nd); 3499 } else { 3500 val = tssi_info->tssi_6g_mcs[path][gidx]; 3501 3502 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3503 "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val); 3504 } 3505 3506 return val; 3507 } 3508 3509 static s8 phy_tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, 3510 enum rtw89_phy_idx phy, 3511 const struct rtw89_chan *chan, 3512 enum rtw89_rf_path path) 3513 { 3514 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; 3515 enum rtw89_band band = chan->band_type; 3516 u8 ch = chan->channel; 3517 u32 tgidx_1st; 3518 u32 tgidx_2nd; 3519 s8 tde_1st; 3520 s8 tde_2nd; 3521 u32 tgidx; 3522 s8 val; 3523 3524 if (band == RTW89_BAND_6G) 3525 goto calc_6g; 3526 3527 tgidx = phy_tssi_get_trim_group(ch); 3528 3529 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3530 "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n", 3531 path, tgidx); 3532 3533 if (PHY_IS_TSSI_EXTRA_GROUP(tgidx)) { 3534 tgidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(tgidx); 3535 tgidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(tgidx); 3536 tde_1st = tssi_info->tssi_trim[path][tgidx_1st]; 3537 tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd]; 3538 val = (tde_1st + tde_2nd) / 2; 3539 3540 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3541 "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n", 3542 path, val, tde_1st, tde_2nd); 3543 } else { 3544 val = tssi_info->tssi_trim[path][tgidx]; 3545 3546 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3547 "[TSSI][TRIM]: path=%d mcs trim_de=%d\n", 3548 path, val); 3549 } 3550 3551 return val; 3552 3553 calc_6g: 3554 tgidx = phy_tssi_get_6g_trim_group(ch); 3555 3556 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3557 "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n", 3558 path, tgidx); 3559 3560 if (PHY_IS_TSSI_EXTRA_GROUP(tgidx)) { 3561 tgidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(tgidx); 3562 tgidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(tgidx); 3563 tde_1st = tssi_info->tssi_trim_6g[path][tgidx_1st]; 3564 tde_2nd = tssi_info->tssi_trim_6g[path][tgidx_2nd]; 3565 val = (tde_1st + tde_2nd) / 2; 3566 3567 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3568 "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n", 3569 path, val, tde_1st, tde_2nd); 3570 } else { 3571 val = tssi_info->tssi_trim_6g[path][tgidx]; 3572 3573 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3574 "[TSSI][TRIM]: path=%d mcs trim_de=%d\n", 3575 path, val); 3576 } 3577 3578 return val; 3579 } 3580 3581 void rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(struct rtw89_dev *rtwdev, 3582 enum rtw89_phy_idx phy, 3583 const struct rtw89_chan *chan, 3584 struct rtw89_h2c_rf_tssi *h2c) 3585 { 3586 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; 3587 u8 ch = chan->channel; 3588 s8 trim_de; 3589 s8 ofdm_de; 3590 s8 cck_de; 3591 u8 gidx; 3592 s8 val; 3593 int i; 3594 3595 rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n", 3596 phy, ch); 3597 3598 for (i = RF_PATH_A; i <= RF_PATH_B; i++) { 3599 trim_de = phy_tssi_get_ofdm_trim_de(rtwdev, phy, chan, i); 3600 h2c->curr_tssi_trim_de[i] = trim_de; 3601 3602 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3603 "[TSSI][TRIM]: path=%d trim_de=0x%x\n", i, trim_de); 3604 3605 gidx = phy_tssi_get_cck_group(ch); 3606 cck_de = tssi_info->tssi_cck[i][gidx]; 3607 val = u32_get_bits(cck_de + trim_de, 0xff); 3608 3609 h2c->curr_tssi_cck_de[i] = 0x0; 3610 h2c->curr_tssi_cck_de_20m[i] = val; 3611 h2c->curr_tssi_cck_de_40m[i] = val; 3612 h2c->curr_tssi_efuse_cck_de[i] = cck_de; 3613 3614 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3615 "[TSSI][TRIM]: path=%d cck_de=0x%x\n", i, cck_de); 3616 3617 ofdm_de = phy_tssi_get_ofdm_de(rtwdev, phy, chan, i); 3618 val = u32_get_bits(ofdm_de + trim_de, 0xff); 3619 3620 h2c->curr_tssi_ofdm_de[i] = 0x0; 3621 h2c->curr_tssi_ofdm_de_20m[i] = val; 3622 h2c->curr_tssi_ofdm_de_40m[i] = val; 3623 h2c->curr_tssi_ofdm_de_80m[i] = val; 3624 h2c->curr_tssi_ofdm_de_160m[i] = val; 3625 h2c->curr_tssi_ofdm_de_320m[i] = val; 3626 h2c->curr_tssi_efuse_ofdm_de[i] = ofdm_de; 3627 3628 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3629 "[TSSI][TRIM]: path=%d ofdm_de=0x%x\n", i, ofdm_de); 3630 } 3631 } 3632 3633 void rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(struct rtw89_dev *rtwdev, 3634 enum rtw89_phy_idx phy, 3635 const struct rtw89_chan *chan, 3636 struct rtw89_h2c_rf_tssi *h2c) 3637 { 3638 struct rtw89_fw_txpwr_track_cfg *trk = rtwdev->fw.elm_info.txpwr_trk; 3639 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; 3640 const s8 *thm_up[RF_PATH_B + 1] = {}; 3641 const s8 *thm_down[RF_PATH_B + 1] = {}; 3642 u8 subband = chan->subband_type; 3643 s8 thm_ofst[128] = {0}; 3644 u8 thermal; 3645 u8 path; 3646 u8 i, j; 3647 3648 switch (subband) { 3649 default: 3650 case RTW89_CH_2G: 3651 thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_P][0]; 3652 thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_N][0]; 3653 thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_P][0]; 3654 thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_N][0]; 3655 break; 3656 case RTW89_CH_5G_BAND_1: 3657 thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][0]; 3658 thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][0]; 3659 thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][0]; 3660 thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][0]; 3661 break; 3662 case RTW89_CH_5G_BAND_3: 3663 thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][1]; 3664 thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][1]; 3665 thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][1]; 3666 thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][1]; 3667 break; 3668 case RTW89_CH_5G_BAND_4: 3669 thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][2]; 3670 thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][2]; 3671 thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][2]; 3672 thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][2]; 3673 break; 3674 case RTW89_CH_6G_BAND_IDX0: 3675 case RTW89_CH_6G_BAND_IDX1: 3676 thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][0]; 3677 thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][0]; 3678 thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][0]; 3679 thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][0]; 3680 break; 3681 case RTW89_CH_6G_BAND_IDX2: 3682 case RTW89_CH_6G_BAND_IDX3: 3683 thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][1]; 3684 thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][1]; 3685 thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][1]; 3686 thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][1]; 3687 break; 3688 case RTW89_CH_6G_BAND_IDX4: 3689 case RTW89_CH_6G_BAND_IDX5: 3690 thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][2]; 3691 thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][2]; 3692 thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][2]; 3693 thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][2]; 3694 break; 3695 case RTW89_CH_6G_BAND_IDX6: 3696 case RTW89_CH_6G_BAND_IDX7: 3697 thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][3]; 3698 thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][3]; 3699 thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][3]; 3700 thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][3]; 3701 break; 3702 } 3703 3704 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3705 "[TSSI] tmeter tbl on subband: %u\n", subband); 3706 3707 for (path = RF_PATH_A; path <= RF_PATH_B; path++) { 3708 thermal = tssi_info->thermal[path]; 3709 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3710 "path: %u, pg thermal: 0x%x\n", path, thermal); 3711 3712 if (thermal == 0xff) { 3713 h2c->pg_thermal[path] = 0x38; 3714 memset(h2c->ftable[path], 0, sizeof(h2c->ftable[path])); 3715 continue; 3716 } 3717 3718 h2c->pg_thermal[path] = thermal; 3719 3720 i = 0; 3721 for (j = 0; j < 64; j++) 3722 thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ? 3723 thm_up[path][i++] : 3724 thm_up[path][DELTA_SWINGIDX_SIZE - 1]; 3725 3726 i = 1; 3727 for (j = 127; j >= 64; j--) 3728 thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ? 3729 -thm_down[path][i++] : 3730 -thm_down[path][DELTA_SWINGIDX_SIZE - 1]; 3731 3732 for (i = 0; i < 128; i += 4) { 3733 h2c->ftable[path][i + 0] = thm_ofst[i + 3]; 3734 h2c->ftable[path][i + 1] = thm_ofst[i + 2]; 3735 h2c->ftable[path][i + 2] = thm_ofst[i + 1]; 3736 h2c->ftable[path][i + 3] = thm_ofst[i + 0]; 3737 3738 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3739 "thm ofst [%x]: %02x %02x %02x %02x\n", 3740 i, thm_ofst[i], thm_ofst[i + 1], 3741 thm_ofst[i + 2], thm_ofst[i + 3]); 3742 } 3743 } 3744 } 3745 3746 static u8 rtw89_phy_cfo_get_xcap_reg(struct rtw89_dev *rtwdev, bool sc_xo) 3747 { 3748 const struct rtw89_xtal_info *xtal = rtwdev->chip->xtal_info; 3749 u32 reg_mask; 3750 3751 if (sc_xo) 3752 reg_mask = xtal->sc_xo_mask; 3753 else 3754 reg_mask = xtal->sc_xi_mask; 3755 3756 return (u8)rtw89_read32_mask(rtwdev, xtal->xcap_reg, reg_mask); 3757 } 3758 3759 static void rtw89_phy_cfo_set_xcap_reg(struct rtw89_dev *rtwdev, bool sc_xo, 3760 u8 val) 3761 { 3762 const struct rtw89_xtal_info *xtal = rtwdev->chip->xtal_info; 3763 u32 reg_mask; 3764 3765 if (sc_xo) 3766 reg_mask = xtal->sc_xo_mask; 3767 else 3768 reg_mask = xtal->sc_xi_mask; 3769 3770 rtw89_write32_mask(rtwdev, xtal->xcap_reg, reg_mask, val); 3771 } 3772 3773 static void rtw89_phy_cfo_set_crystal_cap(struct rtw89_dev *rtwdev, 3774 u8 crystal_cap, bool force) 3775 { 3776 struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking; 3777 const struct rtw89_chip_info *chip = rtwdev->chip; 3778 u8 sc_xi_val, sc_xo_val; 3779 3780 if (!force && cfo->crystal_cap == crystal_cap) 3781 return; 3782 crystal_cap = clamp_t(u8, crystal_cap, 0, 127); 3783 if (chip->chip_id == RTL8852A || chip->chip_id == RTL8851B) { 3784 rtw89_phy_cfo_set_xcap_reg(rtwdev, true, crystal_cap); 3785 rtw89_phy_cfo_set_xcap_reg(rtwdev, false, crystal_cap); 3786 sc_xo_val = rtw89_phy_cfo_get_xcap_reg(rtwdev, true); 3787 sc_xi_val = rtw89_phy_cfo_get_xcap_reg(rtwdev, false); 3788 } else { 3789 rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XO, 3790 crystal_cap, XTAL_SC_XO_MASK); 3791 rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XI, 3792 crystal_cap, XTAL_SC_XI_MASK); 3793 rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XO, &sc_xo_val); 3794 rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XI, &sc_xi_val); 3795 } 3796 cfo->crystal_cap = sc_xi_val; 3797 cfo->x_cap_ofst = (s8)((int)cfo->crystal_cap - cfo->def_x_cap); 3798 3799 rtw89_debug(rtwdev, RTW89_DBG_CFO, "Set sc_xi=0x%x\n", sc_xi_val); 3800 rtw89_debug(rtwdev, RTW89_DBG_CFO, "Set sc_xo=0x%x\n", sc_xo_val); 3801 rtw89_debug(rtwdev, RTW89_DBG_CFO, "Get xcap_ofst=%d\n", 3802 cfo->x_cap_ofst); 3803 rtw89_debug(rtwdev, RTW89_DBG_CFO, "Set xcap OK\n"); 3804 } 3805 3806 static void rtw89_phy_cfo_reset(struct rtw89_dev *rtwdev) 3807 { 3808 struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking; 3809 u8 cap; 3810 3811 cfo->def_x_cap = cfo->crystal_cap_default & B_AX_XTAL_SC_MASK; 3812 cfo->is_adjust = false; 3813 if (cfo->crystal_cap == cfo->def_x_cap) 3814 return; 3815 cap = cfo->crystal_cap; 3816 cap += (cap > cfo->def_x_cap ? -1 : 1); 3817 rtw89_phy_cfo_set_crystal_cap(rtwdev, cap, false); 3818 rtw89_debug(rtwdev, RTW89_DBG_CFO, 3819 "(0x%x) approach to dflt_val=(0x%x)\n", cfo->crystal_cap, 3820 cfo->def_x_cap); 3821 } 3822 3823 static void rtw89_dcfo_comp(struct rtw89_dev *rtwdev, s32 curr_cfo) 3824 { 3825 const struct rtw89_reg_def *dcfo_comp = rtwdev->chip->dcfo_comp; 3826 bool is_linked = rtwdev->total_sta_assoc > 0; 3827 s32 cfo_avg_312; 3828 s32 dcfo_comp_val; 3829 int sign; 3830 3831 if (rtwdev->chip->chip_id == RTL8922A) 3832 return; 3833 3834 if (!is_linked) { 3835 rtw89_debug(rtwdev, RTW89_DBG_CFO, "DCFO: is_linked=%d\n", 3836 is_linked); 3837 return; 3838 } 3839 rtw89_debug(rtwdev, RTW89_DBG_CFO, "DCFO: curr_cfo=%d\n", curr_cfo); 3840 if (curr_cfo == 0) 3841 return; 3842 dcfo_comp_val = rtw89_phy_read32_mask(rtwdev, R_DCFO, B_DCFO); 3843 sign = curr_cfo > 0 ? 1 : -1; 3844 cfo_avg_312 = curr_cfo / 625 + sign * dcfo_comp_val; 3845 rtw89_debug(rtwdev, RTW89_DBG_CFO, "avg_cfo_312=%d step\n", cfo_avg_312); 3846 if (rtwdev->chip->chip_id == RTL8852A && rtwdev->hal.cv == CHIP_CBV) 3847 cfo_avg_312 = -cfo_avg_312; 3848 rtw89_phy_set_phy_regs(rtwdev, dcfo_comp->addr, dcfo_comp->mask, 3849 cfo_avg_312); 3850 } 3851 3852 static void rtw89_dcfo_comp_init(struct rtw89_dev *rtwdev) 3853 { 3854 const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; 3855 const struct rtw89_chip_info *chip = rtwdev->chip; 3856 const struct rtw89_cfo_regs *cfo = phy->cfo; 3857 3858 rtw89_phy_set_phy_regs(rtwdev, cfo->comp_seg0, cfo->valid_0_mask, 1); 3859 rtw89_phy_set_phy_regs(rtwdev, cfo->comp, cfo->weighting_mask, 8); 3860 3861 if (chip->chip_gen == RTW89_CHIP_AX) { 3862 if (chip->cfo_hw_comp) { 3863 rtw89_write32_mask(rtwdev, R_AX_PWR_UL_CTRL2, 3864 B_AX_PWR_UL_CFO_MASK, 0x6); 3865 } else { 3866 rtw89_phy_set_phy_regs(rtwdev, R_DCFO, B_DCFO, 1); 3867 rtw89_write32_clr(rtwdev, R_AX_PWR_UL_CTRL2, 3868 B_AX_PWR_UL_CFO_MASK); 3869 } 3870 } 3871 } 3872 3873 static void rtw89_phy_cfo_init(struct rtw89_dev *rtwdev) 3874 { 3875 struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking; 3876 struct rtw89_efuse *efuse = &rtwdev->efuse; 3877 3878 cfo->crystal_cap_default = efuse->xtal_cap & B_AX_XTAL_SC_MASK; 3879 cfo->crystal_cap = cfo->crystal_cap_default; 3880 cfo->def_x_cap = cfo->crystal_cap; 3881 cfo->x_cap_ub = min_t(int, cfo->def_x_cap + CFO_BOUND, 0x7f); 3882 cfo->x_cap_lb = max_t(int, cfo->def_x_cap - CFO_BOUND, 0x1); 3883 cfo->is_adjust = false; 3884 cfo->divergence_lock_en = false; 3885 cfo->x_cap_ofst = 0; 3886 cfo->lock_cnt = 0; 3887 cfo->rtw89_multi_cfo_mode = RTW89_TP_BASED_AVG_MODE; 3888 cfo->apply_compensation = false; 3889 cfo->residual_cfo_acc = 0; 3890 rtw89_debug(rtwdev, RTW89_DBG_CFO, "Default xcap=%0x\n", 3891 cfo->crystal_cap_default); 3892 rtw89_phy_cfo_set_crystal_cap(rtwdev, cfo->crystal_cap_default, true); 3893 rtw89_dcfo_comp_init(rtwdev); 3894 cfo->cfo_timer_ms = 2000; 3895 cfo->cfo_trig_by_timer_en = false; 3896 cfo->phy_cfo_trk_cnt = 0; 3897 cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL; 3898 cfo->cfo_ul_ofdma_acc_mode = RTW89_CFO_UL_OFDMA_ACC_ENABLE; 3899 } 3900 3901 static void rtw89_phy_cfo_crystal_cap_adjust(struct rtw89_dev *rtwdev, 3902 s32 curr_cfo) 3903 { 3904 struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking; 3905 s8 crystal_cap = cfo->crystal_cap; 3906 s32 cfo_abs = abs(curr_cfo); 3907 int sign; 3908 3909 if (curr_cfo == 0) { 3910 rtw89_debug(rtwdev, RTW89_DBG_CFO, "curr_cfo=0\n"); 3911 return; 3912 } 3913 if (!cfo->is_adjust) { 3914 if (cfo_abs > CFO_TRK_ENABLE_TH) 3915 cfo->is_adjust = true; 3916 } else { 3917 if (cfo_abs <= CFO_TRK_STOP_TH) 3918 cfo->is_adjust = false; 3919 } 3920 if (!cfo->is_adjust) { 3921 rtw89_debug(rtwdev, RTW89_DBG_CFO, "Stop CFO tracking\n"); 3922 return; 3923 } 3924 sign = curr_cfo > 0 ? 1 : -1; 3925 if (cfo_abs > CFO_TRK_STOP_TH_4) 3926 crystal_cap += 7 * sign; 3927 else if (cfo_abs > CFO_TRK_STOP_TH_3) 3928 crystal_cap += 5 * sign; 3929 else if (cfo_abs > CFO_TRK_STOP_TH_2) 3930 crystal_cap += 3 * sign; 3931 else if (cfo_abs > CFO_TRK_STOP_TH_1) 3932 crystal_cap += 1 * sign; 3933 else 3934 return; 3935 rtw89_phy_cfo_set_crystal_cap(rtwdev, (u8)crystal_cap, false); 3936 rtw89_debug(rtwdev, RTW89_DBG_CFO, 3937 "X_cap{Curr,Default}={0x%x,0x%x}\n", 3938 cfo->crystal_cap, cfo->def_x_cap); 3939 } 3940 3941 static s32 rtw89_phy_average_cfo_calc(struct rtw89_dev *rtwdev) 3942 { 3943 const struct rtw89_chip_info *chip = rtwdev->chip; 3944 struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking; 3945 s32 cfo_khz_all = 0; 3946 s32 cfo_cnt_all = 0; 3947 s32 cfo_all_avg = 0; 3948 u8 i; 3949 3950 if (rtwdev->total_sta_assoc != 1) 3951 return 0; 3952 rtw89_debug(rtwdev, RTW89_DBG_CFO, "one_entry_only\n"); 3953 for (i = 0; i < CFO_TRACK_MAX_USER; i++) { 3954 if (cfo->cfo_cnt[i] == 0) 3955 continue; 3956 cfo_khz_all += cfo->cfo_tail[i]; 3957 cfo_cnt_all += cfo->cfo_cnt[i]; 3958 cfo_all_avg = phy_div(cfo_khz_all, cfo_cnt_all); 3959 cfo->pre_cfo_avg[i] = cfo->cfo_avg[i]; 3960 cfo->dcfo_avg = phy_div(cfo_khz_all << chip->dcfo_comp_sft, 3961 cfo_cnt_all); 3962 } 3963 rtw89_debug(rtwdev, RTW89_DBG_CFO, 3964 "CFO track for macid = %d\n", i); 3965 rtw89_debug(rtwdev, RTW89_DBG_CFO, 3966 "Total cfo=%dK, pkt_cnt=%d, avg_cfo=%dK\n", 3967 cfo_khz_all, cfo_cnt_all, cfo_all_avg); 3968 return cfo_all_avg; 3969 } 3970 3971 static s32 rtw89_phy_multi_sta_cfo_calc(struct rtw89_dev *rtwdev) 3972 { 3973 struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking; 3974 struct rtw89_traffic_stats *stats = &rtwdev->stats; 3975 s32 target_cfo = 0; 3976 s32 cfo_khz_all = 0; 3977 s32 cfo_khz_all_tp_wgt = 0; 3978 s32 cfo_avg = 0; 3979 s32 max_cfo_lb = BIT(31); 3980 s32 min_cfo_ub = GENMASK(30, 0); 3981 u16 cfo_cnt_all = 0; 3982 u8 active_entry_cnt = 0; 3983 u8 sta_cnt = 0; 3984 u32 tp_all = 0; 3985 u8 i; 3986 u8 cfo_tol = 0; 3987 3988 rtw89_debug(rtwdev, RTW89_DBG_CFO, "Multi entry cfo_trk\n"); 3989 if (cfo->rtw89_multi_cfo_mode == RTW89_PKT_BASED_AVG_MODE) { 3990 rtw89_debug(rtwdev, RTW89_DBG_CFO, "Pkt based avg mode\n"); 3991 for (i = 0; i < CFO_TRACK_MAX_USER; i++) { 3992 if (cfo->cfo_cnt[i] == 0) 3993 continue; 3994 cfo_khz_all += cfo->cfo_tail[i]; 3995 cfo_cnt_all += cfo->cfo_cnt[i]; 3996 cfo_avg = phy_div(cfo_khz_all, (s32)cfo_cnt_all); 3997 rtw89_debug(rtwdev, RTW89_DBG_CFO, 3998 "Msta cfo=%d, pkt_cnt=%d, avg_cfo=%d\n", 3999 cfo_khz_all, cfo_cnt_all, cfo_avg); 4000 target_cfo = cfo_avg; 4001 } 4002 } else if (cfo->rtw89_multi_cfo_mode == RTW89_ENTRY_BASED_AVG_MODE) { 4003 rtw89_debug(rtwdev, RTW89_DBG_CFO, "Entry based avg mode\n"); 4004 for (i = 0; i < CFO_TRACK_MAX_USER; i++) { 4005 if (cfo->cfo_cnt[i] == 0) 4006 continue; 4007 cfo->cfo_avg[i] = phy_div(cfo->cfo_tail[i], 4008 (s32)cfo->cfo_cnt[i]); 4009 cfo_khz_all += cfo->cfo_avg[i]; 4010 rtw89_debug(rtwdev, RTW89_DBG_CFO, 4011 "Macid=%d, cfo_avg=%d\n", i, 4012 cfo->cfo_avg[i]); 4013 } 4014 sta_cnt = rtwdev->total_sta_assoc; 4015 cfo_avg = phy_div(cfo_khz_all, (s32)sta_cnt); 4016 rtw89_debug(rtwdev, RTW89_DBG_CFO, 4017 "Msta cfo_acc=%d, ent_cnt=%d, avg_cfo=%d\n", 4018 cfo_khz_all, sta_cnt, cfo_avg); 4019 target_cfo = cfo_avg; 4020 } else if (cfo->rtw89_multi_cfo_mode == RTW89_TP_BASED_AVG_MODE) { 4021 rtw89_debug(rtwdev, RTW89_DBG_CFO, "TP based avg mode\n"); 4022 cfo_tol = cfo->sta_cfo_tolerance; 4023 for (i = 0; i < CFO_TRACK_MAX_USER; i++) { 4024 sta_cnt++; 4025 if (cfo->cfo_cnt[i] != 0) { 4026 cfo->cfo_avg[i] = phy_div(cfo->cfo_tail[i], 4027 (s32)cfo->cfo_cnt[i]); 4028 active_entry_cnt++; 4029 } else { 4030 cfo->cfo_avg[i] = cfo->pre_cfo_avg[i]; 4031 } 4032 max_cfo_lb = max(cfo->cfo_avg[i] - cfo_tol, max_cfo_lb); 4033 min_cfo_ub = min(cfo->cfo_avg[i] + cfo_tol, min_cfo_ub); 4034 cfo_khz_all += cfo->cfo_avg[i]; 4035 /* need tp for each entry */ 4036 rtw89_debug(rtwdev, RTW89_DBG_CFO, 4037 "[%d] cfo_avg=%d, tp=tbd\n", 4038 i, cfo->cfo_avg[i]); 4039 if (sta_cnt >= rtwdev->total_sta_assoc) 4040 break; 4041 } 4042 tp_all = stats->rx_throughput; /* need tp for each entry */ 4043 cfo_avg = phy_div(cfo_khz_all_tp_wgt, (s32)tp_all); 4044 4045 rtw89_debug(rtwdev, RTW89_DBG_CFO, "Assoc sta cnt=%d\n", 4046 sta_cnt); 4047 rtw89_debug(rtwdev, RTW89_DBG_CFO, "Active sta cnt=%d\n", 4048 active_entry_cnt); 4049 rtw89_debug(rtwdev, RTW89_DBG_CFO, 4050 "Msta cfo with tp_wgt=%d, avg_cfo=%d\n", 4051 cfo_khz_all_tp_wgt, cfo_avg); 4052 rtw89_debug(rtwdev, RTW89_DBG_CFO, "cfo_lb=%d,cfo_ub=%d\n", 4053 max_cfo_lb, min_cfo_ub); 4054 if (max_cfo_lb <= min_cfo_ub) { 4055 rtw89_debug(rtwdev, RTW89_DBG_CFO, 4056 "cfo win_size=%d\n", 4057 min_cfo_ub - max_cfo_lb); 4058 target_cfo = clamp(cfo_avg, max_cfo_lb, min_cfo_ub); 4059 } else { 4060 rtw89_debug(rtwdev, RTW89_DBG_CFO, 4061 "No intersection of cfo tolerance windows\n"); 4062 target_cfo = phy_div(cfo_khz_all, (s32)sta_cnt); 4063 } 4064 for (i = 0; i < CFO_TRACK_MAX_USER; i++) 4065 cfo->pre_cfo_avg[i] = cfo->cfo_avg[i]; 4066 } 4067 rtw89_debug(rtwdev, RTW89_DBG_CFO, "Target cfo=%d\n", target_cfo); 4068 return target_cfo; 4069 } 4070 4071 static void rtw89_phy_cfo_statistics_reset(struct rtw89_dev *rtwdev) 4072 { 4073 struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking; 4074 4075 memset(&cfo->cfo_tail, 0, sizeof(cfo->cfo_tail)); 4076 memset(&cfo->cfo_cnt, 0, sizeof(cfo->cfo_cnt)); 4077 cfo->packet_count = 0; 4078 cfo->packet_count_pre = 0; 4079 cfo->cfo_avg_pre = 0; 4080 } 4081 4082 static void rtw89_phy_cfo_dm(struct rtw89_dev *rtwdev) 4083 { 4084 struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking; 4085 s32 new_cfo = 0; 4086 bool x_cap_update = false; 4087 u8 pre_x_cap = cfo->crystal_cap; 4088 u8 dcfo_comp_sft = rtwdev->chip->dcfo_comp_sft; 4089 4090 cfo->dcfo_avg = 0; 4091 rtw89_debug(rtwdev, RTW89_DBG_CFO, "CFO:total_sta_assoc=%d\n", 4092 rtwdev->total_sta_assoc); 4093 if (rtwdev->total_sta_assoc == 0) { 4094 rtw89_phy_cfo_reset(rtwdev); 4095 return; 4096 } 4097 if (cfo->packet_count == 0) { 4098 rtw89_debug(rtwdev, RTW89_DBG_CFO, "Pkt cnt = 0\n"); 4099 return; 4100 } 4101 if (cfo->packet_count == cfo->packet_count_pre) { 4102 rtw89_debug(rtwdev, RTW89_DBG_CFO, "Pkt cnt doesn't change\n"); 4103 return; 4104 } 4105 if (rtwdev->total_sta_assoc == 1) 4106 new_cfo = rtw89_phy_average_cfo_calc(rtwdev); 4107 else 4108 new_cfo = rtw89_phy_multi_sta_cfo_calc(rtwdev); 4109 if (cfo->divergence_lock_en) { 4110 cfo->lock_cnt++; 4111 if (cfo->lock_cnt > CFO_PERIOD_CNT) { 4112 cfo->divergence_lock_en = false; 4113 cfo->lock_cnt = 0; 4114 } else { 4115 rtw89_phy_cfo_reset(rtwdev); 4116 } 4117 return; 4118 } 4119 if (cfo->crystal_cap >= cfo->x_cap_ub || 4120 cfo->crystal_cap <= cfo->x_cap_lb) { 4121 cfo->divergence_lock_en = true; 4122 rtw89_phy_cfo_reset(rtwdev); 4123 return; 4124 } 4125 4126 rtw89_phy_cfo_crystal_cap_adjust(rtwdev, new_cfo); 4127 cfo->cfo_avg_pre = new_cfo; 4128 cfo->dcfo_avg_pre = cfo->dcfo_avg; 4129 x_cap_update = cfo->crystal_cap != pre_x_cap; 4130 rtw89_debug(rtwdev, RTW89_DBG_CFO, "Xcap_up=%d\n", x_cap_update); 4131 rtw89_debug(rtwdev, RTW89_DBG_CFO, "Xcap: D:%x C:%x->%x, ofst=%d\n", 4132 cfo->def_x_cap, pre_x_cap, cfo->crystal_cap, 4133 cfo->x_cap_ofst); 4134 if (x_cap_update) { 4135 if (cfo->dcfo_avg > 0) 4136 cfo->dcfo_avg -= CFO_SW_COMP_FINE_TUNE << dcfo_comp_sft; 4137 else 4138 cfo->dcfo_avg += CFO_SW_COMP_FINE_TUNE << dcfo_comp_sft; 4139 } 4140 rtw89_dcfo_comp(rtwdev, cfo->dcfo_avg); 4141 rtw89_phy_cfo_statistics_reset(rtwdev); 4142 } 4143 4144 void rtw89_phy_cfo_track_work(struct work_struct *work) 4145 { 4146 struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, 4147 cfo_track_work.work); 4148 struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking; 4149 4150 mutex_lock(&rtwdev->mutex); 4151 if (!cfo->cfo_trig_by_timer_en) 4152 goto out; 4153 rtw89_leave_ps_mode(rtwdev); 4154 rtw89_phy_cfo_dm(rtwdev); 4155 ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->cfo_track_work, 4156 msecs_to_jiffies(cfo->cfo_timer_ms)); 4157 out: 4158 mutex_unlock(&rtwdev->mutex); 4159 } 4160 4161 static void rtw89_phy_cfo_start_work(struct rtw89_dev *rtwdev) 4162 { 4163 struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking; 4164 4165 ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->cfo_track_work, 4166 msecs_to_jiffies(cfo->cfo_timer_ms)); 4167 } 4168 4169 void rtw89_phy_cfo_track(struct rtw89_dev *rtwdev) 4170 { 4171 struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking; 4172 struct rtw89_traffic_stats *stats = &rtwdev->stats; 4173 bool is_ul_ofdma = false, ofdma_acc_en = false; 4174 4175 if (stats->rx_tf_periodic > CFO_TF_CNT_TH) 4176 is_ul_ofdma = true; 4177 if (cfo->cfo_ul_ofdma_acc_mode == RTW89_CFO_UL_OFDMA_ACC_ENABLE && 4178 is_ul_ofdma) 4179 ofdma_acc_en = true; 4180 4181 switch (cfo->phy_cfo_status) { 4182 case RTW89_PHY_DCFO_STATE_NORMAL: 4183 if (stats->tx_throughput >= CFO_TP_UPPER) { 4184 cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_ENHANCE; 4185 cfo->cfo_trig_by_timer_en = true; 4186 cfo->cfo_timer_ms = CFO_COMP_PERIOD; 4187 rtw89_phy_cfo_start_work(rtwdev); 4188 } 4189 break; 4190 case RTW89_PHY_DCFO_STATE_ENHANCE: 4191 if (stats->tx_throughput <= CFO_TP_LOWER) 4192 cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL; 4193 else if (ofdma_acc_en && 4194 cfo->phy_cfo_trk_cnt >= CFO_PERIOD_CNT) 4195 cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_HOLD; 4196 else 4197 cfo->phy_cfo_trk_cnt++; 4198 4199 if (cfo->phy_cfo_status == RTW89_PHY_DCFO_STATE_NORMAL) { 4200 cfo->phy_cfo_trk_cnt = 0; 4201 cfo->cfo_trig_by_timer_en = false; 4202 } 4203 break; 4204 case RTW89_PHY_DCFO_STATE_HOLD: 4205 if (stats->tx_throughput <= CFO_TP_LOWER) { 4206 cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL; 4207 cfo->phy_cfo_trk_cnt = 0; 4208 cfo->cfo_trig_by_timer_en = false; 4209 } else { 4210 cfo->phy_cfo_trk_cnt++; 4211 } 4212 break; 4213 default: 4214 cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL; 4215 cfo->phy_cfo_trk_cnt = 0; 4216 break; 4217 } 4218 rtw89_debug(rtwdev, RTW89_DBG_CFO, 4219 "[CFO]WatchDog tp=%d,state=%d,timer_en=%d,trk_cnt=%d,thermal=%ld\n", 4220 stats->tx_throughput, cfo->phy_cfo_status, 4221 cfo->cfo_trig_by_timer_en, cfo->phy_cfo_trk_cnt, 4222 ewma_thermal_read(&rtwdev->phystat.avg_thermal[0])); 4223 if (cfo->cfo_trig_by_timer_en) 4224 return; 4225 rtw89_phy_cfo_dm(rtwdev); 4226 } 4227 4228 void rtw89_phy_cfo_parse(struct rtw89_dev *rtwdev, s16 cfo_val, 4229 struct rtw89_rx_phy_ppdu *phy_ppdu) 4230 { 4231 struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking; 4232 u8 macid = phy_ppdu->mac_id; 4233 4234 if (macid >= CFO_TRACK_MAX_USER) { 4235 rtw89_warn(rtwdev, "mac_id %d is out of range\n", macid); 4236 return; 4237 } 4238 4239 cfo->cfo_tail[macid] += cfo_val; 4240 cfo->cfo_cnt[macid]++; 4241 cfo->packet_count++; 4242 } 4243 4244 void rtw89_phy_ul_tb_assoc(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 4245 { 4246 const struct rtw89_chip_info *chip = rtwdev->chip; 4247 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 4248 rtwvif->sub_entity_idx); 4249 struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info; 4250 4251 if (!chip->ul_tb_waveform_ctrl) 4252 return; 4253 4254 rtwvif->def_tri_idx = 4255 rtw89_phy_read32_mask(rtwdev, R_DCFO_OPT, B_TXSHAPE_TRIANGULAR_CFG); 4256 4257 if (chip->chip_id == RTL8852B && rtwdev->hal.cv > CHIP_CBV) 4258 rtwvif->dyn_tb_bedge_en = false; 4259 else if (chan->band_type >= RTW89_BAND_5G && 4260 chan->band_width >= RTW89_CHANNEL_WIDTH_40) 4261 rtwvif->dyn_tb_bedge_en = true; 4262 else 4263 rtwvif->dyn_tb_bedge_en = false; 4264 4265 rtw89_debug(rtwdev, RTW89_DBG_UL_TB, 4266 "[ULTB] def_if_bandedge=%d, def_tri_idx=%d\n", 4267 ul_tb_info->def_if_bandedge, rtwvif->def_tri_idx); 4268 rtw89_debug(rtwdev, RTW89_DBG_UL_TB, 4269 "[ULTB] dyn_tb_begde_en=%d, dyn_tb_tri_en=%d\n", 4270 rtwvif->dyn_tb_bedge_en, ul_tb_info->dyn_tb_tri_en); 4271 } 4272 4273 struct rtw89_phy_ul_tb_check_data { 4274 bool valid; 4275 bool high_tf_client; 4276 bool low_tf_client; 4277 bool dyn_tb_bedge_en; 4278 u8 def_tri_idx; 4279 }; 4280 4281 struct rtw89_phy_power_diff { 4282 u32 q_00; 4283 u32 q_11; 4284 u32 q_matrix_en; 4285 u32 ultb_1t_norm_160; 4286 u32 ultb_2t_norm_160; 4287 u32 com1_norm_1sts; 4288 u32 com2_resp_1sts_path; 4289 }; 4290 4291 static void rtw89_phy_ofdma_power_diff(struct rtw89_dev *rtwdev, 4292 struct rtw89_vif *rtwvif) 4293 { 4294 static const struct rtw89_phy_power_diff table[2] = { 4295 {0x0, 0x0, 0x0, 0x0, 0xf4, 0x3, 0x3}, 4296 {0xb50, 0xb50, 0x1, 0xc, 0x0, 0x1, 0x1}, 4297 }; 4298 const struct rtw89_phy_power_diff *param; 4299 u32 reg; 4300 4301 if (!rtwdev->chip->ul_tb_pwr_diff) 4302 return; 4303 4304 if (rtwvif->pwr_diff_en == rtwvif->pre_pwr_diff_en) { 4305 rtwvif->pwr_diff_en = false; 4306 return; 4307 } 4308 4309 rtwvif->pre_pwr_diff_en = rtwvif->pwr_diff_en; 4310 param = &table[rtwvif->pwr_diff_en]; 4311 4312 rtw89_phy_write32_mask(rtwdev, R_Q_MATRIX_00, B_Q_MATRIX_00_REAL, 4313 param->q_00); 4314 rtw89_phy_write32_mask(rtwdev, R_Q_MATRIX_11, B_Q_MATRIX_11_REAL, 4315 param->q_11); 4316 rtw89_phy_write32_mask(rtwdev, R_CUSTOMIZE_Q_MATRIX, 4317 B_CUSTOMIZE_Q_MATRIX_EN, param->q_matrix_en); 4318 4319 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_1T, rtwvif->mac_idx); 4320 rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_1T_NORM_BW160, 4321 param->ultb_1t_norm_160); 4322 4323 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_2T, rtwvif->mac_idx); 4324 rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_2T_NORM_BW160, 4325 param->ultb_2t_norm_160); 4326 4327 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PATH_COM1, rtwvif->mac_idx); 4328 rtw89_write32_mask(rtwdev, reg, B_AX_PATH_COM1_NORM_1STS, 4329 param->com1_norm_1sts); 4330 4331 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PATH_COM2, rtwvif->mac_idx); 4332 rtw89_write32_mask(rtwdev, reg, B_AX_PATH_COM2_RESP_1STS_PATH, 4333 param->com2_resp_1sts_path); 4334 } 4335 4336 static 4337 void rtw89_phy_ul_tb_ctrl_check(struct rtw89_dev *rtwdev, 4338 struct rtw89_vif *rtwvif, 4339 struct rtw89_phy_ul_tb_check_data *ul_tb_data) 4340 { 4341 struct rtw89_traffic_stats *stats = &rtwdev->stats; 4342 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 4343 4344 if (rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION) 4345 return; 4346 4347 if (!vif->cfg.assoc) 4348 return; 4349 4350 if (rtwdev->chip->ul_tb_waveform_ctrl) { 4351 if (stats->rx_tf_periodic > UL_TB_TF_CNT_L2H_TH) 4352 ul_tb_data->high_tf_client = true; 4353 else if (stats->rx_tf_periodic < UL_TB_TF_CNT_H2L_TH) 4354 ul_tb_data->low_tf_client = true; 4355 4356 ul_tb_data->valid = true; 4357 ul_tb_data->def_tri_idx = rtwvif->def_tri_idx; 4358 ul_tb_data->dyn_tb_bedge_en = rtwvif->dyn_tb_bedge_en; 4359 } 4360 4361 rtw89_phy_ofdma_power_diff(rtwdev, rtwvif); 4362 } 4363 4364 static void rtw89_phy_ul_tb_waveform_ctrl(struct rtw89_dev *rtwdev, 4365 struct rtw89_phy_ul_tb_check_data *ul_tb_data) 4366 { 4367 struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info; 4368 4369 if (!rtwdev->chip->ul_tb_waveform_ctrl) 4370 return; 4371 4372 if (ul_tb_data->dyn_tb_bedge_en) { 4373 if (ul_tb_data->high_tf_client) { 4374 rtw89_phy_write32_mask(rtwdev, R_BANDEDGE, B_BANDEDGE_EN, 0); 4375 rtw89_debug(rtwdev, RTW89_DBG_UL_TB, 4376 "[ULTB] Turn off if_bandedge\n"); 4377 } else if (ul_tb_data->low_tf_client) { 4378 rtw89_phy_write32_mask(rtwdev, R_BANDEDGE, B_BANDEDGE_EN, 4379 ul_tb_info->def_if_bandedge); 4380 rtw89_debug(rtwdev, RTW89_DBG_UL_TB, 4381 "[ULTB] Set to default if_bandedge = %d\n", 4382 ul_tb_info->def_if_bandedge); 4383 } 4384 } 4385 4386 if (ul_tb_info->dyn_tb_tri_en) { 4387 if (ul_tb_data->high_tf_client) { 4388 rtw89_phy_write32_mask(rtwdev, R_DCFO_OPT, 4389 B_TXSHAPE_TRIANGULAR_CFG, 0); 4390 rtw89_debug(rtwdev, RTW89_DBG_UL_TB, 4391 "[ULTB] Turn off Tx triangle\n"); 4392 } else if (ul_tb_data->low_tf_client) { 4393 rtw89_phy_write32_mask(rtwdev, R_DCFO_OPT, 4394 B_TXSHAPE_TRIANGULAR_CFG, 4395 ul_tb_data->def_tri_idx); 4396 rtw89_debug(rtwdev, RTW89_DBG_UL_TB, 4397 "[ULTB] Set to default tx_shap_idx = %d\n", 4398 ul_tb_data->def_tri_idx); 4399 } 4400 } 4401 } 4402 4403 void rtw89_phy_ul_tb_ctrl_track(struct rtw89_dev *rtwdev) 4404 { 4405 const struct rtw89_chip_info *chip = rtwdev->chip; 4406 struct rtw89_phy_ul_tb_check_data ul_tb_data = {}; 4407 struct rtw89_vif *rtwvif; 4408 4409 if (!chip->ul_tb_waveform_ctrl && !chip->ul_tb_pwr_diff) 4410 return; 4411 4412 if (rtwdev->total_sta_assoc != 1) 4413 return; 4414 4415 rtw89_for_each_rtwvif(rtwdev, rtwvif) 4416 rtw89_phy_ul_tb_ctrl_check(rtwdev, rtwvif, &ul_tb_data); 4417 4418 if (!ul_tb_data.valid) 4419 return; 4420 4421 rtw89_phy_ul_tb_waveform_ctrl(rtwdev, &ul_tb_data); 4422 } 4423 4424 static void rtw89_phy_ul_tb_info_init(struct rtw89_dev *rtwdev) 4425 { 4426 const struct rtw89_chip_info *chip = rtwdev->chip; 4427 struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info; 4428 4429 if (!chip->ul_tb_waveform_ctrl) 4430 return; 4431 4432 ul_tb_info->dyn_tb_tri_en = true; 4433 ul_tb_info->def_if_bandedge = 4434 rtw89_phy_read32_mask(rtwdev, R_BANDEDGE, B_BANDEDGE_EN); 4435 } 4436 4437 static 4438 void rtw89_phy_antdiv_sts_instance_reset(struct rtw89_antdiv_stats *antdiv_sts) 4439 { 4440 ewma_rssi_init(&antdiv_sts->cck_rssi_avg); 4441 ewma_rssi_init(&antdiv_sts->ofdm_rssi_avg); 4442 ewma_rssi_init(&antdiv_sts->non_legacy_rssi_avg); 4443 antdiv_sts->pkt_cnt_cck = 0; 4444 antdiv_sts->pkt_cnt_ofdm = 0; 4445 antdiv_sts->pkt_cnt_non_legacy = 0; 4446 antdiv_sts->evm = 0; 4447 } 4448 4449 static void rtw89_phy_antdiv_sts_instance_add(struct rtw89_dev *rtwdev, 4450 struct rtw89_rx_phy_ppdu *phy_ppdu, 4451 struct rtw89_antdiv_stats *stats) 4452 { 4453 if (rtw89_get_data_rate_mode(rtwdev, phy_ppdu->rate) == DATA_RATE_MODE_NON_HT) { 4454 if (phy_ppdu->rate < RTW89_HW_RATE_OFDM6) { 4455 ewma_rssi_add(&stats->cck_rssi_avg, phy_ppdu->rssi_avg); 4456 stats->pkt_cnt_cck++; 4457 } else { 4458 ewma_rssi_add(&stats->ofdm_rssi_avg, phy_ppdu->rssi_avg); 4459 stats->pkt_cnt_ofdm++; 4460 stats->evm += phy_ppdu->ofdm.evm_min; 4461 } 4462 } else { 4463 ewma_rssi_add(&stats->non_legacy_rssi_avg, phy_ppdu->rssi_avg); 4464 stats->pkt_cnt_non_legacy++; 4465 stats->evm += phy_ppdu->ofdm.evm_min; 4466 } 4467 } 4468 4469 static u8 rtw89_phy_antdiv_sts_instance_get_rssi(struct rtw89_antdiv_stats *stats) 4470 { 4471 if (stats->pkt_cnt_non_legacy >= stats->pkt_cnt_cck && 4472 stats->pkt_cnt_non_legacy >= stats->pkt_cnt_ofdm) 4473 return ewma_rssi_read(&stats->non_legacy_rssi_avg); 4474 else if (stats->pkt_cnt_ofdm >= stats->pkt_cnt_cck && 4475 stats->pkt_cnt_ofdm >= stats->pkt_cnt_non_legacy) 4476 return ewma_rssi_read(&stats->ofdm_rssi_avg); 4477 else 4478 return ewma_rssi_read(&stats->cck_rssi_avg); 4479 } 4480 4481 static u8 rtw89_phy_antdiv_sts_instance_get_evm(struct rtw89_antdiv_stats *stats) 4482 { 4483 return phy_div(stats->evm, stats->pkt_cnt_non_legacy + stats->pkt_cnt_ofdm); 4484 } 4485 4486 void rtw89_phy_antdiv_parse(struct rtw89_dev *rtwdev, 4487 struct rtw89_rx_phy_ppdu *phy_ppdu) 4488 { 4489 struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv; 4490 struct rtw89_hal *hal = &rtwdev->hal; 4491 4492 if (!hal->ant_diversity || hal->ant_diversity_fixed) 4493 return; 4494 4495 rtw89_phy_antdiv_sts_instance_add(rtwdev, phy_ppdu, &antdiv->target_stats); 4496 4497 if (!antdiv->get_stats) 4498 return; 4499 4500 if (hal->antenna_rx == RF_A) 4501 rtw89_phy_antdiv_sts_instance_add(rtwdev, phy_ppdu, &antdiv->main_stats); 4502 else if (hal->antenna_rx == RF_B) 4503 rtw89_phy_antdiv_sts_instance_add(rtwdev, phy_ppdu, &antdiv->aux_stats); 4504 } 4505 4506 static void rtw89_phy_antdiv_reg_init(struct rtw89_dev *rtwdev) 4507 { 4508 rtw89_phy_write32_idx(rtwdev, R_P0_TRSW, B_P0_ANT_TRAIN_EN, 4509 0x0, RTW89_PHY_0); 4510 rtw89_phy_write32_idx(rtwdev, R_P0_TRSW, B_P0_TX_ANT_SEL, 4511 0x0, RTW89_PHY_0); 4512 4513 rtw89_phy_write32_idx(rtwdev, R_P0_ANT_SW, B_P0_TRSW_TX_EXTEND, 4514 0x0, RTW89_PHY_0); 4515 rtw89_phy_write32_idx(rtwdev, R_P0_ANT_SW, B_P0_HW_ANTSW_DIS_BY_GNT_BT, 4516 0x0, RTW89_PHY_0); 4517 4518 rtw89_phy_write32_idx(rtwdev, R_P0_TRSW, B_P0_BT_FORCE_ANTIDX_EN, 4519 0x0, RTW89_PHY_0); 4520 4521 rtw89_phy_write32_idx(rtwdev, R_RFSW_CTRL_ANT0_BASE, B_RFSW_CTRL_ANT_MAPPING, 4522 0x0100, RTW89_PHY_0); 4523 4524 rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_BTG_TRX, 4525 0x1, RTW89_PHY_0); 4526 rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_HW_CTRL, 4527 0x0, RTW89_PHY_0); 4528 rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_SW_2G, 4529 0x0, RTW89_PHY_0); 4530 rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_SW_5G, 4531 0x0, RTW89_PHY_0); 4532 } 4533 4534 static void rtw89_phy_antdiv_sts_reset(struct rtw89_dev *rtwdev) 4535 { 4536 struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv; 4537 4538 rtw89_phy_antdiv_sts_instance_reset(&antdiv->target_stats); 4539 rtw89_phy_antdiv_sts_instance_reset(&antdiv->main_stats); 4540 rtw89_phy_antdiv_sts_instance_reset(&antdiv->aux_stats); 4541 } 4542 4543 static void rtw89_phy_antdiv_init(struct rtw89_dev *rtwdev) 4544 { 4545 struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv; 4546 struct rtw89_hal *hal = &rtwdev->hal; 4547 4548 if (!hal->ant_diversity) 4549 return; 4550 4551 antdiv->get_stats = false; 4552 antdiv->rssi_pre = 0; 4553 rtw89_phy_antdiv_sts_reset(rtwdev); 4554 rtw89_phy_antdiv_reg_init(rtwdev); 4555 } 4556 4557 static void rtw89_phy_stat_thermal_update(struct rtw89_dev *rtwdev) 4558 { 4559 struct rtw89_phy_stat *phystat = &rtwdev->phystat; 4560 int i; 4561 u8 th; 4562 4563 for (i = 0; i < rtwdev->chip->rf_path_num; i++) { 4564 th = rtw89_chip_get_thermal(rtwdev, i); 4565 if (th) 4566 ewma_thermal_add(&phystat->avg_thermal[i], th); 4567 4568 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, 4569 "path(%d) thermal cur=%u avg=%ld", i, th, 4570 ewma_thermal_read(&phystat->avg_thermal[i])); 4571 } 4572 } 4573 4574 struct rtw89_phy_iter_rssi_data { 4575 struct rtw89_dev *rtwdev; 4576 struct rtw89_phy_ch_info *ch_info; 4577 bool rssi_changed; 4578 }; 4579 4580 static void rtw89_phy_stat_rssi_update_iter(void *data, 4581 struct ieee80211_sta *sta) 4582 { 4583 struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; 4584 struct rtw89_phy_iter_rssi_data *rssi_data = 4585 (struct rtw89_phy_iter_rssi_data *)data; 4586 struct rtw89_phy_ch_info *ch_info = rssi_data->ch_info; 4587 unsigned long rssi_curr; 4588 4589 rssi_curr = ewma_rssi_read(&rtwsta->avg_rssi); 4590 4591 if (rssi_curr < ch_info->rssi_min) { 4592 ch_info->rssi_min = rssi_curr; 4593 ch_info->rssi_min_macid = rtwsta->mac_id; 4594 } 4595 4596 if (rtwsta->prev_rssi == 0) { 4597 rtwsta->prev_rssi = rssi_curr; 4598 } else if (abs((int)rtwsta->prev_rssi - (int)rssi_curr) > (3 << RSSI_FACTOR)) { 4599 rtwsta->prev_rssi = rssi_curr; 4600 rssi_data->rssi_changed = true; 4601 } 4602 } 4603 4604 static void rtw89_phy_stat_rssi_update(struct rtw89_dev *rtwdev) 4605 { 4606 struct rtw89_phy_iter_rssi_data rssi_data = {0}; 4607 4608 rssi_data.rtwdev = rtwdev; 4609 rssi_data.ch_info = &rtwdev->ch_info; 4610 rssi_data.ch_info->rssi_min = U8_MAX; 4611 ieee80211_iterate_stations_atomic(rtwdev->hw, 4612 rtw89_phy_stat_rssi_update_iter, 4613 &rssi_data); 4614 if (rssi_data.rssi_changed) 4615 rtw89_btc_ntfy_wl_sta(rtwdev); 4616 } 4617 4618 static void rtw89_phy_stat_init(struct rtw89_dev *rtwdev) 4619 { 4620 struct rtw89_phy_stat *phystat = &rtwdev->phystat; 4621 int i; 4622 4623 for (i = 0; i < rtwdev->chip->rf_path_num; i++) 4624 ewma_thermal_init(&phystat->avg_thermal[i]); 4625 4626 rtw89_phy_stat_thermal_update(rtwdev); 4627 4628 memset(&phystat->cur_pkt_stat, 0, sizeof(phystat->cur_pkt_stat)); 4629 memset(&phystat->last_pkt_stat, 0, sizeof(phystat->last_pkt_stat)); 4630 } 4631 4632 void rtw89_phy_stat_track(struct rtw89_dev *rtwdev) 4633 { 4634 struct rtw89_phy_stat *phystat = &rtwdev->phystat; 4635 4636 rtw89_phy_stat_thermal_update(rtwdev); 4637 rtw89_phy_stat_rssi_update(rtwdev); 4638 4639 phystat->last_pkt_stat = phystat->cur_pkt_stat; 4640 memset(&phystat->cur_pkt_stat, 0, sizeof(phystat->cur_pkt_stat)); 4641 } 4642 4643 static u16 rtw89_phy_ccx_us_to_idx(struct rtw89_dev *rtwdev, u32 time_us) 4644 { 4645 struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; 4646 4647 return time_us >> (ilog2(CCX_US_BASE_RATIO) + env->ccx_unit_idx); 4648 } 4649 4650 static u32 rtw89_phy_ccx_idx_to_us(struct rtw89_dev *rtwdev, u16 idx) 4651 { 4652 struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; 4653 4654 return idx << (ilog2(CCX_US_BASE_RATIO) + env->ccx_unit_idx); 4655 } 4656 4657 static void rtw89_phy_ccx_top_setting_init(struct rtw89_dev *rtwdev) 4658 { 4659 const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; 4660 struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; 4661 const struct rtw89_ccx_regs *ccx = phy->ccx; 4662 4663 env->ccx_manual_ctrl = false; 4664 env->ccx_ongoing = false; 4665 env->ccx_rac_lv = RTW89_RAC_RELEASE; 4666 env->ccx_period = 0; 4667 env->ccx_unit_idx = RTW89_CCX_32_US; 4668 4669 rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->en_mask, 1); 4670 rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->trig_opt_mask, 1); 4671 rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 1); 4672 rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->edcca_opt_mask, 4673 RTW89_CCX_EDCCA_BW20_0); 4674 } 4675 4676 static u16 rtw89_phy_ccx_get_report(struct rtw89_dev *rtwdev, u16 report, 4677 u16 score) 4678 { 4679 struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; 4680 u32 numer = 0; 4681 u16 ret = 0; 4682 4683 numer = report * score + (env->ccx_period >> 1); 4684 if (env->ccx_period) 4685 ret = numer / env->ccx_period; 4686 4687 return ret >= score ? score - 1 : ret; 4688 } 4689 4690 static void rtw89_phy_ccx_ms_to_period_unit(struct rtw89_dev *rtwdev, 4691 u16 time_ms, u32 *period, 4692 u32 *unit_idx) 4693 { 4694 u32 idx; 4695 u8 quotient; 4696 4697 if (time_ms >= CCX_MAX_PERIOD) 4698 time_ms = CCX_MAX_PERIOD; 4699 4700 quotient = CCX_MAX_PERIOD_UNIT * time_ms / CCX_MAX_PERIOD; 4701 4702 if (quotient < 4) 4703 idx = RTW89_CCX_4_US; 4704 else if (quotient < 8) 4705 idx = RTW89_CCX_8_US; 4706 else if (quotient < 16) 4707 idx = RTW89_CCX_16_US; 4708 else 4709 idx = RTW89_CCX_32_US; 4710 4711 *unit_idx = idx; 4712 *period = (time_ms * MS_TO_4US_RATIO) >> idx; 4713 4714 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, 4715 "[Trigger Time] period:%d, unit_idx:%d\n", 4716 *period, *unit_idx); 4717 } 4718 4719 static void rtw89_phy_ccx_racing_release(struct rtw89_dev *rtwdev) 4720 { 4721 struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; 4722 4723 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, 4724 "lv:(%d)->(0)\n", env->ccx_rac_lv); 4725 4726 env->ccx_ongoing = false; 4727 env->ccx_rac_lv = RTW89_RAC_RELEASE; 4728 env->ifs_clm_app = RTW89_IFS_CLM_BACKGROUND; 4729 } 4730 4731 static bool rtw89_phy_ifs_clm_th_update_check(struct rtw89_dev *rtwdev, 4732 struct rtw89_ccx_para_info *para) 4733 { 4734 struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; 4735 bool is_update = env->ifs_clm_app != para->ifs_clm_app; 4736 u8 i = 0; 4737 u16 *ifs_th_l = env->ifs_clm_th_l; 4738 u16 *ifs_th_h = env->ifs_clm_th_h; 4739 u32 ifs_th0_us = 0, ifs_th_times = 0; 4740 u32 ifs_th_h_us[RTW89_IFS_CLM_NUM] = {0}; 4741 4742 if (!is_update) 4743 goto ifs_update_finished; 4744 4745 switch (para->ifs_clm_app) { 4746 case RTW89_IFS_CLM_INIT: 4747 case RTW89_IFS_CLM_BACKGROUND: 4748 case RTW89_IFS_CLM_ACS: 4749 case RTW89_IFS_CLM_DBG: 4750 case RTW89_IFS_CLM_DIG: 4751 case RTW89_IFS_CLM_TDMA_DIG: 4752 ifs_th0_us = IFS_CLM_TH0_UPPER; 4753 ifs_th_times = IFS_CLM_TH_MUL; 4754 break; 4755 case RTW89_IFS_CLM_DBG_MANUAL: 4756 ifs_th0_us = para->ifs_clm_manual_th0; 4757 ifs_th_times = para->ifs_clm_manual_th_times; 4758 break; 4759 default: 4760 break; 4761 } 4762 4763 /* Set sampling threshold for 4 different regions, unit in idx_cnt. 4764 * low[i] = high[i-1] + 1 4765 * high[i] = high[i-1] * ifs_th_times 4766 */ 4767 ifs_th_l[IFS_CLM_TH_START_IDX] = 0; 4768 ifs_th_h_us[IFS_CLM_TH_START_IDX] = ifs_th0_us; 4769 ifs_th_h[IFS_CLM_TH_START_IDX] = rtw89_phy_ccx_us_to_idx(rtwdev, 4770 ifs_th0_us); 4771 for (i = 1; i < RTW89_IFS_CLM_NUM; i++) { 4772 ifs_th_l[i] = ifs_th_h[i - 1] + 1; 4773 ifs_th_h_us[i] = ifs_th_h_us[i - 1] * ifs_th_times; 4774 ifs_th_h[i] = rtw89_phy_ccx_us_to_idx(rtwdev, ifs_th_h_us[i]); 4775 } 4776 4777 ifs_update_finished: 4778 if (!is_update) 4779 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, 4780 "No need to update IFS_TH\n"); 4781 4782 return is_update; 4783 } 4784 4785 static void rtw89_phy_ifs_clm_set_th_reg(struct rtw89_dev *rtwdev) 4786 { 4787 const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; 4788 struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; 4789 const struct rtw89_ccx_regs *ccx = phy->ccx; 4790 u8 i = 0; 4791 4792 rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_th_l_mask, 4793 env->ifs_clm_th_l[0]); 4794 rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_th_l_mask, 4795 env->ifs_clm_th_l[1]); 4796 rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_th_l_mask, 4797 env->ifs_clm_th_l[2]); 4798 rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_th_l_mask, 4799 env->ifs_clm_th_l[3]); 4800 4801 rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_th_h_mask, 4802 env->ifs_clm_th_h[0]); 4803 rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_th_h_mask, 4804 env->ifs_clm_th_h[1]); 4805 rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_th_h_mask, 4806 env->ifs_clm_th_h[2]); 4807 rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_th_h_mask, 4808 env->ifs_clm_th_h[3]); 4809 4810 for (i = 0; i < RTW89_IFS_CLM_NUM; i++) 4811 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, 4812 "Update IFS_T%d_th{low, high} : {%d, %d}\n", 4813 i + 1, env->ifs_clm_th_l[i], env->ifs_clm_th_h[i]); 4814 } 4815 4816 static void rtw89_phy_ifs_clm_setting_init(struct rtw89_dev *rtwdev) 4817 { 4818 const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; 4819 struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; 4820 const struct rtw89_ccx_regs *ccx = phy->ccx; 4821 struct rtw89_ccx_para_info para = {0}; 4822 4823 env->ifs_clm_app = RTW89_IFS_CLM_BACKGROUND; 4824 env->ifs_clm_mntr_time = 0; 4825 4826 para.ifs_clm_app = RTW89_IFS_CLM_INIT; 4827 if (rtw89_phy_ifs_clm_th_update_check(rtwdev, ¶)) 4828 rtw89_phy_ifs_clm_set_th_reg(rtwdev); 4829 4830 rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_collect_en_mask, true); 4831 rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_en_mask, true); 4832 rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_en_mask, true); 4833 rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_en_mask, true); 4834 rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_en_mask, true); 4835 } 4836 4837 static int rtw89_phy_ccx_racing_ctrl(struct rtw89_dev *rtwdev, 4838 enum rtw89_env_racing_lv level) 4839 { 4840 struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; 4841 int ret = 0; 4842 4843 if (level >= RTW89_RAC_MAX_NUM) { 4844 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, 4845 "[WARNING] Wrong LV=%d\n", level); 4846 return -EINVAL; 4847 } 4848 4849 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, 4850 "ccx_ongoing=%d, level:(%d)->(%d)\n", env->ccx_ongoing, 4851 env->ccx_rac_lv, level); 4852 4853 if (env->ccx_ongoing) { 4854 if (level <= env->ccx_rac_lv) 4855 ret = -EINVAL; 4856 else 4857 env->ccx_ongoing = false; 4858 } 4859 4860 if (ret == 0) 4861 env->ccx_rac_lv = level; 4862 4863 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "ccx racing success=%d\n", 4864 !ret); 4865 4866 return ret; 4867 } 4868 4869 static void rtw89_phy_ccx_trigger(struct rtw89_dev *rtwdev) 4870 { 4871 const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; 4872 struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; 4873 const struct rtw89_ccx_regs *ccx = phy->ccx; 4874 4875 rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_clm_cnt_clear_mask, 0); 4876 rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 0); 4877 rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_clm_cnt_clear_mask, 1); 4878 rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 1); 4879 4880 env->ccx_ongoing = true; 4881 } 4882 4883 static void rtw89_phy_ifs_clm_get_utility(struct rtw89_dev *rtwdev) 4884 { 4885 struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; 4886 u8 i = 0; 4887 u32 res = 0; 4888 4889 env->ifs_clm_tx_ratio = 4890 rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_tx, PERCENT); 4891 env->ifs_clm_edcca_excl_cca_ratio = 4892 rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_edcca_excl_cca, 4893 PERCENT); 4894 env->ifs_clm_cck_fa_ratio = 4895 rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_cckfa, PERCENT); 4896 env->ifs_clm_ofdm_fa_ratio = 4897 rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_ofdmfa, PERCENT); 4898 env->ifs_clm_cck_cca_excl_fa_ratio = 4899 rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_cckcca_excl_fa, 4900 PERCENT); 4901 env->ifs_clm_ofdm_cca_excl_fa_ratio = 4902 rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_ofdmcca_excl_fa, 4903 PERCENT); 4904 env->ifs_clm_cck_fa_permil = 4905 rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_cckfa, PERMIL); 4906 env->ifs_clm_ofdm_fa_permil = 4907 rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_ofdmfa, PERMIL); 4908 4909 for (i = 0; i < RTW89_IFS_CLM_NUM; i++) { 4910 if (env->ifs_clm_his[i] > ENV_MNTR_IFSCLM_HIS_MAX) { 4911 env->ifs_clm_ifs_avg[i] = ENV_MNTR_FAIL_DWORD; 4912 } else { 4913 env->ifs_clm_ifs_avg[i] = 4914 rtw89_phy_ccx_idx_to_us(rtwdev, 4915 env->ifs_clm_avg[i]); 4916 } 4917 4918 res = rtw89_phy_ccx_idx_to_us(rtwdev, env->ifs_clm_cca[i]); 4919 res += env->ifs_clm_his[i] >> 1; 4920 if (env->ifs_clm_his[i]) 4921 res /= env->ifs_clm_his[i]; 4922 else 4923 res = 0; 4924 env->ifs_clm_cca_avg[i] = res; 4925 } 4926 4927 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, 4928 "IFS-CLM ratio {Tx, EDCCA_exclu_cca} = {%d, %d}\n", 4929 env->ifs_clm_tx_ratio, env->ifs_clm_edcca_excl_cca_ratio); 4930 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, 4931 "IFS-CLM FA ratio {CCK, OFDM} = {%d, %d}\n", 4932 env->ifs_clm_cck_fa_ratio, env->ifs_clm_ofdm_fa_ratio); 4933 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, 4934 "IFS-CLM FA permil {CCK, OFDM} = {%d, %d}\n", 4935 env->ifs_clm_cck_fa_permil, env->ifs_clm_ofdm_fa_permil); 4936 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, 4937 "IFS-CLM CCA_exclu_FA ratio {CCK, OFDM} = {%d, %d}\n", 4938 env->ifs_clm_cck_cca_excl_fa_ratio, 4939 env->ifs_clm_ofdm_cca_excl_fa_ratio); 4940 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, 4941 "Time:[his, ifs_avg(us), cca_avg(us)]\n"); 4942 for (i = 0; i < RTW89_IFS_CLM_NUM; i++) 4943 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "T%d:[%d, %d, %d]\n", 4944 i + 1, env->ifs_clm_his[i], env->ifs_clm_ifs_avg[i], 4945 env->ifs_clm_cca_avg[i]); 4946 } 4947 4948 static bool rtw89_phy_ifs_clm_get_result(struct rtw89_dev *rtwdev) 4949 { 4950 const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; 4951 struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; 4952 const struct rtw89_ccx_regs *ccx = phy->ccx; 4953 u8 i = 0; 4954 4955 if (rtw89_phy_read32_mask(rtwdev, ccx->ifs_total_addr, 4956 ccx->ifs_cnt_done_mask) == 0) { 4957 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, 4958 "Get IFS_CLM report Fail\n"); 4959 return false; 4960 } 4961 4962 env->ifs_clm_tx = 4963 rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_tx_cnt_addr, 4964 ccx->ifs_clm_tx_cnt_msk); 4965 env->ifs_clm_edcca_excl_cca = 4966 rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_tx_cnt_addr, 4967 ccx->ifs_clm_edcca_excl_cca_fa_mask); 4968 env->ifs_clm_cckcca_excl_fa = 4969 rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_cca_addr, 4970 ccx->ifs_clm_cckcca_excl_fa_mask); 4971 env->ifs_clm_ofdmcca_excl_fa = 4972 rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_cca_addr, 4973 ccx->ifs_clm_ofdmcca_excl_fa_mask); 4974 env->ifs_clm_cckfa = 4975 rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_fa_addr, 4976 ccx->ifs_clm_cck_fa_mask); 4977 env->ifs_clm_ofdmfa = 4978 rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_fa_addr, 4979 ccx->ifs_clm_ofdm_fa_mask); 4980 4981 env->ifs_clm_his[0] = 4982 rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr, 4983 ccx->ifs_t1_his_mask); 4984 env->ifs_clm_his[1] = 4985 rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr, 4986 ccx->ifs_t2_his_mask); 4987 env->ifs_clm_his[2] = 4988 rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr, 4989 ccx->ifs_t3_his_mask); 4990 env->ifs_clm_his[3] = 4991 rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr, 4992 ccx->ifs_t4_his_mask); 4993 4994 env->ifs_clm_avg[0] = 4995 rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_l_addr, 4996 ccx->ifs_t1_avg_mask); 4997 env->ifs_clm_avg[1] = 4998 rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_l_addr, 4999 ccx->ifs_t2_avg_mask); 5000 env->ifs_clm_avg[2] = 5001 rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_h_addr, 5002 ccx->ifs_t3_avg_mask); 5003 env->ifs_clm_avg[3] = 5004 rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_h_addr, 5005 ccx->ifs_t4_avg_mask); 5006 5007 env->ifs_clm_cca[0] = 5008 rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_l_addr, 5009 ccx->ifs_t1_cca_mask); 5010 env->ifs_clm_cca[1] = 5011 rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_l_addr, 5012 ccx->ifs_t2_cca_mask); 5013 env->ifs_clm_cca[2] = 5014 rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_h_addr, 5015 ccx->ifs_t3_cca_mask); 5016 env->ifs_clm_cca[3] = 5017 rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_h_addr, 5018 ccx->ifs_t4_cca_mask); 5019 5020 env->ifs_clm_total_ifs = 5021 rtw89_phy_read32_mask(rtwdev, ccx->ifs_total_addr, 5022 ccx->ifs_total_mask); 5023 5024 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "IFS-CLM total_ifs = %d\n", 5025 env->ifs_clm_total_ifs); 5026 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, 5027 "{Tx, EDCCA_exclu_cca} = {%d, %d}\n", 5028 env->ifs_clm_tx, env->ifs_clm_edcca_excl_cca); 5029 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, 5030 "IFS-CLM FA{CCK, OFDM} = {%d, %d}\n", 5031 env->ifs_clm_cckfa, env->ifs_clm_ofdmfa); 5032 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, 5033 "IFS-CLM CCA_exclu_FA{CCK, OFDM} = {%d, %d}\n", 5034 env->ifs_clm_cckcca_excl_fa, env->ifs_clm_ofdmcca_excl_fa); 5035 5036 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "Time:[his, avg, cca]\n"); 5037 for (i = 0; i < RTW89_IFS_CLM_NUM; i++) 5038 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, 5039 "T%d:[%d, %d, %d]\n", i + 1, env->ifs_clm_his[i], 5040 env->ifs_clm_avg[i], env->ifs_clm_cca[i]); 5041 5042 rtw89_phy_ifs_clm_get_utility(rtwdev); 5043 5044 return true; 5045 } 5046 5047 static int rtw89_phy_ifs_clm_set(struct rtw89_dev *rtwdev, 5048 struct rtw89_ccx_para_info *para) 5049 { 5050 const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; 5051 struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; 5052 const struct rtw89_ccx_regs *ccx = phy->ccx; 5053 u32 period = 0; 5054 u32 unit_idx = 0; 5055 5056 if (para->mntr_time == 0) { 5057 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, 5058 "[WARN] MNTR_TIME is 0\n"); 5059 return -EINVAL; 5060 } 5061 5062 if (rtw89_phy_ccx_racing_ctrl(rtwdev, para->rac_lv)) 5063 return -EINVAL; 5064 5065 if (para->mntr_time != env->ifs_clm_mntr_time) { 5066 rtw89_phy_ccx_ms_to_period_unit(rtwdev, para->mntr_time, 5067 &period, &unit_idx); 5068 rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr, 5069 ccx->ifs_clm_period_mask, period); 5070 rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr, 5071 ccx->ifs_clm_cnt_unit_mask, 5072 unit_idx); 5073 5074 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, 5075 "Update IFS-CLM time ((%d)) -> ((%d))\n", 5076 env->ifs_clm_mntr_time, para->mntr_time); 5077 5078 env->ifs_clm_mntr_time = para->mntr_time; 5079 env->ccx_period = (u16)period; 5080 env->ccx_unit_idx = (u8)unit_idx; 5081 } 5082 5083 if (rtw89_phy_ifs_clm_th_update_check(rtwdev, para)) { 5084 env->ifs_clm_app = para->ifs_clm_app; 5085 rtw89_phy_ifs_clm_set_th_reg(rtwdev); 5086 } 5087 5088 return 0; 5089 } 5090 5091 void rtw89_phy_env_monitor_track(struct rtw89_dev *rtwdev) 5092 { 5093 struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; 5094 struct rtw89_ccx_para_info para = {0}; 5095 u8 chk_result = RTW89_PHY_ENV_MON_CCX_FAIL; 5096 5097 env->ccx_watchdog_result = RTW89_PHY_ENV_MON_CCX_FAIL; 5098 if (env->ccx_manual_ctrl) { 5099 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, 5100 "CCX in manual ctrl\n"); 5101 return; 5102 } 5103 5104 /* only ifs_clm for now */ 5105 if (rtw89_phy_ifs_clm_get_result(rtwdev)) 5106 env->ccx_watchdog_result |= RTW89_PHY_ENV_MON_IFS_CLM; 5107 5108 rtw89_phy_ccx_racing_release(rtwdev); 5109 para.mntr_time = 1900; 5110 para.rac_lv = RTW89_RAC_LV_1; 5111 para.ifs_clm_app = RTW89_IFS_CLM_BACKGROUND; 5112 5113 if (rtw89_phy_ifs_clm_set(rtwdev, ¶) == 0) 5114 chk_result |= RTW89_PHY_ENV_MON_IFS_CLM; 5115 if (chk_result) 5116 rtw89_phy_ccx_trigger(rtwdev); 5117 5118 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, 5119 "get_result=0x%x, chk_result:0x%x\n", 5120 env->ccx_watchdog_result, chk_result); 5121 } 5122 5123 static bool rtw89_physts_ie_page_valid(enum rtw89_phy_status_bitmap *ie_page) 5124 { 5125 if (*ie_page >= RTW89_PHYSTS_BITMAP_NUM || 5126 *ie_page == RTW89_RSVD_9) 5127 return false; 5128 else if (*ie_page > RTW89_RSVD_9) 5129 *ie_page -= 1; 5130 5131 return true; 5132 } 5133 5134 static u32 rtw89_phy_get_ie_bitmap_addr(enum rtw89_phy_status_bitmap ie_page) 5135 { 5136 static const u8 ie_page_shift = 2; 5137 5138 return R_PHY_STS_BITMAP_ADDR_START + (ie_page << ie_page_shift); 5139 } 5140 5141 static u32 rtw89_physts_get_ie_bitmap(struct rtw89_dev *rtwdev, 5142 enum rtw89_phy_status_bitmap ie_page) 5143 { 5144 u32 addr; 5145 5146 if (!rtw89_physts_ie_page_valid(&ie_page)) 5147 return 0; 5148 5149 addr = rtw89_phy_get_ie_bitmap_addr(ie_page); 5150 5151 return rtw89_phy_read32(rtwdev, addr); 5152 } 5153 5154 static void rtw89_physts_set_ie_bitmap(struct rtw89_dev *rtwdev, 5155 enum rtw89_phy_status_bitmap ie_page, 5156 u32 val) 5157 { 5158 const struct rtw89_chip_info *chip = rtwdev->chip; 5159 u32 addr; 5160 5161 if (!rtw89_physts_ie_page_valid(&ie_page)) 5162 return; 5163 5164 if (chip->chip_id == RTL8852A) 5165 val &= B_PHY_STS_BITMAP_MSK_52A; 5166 5167 addr = rtw89_phy_get_ie_bitmap_addr(ie_page); 5168 rtw89_phy_write32(rtwdev, addr, val); 5169 } 5170 5171 static void rtw89_physts_enable_ie_bitmap(struct rtw89_dev *rtwdev, 5172 enum rtw89_phy_status_bitmap bitmap, 5173 enum rtw89_phy_status_ie_type ie, 5174 bool enable) 5175 { 5176 u32 val = rtw89_physts_get_ie_bitmap(rtwdev, bitmap); 5177 5178 if (enable) 5179 val |= BIT(ie); 5180 else 5181 val &= ~BIT(ie); 5182 5183 rtw89_physts_set_ie_bitmap(rtwdev, bitmap, val); 5184 } 5185 5186 static void rtw89_physts_enable_fail_report(struct rtw89_dev *rtwdev, 5187 bool enable, 5188 enum rtw89_phy_idx phy_idx) 5189 { 5190 const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; 5191 const struct rtw89_physts_regs *physts = phy->physts; 5192 5193 if (enable) { 5194 rtw89_phy_write32_clr(rtwdev, physts->setting_addr, 5195 physts->dis_trigger_fail_mask); 5196 rtw89_phy_write32_clr(rtwdev, physts->setting_addr, 5197 physts->dis_trigger_brk_mask); 5198 } else { 5199 rtw89_phy_write32_set(rtwdev, physts->setting_addr, 5200 physts->dis_trigger_fail_mask); 5201 rtw89_phy_write32_set(rtwdev, physts->setting_addr, 5202 physts->dis_trigger_brk_mask); 5203 } 5204 } 5205 5206 static void rtw89_physts_parsing_init(struct rtw89_dev *rtwdev) 5207 { 5208 u8 i; 5209 5210 rtw89_physts_enable_fail_report(rtwdev, false, RTW89_PHY_0); 5211 5212 for (i = 0; i < RTW89_PHYSTS_BITMAP_NUM; i++) { 5213 if (i >= RTW89_CCK_PKT) 5214 rtw89_physts_enable_ie_bitmap(rtwdev, i, 5215 RTW89_PHYSTS_IE09_FTR_0, 5216 true); 5217 if ((i >= RTW89_CCK_BRK && i <= RTW89_VHT_MU) || 5218 (i >= RTW89_RSVD_9 && i <= RTW89_CCK_PKT)) 5219 continue; 5220 rtw89_physts_enable_ie_bitmap(rtwdev, i, 5221 RTW89_PHYSTS_IE24_OFDM_TD_PATH_A, 5222 true); 5223 } 5224 rtw89_physts_enable_ie_bitmap(rtwdev, RTW89_VHT_PKT, 5225 RTW89_PHYSTS_IE13_DL_MU_DEF, true); 5226 rtw89_physts_enable_ie_bitmap(rtwdev, RTW89_HE_PKT, 5227 RTW89_PHYSTS_IE13_DL_MU_DEF, true); 5228 5229 /* force IE01 for channel index, only channel field is valid */ 5230 rtw89_physts_enable_ie_bitmap(rtwdev, RTW89_CCK_PKT, 5231 RTW89_PHYSTS_IE01_CMN_OFDM, true); 5232 } 5233 5234 static void rtw89_phy_dig_read_gain_table(struct rtw89_dev *rtwdev, int type) 5235 { 5236 const struct rtw89_chip_info *chip = rtwdev->chip; 5237 struct rtw89_dig_info *dig = &rtwdev->dig; 5238 const struct rtw89_phy_dig_gain_cfg *cfg; 5239 const char *msg; 5240 u8 i; 5241 s8 gain_base; 5242 s8 *gain_arr; 5243 u32 tmp; 5244 5245 switch (type) { 5246 case RTW89_DIG_GAIN_LNA_G: 5247 gain_arr = dig->lna_gain_g; 5248 gain_base = LNA0_GAIN; 5249 cfg = chip->dig_table->cfg_lna_g; 5250 msg = "lna_gain_g"; 5251 break; 5252 case RTW89_DIG_GAIN_TIA_G: 5253 gain_arr = dig->tia_gain_g; 5254 gain_base = TIA0_GAIN_G; 5255 cfg = chip->dig_table->cfg_tia_g; 5256 msg = "tia_gain_g"; 5257 break; 5258 case RTW89_DIG_GAIN_LNA_A: 5259 gain_arr = dig->lna_gain_a; 5260 gain_base = LNA0_GAIN; 5261 cfg = chip->dig_table->cfg_lna_a; 5262 msg = "lna_gain_a"; 5263 break; 5264 case RTW89_DIG_GAIN_TIA_A: 5265 gain_arr = dig->tia_gain_a; 5266 gain_base = TIA0_GAIN_A; 5267 cfg = chip->dig_table->cfg_tia_a; 5268 msg = "tia_gain_a"; 5269 break; 5270 default: 5271 return; 5272 } 5273 5274 for (i = 0; i < cfg->size; i++) { 5275 tmp = rtw89_phy_read32_mask(rtwdev, cfg->table[i].addr, 5276 cfg->table[i].mask); 5277 tmp >>= DIG_GAIN_SHIFT; 5278 gain_arr[i] = sign_extend32(tmp, U4_MAX_BIT) + gain_base; 5279 gain_base += DIG_GAIN; 5280 5281 rtw89_debug(rtwdev, RTW89_DBG_DIG, "%s[%d]=%d\n", 5282 msg, i, gain_arr[i]); 5283 } 5284 } 5285 5286 static void rtw89_phy_dig_update_gain_para(struct rtw89_dev *rtwdev) 5287 { 5288 struct rtw89_dig_info *dig = &rtwdev->dig; 5289 u32 tmp; 5290 u8 i; 5291 5292 if (!rtwdev->hal.support_igi) 5293 return; 5294 5295 tmp = rtw89_phy_read32_mask(rtwdev, R_PATH0_IB_PKPW, 5296 B_PATH0_IB_PKPW_MSK); 5297 dig->ib_pkpwr = sign_extend32(tmp >> DIG_GAIN_SHIFT, U8_MAX_BIT); 5298 dig->ib_pbk = rtw89_phy_read32_mask(rtwdev, R_PATH0_IB_PBK, 5299 B_PATH0_IB_PBK_MSK); 5300 rtw89_debug(rtwdev, RTW89_DBG_DIG, "ib_pkpwr=%d, ib_pbk=%d\n", 5301 dig->ib_pkpwr, dig->ib_pbk); 5302 5303 for (i = RTW89_DIG_GAIN_LNA_G; i < RTW89_DIG_GAIN_MAX; i++) 5304 rtw89_phy_dig_read_gain_table(rtwdev, i); 5305 } 5306 5307 static const u8 rssi_nolink = 22; 5308 static const u8 igi_rssi_th[IGI_RSSI_TH_NUM] = {68, 84, 90, 98, 104}; 5309 static const u16 fa_th_2g[FA_TH_NUM] = {22, 44, 66, 88}; 5310 static const u16 fa_th_5g[FA_TH_NUM] = {4, 8, 12, 16}; 5311 static const u16 fa_th_nolink[FA_TH_NUM] = {196, 352, 440, 528}; 5312 5313 static void rtw89_phy_dig_update_rssi_info(struct rtw89_dev *rtwdev) 5314 { 5315 struct rtw89_phy_ch_info *ch_info = &rtwdev->ch_info; 5316 struct rtw89_dig_info *dig = &rtwdev->dig; 5317 bool is_linked = rtwdev->total_sta_assoc > 0; 5318 5319 if (is_linked) { 5320 dig->igi_rssi = ch_info->rssi_min >> 1; 5321 } else { 5322 rtw89_debug(rtwdev, RTW89_DBG_DIG, "RSSI update : NO Link\n"); 5323 dig->igi_rssi = rssi_nolink; 5324 } 5325 } 5326 5327 static void rtw89_phy_dig_update_para(struct rtw89_dev *rtwdev) 5328 { 5329 struct rtw89_dig_info *dig = &rtwdev->dig; 5330 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 5331 bool is_linked = rtwdev->total_sta_assoc > 0; 5332 const u16 *fa_th_src = NULL; 5333 5334 switch (chan->band_type) { 5335 case RTW89_BAND_2G: 5336 dig->lna_gain = dig->lna_gain_g; 5337 dig->tia_gain = dig->tia_gain_g; 5338 fa_th_src = is_linked ? fa_th_2g : fa_th_nolink; 5339 dig->force_gaincode_idx_en = false; 5340 dig->dyn_pd_th_en = true; 5341 break; 5342 case RTW89_BAND_5G: 5343 default: 5344 dig->lna_gain = dig->lna_gain_a; 5345 dig->tia_gain = dig->tia_gain_a; 5346 fa_th_src = is_linked ? fa_th_5g : fa_th_nolink; 5347 dig->force_gaincode_idx_en = true; 5348 dig->dyn_pd_th_en = true; 5349 break; 5350 } 5351 memcpy(dig->fa_th, fa_th_src, sizeof(dig->fa_th)); 5352 memcpy(dig->igi_rssi_th, igi_rssi_th, sizeof(dig->igi_rssi_th)); 5353 } 5354 5355 static const u8 pd_low_th_offset = 20, dynamic_igi_min = 0x20; 5356 static const u8 igi_max_performance_mode = 0x5a; 5357 static const u8 dynamic_pd_threshold_max; 5358 5359 static void rtw89_phy_dig_para_reset(struct rtw89_dev *rtwdev) 5360 { 5361 struct rtw89_dig_info *dig = &rtwdev->dig; 5362 5363 dig->cur_gaincode.lna_idx = LNA_IDX_MAX; 5364 dig->cur_gaincode.tia_idx = TIA_IDX_MAX; 5365 dig->cur_gaincode.rxb_idx = RXB_IDX_MAX; 5366 dig->force_gaincode.lna_idx = LNA_IDX_MAX; 5367 dig->force_gaincode.tia_idx = TIA_IDX_MAX; 5368 dig->force_gaincode.rxb_idx = RXB_IDX_MAX; 5369 5370 dig->dyn_igi_max = igi_max_performance_mode; 5371 dig->dyn_igi_min = dynamic_igi_min; 5372 dig->dyn_pd_th_max = dynamic_pd_threshold_max; 5373 dig->pd_low_th_ofst = pd_low_th_offset; 5374 dig->is_linked_pre = false; 5375 } 5376 5377 static void rtw89_phy_dig_init(struct rtw89_dev *rtwdev) 5378 { 5379 rtw89_phy_dig_update_gain_para(rtwdev); 5380 rtw89_phy_dig_reset(rtwdev); 5381 } 5382 5383 static u8 rtw89_phy_dig_lna_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi) 5384 { 5385 struct rtw89_dig_info *dig = &rtwdev->dig; 5386 u8 lna_idx; 5387 5388 if (rssi < dig->igi_rssi_th[0]) 5389 lna_idx = RTW89_DIG_GAIN_LNA_IDX6; 5390 else if (rssi < dig->igi_rssi_th[1]) 5391 lna_idx = RTW89_DIG_GAIN_LNA_IDX5; 5392 else if (rssi < dig->igi_rssi_th[2]) 5393 lna_idx = RTW89_DIG_GAIN_LNA_IDX4; 5394 else if (rssi < dig->igi_rssi_th[3]) 5395 lna_idx = RTW89_DIG_GAIN_LNA_IDX3; 5396 else if (rssi < dig->igi_rssi_th[4]) 5397 lna_idx = RTW89_DIG_GAIN_LNA_IDX2; 5398 else 5399 lna_idx = RTW89_DIG_GAIN_LNA_IDX1; 5400 5401 return lna_idx; 5402 } 5403 5404 static u8 rtw89_phy_dig_tia_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi) 5405 { 5406 struct rtw89_dig_info *dig = &rtwdev->dig; 5407 u8 tia_idx; 5408 5409 if (rssi < dig->igi_rssi_th[0]) 5410 tia_idx = RTW89_DIG_GAIN_TIA_IDX1; 5411 else 5412 tia_idx = RTW89_DIG_GAIN_TIA_IDX0; 5413 5414 return tia_idx; 5415 } 5416 5417 #define IB_PBK_BASE 110 5418 #define WB_RSSI_BASE 10 5419 static u8 rtw89_phy_dig_rxb_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi, 5420 struct rtw89_agc_gaincode_set *set) 5421 { 5422 struct rtw89_dig_info *dig = &rtwdev->dig; 5423 s8 lna_gain = dig->lna_gain[set->lna_idx]; 5424 s8 tia_gain = dig->tia_gain[set->tia_idx]; 5425 s32 wb_rssi = rssi + lna_gain + tia_gain; 5426 s32 rxb_idx_tmp = IB_PBK_BASE + WB_RSSI_BASE; 5427 u8 rxb_idx; 5428 5429 rxb_idx_tmp += dig->ib_pkpwr - dig->ib_pbk - wb_rssi; 5430 rxb_idx = clamp_t(s32, rxb_idx_tmp, RXB_IDX_MIN, RXB_IDX_MAX); 5431 5432 rtw89_debug(rtwdev, RTW89_DBG_DIG, "wb_rssi=%03d, rxb_idx_tmp=%03d\n", 5433 wb_rssi, rxb_idx_tmp); 5434 5435 return rxb_idx; 5436 } 5437 5438 static void rtw89_phy_dig_gaincode_by_rssi(struct rtw89_dev *rtwdev, u8 rssi, 5439 struct rtw89_agc_gaincode_set *set) 5440 { 5441 set->lna_idx = rtw89_phy_dig_lna_idx_by_rssi(rtwdev, rssi); 5442 set->tia_idx = rtw89_phy_dig_tia_idx_by_rssi(rtwdev, rssi); 5443 set->rxb_idx = rtw89_phy_dig_rxb_idx_by_rssi(rtwdev, rssi, set); 5444 5445 rtw89_debug(rtwdev, RTW89_DBG_DIG, 5446 "final_rssi=%03d, (lna,tia,rab)=(%d,%d,%02d)\n", 5447 rssi, set->lna_idx, set->tia_idx, set->rxb_idx); 5448 } 5449 5450 #define IGI_OFFSET_MAX 25 5451 #define IGI_OFFSET_MUL 2 5452 static void rtw89_phy_dig_igi_offset_by_env(struct rtw89_dev *rtwdev) 5453 { 5454 struct rtw89_dig_info *dig = &rtwdev->dig; 5455 struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; 5456 enum rtw89_dig_noisy_level noisy_lv; 5457 u8 igi_offset = dig->fa_rssi_ofst; 5458 u16 fa_ratio = 0; 5459 5460 fa_ratio = env->ifs_clm_cck_fa_permil + env->ifs_clm_ofdm_fa_permil; 5461 5462 if (fa_ratio < dig->fa_th[0]) 5463 noisy_lv = RTW89_DIG_NOISY_LEVEL0; 5464 else if (fa_ratio < dig->fa_th[1]) 5465 noisy_lv = RTW89_DIG_NOISY_LEVEL1; 5466 else if (fa_ratio < dig->fa_th[2]) 5467 noisy_lv = RTW89_DIG_NOISY_LEVEL2; 5468 else if (fa_ratio < dig->fa_th[3]) 5469 noisy_lv = RTW89_DIG_NOISY_LEVEL3; 5470 else 5471 noisy_lv = RTW89_DIG_NOISY_LEVEL_MAX; 5472 5473 if (noisy_lv == RTW89_DIG_NOISY_LEVEL0 && igi_offset < 2) 5474 igi_offset = 0; 5475 else 5476 igi_offset += noisy_lv * IGI_OFFSET_MUL; 5477 5478 igi_offset = min_t(u8, igi_offset, IGI_OFFSET_MAX); 5479 dig->fa_rssi_ofst = igi_offset; 5480 5481 rtw89_debug(rtwdev, RTW89_DBG_DIG, 5482 "fa_th: [+6 (%d) +4 (%d) +2 (%d) 0 (%d) -2 ]\n", 5483 dig->fa_th[3], dig->fa_th[2], dig->fa_th[1], dig->fa_th[0]); 5484 5485 rtw89_debug(rtwdev, RTW89_DBG_DIG, 5486 "fa(CCK,OFDM,ALL)=(%d,%d,%d)%%, noisy_lv=%d, ofst=%d\n", 5487 env->ifs_clm_cck_fa_permil, env->ifs_clm_ofdm_fa_permil, 5488 env->ifs_clm_cck_fa_permil + env->ifs_clm_ofdm_fa_permil, 5489 noisy_lv, igi_offset); 5490 } 5491 5492 static void rtw89_phy_dig_set_lna_idx(struct rtw89_dev *rtwdev, u8 lna_idx) 5493 { 5494 const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs; 5495 5496 rtw89_phy_write32_mask(rtwdev, dig_regs->p0_lna_init.addr, 5497 dig_regs->p0_lna_init.mask, lna_idx); 5498 rtw89_phy_write32_mask(rtwdev, dig_regs->p1_lna_init.addr, 5499 dig_regs->p1_lna_init.mask, lna_idx); 5500 } 5501 5502 static void rtw89_phy_dig_set_tia_idx(struct rtw89_dev *rtwdev, u8 tia_idx) 5503 { 5504 const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs; 5505 5506 rtw89_phy_write32_mask(rtwdev, dig_regs->p0_tia_init.addr, 5507 dig_regs->p0_tia_init.mask, tia_idx); 5508 rtw89_phy_write32_mask(rtwdev, dig_regs->p1_tia_init.addr, 5509 dig_regs->p1_tia_init.mask, tia_idx); 5510 } 5511 5512 static void rtw89_phy_dig_set_rxb_idx(struct rtw89_dev *rtwdev, u8 rxb_idx) 5513 { 5514 const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs; 5515 5516 rtw89_phy_write32_mask(rtwdev, dig_regs->p0_rxb_init.addr, 5517 dig_regs->p0_rxb_init.mask, rxb_idx); 5518 rtw89_phy_write32_mask(rtwdev, dig_regs->p1_rxb_init.addr, 5519 dig_regs->p1_rxb_init.mask, rxb_idx); 5520 } 5521 5522 static void rtw89_phy_dig_set_igi_cr(struct rtw89_dev *rtwdev, 5523 const struct rtw89_agc_gaincode_set set) 5524 { 5525 if (!rtwdev->hal.support_igi) 5526 return; 5527 5528 rtw89_phy_dig_set_lna_idx(rtwdev, set.lna_idx); 5529 rtw89_phy_dig_set_tia_idx(rtwdev, set.tia_idx); 5530 rtw89_phy_dig_set_rxb_idx(rtwdev, set.rxb_idx); 5531 5532 rtw89_debug(rtwdev, RTW89_DBG_DIG, "Set (lna,tia,rxb)=((%d,%d,%02d))\n", 5533 set.lna_idx, set.tia_idx, set.rxb_idx); 5534 } 5535 5536 static void rtw89_phy_dig_sdagc_follow_pagc_config(struct rtw89_dev *rtwdev, 5537 bool enable) 5538 { 5539 const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs; 5540 5541 rtw89_phy_write32_mask(rtwdev, dig_regs->p0_p20_pagcugc_en.addr, 5542 dig_regs->p0_p20_pagcugc_en.mask, enable); 5543 rtw89_phy_write32_mask(rtwdev, dig_regs->p0_s20_pagcugc_en.addr, 5544 dig_regs->p0_s20_pagcugc_en.mask, enable); 5545 rtw89_phy_write32_mask(rtwdev, dig_regs->p1_p20_pagcugc_en.addr, 5546 dig_regs->p1_p20_pagcugc_en.mask, enable); 5547 rtw89_phy_write32_mask(rtwdev, dig_regs->p1_s20_pagcugc_en.addr, 5548 dig_regs->p1_s20_pagcugc_en.mask, enable); 5549 5550 rtw89_debug(rtwdev, RTW89_DBG_DIG, "sdagc_follow_pagc=%d\n", enable); 5551 } 5552 5553 static void rtw89_phy_dig_config_igi(struct rtw89_dev *rtwdev) 5554 { 5555 struct rtw89_dig_info *dig = &rtwdev->dig; 5556 5557 if (!rtwdev->hal.support_igi) 5558 return; 5559 5560 if (dig->force_gaincode_idx_en) { 5561 rtw89_phy_dig_set_igi_cr(rtwdev, dig->force_gaincode); 5562 rtw89_debug(rtwdev, RTW89_DBG_DIG, 5563 "Force gaincode index enabled.\n"); 5564 } else { 5565 rtw89_phy_dig_gaincode_by_rssi(rtwdev, dig->igi_fa_rssi, 5566 &dig->cur_gaincode); 5567 rtw89_phy_dig_set_igi_cr(rtwdev, dig->cur_gaincode); 5568 } 5569 } 5570 5571 static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev, u8 rssi, 5572 bool enable) 5573 { 5574 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 5575 const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs; 5576 enum rtw89_bandwidth cbw = chan->band_width; 5577 struct rtw89_dig_info *dig = &rtwdev->dig; 5578 u8 final_rssi = 0, under_region = dig->pd_low_th_ofst; 5579 u8 ofdm_cca_th; 5580 s8 cck_cca_th; 5581 u32 pd_val = 0; 5582 5583 if (rtwdev->chip->chip_gen == RTW89_CHIP_AX) 5584 under_region += PD_TH_SB_FLTR_CMP_VAL; 5585 5586 switch (cbw) { 5587 case RTW89_CHANNEL_WIDTH_40: 5588 under_region += PD_TH_BW40_CMP_VAL; 5589 break; 5590 case RTW89_CHANNEL_WIDTH_80: 5591 under_region += PD_TH_BW80_CMP_VAL; 5592 break; 5593 case RTW89_CHANNEL_WIDTH_160: 5594 under_region += PD_TH_BW160_CMP_VAL; 5595 break; 5596 case RTW89_CHANNEL_WIDTH_20: 5597 fallthrough; 5598 default: 5599 under_region += PD_TH_BW20_CMP_VAL; 5600 break; 5601 } 5602 5603 dig->dyn_pd_th_max = dig->igi_rssi; 5604 5605 final_rssi = min_t(u8, rssi, dig->igi_rssi); 5606 ofdm_cca_th = clamp_t(u8, final_rssi, PD_TH_MIN_RSSI + under_region, 5607 PD_TH_MAX_RSSI + under_region); 5608 5609 if (enable) { 5610 pd_val = (ofdm_cca_th - under_region - PD_TH_MIN_RSSI) >> 1; 5611 rtw89_debug(rtwdev, RTW89_DBG_DIG, 5612 "igi=%d, ofdm_ccaTH=%d, backoff=%d, PD_low=%d\n", 5613 final_rssi, ofdm_cca_th, under_region, pd_val); 5614 } else { 5615 rtw89_debug(rtwdev, RTW89_DBG_DIG, 5616 "Dynamic PD th disabled, Set PD_low_bd=0\n"); 5617 } 5618 5619 rtw89_phy_write32_mask(rtwdev, dig_regs->seg0_pd_reg, 5620 dig_regs->pd_lower_bound_mask, pd_val); 5621 rtw89_phy_write32_mask(rtwdev, dig_regs->seg0_pd_reg, 5622 dig_regs->pd_spatial_reuse_en, enable); 5623 5624 if (!rtwdev->hal.support_cckpd) 5625 return; 5626 5627 cck_cca_th = max_t(s8, final_rssi - under_region, CCKPD_TH_MIN_RSSI); 5628 pd_val = (u32)(cck_cca_th - IGI_RSSI_MAX); 5629 5630 rtw89_debug(rtwdev, RTW89_DBG_DIG, 5631 "igi=%d, cck_ccaTH=%d, backoff=%d, cck_PD_low=((%d))dB\n", 5632 final_rssi, cck_cca_th, under_region, pd_val); 5633 5634 rtw89_phy_write32_mask(rtwdev, dig_regs->bmode_pd_reg, 5635 dig_regs->bmode_cca_rssi_limit_en, enable); 5636 rtw89_phy_write32_mask(rtwdev, dig_regs->bmode_pd_lower_bound_reg, 5637 dig_regs->bmode_rssi_nocca_low_th_mask, pd_val); 5638 } 5639 5640 void rtw89_phy_dig_reset(struct rtw89_dev *rtwdev) 5641 { 5642 struct rtw89_dig_info *dig = &rtwdev->dig; 5643 5644 dig->bypass_dig = false; 5645 rtw89_phy_dig_para_reset(rtwdev); 5646 rtw89_phy_dig_set_igi_cr(rtwdev, dig->force_gaincode); 5647 rtw89_phy_dig_dyn_pd_th(rtwdev, rssi_nolink, false); 5648 rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, false); 5649 rtw89_phy_dig_update_para(rtwdev); 5650 } 5651 5652 #define IGI_RSSI_MIN 10 5653 void rtw89_phy_dig(struct rtw89_dev *rtwdev) 5654 { 5655 struct rtw89_dig_info *dig = &rtwdev->dig; 5656 bool is_linked = rtwdev->total_sta_assoc > 0; 5657 5658 if (unlikely(dig->bypass_dig)) { 5659 dig->bypass_dig = false; 5660 return; 5661 } 5662 5663 if (!dig->is_linked_pre && is_linked) { 5664 rtw89_debug(rtwdev, RTW89_DBG_DIG, "First connected\n"); 5665 rtw89_phy_dig_update_para(rtwdev); 5666 } else if (dig->is_linked_pre && !is_linked) { 5667 rtw89_debug(rtwdev, RTW89_DBG_DIG, "First disconnected\n"); 5668 rtw89_phy_dig_update_para(rtwdev); 5669 } 5670 dig->is_linked_pre = is_linked; 5671 5672 rtw89_phy_dig_igi_offset_by_env(rtwdev); 5673 rtw89_phy_dig_update_rssi_info(rtwdev); 5674 5675 dig->dyn_igi_min = (dig->igi_rssi > IGI_RSSI_MIN) ? 5676 dig->igi_rssi - IGI_RSSI_MIN : 0; 5677 dig->dyn_igi_max = dig->dyn_igi_min + IGI_OFFSET_MAX; 5678 dig->igi_fa_rssi = dig->dyn_igi_min + dig->fa_rssi_ofst; 5679 5680 dig->igi_fa_rssi = clamp(dig->igi_fa_rssi, dig->dyn_igi_min, 5681 dig->dyn_igi_max); 5682 5683 rtw89_debug(rtwdev, RTW89_DBG_DIG, 5684 "rssi=%03d, dyn(max,min)=(%d,%d), final_rssi=%d\n", 5685 dig->igi_rssi, dig->dyn_igi_max, dig->dyn_igi_min, 5686 dig->igi_fa_rssi); 5687 5688 rtw89_phy_dig_config_igi(rtwdev); 5689 5690 rtw89_phy_dig_dyn_pd_th(rtwdev, dig->igi_fa_rssi, dig->dyn_pd_th_en); 5691 5692 if (dig->dyn_pd_th_en && dig->igi_fa_rssi > dig->dyn_pd_th_max) 5693 rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, true); 5694 else 5695 rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, false); 5696 } 5697 5698 static void rtw89_phy_tx_path_div_sta_iter(void *data, struct ieee80211_sta *sta) 5699 { 5700 struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; 5701 struct rtw89_dev *rtwdev = rtwsta->rtwdev; 5702 struct rtw89_vif *rtwvif = rtwsta->rtwvif; 5703 struct rtw89_hal *hal = &rtwdev->hal; 5704 bool *done = data; 5705 u8 rssi_a, rssi_b; 5706 u32 candidate; 5707 5708 if (rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION || sta->tdls) 5709 return; 5710 5711 if (*done) 5712 return; 5713 5714 *done = true; 5715 5716 rssi_a = ewma_rssi_read(&rtwsta->rssi[RF_PATH_A]); 5717 rssi_b = ewma_rssi_read(&rtwsta->rssi[RF_PATH_B]); 5718 5719 if (rssi_a > rssi_b + RTW89_TX_DIV_RSSI_RAW_TH) 5720 candidate = RF_A; 5721 else if (rssi_b > rssi_a + RTW89_TX_DIV_RSSI_RAW_TH) 5722 candidate = RF_B; 5723 else 5724 return; 5725 5726 if (hal->antenna_tx == candidate) 5727 return; 5728 5729 hal->antenna_tx = candidate; 5730 rtw89_fw_h2c_txpath_cmac_tbl(rtwdev, rtwsta); 5731 5732 if (hal->antenna_tx == RF_A) { 5733 rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE, B_P0_RFMODE_MUX, 0x12); 5734 rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE, B_P1_RFMODE_MUX, 0x11); 5735 } else if (hal->antenna_tx == RF_B) { 5736 rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE, B_P0_RFMODE_MUX, 0x11); 5737 rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE, B_P1_RFMODE_MUX, 0x12); 5738 } 5739 } 5740 5741 void rtw89_phy_tx_path_div_track(struct rtw89_dev *rtwdev) 5742 { 5743 struct rtw89_hal *hal = &rtwdev->hal; 5744 bool done = false; 5745 5746 if (!hal->tx_path_diversity) 5747 return; 5748 5749 ieee80211_iterate_stations_atomic(rtwdev->hw, 5750 rtw89_phy_tx_path_div_sta_iter, 5751 &done); 5752 } 5753 5754 #define ANTDIV_MAIN 0 5755 #define ANTDIV_AUX 1 5756 5757 static void rtw89_phy_antdiv_set_ant(struct rtw89_dev *rtwdev) 5758 { 5759 struct rtw89_hal *hal = &rtwdev->hal; 5760 u8 default_ant, optional_ant; 5761 5762 if (!hal->ant_diversity || hal->antenna_tx == 0) 5763 return; 5764 5765 if (hal->antenna_tx == RF_B) { 5766 default_ant = ANTDIV_AUX; 5767 optional_ant = ANTDIV_MAIN; 5768 } else { 5769 default_ant = ANTDIV_MAIN; 5770 optional_ant = ANTDIV_AUX; 5771 } 5772 5773 rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_CGCS_CTRL, 5774 default_ant, RTW89_PHY_0); 5775 rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_RX_ORI, 5776 default_ant, RTW89_PHY_0); 5777 rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_RX_ALT, 5778 optional_ant, RTW89_PHY_0); 5779 rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_TX_ORI, 5780 default_ant, RTW89_PHY_0); 5781 } 5782 5783 static void rtw89_phy_swap_hal_antenna(struct rtw89_dev *rtwdev) 5784 { 5785 struct rtw89_hal *hal = &rtwdev->hal; 5786 5787 hal->antenna_rx = hal->antenna_rx == RF_A ? RF_B : RF_A; 5788 hal->antenna_tx = hal->antenna_rx; 5789 } 5790 5791 static void rtw89_phy_antdiv_decision_state(struct rtw89_dev *rtwdev) 5792 { 5793 struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv; 5794 struct rtw89_hal *hal = &rtwdev->hal; 5795 bool no_change = false; 5796 u8 main_rssi, aux_rssi; 5797 u8 main_evm, aux_evm; 5798 u32 candidate; 5799 5800 antdiv->get_stats = false; 5801 antdiv->training_count = 0; 5802 5803 main_rssi = rtw89_phy_antdiv_sts_instance_get_rssi(&antdiv->main_stats); 5804 main_evm = rtw89_phy_antdiv_sts_instance_get_evm(&antdiv->main_stats); 5805 aux_rssi = rtw89_phy_antdiv_sts_instance_get_rssi(&antdiv->aux_stats); 5806 aux_evm = rtw89_phy_antdiv_sts_instance_get_evm(&antdiv->aux_stats); 5807 5808 if (main_evm > aux_evm + ANTDIV_EVM_DIFF_TH) 5809 candidate = RF_A; 5810 else if (aux_evm > main_evm + ANTDIV_EVM_DIFF_TH) 5811 candidate = RF_B; 5812 else if (main_rssi > aux_rssi + RTW89_TX_DIV_RSSI_RAW_TH) 5813 candidate = RF_A; 5814 else if (aux_rssi > main_rssi + RTW89_TX_DIV_RSSI_RAW_TH) 5815 candidate = RF_B; 5816 else 5817 no_change = true; 5818 5819 if (no_change) { 5820 /* swap back from training antenna to original */ 5821 rtw89_phy_swap_hal_antenna(rtwdev); 5822 return; 5823 } 5824 5825 hal->antenna_tx = candidate; 5826 hal->antenna_rx = candidate; 5827 } 5828 5829 static void rtw89_phy_antdiv_training_state(struct rtw89_dev *rtwdev) 5830 { 5831 struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv; 5832 u64 state_period; 5833 5834 if (antdiv->training_count % 2 == 0) { 5835 if (antdiv->training_count == 0) 5836 rtw89_phy_antdiv_sts_reset(rtwdev); 5837 5838 antdiv->get_stats = true; 5839 state_period = msecs_to_jiffies(ANTDIV_TRAINNING_INTVL); 5840 } else { 5841 antdiv->get_stats = false; 5842 state_period = msecs_to_jiffies(ANTDIV_DELAY); 5843 5844 rtw89_phy_swap_hal_antenna(rtwdev); 5845 rtw89_phy_antdiv_set_ant(rtwdev); 5846 } 5847 5848 antdiv->training_count++; 5849 ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->antdiv_work, 5850 state_period); 5851 } 5852 5853 void rtw89_phy_antdiv_work(struct work_struct *work) 5854 { 5855 struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, 5856 antdiv_work.work); 5857 struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv; 5858 5859 mutex_lock(&rtwdev->mutex); 5860 5861 if (antdiv->training_count <= ANTDIV_TRAINNING_CNT) { 5862 rtw89_phy_antdiv_training_state(rtwdev); 5863 } else { 5864 rtw89_phy_antdiv_decision_state(rtwdev); 5865 rtw89_phy_antdiv_set_ant(rtwdev); 5866 } 5867 5868 mutex_unlock(&rtwdev->mutex); 5869 } 5870 5871 void rtw89_phy_antdiv_track(struct rtw89_dev *rtwdev) 5872 { 5873 struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv; 5874 struct rtw89_hal *hal = &rtwdev->hal; 5875 u8 rssi, rssi_pre; 5876 5877 if (!hal->ant_diversity || hal->ant_diversity_fixed) 5878 return; 5879 5880 rssi = rtw89_phy_antdiv_sts_instance_get_rssi(&antdiv->target_stats); 5881 rssi_pre = antdiv->rssi_pre; 5882 antdiv->rssi_pre = rssi; 5883 rtw89_phy_antdiv_sts_instance_reset(&antdiv->target_stats); 5884 5885 if (abs((int)rssi - (int)rssi_pre) < ANTDIV_RSSI_DIFF_TH) 5886 return; 5887 5888 antdiv->training_count = 0; 5889 ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->antdiv_work, 0); 5890 } 5891 5892 static void rtw89_phy_env_monitor_init(struct rtw89_dev *rtwdev) 5893 { 5894 rtw89_phy_ccx_top_setting_init(rtwdev); 5895 rtw89_phy_ifs_clm_setting_init(rtwdev); 5896 } 5897 5898 static void rtw89_phy_edcca_init(struct rtw89_dev *rtwdev) 5899 { 5900 const struct rtw89_edcca_regs *edcca_regs = rtwdev->chip->edcca_regs; 5901 struct rtw89_edcca_bak *edcca_bak = &rtwdev->hal.edcca_bak; 5902 5903 memset(edcca_bak, 0, sizeof(*edcca_bak)); 5904 5905 if (rtwdev->chip->chip_id == RTL8922A && rtwdev->hal.cv == CHIP_CAV) { 5906 rtw89_phy_set_phy_regs(rtwdev, R_TXGATING, B_TXGATING_EN, 0); 5907 rtw89_phy_set_phy_regs(rtwdev, R_CTLTOP, B_CTLTOP_VAL, 2); 5908 rtw89_phy_set_phy_regs(rtwdev, R_CTLTOP, B_CTLTOP_ON, 1); 5909 rtw89_phy_set_phy_regs(rtwdev, R_SPOOF_CG, B_SPOOF_CG_EN, 0); 5910 rtw89_phy_set_phy_regs(rtwdev, R_DFS_FFT_CG, B_DFS_CG_EN, 0); 5911 rtw89_phy_set_phy_regs(rtwdev, R_DFS_FFT_CG, B_DFS_FFT_EN, 0); 5912 rtw89_phy_set_phy_regs(rtwdev, R_SEGSND, B_SEGSND_EN, 0); 5913 rtw89_phy_set_phy_regs(rtwdev, R_SEGSND, B_SEGSND_EN, 1); 5914 rtw89_phy_set_phy_regs(rtwdev, R_DFS_FFT_CG, B_DFS_FFT_EN, 1); 5915 } 5916 5917 rtw89_phy_write32_mask(rtwdev, edcca_regs->tx_collision_t2r_st, 5918 edcca_regs->tx_collision_t2r_st_mask, 0x29); 5919 } 5920 5921 void rtw89_phy_dm_init(struct rtw89_dev *rtwdev) 5922 { 5923 rtw89_phy_stat_init(rtwdev); 5924 5925 rtw89_chip_bb_sethw(rtwdev); 5926 5927 rtw89_phy_env_monitor_init(rtwdev); 5928 rtw89_physts_parsing_init(rtwdev); 5929 rtw89_phy_dig_init(rtwdev); 5930 rtw89_phy_cfo_init(rtwdev); 5931 rtw89_phy_bb_wrap_init(rtwdev); 5932 rtw89_phy_edcca_init(rtwdev); 5933 rtw89_phy_ch_info_init(rtwdev); 5934 rtw89_phy_ul_tb_info_init(rtwdev); 5935 rtw89_phy_antdiv_init(rtwdev); 5936 rtw89_chip_rfe_gpio(rtwdev); 5937 rtw89_phy_antdiv_set_ant(rtwdev); 5938 5939 rtw89_chip_rfk_hw_init(rtwdev); 5940 rtw89_phy_init_rf_nctl(rtwdev); 5941 rtw89_chip_rfk_init(rtwdev); 5942 rtw89_chip_set_txpwr_ctrl(rtwdev); 5943 rtw89_chip_power_trim(rtwdev); 5944 rtw89_chip_cfg_txrx_path(rtwdev); 5945 } 5946 5947 void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif) 5948 { 5949 const struct rtw89_chip_info *chip = rtwdev->chip; 5950 const struct rtw89_reg_def *bss_clr_vld = &chip->bss_clr_vld; 5951 enum rtw89_phy_idx phy_idx = RTW89_PHY_0; 5952 u8 bss_color; 5953 5954 if (!vif->bss_conf.he_support || !vif->cfg.assoc) 5955 return; 5956 5957 bss_color = vif->bss_conf.he_bss_color.color; 5958 5959 rtw89_phy_write32_idx(rtwdev, bss_clr_vld->addr, bss_clr_vld->mask, 0x1, 5960 phy_idx); 5961 rtw89_phy_write32_idx(rtwdev, chip->bss_clr_map_reg, B_BSS_CLR_MAP_TGT, 5962 bss_color, phy_idx); 5963 rtw89_phy_write32_idx(rtwdev, chip->bss_clr_map_reg, B_BSS_CLR_MAP_STAID, 5964 vif->cfg.aid, phy_idx); 5965 } 5966 5967 static void 5968 _rfk_write_rf(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def) 5969 { 5970 rtw89_write_rf(rtwdev, def->path, def->addr, def->mask, def->data); 5971 } 5972 5973 static void 5974 _rfk_write32_mask(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def) 5975 { 5976 rtw89_phy_write32_mask(rtwdev, def->addr, def->mask, def->data); 5977 } 5978 5979 static void 5980 _rfk_write32_set(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def) 5981 { 5982 rtw89_phy_write32_set(rtwdev, def->addr, def->mask); 5983 } 5984 5985 static void 5986 _rfk_write32_clr(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def) 5987 { 5988 rtw89_phy_write32_clr(rtwdev, def->addr, def->mask); 5989 } 5990 5991 static void 5992 _rfk_delay(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def) 5993 { 5994 udelay(def->data); 5995 } 5996 5997 static void 5998 (*_rfk_handler[])(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def) = { 5999 [RTW89_RFK_F_WRF] = _rfk_write_rf, 6000 [RTW89_RFK_F_WM] = _rfk_write32_mask, 6001 [RTW89_RFK_F_WS] = _rfk_write32_set, 6002 [RTW89_RFK_F_WC] = _rfk_write32_clr, 6003 [RTW89_RFK_F_DELAY] = _rfk_delay, 6004 }; 6005 6006 static_assert(ARRAY_SIZE(_rfk_handler) == RTW89_RFK_F_NUM); 6007 6008 void 6009 rtw89_rfk_parser(struct rtw89_dev *rtwdev, const struct rtw89_rfk_tbl *tbl) 6010 { 6011 const struct rtw89_reg5_def *p = tbl->defs; 6012 const struct rtw89_reg5_def *end = tbl->defs + tbl->size; 6013 6014 for (; p < end; p++) 6015 _rfk_handler[p->flag](rtwdev, p); 6016 } 6017 EXPORT_SYMBOL(rtw89_rfk_parser); 6018 6019 #define RTW89_TSSI_FAST_MODE_NUM 4 6020 6021 static const struct rtw89_reg_def rtw89_tssi_fastmode_regs_flat[RTW89_TSSI_FAST_MODE_NUM] = { 6022 {0xD934, 0xff0000}, 6023 {0xD934, 0xff000000}, 6024 {0xD938, 0xff}, 6025 {0xD934, 0xff00}, 6026 }; 6027 6028 static const struct rtw89_reg_def rtw89_tssi_fastmode_regs_level[RTW89_TSSI_FAST_MODE_NUM] = { 6029 {0xD930, 0xff0000}, 6030 {0xD930, 0xff000000}, 6031 {0xD934, 0xff}, 6032 {0xD930, 0xff00}, 6033 }; 6034 6035 static 6036 void rtw89_phy_tssi_ctrl_set_fast_mode_cfg(struct rtw89_dev *rtwdev, 6037 enum rtw89_mac_idx mac_idx, 6038 enum rtw89_tssi_bandedge_cfg bandedge_cfg, 6039 u32 val) 6040 { 6041 const struct rtw89_reg_def *regs; 6042 u32 reg; 6043 int i; 6044 6045 if (bandedge_cfg == RTW89_TSSI_BANDEDGE_FLAT) 6046 regs = rtw89_tssi_fastmode_regs_flat; 6047 else 6048 regs = rtw89_tssi_fastmode_regs_level; 6049 6050 for (i = 0; i < RTW89_TSSI_FAST_MODE_NUM; i++) { 6051 reg = rtw89_mac_reg_by_idx(rtwdev, regs[i].addr, mac_idx); 6052 rtw89_write32_mask(rtwdev, reg, regs[i].mask, val); 6053 } 6054 } 6055 6056 static const struct rtw89_reg_def rtw89_tssi_bandedge_regs_flat[RTW89_TSSI_SBW_NUM] = { 6057 {0xD91C, 0xff000000}, 6058 {0xD920, 0xff}, 6059 {0xD920, 0xff00}, 6060 {0xD920, 0xff0000}, 6061 {0xD920, 0xff000000}, 6062 {0xD924, 0xff}, 6063 {0xD924, 0xff00}, 6064 {0xD914, 0xff000000}, 6065 {0xD918, 0xff}, 6066 {0xD918, 0xff00}, 6067 {0xD918, 0xff0000}, 6068 {0xD918, 0xff000000}, 6069 {0xD91C, 0xff}, 6070 {0xD91C, 0xff00}, 6071 {0xD91C, 0xff0000}, 6072 }; 6073 6074 static const struct rtw89_reg_def rtw89_tssi_bandedge_regs_level[RTW89_TSSI_SBW_NUM] = { 6075 {0xD910, 0xff}, 6076 {0xD910, 0xff00}, 6077 {0xD910, 0xff0000}, 6078 {0xD910, 0xff000000}, 6079 {0xD914, 0xff}, 6080 {0xD914, 0xff00}, 6081 {0xD914, 0xff0000}, 6082 {0xD908, 0xff}, 6083 {0xD908, 0xff00}, 6084 {0xD908, 0xff0000}, 6085 {0xD908, 0xff000000}, 6086 {0xD90C, 0xff}, 6087 {0xD90C, 0xff00}, 6088 {0xD90C, 0xff0000}, 6089 {0xD90C, 0xff000000}, 6090 }; 6091 6092 void rtw89_phy_tssi_ctrl_set_bandedge_cfg(struct rtw89_dev *rtwdev, 6093 enum rtw89_mac_idx mac_idx, 6094 enum rtw89_tssi_bandedge_cfg bandedge_cfg) 6095 { 6096 const struct rtw89_chip_info *chip = rtwdev->chip; 6097 const struct rtw89_reg_def *regs; 6098 const u32 *data; 6099 u32 reg; 6100 int i; 6101 6102 if (bandedge_cfg >= RTW89_TSSI_CFG_NUM) 6103 return; 6104 6105 if (bandedge_cfg == RTW89_TSSI_BANDEDGE_FLAT) 6106 regs = rtw89_tssi_bandedge_regs_flat; 6107 else 6108 regs = rtw89_tssi_bandedge_regs_level; 6109 6110 data = chip->tssi_dbw_table->data[bandedge_cfg]; 6111 6112 for (i = 0; i < RTW89_TSSI_SBW_NUM; i++) { 6113 reg = rtw89_mac_reg_by_idx(rtwdev, regs[i].addr, mac_idx); 6114 rtw89_write32_mask(rtwdev, reg, regs[i].mask, data[i]); 6115 } 6116 6117 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_BANDEDGE_CFG, mac_idx); 6118 rtw89_write32_mask(rtwdev, reg, B_AX_BANDEDGE_CFG_IDX_MASK, bandedge_cfg); 6119 6120 rtw89_phy_tssi_ctrl_set_fast_mode_cfg(rtwdev, mac_idx, bandedge_cfg, 6121 data[RTW89_TSSI_SBW20]); 6122 } 6123 EXPORT_SYMBOL(rtw89_phy_tssi_ctrl_set_bandedge_cfg); 6124 6125 static 6126 const u8 rtw89_ch_base_table[16] = {1, 0xff, 6127 36, 100, 132, 149, 0xff, 6128 1, 33, 65, 97, 129, 161, 193, 225, 0xff}; 6129 #define RTW89_CH_BASE_IDX_2G 0 6130 #define RTW89_CH_BASE_IDX_5G_FIRST 2 6131 #define RTW89_CH_BASE_IDX_5G_LAST 5 6132 #define RTW89_CH_BASE_IDX_6G_FIRST 7 6133 #define RTW89_CH_BASE_IDX_6G_LAST 14 6134 6135 #define RTW89_CH_BASE_IDX_MASK GENMASK(7, 4) 6136 #define RTW89_CH_OFFSET_MASK GENMASK(3, 0) 6137 6138 u8 rtw89_encode_chan_idx(struct rtw89_dev *rtwdev, u8 central_ch, u8 band) 6139 { 6140 u8 chan_idx; 6141 u8 last, first; 6142 u8 idx; 6143 6144 switch (band) { 6145 case RTW89_BAND_2G: 6146 chan_idx = FIELD_PREP(RTW89_CH_BASE_IDX_MASK, RTW89_CH_BASE_IDX_2G) | 6147 FIELD_PREP(RTW89_CH_OFFSET_MASK, central_ch); 6148 return chan_idx; 6149 case RTW89_BAND_5G: 6150 first = RTW89_CH_BASE_IDX_5G_FIRST; 6151 last = RTW89_CH_BASE_IDX_5G_LAST; 6152 break; 6153 case RTW89_BAND_6G: 6154 first = RTW89_CH_BASE_IDX_6G_FIRST; 6155 last = RTW89_CH_BASE_IDX_6G_LAST; 6156 break; 6157 default: 6158 rtw89_warn(rtwdev, "Unsupported band %d\n", band); 6159 return 0; 6160 } 6161 6162 for (idx = last; idx >= first; idx--) 6163 if (central_ch >= rtw89_ch_base_table[idx]) 6164 break; 6165 6166 if (idx < first) { 6167 rtw89_warn(rtwdev, "Unknown band %d channel %d\n", band, central_ch); 6168 return 0; 6169 } 6170 6171 chan_idx = FIELD_PREP(RTW89_CH_BASE_IDX_MASK, idx) | 6172 FIELD_PREP(RTW89_CH_OFFSET_MASK, 6173 (central_ch - rtw89_ch_base_table[idx]) >> 1); 6174 return chan_idx; 6175 } 6176 EXPORT_SYMBOL(rtw89_encode_chan_idx); 6177 6178 void rtw89_decode_chan_idx(struct rtw89_dev *rtwdev, u8 chan_idx, 6179 u8 *ch, enum nl80211_band *band) 6180 { 6181 u8 idx, offset; 6182 6183 idx = FIELD_GET(RTW89_CH_BASE_IDX_MASK, chan_idx); 6184 offset = FIELD_GET(RTW89_CH_OFFSET_MASK, chan_idx); 6185 6186 if (idx == RTW89_CH_BASE_IDX_2G) { 6187 *band = NL80211_BAND_2GHZ; 6188 *ch = offset; 6189 return; 6190 } 6191 6192 *band = idx <= RTW89_CH_BASE_IDX_5G_LAST ? NL80211_BAND_5GHZ : NL80211_BAND_6GHZ; 6193 *ch = rtw89_ch_base_table[idx] + (offset << 1); 6194 } 6195 EXPORT_SYMBOL(rtw89_decode_chan_idx); 6196 6197 void rtw89_phy_config_edcca(struct rtw89_dev *rtwdev, bool scan) 6198 { 6199 const struct rtw89_edcca_regs *edcca_regs = rtwdev->chip->edcca_regs; 6200 struct rtw89_edcca_bak *edcca_bak = &rtwdev->hal.edcca_bak; 6201 6202 if (scan) { 6203 edcca_bak->a = 6204 rtw89_phy_read32_mask(rtwdev, edcca_regs->edcca_level, 6205 edcca_regs->edcca_mask); 6206 edcca_bak->p = 6207 rtw89_phy_read32_mask(rtwdev, edcca_regs->edcca_level, 6208 edcca_regs->edcca_p_mask); 6209 edcca_bak->ppdu = 6210 rtw89_phy_read32_mask(rtwdev, edcca_regs->ppdu_level, 6211 edcca_regs->ppdu_mask); 6212 6213 rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level, 6214 edcca_regs->edcca_mask, EDCCA_MAX); 6215 rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level, 6216 edcca_regs->edcca_p_mask, EDCCA_MAX); 6217 rtw89_phy_write32_mask(rtwdev, edcca_regs->ppdu_level, 6218 edcca_regs->ppdu_mask, EDCCA_MAX); 6219 } else { 6220 rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level, 6221 edcca_regs->edcca_mask, 6222 edcca_bak->a); 6223 rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level, 6224 edcca_regs->edcca_p_mask, 6225 edcca_bak->p); 6226 rtw89_phy_write32_mask(rtwdev, edcca_regs->ppdu_level, 6227 edcca_regs->ppdu_mask, 6228 edcca_bak->ppdu); 6229 } 6230 } 6231 6232 static void rtw89_phy_edcca_log(struct rtw89_dev *rtwdev) 6233 { 6234 const struct rtw89_edcca_regs *edcca_regs = rtwdev->chip->edcca_regs; 6235 bool flag_fb, flag_p20, flag_s20, flag_s40, flag_s80; 6236 s8 pwdb_fb, pwdb_p20, pwdb_s20, pwdb_s40, pwdb_s80; 6237 u8 path, per20_bitmap; 6238 u8 pwdb[8]; 6239 u32 tmp; 6240 6241 if (!rtw89_debug_is_enabled(rtwdev, RTW89_DBG_EDCCA)) 6242 return; 6243 6244 if (rtwdev->chip->chip_id == RTL8922A) 6245 rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel_be, 6246 edcca_regs->rpt_sel_be_mask, 0); 6247 6248 rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel, 6249 edcca_regs->rpt_sel_mask, 0); 6250 tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_b); 6251 path = u32_get_bits(tmp, B_EDCCA_RPT_B_PATH_MASK); 6252 flag_s80 = u32_get_bits(tmp, B_EDCCA_RPT_B_S80); 6253 flag_s40 = u32_get_bits(tmp, B_EDCCA_RPT_B_S40); 6254 flag_s20 = u32_get_bits(tmp, B_EDCCA_RPT_B_S20); 6255 flag_p20 = u32_get_bits(tmp, B_EDCCA_RPT_B_P20); 6256 flag_fb = u32_get_bits(tmp, B_EDCCA_RPT_B_FB); 6257 pwdb_s20 = u32_get_bits(tmp, MASKBYTE1); 6258 pwdb_p20 = u32_get_bits(tmp, MASKBYTE2); 6259 pwdb_fb = u32_get_bits(tmp, MASKBYTE3); 6260 6261 rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel, 6262 edcca_regs->rpt_sel_mask, 4); 6263 tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_b); 6264 pwdb_s80 = u32_get_bits(tmp, MASKBYTE1); 6265 pwdb_s40 = u32_get_bits(tmp, MASKBYTE2); 6266 6267 per20_bitmap = rtw89_phy_read32_mask(rtwdev, edcca_regs->rpt_a, 6268 MASKBYTE0); 6269 6270 if (rtwdev->chip->chip_id == RTL8922A) { 6271 rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel_be, 6272 edcca_regs->rpt_sel_be_mask, 4); 6273 tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_b); 6274 pwdb[0] = u32_get_bits(tmp, MASKBYTE3); 6275 pwdb[1] = u32_get_bits(tmp, MASKBYTE2); 6276 pwdb[2] = u32_get_bits(tmp, MASKBYTE1); 6277 pwdb[3] = u32_get_bits(tmp, MASKBYTE0); 6278 6279 rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel_be, 6280 edcca_regs->rpt_sel_be_mask, 5); 6281 tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_b); 6282 pwdb[4] = u32_get_bits(tmp, MASKBYTE3); 6283 pwdb[5] = u32_get_bits(tmp, MASKBYTE2); 6284 pwdb[6] = u32_get_bits(tmp, MASKBYTE1); 6285 pwdb[7] = u32_get_bits(tmp, MASKBYTE0); 6286 } else { 6287 rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel, 6288 edcca_regs->rpt_sel_mask, 0); 6289 tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_a); 6290 pwdb[0] = u32_get_bits(tmp, MASKBYTE3); 6291 pwdb[1] = u32_get_bits(tmp, MASKBYTE2); 6292 6293 rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel, 6294 edcca_regs->rpt_sel_mask, 1); 6295 tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_a); 6296 pwdb[2] = u32_get_bits(tmp, MASKBYTE3); 6297 pwdb[3] = u32_get_bits(tmp, MASKBYTE2); 6298 6299 rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel, 6300 edcca_regs->rpt_sel_mask, 2); 6301 tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_a); 6302 pwdb[4] = u32_get_bits(tmp, MASKBYTE3); 6303 pwdb[5] = u32_get_bits(tmp, MASKBYTE2); 6304 6305 rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel, 6306 edcca_regs->rpt_sel_mask, 3); 6307 tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_a); 6308 pwdb[6] = u32_get_bits(tmp, MASKBYTE3); 6309 pwdb[7] = u32_get_bits(tmp, MASKBYTE2); 6310 } 6311 6312 rtw89_debug(rtwdev, RTW89_DBG_EDCCA, 6313 "[EDCCA]: edcca_bitmap = %04x\n", per20_bitmap); 6314 6315 rtw89_debug(rtwdev, RTW89_DBG_EDCCA, 6316 "[EDCCA]: pwdb per20{0,1,2,3,4,5,6,7} = {%d,%d,%d,%d,%d,%d,%d,%d}(dBm)\n", 6317 pwdb[0], pwdb[1], pwdb[2], pwdb[3], pwdb[4], pwdb[5], 6318 pwdb[6], pwdb[7]); 6319 6320 rtw89_debug(rtwdev, RTW89_DBG_EDCCA, 6321 "[EDCCA]: path=%d, flag {FB,p20,s20,s40,s80} = {%d,%d,%d,%d,%d}\n", 6322 path, flag_fb, flag_p20, flag_s20, flag_s40, flag_s80); 6323 6324 rtw89_debug(rtwdev, RTW89_DBG_EDCCA, 6325 "[EDCCA]: pwdb {FB,p20,s20,s40,s80} = {%d,%d,%d,%d,%d}(dBm)\n", 6326 pwdb_fb, pwdb_p20, pwdb_s20, pwdb_s40, pwdb_s80); 6327 } 6328 6329 static u8 rtw89_phy_edcca_get_thre_by_rssi(struct rtw89_dev *rtwdev) 6330 { 6331 struct rtw89_phy_ch_info *ch_info = &rtwdev->ch_info; 6332 bool is_linked = rtwdev->total_sta_assoc > 0; 6333 u8 rssi_min = ch_info->rssi_min >> 1; 6334 u8 edcca_thre; 6335 6336 if (!is_linked) { 6337 edcca_thre = EDCCA_MAX; 6338 } else { 6339 edcca_thre = rssi_min - RSSI_UNIT_CONVER + EDCCA_UNIT_CONVER - 6340 EDCCA_TH_REF; 6341 edcca_thre = max_t(u8, edcca_thre, EDCCA_TH_L2H_LB); 6342 } 6343 6344 return edcca_thre; 6345 } 6346 6347 void rtw89_phy_edcca_thre_calc(struct rtw89_dev *rtwdev) 6348 { 6349 const struct rtw89_edcca_regs *edcca_regs = rtwdev->chip->edcca_regs; 6350 struct rtw89_edcca_bak *edcca_bak = &rtwdev->hal.edcca_bak; 6351 u8 th; 6352 6353 th = rtw89_phy_edcca_get_thre_by_rssi(rtwdev); 6354 if (th == edcca_bak->th_old) 6355 return; 6356 6357 edcca_bak->th_old = th; 6358 6359 rtw89_debug(rtwdev, RTW89_DBG_EDCCA, 6360 "[EDCCA]: Normal Mode, EDCCA_th = %d\n", th); 6361 6362 rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level, 6363 edcca_regs->edcca_mask, th); 6364 rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level, 6365 edcca_regs->edcca_p_mask, th); 6366 rtw89_phy_write32_mask(rtwdev, edcca_regs->ppdu_level, 6367 edcca_regs->ppdu_mask, th); 6368 } 6369 6370 void rtw89_phy_edcca_track(struct rtw89_dev *rtwdev) 6371 { 6372 struct rtw89_hal *hal = &rtwdev->hal; 6373 6374 if (hal->disabled_dm_bitmap & BIT(RTW89_DM_DYNAMIC_EDCCA)) 6375 return; 6376 6377 rtw89_phy_edcca_thre_calc(rtwdev); 6378 rtw89_phy_edcca_log(rtwdev); 6379 } 6380 6381 enum rtw89_rf_path_bit rtw89_phy_get_kpath(struct rtw89_dev *rtwdev, 6382 enum rtw89_phy_idx phy_idx) 6383 { 6384 rtw89_debug(rtwdev, RTW89_DBG_RFK, 6385 "[RFK] kpath dbcc_en: 0x%x, mode=0x%x, PHY%d\n", 6386 rtwdev->dbcc_en, rtwdev->mlo_dbcc_mode, phy_idx); 6387 6388 switch (rtwdev->mlo_dbcc_mode) { 6389 case MLO_1_PLUS_1_1RF: 6390 if (phy_idx == RTW89_PHY_0) 6391 return RF_A; 6392 else 6393 return RF_B; 6394 case MLO_1_PLUS_1_2RF: 6395 if (phy_idx == RTW89_PHY_0) 6396 return RF_A; 6397 else 6398 return RF_D; 6399 case MLO_0_PLUS_2_1RF: 6400 case MLO_2_PLUS_0_1RF: 6401 if (phy_idx == RTW89_PHY_0) 6402 return RF_AB; 6403 else 6404 return RF_AB; 6405 case MLO_0_PLUS_2_2RF: 6406 case MLO_2_PLUS_0_2RF: 6407 case MLO_2_PLUS_2_2RF: 6408 default: 6409 if (phy_idx == RTW89_PHY_0) 6410 return RF_AB; 6411 else 6412 return RF_CD; 6413 } 6414 } 6415 EXPORT_SYMBOL(rtw89_phy_get_kpath); 6416 6417 enum rtw89_rf_path rtw89_phy_get_syn_sel(struct rtw89_dev *rtwdev, 6418 enum rtw89_phy_idx phy_idx) 6419 { 6420 rtw89_debug(rtwdev, RTW89_DBG_RFK, 6421 "[RFK] kpath dbcc_en: 0x%x, mode=0x%x, PHY%d\n", 6422 rtwdev->dbcc_en, rtwdev->mlo_dbcc_mode, phy_idx); 6423 6424 switch (rtwdev->mlo_dbcc_mode) { 6425 case MLO_1_PLUS_1_1RF: 6426 if (phy_idx == RTW89_PHY_0) 6427 return RF_PATH_A; 6428 else 6429 return RF_PATH_B; 6430 case MLO_1_PLUS_1_2RF: 6431 if (phy_idx == RTW89_PHY_0) 6432 return RF_PATH_A; 6433 else 6434 return RF_PATH_D; 6435 case MLO_0_PLUS_2_1RF: 6436 case MLO_2_PLUS_0_1RF: 6437 if (phy_idx == RTW89_PHY_0) 6438 return RF_PATH_A; 6439 else 6440 return RF_PATH_B; 6441 case MLO_0_PLUS_2_2RF: 6442 case MLO_2_PLUS_0_2RF: 6443 case MLO_2_PLUS_2_2RF: 6444 default: 6445 if (phy_idx == RTW89_PHY_0) 6446 return RF_PATH_A; 6447 else 6448 return RF_PATH_C; 6449 } 6450 } 6451 EXPORT_SYMBOL(rtw89_phy_get_syn_sel); 6452 6453 static const struct rtw89_ccx_regs rtw89_ccx_regs_ax = { 6454 .setting_addr = R_CCX, 6455 .edcca_opt_mask = B_CCX_EDCCA_OPT_MSK, 6456 .measurement_trig_mask = B_MEASUREMENT_TRIG_MSK, 6457 .trig_opt_mask = B_CCX_TRIG_OPT_MSK, 6458 .en_mask = B_CCX_EN_MSK, 6459 .ifs_cnt_addr = R_IFS_COUNTER, 6460 .ifs_clm_period_mask = B_IFS_CLM_PERIOD_MSK, 6461 .ifs_clm_cnt_unit_mask = B_IFS_CLM_COUNTER_UNIT_MSK, 6462 .ifs_clm_cnt_clear_mask = B_IFS_COUNTER_CLR_MSK, 6463 .ifs_collect_en_mask = B_IFS_COLLECT_EN, 6464 .ifs_t1_addr = R_IFS_T1, 6465 .ifs_t1_th_h_mask = B_IFS_T1_TH_HIGH_MSK, 6466 .ifs_t1_en_mask = B_IFS_T1_EN_MSK, 6467 .ifs_t1_th_l_mask = B_IFS_T1_TH_LOW_MSK, 6468 .ifs_t2_addr = R_IFS_T2, 6469 .ifs_t2_th_h_mask = B_IFS_T2_TH_HIGH_MSK, 6470 .ifs_t2_en_mask = B_IFS_T2_EN_MSK, 6471 .ifs_t2_th_l_mask = B_IFS_T2_TH_LOW_MSK, 6472 .ifs_t3_addr = R_IFS_T3, 6473 .ifs_t3_th_h_mask = B_IFS_T3_TH_HIGH_MSK, 6474 .ifs_t3_en_mask = B_IFS_T3_EN_MSK, 6475 .ifs_t3_th_l_mask = B_IFS_T3_TH_LOW_MSK, 6476 .ifs_t4_addr = R_IFS_T4, 6477 .ifs_t4_th_h_mask = B_IFS_T4_TH_HIGH_MSK, 6478 .ifs_t4_en_mask = B_IFS_T4_EN_MSK, 6479 .ifs_t4_th_l_mask = B_IFS_T4_TH_LOW_MSK, 6480 .ifs_clm_tx_cnt_addr = R_IFS_CLM_TX_CNT, 6481 .ifs_clm_edcca_excl_cca_fa_mask = B_IFS_CLM_EDCCA_EXCLUDE_CCA_FA_MSK, 6482 .ifs_clm_tx_cnt_msk = B_IFS_CLM_TX_CNT_MSK, 6483 .ifs_clm_cca_addr = R_IFS_CLM_CCA, 6484 .ifs_clm_ofdmcca_excl_fa_mask = B_IFS_CLM_OFDMCCA_EXCLUDE_FA_MSK, 6485 .ifs_clm_cckcca_excl_fa_mask = B_IFS_CLM_CCKCCA_EXCLUDE_FA_MSK, 6486 .ifs_clm_fa_addr = R_IFS_CLM_FA, 6487 .ifs_clm_ofdm_fa_mask = B_IFS_CLM_OFDM_FA_MSK, 6488 .ifs_clm_cck_fa_mask = B_IFS_CLM_CCK_FA_MSK, 6489 .ifs_his_addr = R_IFS_HIS, 6490 .ifs_t4_his_mask = B_IFS_T4_HIS_MSK, 6491 .ifs_t3_his_mask = B_IFS_T3_HIS_MSK, 6492 .ifs_t2_his_mask = B_IFS_T2_HIS_MSK, 6493 .ifs_t1_his_mask = B_IFS_T1_HIS_MSK, 6494 .ifs_avg_l_addr = R_IFS_AVG_L, 6495 .ifs_t2_avg_mask = B_IFS_T2_AVG_MSK, 6496 .ifs_t1_avg_mask = B_IFS_T1_AVG_MSK, 6497 .ifs_avg_h_addr = R_IFS_AVG_H, 6498 .ifs_t4_avg_mask = B_IFS_T4_AVG_MSK, 6499 .ifs_t3_avg_mask = B_IFS_T3_AVG_MSK, 6500 .ifs_cca_l_addr = R_IFS_CCA_L, 6501 .ifs_t2_cca_mask = B_IFS_T2_CCA_MSK, 6502 .ifs_t1_cca_mask = B_IFS_T1_CCA_MSK, 6503 .ifs_cca_h_addr = R_IFS_CCA_H, 6504 .ifs_t4_cca_mask = B_IFS_T4_CCA_MSK, 6505 .ifs_t3_cca_mask = B_IFS_T3_CCA_MSK, 6506 .ifs_total_addr = R_IFSCNT, 6507 .ifs_cnt_done_mask = B_IFSCNT_DONE_MSK, 6508 .ifs_total_mask = B_IFSCNT_TOTAL_CNT_MSK, 6509 }; 6510 6511 static const struct rtw89_physts_regs rtw89_physts_regs_ax = { 6512 .setting_addr = R_PLCP_HISTOGRAM, 6513 .dis_trigger_fail_mask = B_STS_DIS_TRIG_BY_FAIL, 6514 .dis_trigger_brk_mask = B_STS_DIS_TRIG_BY_BRK, 6515 }; 6516 6517 static const struct rtw89_cfo_regs rtw89_cfo_regs_ax = { 6518 .comp = R_DCFO_WEIGHT, 6519 .weighting_mask = B_DCFO_WEIGHT_MSK, 6520 .comp_seg0 = R_DCFO_OPT, 6521 .valid_0_mask = B_DCFO_OPT_EN, 6522 }; 6523 6524 const struct rtw89_phy_gen_def rtw89_phy_gen_ax = { 6525 .cr_base = 0x10000, 6526 .ccx = &rtw89_ccx_regs_ax, 6527 .physts = &rtw89_physts_regs_ax, 6528 .cfo = &rtw89_cfo_regs_ax, 6529 .phy0_phy1_offset = rtw89_phy0_phy1_offset_ax, 6530 .config_bb_gain = rtw89_phy_config_bb_gain_ax, 6531 .preinit_rf_nctl = rtw89_phy_preinit_rf_nctl_ax, 6532 .bb_wrap_init = NULL, 6533 .ch_info_init = NULL, 6534 6535 .set_txpwr_byrate = rtw89_phy_set_txpwr_byrate_ax, 6536 .set_txpwr_offset = rtw89_phy_set_txpwr_offset_ax, 6537 .set_txpwr_limit = rtw89_phy_set_txpwr_limit_ax, 6538 .set_txpwr_limit_ru = rtw89_phy_set_txpwr_limit_ru_ax, 6539 }; 6540 EXPORT_SYMBOL(rtw89_phy_gen_ax); 6541