// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2019-2020 Realtek Corporation
 */

#include "acpi.h"
#include "chan.h"
#include "coex.h"
#include "debug.h"
#include "fw.h"
#include "mac.h"
#include "phy.h"
#include "ps.h"
#include "reg.h"
#include "sar.h"
#include "txrx.h"
#include "util.h"

static u32 rtw89_phy0_phy1_offset(struct rtw89_dev *rtwdev, u32 addr)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;

	return phy->phy0_phy1_offset(rtwdev, addr);
}

static u16 get_max_amsdu_len(struct rtw89_dev *rtwdev,
			     const struct rtw89_ra_report *report)
{
	u32 bit_rate = report->bit_rate;

	/* lower than ofdm, do not aggregate */
	if (bit_rate < 550)
		return 1;

	/* avoid AMSDU for legacy rate */
	if (report->might_fallback_legacy)
		return 1;

	/* lower than 20M vht 2ss mcs8, make it small */
	if (bit_rate < 1800)
		return 1200;

	/* lower than 40M vht 2ss mcs9, make it medium */
	if (bit_rate < 4000)
		return 2600;

	/* not yet 80M vht 2ss mcs8/9, make it twice regular packet size */
	if (bit_rate < 7000)
		return 3500;

	return rtwdev->chip->max_amsdu_limit;
}

static u64 get_mcs_ra_mask(u16 mcs_map, u8 highest_mcs, u8 gap)
{
	u64 ra_mask = 0;
	u8 mcs_cap;
	int i, nss;

	for (i = 0, nss = 12; i < 4; i++, mcs_map >>= 2, nss += 12) {
		mcs_cap = mcs_map & 0x3;
		switch (mcs_cap) {
		case 2:
			ra_mask |= GENMASK_ULL(highest_mcs, 0) << nss;
			break;
		case 1:
			ra_mask |= GENMASK_ULL(highest_mcs - gap, 0) << nss;
			break;
		case 0:
			ra_mask |= GENMASK_ULL(highest_mcs - gap * 2, 0) << nss;
			break;
		default:
			break;
		}
	}

	return ra_mask;
}

static u64 get_he_ra_mask(struct ieee80211_link_sta *link_sta)
{
	struct ieee80211_sta_he_cap cap = link_sta->he_cap;
	u16 mcs_map;

	switch (link_sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		if (cap.he_cap_elem.phy_cap_info[0] &
		    IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
			mcs_map = le16_to_cpu(cap.he_mcs_nss_supp.rx_mcs_80p80);
		else
			mcs_map = le16_to_cpu(cap.he_mcs_nss_supp.rx_mcs_160);
		break;
	default:
		mcs_map = le16_to_cpu(cap.he_mcs_nss_supp.rx_mcs_80);
	}

	/* MCS11, MCS9, MCS7 */
	return get_mcs_ra_mask(mcs_map, 11, 2);
}

static u64 get_eht_mcs_ra_mask(u8 *max_nss, u8 start_mcs, u8 n_nss)
{
	u64 nss_mcs_shift;
	u64 nss_mcs_val;
	u64 mask = 0;
	int i, j;
	u8 nss;

	for (i = 0; i < n_nss; i++) {
		nss = u8_get_bits(max_nss[i], IEEE80211_EHT_MCS_NSS_RX);
		if (!nss)
			continue;

		nss_mcs_val = GENMASK_ULL(start_mcs + i * 2, 0);

		for (j = 0, nss_mcs_shift = 12; j < nss; j++, nss_mcs_shift += 16)
			mask |= nss_mcs_val << nss_mcs_shift;
	}

	return mask;
}
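/* Layout of the 64-bit rate mask handed to the rate-adaptive (RA) firmware:
 * bits 0-3 carry the CCK rates, bits 4-11 the OFDM rates, and the HT/VHT/HE
 * MCS of each spatial stream occupies 12 bits starting at bit 12. EHT uses
 * 16 bits per spatial stream instead (see get_eht_mcs_ra_mask() above).
 */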
static u64 get_eht_ra_mask(struct ieee80211_link_sta *link_sta)
{
	struct ieee80211_sta_eht_cap *eht_cap = &link_sta->eht_cap;
	struct ieee80211_eht_mcs_nss_supp_20mhz_only *mcs_nss_20mhz;
	struct ieee80211_eht_mcs_nss_supp_bw *mcs_nss;
	u8 *he_phy_cap = link_sta->he_cap.he_cap_elem.phy_cap_info;

	switch (link_sta->bandwidth) {
	case IEEE80211_STA_RX_BW_320:
		mcs_nss = &eht_cap->eht_mcs_nss_supp.bw._320;
		/* MCS 9, 11, 13 */
		return get_eht_mcs_ra_mask(mcs_nss->rx_tx_max_nss, 9, 3);
	case IEEE80211_STA_RX_BW_160:
		mcs_nss = &eht_cap->eht_mcs_nss_supp.bw._160;
		/* MCS 9, 11, 13 */
		return get_eht_mcs_ra_mask(mcs_nss->rx_tx_max_nss, 9, 3);
	case IEEE80211_STA_RX_BW_20:
		if (!(he_phy_cap[0] &
		      IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL)) {
			mcs_nss_20mhz = &eht_cap->eht_mcs_nss_supp.only_20mhz;
			/* MCS 7, 9, 11, 13 */
			return get_eht_mcs_ra_mask(mcs_nss_20mhz->rx_tx_max_nss, 7, 4);
		}
		fallthrough;
	case IEEE80211_STA_RX_BW_80:
	default:
		mcs_nss = &eht_cap->eht_mcs_nss_supp.bw._80;
		/* MCS 9, 11, 13 */
		return get_eht_mcs_ra_mask(mcs_nss->rx_tx_max_nss, 9, 3);
	}
}

#define RA_FLOOR_TABLE_SIZE 7
#define RA_FLOOR_UP_GAP 3
static u64 rtw89_phy_ra_mask_rssi(struct rtw89_dev *rtwdev, u8 rssi,
				  u8 ratr_state)
{
	u8 rssi_lv_t[RA_FLOOR_TABLE_SIZE] = {30, 44, 48, 52, 56, 60, 100};
	u8 rssi_lv = 0;
	u8 i;

	rssi >>= 1;
	for (i = 0; i < RA_FLOOR_TABLE_SIZE; i++) {
		if (i >= ratr_state)
			rssi_lv_t[i] += RA_FLOOR_UP_GAP;
		if (rssi < rssi_lv_t[i]) {
			rssi_lv = i;
			break;
		}
	}
	if (rssi_lv == 0)
		return 0xffffffffffffffffULL;
	else if (rssi_lv == 1)
		return 0xfffffffffffffff0ULL;
	else if (rssi_lv == 2)
		return 0xffffffffffffefe0ULL;
	else if (rssi_lv == 3)
		return 0xffffffffffffcfc0ULL;
	else if (rssi_lv == 4)
		return 0xffffffffffff8f80ULL;
	else if (rssi_lv >= 5)
		return 0xffffffffffff0f00ULL;

	return 0xffffffffffffffffULL;
}

static u64 rtw89_phy_ra_mask_recover(u64 ra_mask, u64 ra_mask_bak)
{
	if ((ra_mask & ~(RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES)) == 0)
		ra_mask |= (ra_mask_bak & ~(RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES));

	if (ra_mask == 0)
		ra_mask |= (ra_mask_bak & (RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES));

	return ra_mask;
}

static u64 rtw89_phy_ra_mask_cfg(struct rtw89_dev *rtwdev,
				 struct rtw89_sta_link *rtwsta_link,
				 struct ieee80211_link_sta *link_sta,
				 const struct rtw89_chan *chan)
{
	struct cfg80211_bitrate_mask *mask = &rtwsta_link->mask;
	enum nl80211_band band;
	u64 cfg_mask;

	if (!rtwsta_link->use_cfg_mask)
		return -1;

	switch (chan->band_type) {
	case RTW89_BAND_2G:
		band = NL80211_BAND_2GHZ;
		cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_2GHZ].legacy,
					   RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES);
		break;
	case RTW89_BAND_5G:
		band = NL80211_BAND_5GHZ;
		cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_5GHZ].legacy,
					   RA_MASK_OFDM_RATES);
		break;
	case RTW89_BAND_6G:
		band = NL80211_BAND_6GHZ;
		cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_6GHZ].legacy,
					   RA_MASK_OFDM_RATES);
		break;
	default:
		rtw89_warn(rtwdev, "unhandled band type %d\n", chan->band_type);
		return -1;
	}

	if (link_sta->he_cap.has_he) {
		cfg_mask |= u64_encode_bits(mask->control[band].he_mcs[0],
					    RA_MASK_HE_1SS_RATES);
		cfg_mask |= u64_encode_bits(mask->control[band].he_mcs[1],
					    RA_MASK_HE_2SS_RATES);
	} else if (link_sta->vht_cap.vht_supported) {
		cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[0],
					    RA_MASK_VHT_1SS_RATES);
		cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[1],
					    RA_MASK_VHT_2SS_RATES);
	} else if (link_sta->ht_cap.ht_supported) {
		cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[0],
					    RA_MASK_HT_1SS_RATES);
		cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[1],
					    RA_MASK_HT_2SS_RATES);
	}

	return cfg_mask;
}
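/* Per-spatial-stream "high rate" masks, indexed by stream (0 = 1SS). The
 * update path below ORs together the first hal.tx_nss entries to trim the
 * station's rate mask down to the stream count this device can transmit.
 */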
static const u64
rtw89_ra_mask_ht_rates[4] = {RA_MASK_HT_1SS_RATES, RA_MASK_HT_2SS_RATES,
			     RA_MASK_HT_3SS_RATES, RA_MASK_HT_4SS_RATES};
static const u64
rtw89_ra_mask_vht_rates[4] = {RA_MASK_VHT_1SS_RATES, RA_MASK_VHT_2SS_RATES,
			      RA_MASK_VHT_3SS_RATES, RA_MASK_VHT_4SS_RATES};
static const u64
rtw89_ra_mask_he_rates[4] = {RA_MASK_HE_1SS_RATES, RA_MASK_HE_2SS_RATES,
			     RA_MASK_HE_3SS_RATES, RA_MASK_HE_4SS_RATES};
static const u64
rtw89_ra_mask_eht_rates[4] = {RA_MASK_EHT_1SS_RATES, RA_MASK_EHT_2SS_RATES,
			      RA_MASK_EHT_3SS_RATES, RA_MASK_EHT_4SS_RATES};
static const u64
rtw89_ra_mask_eht_mcs0_11[4] = {RA_MASK_EHT_1SS_MCS0_11, RA_MASK_EHT_2SS_MCS0_11,
				RA_MASK_EHT_3SS_MCS0_11, RA_MASK_EHT_4SS_MCS0_11};

static void rtw89_phy_ra_gi_ltf(struct rtw89_dev *rtwdev,
				struct rtw89_sta_link *rtwsta_link,
				struct ieee80211_link_sta *link_sta,
				const struct rtw89_chan *chan,
				bool *fix_giltf_en, u8 *fix_giltf)
{
	struct cfg80211_bitrate_mask *mask = &rtwsta_link->mask;
	u8 band = chan->band_type;
	enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
	u8 he_ltf = mask->control[nl_band].he_ltf;
	u8 he_gi = mask->control[nl_band].he_gi;

	*fix_giltf_en = true;

	if (rtwdev->chip->chip_id == RTL8852C &&
	    chan->band_width == RTW89_CHANNEL_WIDTH_160 &&
	    rtw89_sta_link_has_su_mu_4xhe08(link_sta))
		*fix_giltf = RTW89_GILTF_SGI_4XHE08;
	else
		*fix_giltf = RTW89_GILTF_2XHE08;

	if (!(rtwsta_link->use_cfg_mask && link_sta->he_cap.has_he))
		return;

	if (he_ltf == 2 && he_gi == 2) {
		*fix_giltf = RTW89_GILTF_LGI_4XHE32;
	} else if (he_ltf == 2 && he_gi == 0) {
		*fix_giltf = RTW89_GILTF_SGI_4XHE08;
	} else if (he_ltf == 1 && he_gi == 1) {
		*fix_giltf = RTW89_GILTF_2XHE16;
	} else if (he_ltf == 1 && he_gi == 0) {
		*fix_giltf = RTW89_GILTF_2XHE08;
	} else if (he_ltf == 0 && he_gi == 1) {
		*fix_giltf = RTW89_GILTF_1XHE16;
	} else if (he_ltf == 0 && he_gi == 0) {
		*fix_giltf = RTW89_GILTF_1XHE08;
	}
}
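/* Build the RA info block for one station link: derive the rate mask and
 * capabilities (STBC/LDPC/SGI/GI-LTF) from the highest supported PHY mode,
 * add the legacy rates of the operating band, then trim the result by TX
 * stream count, RSSI floor, and any user-configured bitrate mask.
 */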
static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
				    struct rtw89_vif_link *rtwvif_link,
				    struct rtw89_sta_link *rtwsta_link,
				    struct ieee80211_link_sta *link_sta,
				    bool p2p, bool csi)
{
	struct rtw89_phy_rate_pattern *rate_pattern = &rtwvif_link->rate_pattern;
	struct rtw89_ra_info *ra = &rtwsta_link->ra;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
						       rtwvif_link->chanctx_idx);
	const u64 *high_rate_masks = rtw89_ra_mask_ht_rates;
	u8 rssi = ewma_rssi_read(&rtwsta_link->avg_rssi);
	u64 ra_mask = 0;
	u64 ra_mask_bak;
	u8 mode = 0;
	u8 csi_mode = RTW89_RA_RPT_MODE_LEGACY;
	u8 bw_mode = 0;
	u8 stbc_en = 0;
	u8 ldpc_en = 0;
	u8 fix_giltf = 0;
	u8 i;
	bool sgi = false;
	bool fix_giltf_en = false;

	memset(ra, 0, sizeof(*ra));
	/* Set the ra mask from sta's capability */
	if (link_sta->eht_cap.has_eht) {
		mode |= RTW89_RA_MODE_EHT;
		ra_mask |= get_eht_ra_mask(link_sta);

		if (rtwdev->hal.no_mcs_12_13)
			high_rate_masks = rtw89_ra_mask_eht_mcs0_11;
		else
			high_rate_masks = rtw89_ra_mask_eht_rates;

		rtw89_phy_ra_gi_ltf(rtwdev, rtwsta_link, link_sta,
				    chan, &fix_giltf_en, &fix_giltf);
	} else if (link_sta->he_cap.has_he) {
		mode |= RTW89_RA_MODE_HE;
		csi_mode = RTW89_RA_RPT_MODE_HE;
		ra_mask |= get_he_ra_mask(link_sta);
		high_rate_masks = rtw89_ra_mask_he_rates;
		if (link_sta->he_cap.he_cap_elem.phy_cap_info[2] &
		    IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
			stbc_en = 1;
		if (link_sta->he_cap.he_cap_elem.phy_cap_info[1] &
		    IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)
			ldpc_en = 1;
		rtw89_phy_ra_gi_ltf(rtwdev, rtwsta_link, link_sta,
				    chan, &fix_giltf_en, &fix_giltf);
	} else if (link_sta->vht_cap.vht_supported) {
		u16 mcs_map = le16_to_cpu(link_sta->vht_cap.vht_mcs.rx_mcs_map);

		mode |= RTW89_RA_MODE_VHT;
		csi_mode = RTW89_RA_RPT_MODE_VHT;
		/* MCS9 (non-20MHz), MCS8, MCS7 */
		if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20)
			ra_mask |= get_mcs_ra_mask(mcs_map, 8, 1);
		else
			ra_mask |= get_mcs_ra_mask(mcs_map, 9, 1);
		high_rate_masks = rtw89_ra_mask_vht_rates;
		if (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK)
			stbc_en = 1;
		if (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)
			ldpc_en = 1;
	} else if (link_sta->ht_cap.ht_supported) {
		mode |= RTW89_RA_MODE_HT;
		csi_mode = RTW89_RA_RPT_MODE_HT;
		ra_mask |= ((u64)link_sta->ht_cap.mcs.rx_mask[3] << 48) |
			   ((u64)link_sta->ht_cap.mcs.rx_mask[2] << 36) |
			   ((u64)link_sta->ht_cap.mcs.rx_mask[1] << 24) |
			   ((u64)link_sta->ht_cap.mcs.rx_mask[0] << 12);
		high_rate_masks = rtw89_ra_mask_ht_rates;
		if (link_sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
			stbc_en = 1;
		if (link_sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)
			ldpc_en = 1;
	}

	switch (chan->band_type) {
	case RTW89_BAND_2G:
		ra_mask |= link_sta->supp_rates[NL80211_BAND_2GHZ];
		if (link_sta->supp_rates[NL80211_BAND_2GHZ] & 0xf)
			mode |= RTW89_RA_MODE_CCK;
		if (link_sta->supp_rates[NL80211_BAND_2GHZ] & 0xff0)
			mode |= RTW89_RA_MODE_OFDM;
		break;
	case RTW89_BAND_5G:
		ra_mask |= (u64)link_sta->supp_rates[NL80211_BAND_5GHZ] << 4;
		mode |= RTW89_RA_MODE_OFDM;
		break;
	case RTW89_BAND_6G:
		ra_mask |= (u64)link_sta->supp_rates[NL80211_BAND_6GHZ] << 4;
		mode |= RTW89_RA_MODE_OFDM;
		break;
	default:
		rtw89_err(rtwdev, "Unknown band type\n");
		break;
	}

	ra_mask_bak = ra_mask;

	if (mode >= RTW89_RA_MODE_HT) {
		u64 mask = 0;

		for (i = 0; i < rtwdev->hal.tx_nss; i++)
			mask |= high_rate_masks[i];
		if (mode & RTW89_RA_MODE_OFDM)
			mask |= RA_MASK_SUBOFDM_RATES;
		if (mode & RTW89_RA_MODE_CCK)
			mask |= RA_MASK_SUBCCK_RATES;
		ra_mask &= mask;
	} else if (mode & RTW89_RA_MODE_OFDM) {
		ra_mask &= (RA_MASK_OFDM_RATES | RA_MASK_SUBCCK_RATES);
	}

	if (mode != RTW89_RA_MODE_CCK)
		ra_mask &= rtw89_phy_ra_mask_rssi(rtwdev, rssi, 0);

	ra_mask = rtw89_phy_ra_mask_recover(ra_mask, ra_mask_bak);
	ra_mask &= rtw89_phy_ra_mask_cfg(rtwdev, rtwsta_link, link_sta, chan);

	switch (link_sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		bw_mode = RTW89_CHANNEL_WIDTH_160;
		sgi = link_sta->vht_cap.vht_supported &&
		      (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160);
		break;
	case IEEE80211_STA_RX_BW_80:
		bw_mode = RTW89_CHANNEL_WIDTH_80;
		sgi = link_sta->vht_cap.vht_supported &&
		      (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80);
		break;
	case IEEE80211_STA_RX_BW_40:
		bw_mode = RTW89_CHANNEL_WIDTH_40;
		sgi = link_sta->ht_cap.ht_supported &&
		      (link_sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40);
		break;
	default:
		bw_mode = RTW89_CHANNEL_WIDTH_20;
		sgi = link_sta->ht_cap.ht_supported &&
		      (link_sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
		break;
	}

	if (link_sta->he_cap.he_cap_elem.phy_cap_info[3] &
	    IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM)
		ra->dcm_cap = 1;

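	/* A user-fixed rate pattern (see rtw89_phy_rate_pattern_vif()) overrides
	 * the capability-derived mask, except for P2P links.
	 */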
	if (rate_pattern->enable && !p2p) {
		ra_mask = rtw89_phy_ra_mask_cfg(rtwdev, rtwsta_link, link_sta, chan);
		ra_mask &= rate_pattern->ra_mask;
		mode = rate_pattern->ra_mode;
	}

	ra->bw_cap = bw_mode;
	ra->er_cap = rtwsta_link->er_cap;
	ra->mode_ctrl = mode;
	ra->macid = rtwsta_link->mac_id;
	ra->stbc_cap = stbc_en;
	ra->ldpc_cap = ldpc_en;
	ra->ss_num = min(link_sta->rx_nss, rtwdev->hal.tx_nss) - 1;
	ra->en_sgi = sgi;
	ra->ra_mask = ra_mask;
	ra->fix_giltf_en = fix_giltf_en;
	ra->fix_giltf = fix_giltf;

	if (!csi)
		return;

	ra->fixed_csi_rate_en = false;
	ra->ra_csi_rate_en = true;
	ra->cr_tbl_sel = false;
	ra->band_num = rtwvif_link->phy_idx;
	ra->csi_bw = bw_mode;
	ra->csi_gi_ltf = RTW89_GILTF_LGI_4XHE32;
	ra->csi_mcs_ss_idx = 5;
	ra->csi_mode = csi_mode;
}

void rtw89_phy_ra_update_sta_link(struct rtw89_dev *rtwdev,
				  struct rtw89_sta_link *rtwsta_link,
				  u32 changed)
{
	struct rtw89_vif_link *rtwvif_link = rtwsta_link->rtwvif_link;
	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
	struct rtw89_ra_info *ra = &rtwsta_link->ra;
	struct ieee80211_link_sta *link_sta;

	rcu_read_lock();

	link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, false);
	rtw89_phy_ra_sta_update(rtwdev, rtwvif_link, rtwsta_link,
				link_sta, vif->p2p, false);

	rcu_read_unlock();

	if (changed & IEEE80211_RC_SUPP_RATES_CHANGED)
		ra->upd_mask = 1;
	if (changed & (IEEE80211_RC_BW_CHANGED | IEEE80211_RC_NSS_CHANGED))
		ra->upd_bw_nss_mask = 1;

	rtw89_debug(rtwdev, RTW89_DBG_RA,
		    "ra update: macid = %d, bw = %d, nss = %d, gi = %d %d\n",
		    ra->macid,
		    ra->bw_cap,
		    ra->ss_num,
		    ra->en_sgi,
		    ra->giltf);

	rtw89_fw_h2c_ra(rtwdev, ra, false);
}

void rtw89_phy_ra_update_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta,
			     u32 changed)
{
	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
	struct rtw89_sta_link *rtwsta_link;
	unsigned int link_id;

	rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id)
		rtw89_phy_ra_update_sta_link(rtwdev, rtwsta_link, changed);
}

static bool __check_rate_pattern(struct rtw89_phy_rate_pattern *next,
				 u16 rate_base, u64 ra_mask, u8 ra_mode,
				 u32 rate_ctrl, u32 ctrl_skip, bool force)
{
	u8 n, c;

	if (rate_ctrl == ctrl_skip)
		return true;

	n = hweight32(rate_ctrl);
	if (n == 0)
		return true;

	if (force && n != 1)
		return false;

	if (next->enable)
		return false;

	c = __fls(rate_ctrl);
	next->rate = rate_base + c;
	next->ra_mode = ra_mode;
	next->ra_mask = ra_mask;
	next->enable = true;

	return true;
}
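/* Map a rate family to the hardware rate code of each chip generation; the
 * AX and BE generations number their HW rates differently.
 */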
#define RTW89_HW_RATE_BY_CHIP_GEN(rate) \
{ \
	[RTW89_CHIP_AX] = RTW89_HW_RATE_ ## rate, \
	[RTW89_CHIP_BE] = RTW89_HW_RATE_V1_ ## rate, \
}

static
void __rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
				  struct rtw89_vif_link *rtwvif_link,
				  const struct cfg80211_bitrate_mask *mask)
{
	struct ieee80211_supported_band *sband;
	struct rtw89_phy_rate_pattern next_pattern = {0};
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
						       rtwvif_link->chanctx_idx);
	static const u16 hw_rate_he[][RTW89_CHIP_GEN_NUM] = {
		RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS1_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS2_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS3_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS4_MCS0),
	};
	static const u16 hw_rate_vht[][RTW89_CHIP_GEN_NUM] = {
		RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS1_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS2_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS3_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS4_MCS0),
	};
	static const u16 hw_rate_ht[][RTW89_CHIP_GEN_NUM] = {
		RTW89_HW_RATE_BY_CHIP_GEN(MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(MCS8),
		RTW89_HW_RATE_BY_CHIP_GEN(MCS16),
		RTW89_HW_RATE_BY_CHIP_GEN(MCS24),
	};
	u8 band = chan->band_type;
	enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
	enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
	u8 tx_nss = rtwdev->hal.tx_nss;
	u8 i;

	for (i = 0; i < tx_nss; i++)
		if (!__check_rate_pattern(&next_pattern, hw_rate_he[i][chip_gen],
					  RA_MASK_HE_RATES, RTW89_RA_MODE_HE,
					  mask->control[nl_band].he_mcs[i],
					  0, true))
			goto out;

	for (i = 0; i < tx_nss; i++)
		if (!__check_rate_pattern(&next_pattern, hw_rate_vht[i][chip_gen],
					  RA_MASK_VHT_RATES, RTW89_RA_MODE_VHT,
					  mask->control[nl_band].vht_mcs[i],
					  0, true))
			goto out;

	for (i = 0; i < tx_nss; i++)
		if (!__check_rate_pattern(&next_pattern, hw_rate_ht[i][chip_gen],
					  RA_MASK_HT_RATES, RTW89_RA_MODE_HT,
					  mask->control[nl_band].ht_mcs[i],
					  0, true))
			goto out;

	/* legacy cannot be empty for nl80211_parse_tx_bitrate_mask, and
	 * ieee80211_set_bitrate_mask requires at least one basic rate, so
	 * the decision just depends on whether all bitrates are set or not.
	 */
	sband = rtwdev->hw->wiphy->bands[nl_band];
	if (band == RTW89_BAND_2G) {
		if (!__check_rate_pattern(&next_pattern, RTW89_HW_RATE_CCK1,
					  RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES,
					  RTW89_RA_MODE_CCK | RTW89_RA_MODE_OFDM,
					  mask->control[nl_band].legacy,
					  BIT(sband->n_bitrates) - 1, false))
			goto out;
	} else {
		if (!__check_rate_pattern(&next_pattern, RTW89_HW_RATE_OFDM6,
					  RA_MASK_OFDM_RATES, RTW89_RA_MODE_OFDM,
					  mask->control[nl_band].legacy,
					  BIT(sband->n_bitrates) - 1, false))
			goto out;
	}

	if (!next_pattern.enable)
		goto out;

	rtwvif_link->rate_pattern = next_pattern;
	rtw89_debug(rtwdev, RTW89_DBG_RA,
#if defined(__linux__)
		    "configure pattern: rate 0x%x, mask 0x%llx, mode 0x%x\n",
#elif defined(__FreeBSD__)
		    "configure pattern: rate 0x%x, mask 0x%jx, mode 0x%x\n",
#endif
		    next_pattern.rate,
#if defined(__FreeBSD__)
		    (uintmax_t)
#endif
		    next_pattern.ra_mask,
		    next_pattern.ra_mode);
	return;

out:
	rtwvif_link->rate_pattern.enable = false;
	rtw89_debug(rtwdev, RTW89_DBG_RA, "unset rate pattern\n");
}

void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
				struct ieee80211_vif *vif,
				const struct cfg80211_bitrate_mask *mask)
{
	struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
	struct rtw89_vif_link *rtwvif_link;
	unsigned int link_id;

	rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
		__rtw89_phy_rate_pattern_vif(rtwdev, rtwvif_link, mask);
}

static void rtw89_phy_ra_update_sta_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw89_dev *rtwdev = (struct rtw89_dev *)data;

	rtw89_phy_ra_update_sta(rtwdev, sta, IEEE80211_RC_SUPP_RATES_CHANGED);
}

void rtw89_phy_ra_update(struct rtw89_dev *rtwdev)
{
	ieee80211_iterate_stations_atomic(rtwdev->hw,
					  rtw89_phy_ra_update_sta_iter,
					  rtwdev);
}
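/* Initial RA programming at association time: the starting rate level is
 * picked from the averaged RSSI, and the CSI report setup is included when
 * the peer can act as a beamformer.
 */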
void rtw89_phy_ra_assoc(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwsta_link)
{
	struct rtw89_vif_link *rtwvif_link = rtwsta_link->rtwvif_link;
	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
	struct rtw89_ra_info *ra = &rtwsta_link->ra;
	u8 rssi = ewma_rssi_read(&rtwsta_link->avg_rssi) >> RSSI_FACTOR;
	struct ieee80211_link_sta *link_sta;
	bool csi;

	rcu_read_lock();

	link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
	csi = rtw89_sta_has_beamformer_cap(link_sta);

	rtw89_phy_ra_sta_update(rtwdev, rtwvif_link, rtwsta_link,
				link_sta, vif->p2p, csi);

	rcu_read_unlock();

	if (rssi > 40)
		ra->init_rate_lv = 1;
	else if (rssi > 20)
		ra->init_rate_lv = 2;
	else if (rssi > 1)
		ra->init_rate_lv = 3;
	else
		ra->init_rate_lv = 0;
	ra->upd_all = 1;
	rtw89_debug(rtwdev, RTW89_DBG_RA,
		    "ra assoc: macid = %d, mode = %d, bw = %d, nss = %d, lv = %d\n",
		    ra->macid,
		    ra->mode_ctrl,
		    ra->bw_cap,
		    ra->ss_num,
		    ra->init_rate_lv);
	rtw89_debug(rtwdev, RTW89_DBG_RA,
		    "ra assoc: dcm = %d, er = %d, ldpc = %d, stbc = %d, gi = %d %d\n",
		    ra->dcm_cap,
		    ra->er_cap,
		    ra->ldpc_cap,
		    ra->stbc_cap,
		    ra->en_sgi,
		    ra->giltf);

	rtw89_fw_h2c_ra(rtwdev, ra, csi);
}

u8 rtw89_phy_get_txsc(struct rtw89_dev *rtwdev,
		      const struct rtw89_chan *chan,
		      enum rtw89_bandwidth dbw)
{
	enum rtw89_bandwidth cbw = chan->band_width;
	u8 pri_ch = chan->primary_channel;
	u8 central_ch = chan->channel;
	u8 txsc_idx = 0;
	u8 tmp = 0;

	if (cbw == dbw || cbw == RTW89_CHANNEL_WIDTH_20)
		return txsc_idx;

	switch (cbw) {
	case RTW89_CHANNEL_WIDTH_40:
		txsc_idx = pri_ch > central_ch ? 1 : 2;
		break;
	case RTW89_CHANNEL_WIDTH_80:
		if (dbw == RTW89_CHANNEL_WIDTH_20) {
			if (pri_ch > central_ch)
				txsc_idx = (pri_ch - central_ch) >> 1;
			else
				txsc_idx = ((central_ch - pri_ch) >> 1) + 1;
		} else {
			txsc_idx = pri_ch > central_ch ? 9 : 10;
		}
		break;
	case RTW89_CHANNEL_WIDTH_160:
		if (pri_ch > central_ch)
			tmp = (pri_ch - central_ch) >> 1;
		else
			tmp = ((central_ch - pri_ch) >> 1) + 1;

		if (dbw == RTW89_CHANNEL_WIDTH_20) {
			txsc_idx = tmp;
		} else if (dbw == RTW89_CHANNEL_WIDTH_40) {
			if (tmp == 1 || tmp == 3)
				txsc_idx = 9;
			else if (tmp == 5 || tmp == 7)
				txsc_idx = 11;
			else if (tmp == 2 || tmp == 4)
				txsc_idx = 10;
			else if (tmp == 6 || tmp == 8)
				txsc_idx = 12;
			else
				return 0xff;
		} else {
			txsc_idx = pri_ch > central_ch ? 13 : 14;
		}
		break;
	case RTW89_CHANNEL_WIDTH_80_80:
		if (dbw == RTW89_CHANNEL_WIDTH_20) {
			if (pri_ch > central_ch)
				txsc_idx = (10 - (pri_ch - central_ch)) >> 1;
			else
				txsc_idx = ((central_ch - pri_ch) >> 1) + 5;
		} else if (dbw == RTW89_CHANNEL_WIDTH_40) {
			txsc_idx = pri_ch > central_ch ? 10 : 12;
		} else {
			txsc_idx = 14;
		}
		break;
	default:
		break;
	}

	return txsc_idx;
}
EXPORT_SYMBOL(rtw89_phy_get_txsc);
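/* TX sub-band index: 0-based position of the data bandwidth inside the
 * channel bandwidth, counted from the low side. For example, with an 80 MHz
 * channel centered at channel 42 and dbw = 20 MHz, primary channels
 * 36/40/44/48 give (pri_ch - central_ch + 6) / 4 = 0/1/2/3.
 */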
u8 rtw89_phy_get_txsb(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan,
		      enum rtw89_bandwidth dbw)
{
	enum rtw89_bandwidth cbw = chan->band_width;
	u8 pri_ch = chan->primary_channel;
	u8 central_ch = chan->channel;
	u8 txsb_idx = 0;

	if (cbw == dbw || cbw == RTW89_CHANNEL_WIDTH_20)
		return txsb_idx;

	switch (cbw) {
	case RTW89_CHANNEL_WIDTH_40:
		txsb_idx = pri_ch > central_ch ? 1 : 0;
		break;
	case RTW89_CHANNEL_WIDTH_80:
		if (dbw == RTW89_CHANNEL_WIDTH_20)
			txsb_idx = (pri_ch - central_ch + 6) / 4;
		else
			txsb_idx = pri_ch > central_ch ? 1 : 0;
		break;
	case RTW89_CHANNEL_WIDTH_160:
		if (dbw == RTW89_CHANNEL_WIDTH_20)
			txsb_idx = (pri_ch - central_ch + 14) / 4;
		else if (dbw == RTW89_CHANNEL_WIDTH_40)
			txsb_idx = (pri_ch - central_ch + 12) / 8;
		else
			txsb_idx = pri_ch > central_ch ? 1 : 0;
		break;
	case RTW89_CHANNEL_WIDTH_320:
		if (dbw == RTW89_CHANNEL_WIDTH_20)
			txsb_idx = (pri_ch - central_ch + 30) / 4;
		else if (dbw == RTW89_CHANNEL_WIDTH_40)
			txsb_idx = (pri_ch - central_ch + 28) / 8;
		else if (dbw == RTW89_CHANNEL_WIDTH_80)
			txsb_idx = (pri_ch - central_ch + 24) / 16;
		else
			txsb_idx = pri_ch > central_ch ? 1 : 0;
		break;
	default:
		break;
	}

	return txsb_idx;
}
EXPORT_SYMBOL(rtw89_phy_get_txsb);

static bool rtw89_phy_check_swsi_busy(struct rtw89_dev *rtwdev)
{
	return !!rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, B_SWSI_W_BUSY_V1) ||
	       !!rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, B_SWSI_R_BUSY_V1);
}

u32 rtw89_phy_read_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
		      u32 addr, u32 mask)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const u32 *base_addr = chip->rf_base_addr;
	u32 val, direct_addr;

	if (rf_path >= rtwdev->chip->rf_path_num) {
		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return INV_RF_DATA;
	}

	addr &= 0xff;
	direct_addr = base_addr[rf_path] + (addr << 2);
	mask &= RFREG_MASK;

	val = rtw89_phy_read32_mask(rtwdev, direct_addr, mask);

	return val;
}
EXPORT_SYMBOL(rtw89_phy_read_rf);

static u32 rtw89_phy_read_rf_a(struct rtw89_dev *rtwdev,
			       enum rtw89_rf_path rf_path, u32 addr, u32 mask)
{
	bool busy;
	bool done;
	u32 val;
	int ret;

	ret = read_poll_timeout_atomic(rtw89_phy_check_swsi_busy, busy, !busy,
				       1, 30, false, rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "read rf busy swsi\n");
		return INV_RF_DATA;
	}

	mask &= RFREG_MASK;

	val = FIELD_PREP(B_SWSI_READ_ADDR_PATH_V1, rf_path) |
	      FIELD_PREP(B_SWSI_READ_ADDR_ADDR_V1, addr);
	rtw89_phy_write32_mask(rtwdev, R_SWSI_READ_ADDR_V1, B_SWSI_READ_ADDR_V1, val);
	udelay(2);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, done, done, 1,
				       30, false, rtwdev, R_SWSI_V1,
				       B_SWSI_R_DATA_DONE_V1);
	if (ret) {
		rtw89_err(rtwdev, "read swsi busy\n");
		return INV_RF_DATA;
	}

	return rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, mask);
}

u32 rtw89_phy_read_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
			 u32 addr, u32 mask)
{
	bool ad_sel = FIELD_GET(RTW89_RF_ADDR_ADSEL_MASK, addr);

	if (rf_path >= rtwdev->chip->rf_path_num) {
		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return INV_RF_DATA;
	}

	if (ad_sel)
		return rtw89_phy_read_rf(rtwdev, rf_path, addr, mask);
	else
		return rtw89_phy_read_rf_a(rtwdev, rf_path, addr, mask);
}
EXPORT_SYMBOL(rtw89_phy_read_rf_v1);
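/* HWSI indirect RF read for v2 chips: kick the HWSI address control, wait
 * until the interface is idle, program the address and read strobe, poll
 * the read-done flag, then fetch the 20-bit register value.
 */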
static u32 rtw89_phy_read_full_rf_v2_a(struct rtw89_dev *rtwdev,
				       enum rtw89_rf_path rf_path, u32 addr)
{
	static const u16 r_addr_ofst[2] = {0x2C24, 0x2D24};
	static const u16 addr_ofst[2] = {0x2ADC, 0x2BDC};
	bool busy, done;
	int ret;
	u32 val;

	rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_CTL_MASK, 0x1);
	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, busy, !busy,
				       1, 3800, false,
				       rtwdev, r_addr_ofst[rf_path], B_HWSI_VAL_BUSY);
	if (ret) {
		rtw89_warn(rtwdev, "poll HWSI is busy\n");
		return INV_RF_DATA;
	}

	rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_MASK, addr);
	rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_RD, 0x1);
	udelay(2);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, done, done,
				       1, 3800, false,
				       rtwdev, r_addr_ofst[rf_path], B_HWSI_VAL_RDONE);
	if (ret) {
		rtw89_warn(rtwdev, "read HWSI is busy\n");
		val = INV_RF_DATA;
		goto out;
	}

	val = rtw89_phy_read32_mask(rtwdev, r_addr_ofst[rf_path], RFREG_MASK);
out:
	rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_POLL_MASK, 0);

	return val;
}

static u32 rtw89_phy_read_rf_v2_a(struct rtw89_dev *rtwdev,
				  enum rtw89_rf_path rf_path, u32 addr, u32 mask)
{
	u32 val;

	val = rtw89_phy_read_full_rf_v2_a(rtwdev, rf_path, addr);

	return (val & mask) >> __ffs(mask);
}

u32 rtw89_phy_read_rf_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
			 u32 addr, u32 mask)
{
	bool ad_sel = u32_get_bits(addr, RTW89_RF_ADDR_ADSEL_MASK);

	if (rf_path >= rtwdev->chip->rf_path_num) {
		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return INV_RF_DATA;
	}

	if (ad_sel)
		return rtw89_phy_read_rf(rtwdev, rf_path, addr, mask);
	else
		return rtw89_phy_read_rf_v2_a(rtwdev, rf_path, addr, mask);
}
EXPORT_SYMBOL(rtw89_phy_read_rf_v2);

bool rtw89_phy_write_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
			u32 addr, u32 mask, u32 data)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const u32 *base_addr = chip->rf_base_addr;
	u32 direct_addr;

	if (rf_path >= rtwdev->chip->rf_path_num) {
		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return false;
	}

	addr &= 0xff;
	direct_addr = base_addr[rf_path] + (addr << 2);
	mask &= RFREG_MASK;

	rtw89_phy_write32_mask(rtwdev, direct_addr, mask, data);

	/* delay to ensure writing properly */
	udelay(1);

	return true;
}
EXPORT_SYMBOL(rtw89_phy_write_rf);

static bool rtw89_phy_write_rf_a(struct rtw89_dev *rtwdev,
				 enum rtw89_rf_path rf_path, u32 addr, u32 mask,
				 u32 data)
{
	u8 bit_shift;
	u32 val;
	bool busy, b_msk_en = false;
	int ret;

	ret = read_poll_timeout_atomic(rtw89_phy_check_swsi_busy, busy, !busy,
				       1, 30, false, rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "write rf busy swsi\n");
		return false;
	}

	data &= RFREG_MASK;
	mask &= RFREG_MASK;

	if (mask != RFREG_MASK) {
		b_msk_en = true;
		rtw89_phy_write32_mask(rtwdev, R_SWSI_BIT_MASK_V1, RFREG_MASK,
				       mask);
		bit_shift = __ffs(mask);
		data = (data << bit_shift) & RFREG_MASK;
	}

	val = FIELD_PREP(B_SWSI_DATA_BIT_MASK_EN_V1, b_msk_en) |
	      FIELD_PREP(B_SWSI_DATA_PATH_V1, rf_path) |
	      FIELD_PREP(B_SWSI_DATA_ADDR_V1, addr) |
	      FIELD_PREP(B_SWSI_DATA_VAL_V1, data);

	rtw89_phy_write32_mask(rtwdev, R_SWSI_DATA_V1, MASKDWORD, val);

	return true;
}
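/* The ADSEL bit in the RF address selects the access path: when set, the
 * register is reachable through the directly mapped BB window
 * (rtw89_phy_write_rf()); when clear, it must go through the serial
 * interface helpers above.
 */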
bool rtw89_phy_write_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
			   u32 addr, u32 mask, u32 data)
{
	bool ad_sel = FIELD_GET(RTW89_RF_ADDR_ADSEL_MASK, addr);

	if (rf_path >= rtwdev->chip->rf_path_num) {
		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return false;
	}

	if (ad_sel)
		return rtw89_phy_write_rf(rtwdev, rf_path, addr, mask, data);
	else
		return rtw89_phy_write_rf_a(rtwdev, rf_path, addr, mask, data);
}
EXPORT_SYMBOL(rtw89_phy_write_rf_v1);

static
bool rtw89_phy_write_full_rf_v2_a(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
				  u32 addr, u32 data)
{
	static const u32 addr_is_idle[2] = {0x2C24, 0x2D24};
	static const u32 addr_ofst[2] = {0x2AE0, 0x2BE0};
	bool busy;
	u32 val;
	int ret;

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, busy, !busy,
				       1, 3800, false,
				       rtwdev, addr_is_idle[rf_path], BIT(29));
	if (ret) {
		rtw89_warn(rtwdev, "[%s] HWSI is busy\n", __func__);
		return false;
	}

	val = u32_encode_bits(addr, B_HWSI_DATA_ADDR) |
	      u32_encode_bits(data, B_HWSI_DATA_VAL);

	rtw89_phy_write32(rtwdev, addr_ofst[rf_path], val);

	return true;
}

static
bool rtw89_phy_write_rf_a_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
			     u32 addr, u32 mask, u32 data)
{
	u32 val;

	if (mask == RFREG_MASK) {
		val = data;
	} else {
		val = rtw89_phy_read_full_rf_v2_a(rtwdev, rf_path, addr);
		val &= ~mask;
		val |= (data << __ffs(mask)) & mask;
	}

	return rtw89_phy_write_full_rf_v2_a(rtwdev, rf_path, addr, val);
}

bool rtw89_phy_write_rf_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
			   u32 addr, u32 mask, u32 data)
{
	bool ad_sel = u32_get_bits(addr, RTW89_RF_ADDR_ADSEL_MASK);

	if (rf_path >= rtwdev->chip->rf_path_num) {
		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return false;
	}

	if (ad_sel)
		return rtw89_phy_write_rf(rtwdev, rf_path, addr, mask, data);
	else
		return rtw89_phy_write_rf_a_v2(rtwdev, rf_path, addr, mask, data);
}
EXPORT_SYMBOL(rtw89_phy_write_rf_v2);

static bool rtw89_chip_rf_v1(struct rtw89_dev *rtwdev)
{
	return rtwdev->chip->ops->write_rf == rtw89_phy_write_rf_v1;
}

static void __rtw89_phy_bb_reset(struct rtw89_dev *rtwdev,
				 enum rtw89_phy_idx phy_idx)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;

	chip->ops->bb_reset(rtwdev, phy_idx);
}

static void rtw89_phy_bb_reset(struct rtw89_dev *rtwdev)
{
	__rtw89_phy_bb_reset(rtwdev, RTW89_PHY_0);
	if (rtwdev->dbcc_en)
		__rtw89_phy_bb_reset(rtwdev, RTW89_PHY_1);
}

static void rtw89_phy_config_bb_reg(struct rtw89_dev *rtwdev,
				    const struct rtw89_reg2_def *reg,
				    enum rtw89_rf_path rf_path,
				    void *extra_data)
{
	u32 addr;

	if (reg->addr == 0xfe) {
		mdelay(50);
	} else if (reg->addr == 0xfd) {
		mdelay(5);
	} else if (reg->addr == 0xfc) {
		mdelay(1);
	} else if (reg->addr == 0xfb) {
		udelay(50);
	} else if (reg->addr == 0xfa) {
		udelay(5);
	} else if (reg->addr == 0xf9) {
		udelay(1);
	} else if (reg->data == BYPASS_CR_DATA) {
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "Bypass CR 0x%x\n", reg->addr);
	} else {
		addr = reg->addr;

		if ((uintptr_t)extra_data == RTW89_PHY_1)
			addr += rtw89_phy0_phy1_offset(rtwdev, reg->addr);

		rtw89_phy_write32(rtwdev, addr, reg->data);
	}
}
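/* The "address" of a BB gain table entry is not a register offset: its four
 * bytes encode the entry type (or {rxsc_start, bw} for RPL offsets), the RF
 * path, the gain band, and the config type that selects one of the parsers
 * below.
 */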
union rtw89_phy_bb_gain_arg {
	u32 addr;
	struct {
		union {
			u8 type;
			struct {
				u8 rxsc_start:4;
				u8 bw:4;
			};
		};
		u8 path;
		u8 gain_band;
		u8 cfg_type;
	};
} __packed;

static void
rtw89_phy_cfg_bb_gain_error(struct rtw89_dev *rtwdev,
			    union rtw89_phy_bb_gain_arg arg, u32 data)
{
	struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
	u8 type = arg.type;
	u8 path = arg.path;
	u8 gband = arg.gain_band;
	int i;

	switch (type) {
	case 0:
		for (i = 0; i < 4; i++, data >>= 8)
			gain->lna_gain[gband][path][i] = data & 0xff;
		break;
	case 1:
		for (i = 4; i < 7; i++, data >>= 8)
			gain->lna_gain[gband][path][i] = data & 0xff;
		break;
	case 2:
		for (i = 0; i < 2; i++, data >>= 8)
			gain->tia_gain[gband][path][i] = data & 0xff;
		break;
	default:
		rtw89_warn(rtwdev,
			   "bb gain error {0x%x:0x%x} with unknown type: %d\n",
			   arg.addr, data, type);
		break;
	}
}

enum rtw89_phy_bb_rxsc_start_idx {
	RTW89_BB_RXSC_START_IDX_FULL = 0,
	RTW89_BB_RXSC_START_IDX_20 = 1,
	RTW89_BB_RXSC_START_IDX_20_1 = 5,
	RTW89_BB_RXSC_START_IDX_40 = 9,
	RTW89_BB_RXSC_START_IDX_80 = 13,
};
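/* RPL offsets are stored per bandwidth and per RX sub-channel position.
 * Each table word packs up to four s8 offsets; rxsc_start selects which
 * group of rpl_ofst[] slots the packed bytes fill.
 */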
static void
rtw89_phy_cfg_bb_rpl_ofst(struct rtw89_dev *rtwdev,
			  union rtw89_phy_bb_gain_arg arg, u32 data)
{
	struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
	u8 rxsc_start = arg.rxsc_start;
	u8 bw = arg.bw;
	u8 path = arg.path;
	u8 gband = arg.gain_band;
	u8 rxsc;
	s8 ofst;
	int i;

	switch (bw) {
	case RTW89_CHANNEL_WIDTH_20:
		gain->rpl_ofst_20[gband][path] = (s8)data;
		break;
	case RTW89_CHANNEL_WIDTH_40:
		if (rxsc_start == RTW89_BB_RXSC_START_IDX_FULL) {
			gain->rpl_ofst_40[gband][path][0] = (s8)data;
		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20) {
			for (i = 0; i < 2; i++, data >>= 8) {
				rxsc = RTW89_BB_RXSC_START_IDX_20 + i;
				ofst = (s8)(data & 0xff);
				gain->rpl_ofst_40[gband][path][rxsc] = ofst;
			}
		}
		break;
	case RTW89_CHANNEL_WIDTH_80:
		if (rxsc_start == RTW89_BB_RXSC_START_IDX_FULL) {
			gain->rpl_ofst_80[gband][path][0] = (s8)data;
		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20) {
			for (i = 0; i < 4; i++, data >>= 8) {
				rxsc = RTW89_BB_RXSC_START_IDX_20 + i;
				ofst = (s8)(data & 0xff);
				gain->rpl_ofst_80[gband][path][rxsc] = ofst;
			}
		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_40) {
			for (i = 0; i < 2; i++, data >>= 8) {
				rxsc = RTW89_BB_RXSC_START_IDX_40 + i;
				ofst = (s8)(data & 0xff);
				gain->rpl_ofst_80[gband][path][rxsc] = ofst;
			}
		}
		break;
	case RTW89_CHANNEL_WIDTH_160:
		if (rxsc_start == RTW89_BB_RXSC_START_IDX_FULL) {
			gain->rpl_ofst_160[gband][path][0] = (s8)data;
		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20) {
			for (i = 0; i < 4; i++, data >>= 8) {
				rxsc = RTW89_BB_RXSC_START_IDX_20 + i;
				ofst = (s8)(data & 0xff);
				gain->rpl_ofst_160[gband][path][rxsc] = ofst;
			}
		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20_1) {
			for (i = 0; i < 4; i++, data >>= 8) {
				rxsc = RTW89_BB_RXSC_START_IDX_20_1 + i;
				ofst = (s8)(data & 0xff);
				gain->rpl_ofst_160[gband][path][rxsc] = ofst;
			}
		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_40) {
			for (i = 0; i < 4; i++, data >>= 8) {
				rxsc = RTW89_BB_RXSC_START_IDX_40 + i;
				ofst = (s8)(data & 0xff);
				gain->rpl_ofst_160[gband][path][rxsc] = ofst;
			}
		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_80) {
			for (i = 0; i < 2; i++, data >>= 8) {
				rxsc = RTW89_BB_RXSC_START_IDX_80 + i;
				ofst = (s8)(data & 0xff);
				gain->rpl_ofst_160[gband][path][rxsc] = ofst;
			}
		}
		break;
	default:
		rtw89_warn(rtwdev,
			   "bb rpl ofst {0x%x:0x%x} with unknown bw: %d\n",
			   arg.addr, data, bw);
		break;
	}
}

static void
rtw89_phy_cfg_bb_gain_bypass(struct rtw89_dev *rtwdev,
			     union rtw89_phy_bb_gain_arg arg, u32 data)
{
	struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
	u8 type = arg.type;
	u8 path = arg.path;
	u8 gband = arg.gain_band;
	int i;

	switch (type) {
	case 0:
		for (i = 0; i < 4; i++, data >>= 8)
			gain->lna_gain_bypass[gband][path][i] = data & 0xff;
		break;
	case 1:
		for (i = 4; i < 7; i++, data >>= 8)
			gain->lna_gain_bypass[gband][path][i] = data & 0xff;
		break;
	default:
		rtw89_warn(rtwdev,
			   "bb gain bypass {0x%x:0x%x} with unknown type: %d\n",
			   arg.addr, data, type);
		break;
	}
}

static void
rtw89_phy_cfg_bb_gain_op1db(struct rtw89_dev *rtwdev,
			    union rtw89_phy_bb_gain_arg arg, u32 data)
{
	struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
	u8 type = arg.type;
	u8 path = arg.path;
	u8 gband = arg.gain_band;
	int i;

	switch (type) {
	case 0:
		for (i = 0; i < 4; i++, data >>= 8)
			gain->lna_op1db[gband][path][i] = data & 0xff;
		break;
	case 1:
		for (i = 4; i < 7; i++, data >>= 8)
			gain->lna_op1db[gband][path][i] = data & 0xff;
		break;
	case 2:
		for (i = 0; i < 4; i++, data >>= 8)
			gain->tia_lna_op1db[gband][path][i] = data & 0xff;
		break;
	case 3:
		for (i = 4; i < 8; i++, data >>= 8)
			gain->tia_lna_op1db[gband][path][i] = data & 0xff;
		break;
	default:
		rtw89_warn(rtwdev,
			   "bb gain op1db {0x%x:0x%x} with unknown type: %d\n",
			   arg.addr, data, type);
		break;
	}
}

static void rtw89_phy_config_bb_gain_ax(struct rtw89_dev *rtwdev,
					const struct rtw89_reg2_def *reg,
					enum rtw89_rf_path rf_path,
					void *extra_data)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	union rtw89_phy_bb_gain_arg arg = { .addr = reg->addr };
	struct rtw89_efuse *efuse = &rtwdev->efuse;

	if (arg.gain_band >= RTW89_BB_GAIN_BAND_NR)
		return;

	if (arg.path >= chip->rf_path_num)
		return;

	if (arg.addr >= 0xf9 && arg.addr <= 0xfe) {
		rtw89_warn(rtwdev, "bb gain table with flow ctrl\n");
		return;
	}

	switch (arg.cfg_type) {
	case 0:
		rtw89_phy_cfg_bb_gain_error(rtwdev, arg, reg->data);
		break;
	case 1:
		rtw89_phy_cfg_bb_rpl_ofst(rtwdev, arg, reg->data);
		break;
	case 2:
		rtw89_phy_cfg_bb_gain_bypass(rtwdev, arg, reg->data);
		break;
	case 3:
		rtw89_phy_cfg_bb_gain_op1db(rtwdev, arg, reg->data);
		break;
	case 4:
		/* This cfg_type is only used by rfe_type >= 50 with eFEM */
		if (efuse->rfe_type < 50)
			break;
		fallthrough;
	default:
		rtw89_warn(rtwdev,
			   "bb gain {0x%x:0x%x} with unknown cfg type: %d\n",
			   arg.addr, reg->data, arg.cfg_type);
		break;
	}
}
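/* RF register writes destined for the firmware are batched into H2C pages:
 * each 32-bit entry packs the RF address in the top bits (addr << 20) and
 * the 20-bit value below it, RTW89_H2C_RF_PAGE_SIZE entries per page.
 */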
static void
rtw89_phy_config_rf_reg_store(struct rtw89_dev *rtwdev,
			      const struct rtw89_reg2_def *reg,
			      enum rtw89_rf_path rf_path,
			      struct rtw89_fw_h2c_rf_reg_info *info)
{
	u16 idx = info->curr_idx % RTW89_H2C_RF_PAGE_SIZE;
	u8 page = info->curr_idx / RTW89_H2C_RF_PAGE_SIZE;

	if (page >= RTW89_H2C_RF_PAGE_NUM) {
		rtw89_warn(rtwdev, "RF parameters exceed size. path=%d, idx=%d\n",
			   rf_path, info->curr_idx);
		return;
	}

	info->rtw89_phy_config_rf_h2c[page][idx] =
		cpu_to_le32((reg->addr << 20) | reg->data);
	info->curr_idx++;
}

static int rtw89_phy_config_rf_reg_fw(struct rtw89_dev *rtwdev,
				      struct rtw89_fw_h2c_rf_reg_info *info)
{
	u16 remain = info->curr_idx;
	u16 len = 0;
	u8 i;
	int ret = 0;

	if (remain > RTW89_H2C_RF_PAGE_NUM * RTW89_H2C_RF_PAGE_SIZE) {
		rtw89_warn(rtwdev,
			   "rf reg h2c total len %d larger than %d\n",
			   remain, RTW89_H2C_RF_PAGE_NUM * RTW89_H2C_RF_PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	for (i = 0; i < RTW89_H2C_RF_PAGE_NUM && remain; i++, remain -= len) {
		len = remain > RTW89_H2C_RF_PAGE_SIZE ? RTW89_H2C_RF_PAGE_SIZE : remain;
		ret = rtw89_fw_h2c_rf_reg(rtwdev, info, len * 4, i);
		if (ret)
			goto out;
	}
out:
	info->curr_idx = 0;

	return ret;
}

static void rtw89_phy_config_rf_reg_noio(struct rtw89_dev *rtwdev,
					 const struct rtw89_reg2_def *reg,
					 enum rtw89_rf_path rf_path,
					 void *extra_data)
{
	u32 addr = reg->addr;

	if (addr == 0xfe || addr == 0xfd || addr == 0xfc || addr == 0xfb ||
	    addr == 0xfa || addr == 0xf9)
		return;

	if (rtw89_chip_rf_v1(rtwdev) && addr < 0x100)
		return;

	rtw89_phy_config_rf_reg_store(rtwdev, reg, rf_path,
				      (struct rtw89_fw_h2c_rf_reg_info *)extra_data);
}

static void rtw89_phy_config_rf_reg(struct rtw89_dev *rtwdev,
				    const struct rtw89_reg2_def *reg,
				    enum rtw89_rf_path rf_path,
				    void *extra_data)
{
	if (reg->addr == 0xfe) {
		mdelay(50);
	} else if (reg->addr == 0xfd) {
		mdelay(5);
	} else if (reg->addr == 0xfc) {
		mdelay(1);
	} else if (reg->addr == 0xfb) {
		udelay(50);
	} else if (reg->addr == 0xfa) {
		udelay(5);
	} else if (reg->addr == 0xf9) {
		udelay(1);
	} else {
		rtw89_write_rf(rtwdev, rf_path, reg->addr, 0xfffff, reg->data);
		rtw89_phy_config_rf_reg_store(rtwdev, reg, rf_path,
					      (struct rtw89_fw_h2c_rf_reg_info *)extra_data);
	}
}

void rtw89_phy_config_rf_reg_v1(struct rtw89_dev *rtwdev,
				const struct rtw89_reg2_def *reg,
				enum rtw89_rf_path rf_path,
				void *extra_data)
{
	rtw89_write_rf(rtwdev, rf_path, reg->addr, RFREG_MASK, reg->data);

	if (reg->addr < 0x100)
		return;

	rtw89_phy_config_rf_reg_store(rtwdev, reg, rf_path,
				      (struct rtw89_fw_h2c_rf_reg_info *)extra_data);
}
EXPORT_SYMBOL(rtw89_phy_config_rf_reg_v1);
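/* PHY tables may start with "headline" entries that tag parameter blocks
 * with an (RFE type, chip CV) condition. Pick the block to load in order of
 * preference: exact RFE+CV match, RFE match with don't-care CV, RFE match
 * with the highest CV in the table, then don't-care RFE with the highest CV.
 */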
static int rtw89_phy_sel_headline(struct rtw89_dev *rtwdev,
				  const struct rtw89_phy_table *table,
				  u32 *headline_size, u32 *headline_idx,
				  u8 rfe, u8 cv)
{
	const struct rtw89_reg2_def *reg;
	u32 headline;
	u32 compare, target;
	u8 rfe_para, cv_para;
	u8 cv_max = 0;
	bool case_matched = false;
	u32 i;

	for (i = 0; i < table->n_regs; i++) {
		reg = &table->regs[i];
		headline = get_phy_headline(reg->addr);
		if (headline != PHY_HEADLINE_VALID)
			break;
	}
	*headline_size = i;
	if (*headline_size == 0)
		return 0;

	/* case 1: RFE match, CV match */
	compare = get_phy_compare(rfe, cv);
	for (i = 0; i < *headline_size; i++) {
		reg = &table->regs[i];
		target = get_phy_target(reg->addr);
		if (target == compare) {
			*headline_idx = i;
			return 0;
		}
	}

	/* case 2: RFE match, CV don't care */
	compare = get_phy_compare(rfe, PHY_COND_DONT_CARE);
	for (i = 0; i < *headline_size; i++) {
		reg = &table->regs[i];
		target = get_phy_target(reg->addr);
		if (target == compare) {
			*headline_idx = i;
			return 0;
		}
	}

	/* case 3: RFE match, CV max in table */
	for (i = 0; i < *headline_size; i++) {
		reg = &table->regs[i];
		rfe_para = get_phy_cond_rfe(reg->addr);
		cv_para = get_phy_cond_cv(reg->addr);
		if (rfe_para == rfe) {
			if (cv_para >= cv_max) {
				cv_max = cv_para;
				*headline_idx = i;
				case_matched = true;
			}
		}
	}

	if (case_matched)
		return 0;

	/* case 4: RFE don't care, CV max in table */
	for (i = 0; i < *headline_size; i++) {
		reg = &table->regs[i];
		rfe_para = get_phy_cond_rfe(reg->addr);
		cv_para = get_phy_cond_cv(reg->addr);
		if (rfe_para == PHY_COND_DONT_CARE) {
			if (cv_para >= cv_max) {
				cv_max = cv_para;
				*headline_idx = i;
				case_matched = true;
			}
		}
	}

	if (case_matched)
		return 0;

	return -EINVAL;
}

static void rtw89_phy_init_reg(struct rtw89_dev *rtwdev,
			       const struct rtw89_phy_table *table,
			       void (*config)(struct rtw89_dev *rtwdev,
					      const struct rtw89_reg2_def *reg,
					      enum rtw89_rf_path rf_path,
					      void *data),
			       void *extra_data)
{
	const struct rtw89_reg2_def *reg;
	enum rtw89_rf_path rf_path = table->rf_path;
	u8 rfe = rtwdev->efuse.rfe_type;
	u8 cv = rtwdev->hal.cv;
	u32 i;
	u32 headline_size = 0, headline_idx = 0;
	u32 target = 0, cfg_target;
	u8 cond;
	bool is_matched = true;
	bool target_found = false;
	int ret;

	ret = rtw89_phy_sel_headline(rtwdev, table, &headline_size,
				     &headline_idx, rfe, cv);
	if (ret) {
		rtw89_err(rtwdev, "invalid PHY package: %d/%d\n", rfe, cv);
		return;
	}

	cfg_target = get_phy_target(table->regs[headline_idx].addr);
	for (i = headline_size; i < table->n_regs; i++) {
		reg = &table->regs[i];
		cond = get_phy_cond(reg->addr);
		switch (cond) {
		case PHY_COND_BRANCH_IF:
		case PHY_COND_BRANCH_ELIF:
			target = get_phy_target(reg->addr);
			break;
		case PHY_COND_BRANCH_ELSE:
			is_matched = false;
			if (!target_found) {
				rtw89_warn(rtwdev, "failed to load CR %x/%x\n",
					   reg->addr, reg->data);
				return;
			}
			break;
		case PHY_COND_BRANCH_END:
			is_matched = true;
			target_found = false;
			break;
		case PHY_COND_CHECK:
			if (target_found) {
				is_matched = false;
				break;
			}

			if (target == cfg_target) {
				is_matched = true;
				target_found = true;
			} else {
				is_matched = false;
				target_found = false;
			}
			break;
		default:
			if (is_matched)
				config(rtwdev, reg, rf_path, extra_data);
			break;
		}
	}
}
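/* BB tables can come either from the firmware file's element section or
 * from the static tables compiled into the chip info; with DBCC enabled,
 * the same table is replayed onto PHY1 using the PHY0->PHY1 offset.
 */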
void rtw89_phy_init_bb_reg(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_phy_table *bb_table;
	const struct rtw89_phy_table *bb_gain_table;

	bb_table = elm_info->bb_tbl ? elm_info->bb_tbl : chip->bb_table;
	rtw89_phy_init_reg(rtwdev, bb_table, rtw89_phy_config_bb_reg, NULL);
	if (rtwdev->dbcc_en)
		rtw89_phy_init_reg(rtwdev, bb_table, rtw89_phy_config_bb_reg,
				   (void *)RTW89_PHY_1);

	rtw89_chip_init_txpwr_unit(rtwdev);

	bb_gain_table = elm_info->bb_gain ? elm_info->bb_gain : chip->bb_gain_table;
	if (bb_gain_table)
		rtw89_phy_init_reg(rtwdev, bb_gain_table,
				   chip->phy_def->config_bb_gain, NULL);

	rtw89_phy_bb_reset(rtwdev);
}

static u32 rtw89_phy_nctl_poll(struct rtw89_dev *rtwdev)
{
	rtw89_phy_write32(rtwdev, 0x8080, 0x4);
	udelay(1);
	return rtw89_phy_read32(rtwdev, 0x8080);
}

void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev, bool noio)
{
	void (*config)(struct rtw89_dev *rtwdev, const struct rtw89_reg2_def *reg,
		       enum rtw89_rf_path rf_path, void *data);
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_phy_table *rf_table;
	struct rtw89_fw_h2c_rf_reg_info *rf_reg_info;
	u8 path;

	rf_reg_info = kzalloc(sizeof(*rf_reg_info), GFP_KERNEL);
	if (!rf_reg_info)
		return;

	for (path = RF_PATH_A; path < chip->rf_path_num; path++) {
		rf_table = elm_info->rf_radio[path] ?
			   elm_info->rf_radio[path] : chip->rf_table[path];
		rf_reg_info->rf_path = rf_table->rf_path;
		if (noio)
			config = rtw89_phy_config_rf_reg_noio;
		else
			config = rf_table->config ? rf_table->config :
				 rtw89_phy_config_rf_reg;
		rtw89_phy_init_reg(rtwdev, rf_table, config, (void *)rf_reg_info);
		if (rtw89_phy_config_rf_reg_fw(rtwdev, rf_reg_info))
			rtw89_warn(rtwdev, "rf path %d reg h2c config failed\n",
				   rf_reg_info->rf_path);
	}
	kfree(rf_reg_info);
}

static void rtw89_phy_preinit_rf_nctl_ax(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 val;
	int ret;

	/* IQK/DPK clock & reset */
	rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, 0x3);
	rtw89_phy_write32_set(rtwdev, R_GNT_BT_WGT_EN, 0x1);
	rtw89_phy_write32_set(rtwdev, R_P0_PATH_RST, 0x8000000);
	if (chip->chip_id != RTL8851B)
		rtw89_phy_write32_set(rtwdev, R_P1_PATH_RST, 0x8000000);
	if (chip->chip_id == RTL8852B || chip->chip_id == RTL8852BT)
		rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, 0x2);

	/* check 0x8080 */
	rtw89_phy_write32(rtwdev, R_NCTL_CFG, 0x8);

	ret = read_poll_timeout(rtw89_phy_nctl_poll, val, val == 0x4, 10,
				1000, false, rtwdev);
	if (ret)
#if defined(__linux__)
		rtw89_err(rtwdev, "failed to poll nctl block\n");
#elif defined(__FreeBSD__)
		rtw89_err(rtwdev, "failed to poll nctl block: ret %d val %#06x\n", ret, val);
#endif
}
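/* NCTL table programming: the pre-init above enables the IQK/DPK clocks,
 * releases the per-path resets, and waits for the NCTL block to report
 * ready (0x8080 reads back 0x4) before the table is replayed through the
 * BB register writer.
 */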
static void rtw89_phy_init_rf_nctl(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_phy_table *nctl_table;

	rtw89_phy_preinit_rf_nctl(rtwdev);

	nctl_table = elm_info->rf_nctl ? elm_info->rf_nctl : chip->nctl_table;
	rtw89_phy_init_reg(rtwdev, nctl_table, rtw89_phy_config_bb_reg, NULL);

	if (chip->nctl_post_table)
		rtw89_rfk_parser(rtwdev, chip->nctl_post_table);
}

static u32 rtw89_phy0_phy1_offset_ax(struct rtw89_dev *rtwdev, u32 addr)
{
	u32 phy_page = addr >> 8;
	u32 ofst = 0;

	switch (phy_page) {
	case 0x6:
	case 0x7:
	case 0x8:
	case 0x9:
	case 0xa:
	case 0xb:
	case 0xc:
	case 0xd:
	case 0x19:
	case 0x1a:
	case 0x1b:
		ofst = 0x2000;
		break;
	default:
		/* warning case */
		ofst = 0;
		break;
	}

	if (phy_page >= 0x40 && phy_page <= 0x4f)
		ofst = 0x2000;

	return ofst;
}

void rtw89_phy_write32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
			   u32 data, enum rtw89_phy_idx phy_idx)
{
	if (rtwdev->dbcc_en && phy_idx == RTW89_PHY_1)
		addr += rtw89_phy0_phy1_offset(rtwdev, addr);
	rtw89_phy_write32_mask(rtwdev, addr, mask, data);
}
EXPORT_SYMBOL(rtw89_phy_write32_idx);

void rtw89_phy_write32_idx_set(struct rtw89_dev *rtwdev, u32 addr, u32 bits,
			       enum rtw89_phy_idx phy_idx)
{
	if (rtwdev->dbcc_en && phy_idx == RTW89_PHY_1)
		addr += rtw89_phy0_phy1_offset(rtwdev, addr);
	rtw89_phy_write32_set(rtwdev, addr, bits);
}
EXPORT_SYMBOL(rtw89_phy_write32_idx_set);

void rtw89_phy_write32_idx_clr(struct rtw89_dev *rtwdev, u32 addr, u32 bits,
			       enum rtw89_phy_idx phy_idx)
{
	if (rtwdev->dbcc_en && phy_idx == RTW89_PHY_1)
		addr += rtw89_phy0_phy1_offset(rtwdev, addr);
	rtw89_phy_write32_clr(rtwdev, addr, bits);
}
EXPORT_SYMBOL(rtw89_phy_write32_idx_clr);

u32 rtw89_phy_read32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
			 enum rtw89_phy_idx phy_idx)
{
	if (rtwdev->dbcc_en && phy_idx == RTW89_PHY_1)
		addr += rtw89_phy0_phy1_offset(rtwdev, addr);
	return rtw89_phy_read32_mask(rtwdev, addr, mask);
}
EXPORT_SYMBOL(rtw89_phy_read32_idx);

void rtw89_phy_set_phy_regs(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
			    u32 val)
{
	rtw89_phy_write32_idx(rtwdev, addr, mask, val, RTW89_PHY_0);

	if (!rtwdev->dbcc_en)
		return;

	rtw89_phy_write32_idx(rtwdev, addr, mask, val, RTW89_PHY_1);
}
EXPORT_SYMBOL(rtw89_phy_set_phy_regs);

void rtw89_phy_write_reg3_tbl(struct rtw89_dev *rtwdev,
			      const struct rtw89_phy_reg3_tbl *tbl)
{
	const struct rtw89_reg3_def *reg3;
	int i;

	for (i = 0; i < tbl->size; i++) {
		reg3 = &tbl->reg3[i];
		rtw89_phy_write32_mask(rtwdev, reg3->addr, reg3->mask, reg3->data);
	}
}
EXPORT_SYMBOL(rtw89_phy_write_reg3_tbl);

static u8 rtw89_phy_ant_gain_domain_to_regd(struct rtw89_dev *rtwdev, u8 ant_gain_regd)
{
	switch (ant_gain_regd) {
	case RTW89_ANT_GAIN_ETSI:
		return RTW89_ETSI;
	default:
		rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
			    "unknown antenna gain domain: %d\n",
			    ant_gain_regd);
		return RTW89_REGD_NUM;
	}
}

/* antenna gain in unit of 0.25 dbm */
#define RTW89_ANT_GAIN_2GHZ_MIN -8
#define RTW89_ANT_GAIN_2GHZ_MAX 14
#define RTW89_ANT_GAIN_5GHZ_MIN -8
#define RTW89_ANT_GAIN_5GHZ_MAX 20
#define RTW89_ANT_GAIN_6GHZ_MIN -8
#define RTW89_ANT_GAIN_6GHZ_MAX 20

#define RTW89_ANT_GAIN_REF_2GHZ 14
#define RTW89_ANT_GAIN_REF_5GHZ 20
#define RTW89_ANT_GAIN_REF_6GHZ 20
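/* Antenna gain offsets come from the ACPI RTAG method: check the table
 * revision, decode the bitmap of regulatory domains it applies to, then
 * convert each raw per-chain gain into an offset relative to the band
 * reference, offset = REF - clamp(gain, MIN, MAX), in 0.25 dBm units.
 */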
void rtw89_phy_ant_gain_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_acpi_rtag_result res = {};
	u32 domain;
	int ret;
	u8 i, j;
	u8 regd;
	u8 val;

	if (!chip->support_ant_gain)
		return;

	ret = rtw89_acpi_evaluate_rtag(rtwdev, &res);
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
			    "acpi: cannot eval rtag: %d\n", ret);
		return;
	}

	if (res.revision != 0) {
		rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
			    "unknown rtag revision: %d\n", res.revision);
		return;
	}

	domain = get_unaligned_le32(&res.domain);

	for (i = 0; i < RTW89_ANT_GAIN_DOMAIN_NUM; i++) {
		if (!(domain & BIT(i)))
			continue;

		regd = rtw89_phy_ant_gain_domain_to_regd(rtwdev, i);
		if (regd >= RTW89_REGD_NUM)
			continue;
		ant_gain->regd_enabled |= BIT(regd);
	}

	for (i = 0; i < RTW89_ANT_GAIN_CHAIN_NUM; i++) {
		for (j = 0; j < RTW89_ANT_GAIN_SUBBAND_NR; j++) {
			val = res.ant_gain_table[i][j];
			switch (j) {
			default:
			case RTW89_ANT_GAIN_2GHZ_SUBBAND:
				val = RTW89_ANT_GAIN_REF_2GHZ -
				      clamp_t(s8, val,
					      RTW89_ANT_GAIN_2GHZ_MIN,
					      RTW89_ANT_GAIN_2GHZ_MAX);
				break;
			case RTW89_ANT_GAIN_5GHZ_SUBBAND_1:
			case RTW89_ANT_GAIN_5GHZ_SUBBAND_2:
			case RTW89_ANT_GAIN_5GHZ_SUBBAND_2E:
			case RTW89_ANT_GAIN_5GHZ_SUBBAND_3_4:
				val = RTW89_ANT_GAIN_REF_5GHZ -
				      clamp_t(s8, val,
					      RTW89_ANT_GAIN_5GHZ_MIN,
					      RTW89_ANT_GAIN_5GHZ_MAX);
				break;
			case RTW89_ANT_GAIN_6GHZ_SUBBAND_5_L:
			case RTW89_ANT_GAIN_6GHZ_SUBBAND_5_H:
			case RTW89_ANT_GAIN_6GHZ_SUBBAND_6:
			case RTW89_ANT_GAIN_6GHZ_SUBBAND_7_L:
			case RTW89_ANT_GAIN_6GHZ_SUBBAND_7_H:
			case RTW89_ANT_GAIN_6GHZ_SUBBAND_8:
				val = RTW89_ANT_GAIN_REF_6GHZ -
				      clamp_t(s8, val,
					      RTW89_ANT_GAIN_6GHZ_MIN,
					      RTW89_ANT_GAIN_6GHZ_MAX);
			}
			ant_gain->offset[i][j] = val;
		}
	}
}
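/* When a channel's width straddles two antenna-gain sub-bands (via the
 * rtw89_6ghz_span table), rtw89_phy_ant_gain_query() below uses the smaller
 * of the two sub-band offsets.
 */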
static
enum rtw89_ant_gain_subband rtw89_phy_ant_gain_get_subband(struct rtw89_dev *rtwdev,
							    u32 center_freq)
{
	switch (center_freq) {
	default:
		rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
			    "center freq: %u to antenna gain subband is unhandled\n",
			    center_freq);
		fallthrough;
	case 2412 ... 2484:
		return RTW89_ANT_GAIN_2GHZ_SUBBAND;
	case 5180 ... 5240:
		return RTW89_ANT_GAIN_5GHZ_SUBBAND_1;
	case 5250 ... 5320:
		return RTW89_ANT_GAIN_5GHZ_SUBBAND_2;
	case 5500 ... 5720:
		return RTW89_ANT_GAIN_5GHZ_SUBBAND_2E;
	case 5745 ... 5885:
		return RTW89_ANT_GAIN_5GHZ_SUBBAND_3_4;
	case 5955 ... 6155:
		return RTW89_ANT_GAIN_6GHZ_SUBBAND_5_L;
	case 6175 ... 6415:
		return RTW89_ANT_GAIN_6GHZ_SUBBAND_5_H;
	case 6435 ... 6515:
		return RTW89_ANT_GAIN_6GHZ_SUBBAND_6;
	case 6535 ... 6695:
		return RTW89_ANT_GAIN_6GHZ_SUBBAND_7_L;
	case 6715 ... 6855:
		return RTW89_ANT_GAIN_6GHZ_SUBBAND_7_H;

	/* freq 6875 (ch 185, 20MHz) spans RTW89_ANT_GAIN_6GHZ_SUBBAND_7_H
	 * and RTW89_ANT_GAIN_6GHZ_SUBBAND_8, so directly describe it with
	 * struct rtw89_6ghz_span.
	 */

	case 6895 ... 7115:
		return RTW89_ANT_GAIN_6GHZ_SUBBAND_8;
	}
}

static s8 rtw89_phy_ant_gain_query(struct rtw89_dev *rtwdev,
				   enum rtw89_rf_path path, u32 center_freq)
{
	struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
	enum rtw89_ant_gain_subband subband_l, subband_h;
	const struct rtw89_6ghz_span *span;

	span = rtw89_get_6ghz_span(rtwdev, center_freq);

	if (span && RTW89_ANT_GAIN_SPAN_VALID(span)) {
		subband_l = span->ant_gain_subband_low;
		subband_h = span->ant_gain_subband_high;
	} else {
		subband_l = rtw89_phy_ant_gain_get_subband(rtwdev, center_freq);
		subband_h = subband_l;
	}

	rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
		    "center_freq %u: antenna gain subband {%u, %u}\n",
		    center_freq, subband_l, subband_h);

	return min(ant_gain->offset[path][subband_l],
		   ant_gain->offset[path][subband_h]);
}

static s8 rtw89_phy_ant_gain_offset(struct rtw89_dev *rtwdev, u8 band, u32 center_freq)
{
	struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u8 regd = rtw89_regd_get(rtwdev, band);
	s8 offset_patha, offset_pathb;

	if (!chip->support_ant_gain)
		return 0;

	if (!(ant_gain->regd_enabled & BIT(regd)))
		return 0;

	offset_patha = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_A, center_freq);
	offset_pathb = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_B, center_freq);

	return max(offset_patha, offset_pathb);
}

s16 rtw89_phy_ant_gain_pwr_offset(struct rtw89_dev *rtwdev,
				  const struct rtw89_chan *chan)
{
	struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
	u8 regd = rtw89_regd_get(rtwdev, chan->band_type);
	s8 offset_patha, offset_pathb;

	if (!(ant_gain->regd_enabled & BIT(regd)))
		return 0;

	offset_patha = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_A, chan->freq);
	offset_pathb = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_B, chan->freq);

	return rtw89_phy_txpwr_rf_to_bb(rtwdev, offset_patha - offset_pathb);
}
EXPORT_SYMBOL(rtw89_phy_ant_gain_pwr_offset);

void rtw89_print_ant_gain(struct seq_file *m, struct rtw89_dev *rtwdev,
			  const struct rtw89_chan *chan)
{
	struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u8 regd = rtw89_regd_get(rtwdev, chan->band_type);
	s8 offset_patha, offset_pathb;

	if (!chip->support_ant_gain || !(ant_gain->regd_enabled & BIT(regd))) {
		seq_puts(m, "no DAG is applied\n");
		return;
	}

	offset_patha = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_A, chan->freq);
	offset_pathb = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_B, chan->freq);

	seq_printf(m, "ChainA offset: %d dBm\n", offset_patha);
	seq_printf(m, "ChainB offset: %d dBm\n", offset_pathb);
}
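/* Note how the two consumers above fold the per-path values:
 * rtw89_phy_ant_gain_offset() reduces both paths to one scalar with max()
 * for the regulatory limit calculation, while
 * rtw89_phy_ant_gain_pwr_offset() hands the baseband the A-minus-B
 * difference via rtw89_phy_txpwr_rf_to_bb().
 */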
static const u8 rtw89_rs_idx_num_ax[] = {
	[RTW89_RS_CCK] = RTW89_RATE_CCK_NUM,
	[RTW89_RS_OFDM] = RTW89_RATE_OFDM_NUM,
	[RTW89_RS_MCS] = RTW89_RATE_MCS_NUM_AX,
	[RTW89_RS_HEDCM] = RTW89_RATE_HEDCM_NUM,
	[RTW89_RS_OFFSET] = RTW89_RATE_OFFSET_NUM_AX,
};

static const u8 rtw89_rs_nss_num_ax[] = {
	[RTW89_RS_CCK] = 1,
	[RTW89_RS_OFDM] = 1,
	[RTW89_RS_MCS] = RTW89_NSS_NUM,
	[RTW89_RS_HEDCM] = RTW89_NSS_HEDCM_NUM,
	[RTW89_RS_OFFSET] = 1,
};

s8 *rtw89_phy_raw_byr_seek(struct rtw89_dev *rtwdev,
			   struct rtw89_txpwr_byrate *head,
			   const struct rtw89_rate_desc *desc)
{
	switch (desc->rs) {
	case RTW89_RS_CCK:
		return &head->cck[desc->idx];
	case RTW89_RS_OFDM:
		return &head->ofdm[desc->idx];
	case RTW89_RS_MCS:
		return &head->mcs[desc->ofdma][desc->nss][desc->idx];
	case RTW89_RS_HEDCM:
		return &head->hedcm[desc->ofdma][desc->nss][desc->idx];
	case RTW89_RS_OFFSET:
		return &head->offset[desc->idx];
	default:
		rtw89_warn(rtwdev, "unrecognized byr rs: %d\n", desc->rs);
		return &head->trap;
	}
}

void rtw89_phy_load_txpwr_byrate(struct rtw89_dev *rtwdev,
				 const struct rtw89_txpwr_table *tbl)
{
	const struct rtw89_txpwr_byrate_cfg *cfg = tbl->data;
	const struct rtw89_txpwr_byrate_cfg *end = cfg + tbl->size;
	struct rtw89_txpwr_byrate *byr_head;
	struct rtw89_rate_desc desc = {};
	s8 *byr;
	u32 data;
	u8 i;

	for (; cfg < end; cfg++) {
		byr_head = &rtwdev->byr[cfg->band][0];
		desc.rs = cfg->rs;
		desc.nss = cfg->nss;
		data = cfg->data;

		for (i = 0; i < cfg->len; i++, data >>= 8) {
			desc.idx = cfg->shf + i;
			byr = rtw89_phy_raw_byr_seek(rtwdev, byr_head, &desc);
			*byr = data & 0xff;
		}
	}
}
EXPORT_SYMBOL(rtw89_phy_load_txpwr_byrate);

static s8 rtw89_phy_txpwr_dbm_without_tolerance(s8 dbm)
{
	const u8 tssi_deviation_point = 0;
	const u8 tssi_max_deviation = 2;

	if (dbm <= tssi_deviation_point)
		dbm -= tssi_max_deviation;

	return dbm;
}

static s8 rtw89_phy_get_tpe_constraint(struct rtw89_dev *rtwdev, u8 band)
{
	struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
	const struct rtw89_reg_6ghz_tpe *tpe = &regulatory->reg_6ghz_tpe;
	s8 cstr = S8_MAX;

	if (band == RTW89_BAND_6G && tpe->valid)
		cstr = rtw89_phy_txpwr_dbm_without_tolerance(tpe->constraint);

	return rtw89_phy_txpwr_dbm_to_mac(rtwdev, cstr);
}

s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev, u8 band, u8 bw,
			       const struct rtw89_rate_desc *rate_desc)
{
	struct rtw89_txpwr_byrate *byr_head;
	s8 *byr;

	if (rate_desc->rs == RTW89_RS_CCK)
		band = RTW89_BAND_2G;

	byr_head = &rtwdev->byr[band][bw];
	byr = rtw89_phy_raw_byr_seek(rtwdev, byr_head, rate_desc);

	return rtw89_phy_txpwr_rf_to_mac(rtwdev, *byr);
}

static u8 rtw89_channel_6g_to_idx(struct rtw89_dev *rtwdev, u8 channel_6g)
{
	switch (channel_6g) {
	case 1 ... 29:
		return (channel_6g - 1) / 2;
	case 33 ... 61:
		return (channel_6g - 3) / 2;
	case 65 ... 93:
		return (channel_6g - 5) / 2;
	case 97 ... 125:
		return (channel_6g - 7) / 2;
	case 129 ... 157:
		return (channel_6g - 9) / 2;
	case 161 ... 189:
		return (channel_6g - 11) / 2;
	case 193 ... 221:
		return (channel_6g - 13) / 2;
	case 225 ... 253:
		return (channel_6g - 15) / 2;
	default:
		rtw89_warn(rtwdev, "unknown 6g channel: %d\n", channel_6g);
		return 0;
	}
}
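/* e.g. 6 GHz channel 37 hits "case 33 ... 61", giving index
 * (37 - 3) / 2 = 17; the per-range bias keeps the indices contiguous
 * across the gaps in the channel numbering (30..32, 62..64, ...).
 */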
static u8 rtw89_channel_to_idx(struct rtw89_dev *rtwdev, u8 band, u8 channel)
{
	if (band == RTW89_BAND_6G)
		return rtw89_channel_6g_to_idx(rtwdev, channel);

	switch (channel) {
	case 1 ... 14:
		return channel - 1;
	case 36 ... 64:
		return (channel - 36) / 2;
	case 100 ... 144:
		return ((channel - 100) / 2) + 15;
	case 149 ... 177:
		return ((channel - 149) / 2) + 38;
	default:
		rtw89_warn(rtwdev, "unknown channel: %d\n", channel);
		return 0;
	}
}

s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
			      u8 bw, u8 ntx, u8 rs, u8 bf, u8 ch)
{
	const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms;
	const struct rtw89_txpwr_rule_2ghz *rule_2ghz = &rfe_parms->rule_2ghz;
	const struct rtw89_txpwr_rule_5ghz *rule_5ghz = &rfe_parms->rule_5ghz;
	const struct rtw89_txpwr_rule_6ghz *rule_6ghz = &rfe_parms->rule_6ghz;
	struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
	enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
	u32 freq = ieee80211_channel_to_frequency(ch, nl_band);
	u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch);
	u8 regd = rtw89_regd_get(rtwdev, band);
	u8 reg6 = regulatory->reg_6ghz_power;
	s8 lmt = 0, sar, offset;
	s8 cstr;

	switch (band) {
	case RTW89_BAND_2G:
		lmt = (*rule_2ghz->lmt)[bw][ntx][rs][bf][regd][ch_idx];
		if (lmt)
			break;

		lmt = (*rule_2ghz->lmt)[bw][ntx][rs][bf][RTW89_WW][ch_idx];
		break;
	case RTW89_BAND_5G:
		lmt = (*rule_5ghz->lmt)[bw][ntx][rs][bf][regd][ch_idx];
		if (lmt)
			break;

		lmt = (*rule_5ghz->lmt)[bw][ntx][rs][bf][RTW89_WW][ch_idx];
		break;
	case RTW89_BAND_6G:
		lmt = (*rule_6ghz->lmt)[bw][ntx][rs][bf][regd][reg6][ch_idx];
		if (lmt)
			break;

		lmt = (*rule_6ghz->lmt)[bw][ntx][rs][bf][RTW89_WW]
				       [RTW89_REG_6GHZ_POWER_DFLT]
				       [ch_idx];
		break;
	default:
		rtw89_warn(rtwdev, "unknown band type: %d\n", band);
		return 0;
	}

	offset = rtw89_phy_ant_gain_offset(rtwdev, band, freq);
	lmt = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt + offset);
	sar = rtw89_query_sar(rtwdev, freq);
	cstr = rtw89_phy_get_tpe_constraint(rtwdev, band);

	return min3(lmt, sar, cstr);
}
EXPORT_SYMBOL(rtw89_phy_read_txpwr_limit);
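/* Lookup policy above: a zero table entry means "no rule for this regd",
 * so the code retries the worldwide (RTW89_WW) row; the winner is then
 * clamped against the SAR limit and any 6 GHz TPE constraint via
 * min3(lmt, sar, cstr) after conversion to the MAC's power unit.
 */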
#define __fill_txpwr_limit_nonbf_bf(ptr, band, bw, ntx, rs, ch)		\
	do {								\
		u8 __i;							\
		for (__i = 0; __i < RTW89_BF_NUM; __i++)		\
			ptr[__i] = rtw89_phy_read_txpwr_limit(rtwdev,	\
							      band,	\
							      bw, ntx,	\
							      rs, __i,	\
							      (ch));	\
	} while (0)

static void rtw89_phy_fill_txpwr_limit_20m_ax(struct rtw89_dev *rtwdev,
					      struct rtw89_txpwr_limit_ax *lmt,
					      u8 band, u8 ntx, u8 ch)
{
	__fill_txpwr_limit_nonbf_bf(lmt->cck_20m, band, RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_CCK, ch);
	__fill_txpwr_limit_nonbf_bf(lmt->cck_40m, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_CCK, ch);
	__fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_OFDM, ch);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch);
}

static void rtw89_phy_fill_txpwr_limit_40m_ax(struct rtw89_dev *rtwdev,
					      struct rtw89_txpwr_limit_ax *lmt,
					      u8 band, u8 ntx, u8 ch, u8 pri_ch)
{
	__fill_txpwr_limit_nonbf_bf(lmt->cck_20m, band, RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_CCK, ch - 2);
	__fill_txpwr_limit_nonbf_bf(lmt->cck_40m, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_CCK, ch);
	__fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_OFDM, pri_ch);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch - 2);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch + 2);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], band,
				    RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch);
}

static void rtw89_phy_fill_txpwr_limit_80m_ax(struct rtw89_dev *rtwdev,
					      struct rtw89_txpwr_limit_ax *lmt,
					      u8 band, u8 ntx, u8 ch, u8 pri_ch)
{
	s8 val_0p5_n[RTW89_BF_NUM];
	s8 val_0p5_p[RTW89_BF_NUM];
	u8 i;

	__fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_OFDM, pri_ch);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch - 6);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch - 2);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[2], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch + 2);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[3], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch + 6);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], band,
				    RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch - 4);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[1], band,
				    RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch + 4);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[0], band,
				    RTW89_CHANNEL_WIDTH_80,
				    ntx, RTW89_RS_MCS, ch);

	__fill_txpwr_limit_nonbf_bf(val_0p5_n, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch - 4);
	__fill_txpwr_limit_nonbf_bf(val_0p5_p, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch + 4);

	for (i = 0; i < RTW89_BF_NUM; i++)
		lmt->mcs_40m_0p5[i] = min_t(s8, val_0p5_n[i], val_0p5_p[i]);
}
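/* mcs_40m_0p5 above covers a 40 MHz transmission that straddles the two
 * regular 40 MHz segments: no single table entry rates it, so both
 * neighbours (ch - 4 and ch + 4) are probed and the stricter value wins
 * for each beamforming case.
 */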
static void rtw89_phy_fill_txpwr_limit_160m_ax(struct rtw89_dev *rtwdev,
					       struct rtw89_txpwr_limit_ax *lmt,
					       u8 band, u8 ntx, u8 ch, u8 pri_ch)
{
	s8 val_0p5_n[RTW89_BF_NUM];
	s8 val_0p5_p[RTW89_BF_NUM];
	s8 val_2p5_n[RTW89_BF_NUM];
	s8 val_2p5_p[RTW89_BF_NUM];
	u8 i;

	/* fill ofdm section */
	__fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_OFDM, pri_ch);

	/* fill mcs 20m section */
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch - 14);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch - 10);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[2], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch - 6);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[3], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch - 2);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[4], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch + 2);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[5], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch + 6);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[6], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch + 10);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[7], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch + 14);

	/* fill mcs 40m section */
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], band,
				    RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch - 12);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[1], band,
				    RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch - 4);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[2], band,
				    RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch + 4);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[3], band,
				    RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch + 12);

	/* fill mcs 80m section */
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[0], band,
				    RTW89_CHANNEL_WIDTH_80,
				    ntx, RTW89_RS_MCS, ch - 8);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[1], band,
				    RTW89_CHANNEL_WIDTH_80,
				    ntx, RTW89_RS_MCS, ch + 8);

	/* fill mcs 160m section */
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_160m, band,
				    RTW89_CHANNEL_WIDTH_160,
				    ntx, RTW89_RS_MCS, ch);

	/* fill mcs 40m 0p5 section */
	__fill_txpwr_limit_nonbf_bf(val_0p5_n, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch - 4);
	__fill_txpwr_limit_nonbf_bf(val_0p5_p, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch + 4);

	for (i = 0; i < RTW89_BF_NUM; i++)
		lmt->mcs_40m_0p5[i] = min_t(s8, val_0p5_n[i], val_0p5_p[i]);

	/* fill mcs 40m 2p5 section */
	__fill_txpwr_limit_nonbf_bf(val_2p5_n, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch - 8);
	__fill_txpwr_limit_nonbf_bf(val_2p5_p, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch + 8);

	for (i = 0; i < RTW89_BF_NUM; i++)
		lmt->mcs_40m_2p5[i] = min_t(s8, val_2p5_n[i], val_2p5_p[i]);
}

static
void rtw89_phy_fill_txpwr_limit_ax(struct rtw89_dev *rtwdev,
				   const struct rtw89_chan *chan,
				   struct rtw89_txpwr_limit_ax *lmt,
				   u8 ntx)
{
	u8 band = chan->band_type;
	u8 pri_ch = chan->primary_channel;
	u8 ch = chan->channel;
	u8 bw = chan->band_width;

	memset(lmt, 0, sizeof(*lmt));

	switch (bw) {
	case RTW89_CHANNEL_WIDTH_20:
		rtw89_phy_fill_txpwr_limit_20m_ax(rtwdev, lmt, band, ntx, ch);
		break;
	case RTW89_CHANNEL_WIDTH_40:
		rtw89_phy_fill_txpwr_limit_40m_ax(rtwdev, lmt, band, ntx, ch,
						  pri_ch);
		break;
	case RTW89_CHANNEL_WIDTH_80:
		rtw89_phy_fill_txpwr_limit_80m_ax(rtwdev, lmt, band, ntx, ch,
						  pri_ch);
		break;
	case RTW89_CHANNEL_WIDTH_160:
		rtw89_phy_fill_txpwr_limit_160m_ax(rtwdev, lmt, band, ntx, ch,
						   pri_ch);
		break;
	}
}
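/* Each bandwidth fills progressively more sections of the limit page: an
 * 80 MHz channel at center ch, for example, carries its own entries for
 * the 20 MHz segments at ch - 6, ch - 2, ch + 2 and ch + 6 and the 40 MHz
 * segments at ch - 4 and ch + 4, so every sub-segment is rated
 * individually.
 */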
s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band,
				 u8 ru, u8 ntx, u8 ch)
{
	const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms;
	const struct rtw89_txpwr_rule_2ghz *rule_2ghz = &rfe_parms->rule_2ghz;
	const struct rtw89_txpwr_rule_5ghz *rule_5ghz = &rfe_parms->rule_5ghz;
	const struct rtw89_txpwr_rule_6ghz *rule_6ghz = &rfe_parms->rule_6ghz;
	struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
	enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
	u32 freq = ieee80211_channel_to_frequency(ch, nl_band);
	u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch);
	u8 regd = rtw89_regd_get(rtwdev, band);
	u8 reg6 = regulatory->reg_6ghz_power;
	s8 lmt_ru = 0, sar, offset;
	s8 cstr;

	switch (band) {
	case RTW89_BAND_2G:
		lmt_ru = (*rule_2ghz->lmt_ru)[ru][ntx][regd][ch_idx];
		if (lmt_ru)
			break;

		lmt_ru = (*rule_2ghz->lmt_ru)[ru][ntx][RTW89_WW][ch_idx];
		break;
	case RTW89_BAND_5G:
		lmt_ru = (*rule_5ghz->lmt_ru)[ru][ntx][regd][ch_idx];
		if (lmt_ru)
			break;

		lmt_ru = (*rule_5ghz->lmt_ru)[ru][ntx][RTW89_WW][ch_idx];
		break;
	case RTW89_BAND_6G:
		lmt_ru = (*rule_6ghz->lmt_ru)[ru][ntx][regd][reg6][ch_idx];
		if (lmt_ru)
			break;

		lmt_ru = (*rule_6ghz->lmt_ru)[ru][ntx][RTW89_WW]
					     [RTW89_REG_6GHZ_POWER_DFLT]
					     [ch_idx];
		break;
	default:
		rtw89_warn(rtwdev, "unknown band type: %d\n", band);
		return 0;
	}

	offset = rtw89_phy_ant_gain_offset(rtwdev, band, freq);
	lmt_ru = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt_ru + offset);
	sar = rtw89_query_sar(rtwdev, freq);
	cstr = rtw89_phy_get_tpe_constraint(rtwdev, band);

	return min3(lmt_ru, sar, cstr);
}

static void
rtw89_phy_fill_txpwr_limit_ru_20m_ax(struct rtw89_dev *rtwdev,
				     struct rtw89_txpwr_limit_ru_ax *lmt_ru,
				     u8 band, u8 ntx, u8 ch)
{
	lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU26,
							ntx, ch);
	lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU52,
							ntx, ch);
	lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							 RTW89_RU106,
							 ntx, ch);
}

static void
rtw89_phy_fill_txpwr_limit_ru_40m_ax(struct rtw89_dev *rtwdev,
				     struct rtw89_txpwr_limit_ru_ax *lmt_ru,
				     u8 band, u8 ntx, u8 ch)
{
	lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU26,
							ntx, ch - 2);
	lmt_ru->ru26[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU26,
							ntx, ch + 2);
	lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU52,
							ntx, ch - 2);
	lmt_ru->ru52[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU52,
							ntx, ch + 2);
	lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							 RTW89_RU106,
							 ntx, ch - 2);
	lmt_ru->ru106[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							 RTW89_RU106,
							 ntx, ch + 2);
}

static void
rtw89_phy_fill_txpwr_limit_ru_80m_ax(struct rtw89_dev *rtwdev,
				     struct rtw89_txpwr_limit_ru_ax *lmt_ru,
				     u8 band, u8 ntx, u8 ch)
{
	lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU26,
							ntx, ch - 6);
	lmt_ru->ru26[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU26,
							ntx, ch - 2);
	lmt_ru->ru26[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU26,
							ntx, ch + 2);
	lmt_ru->ru26[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU26,
							ntx, ch + 6);
	lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU52,
							ntx, ch - 6);
	lmt_ru->ru52[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU52,
							ntx, ch - 2);
	lmt_ru->ru52[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU52,
							ntx, ch + 2);
	lmt_ru->ru52[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU52,
							ntx, ch + 6);
	lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							 RTW89_RU106,
							 ntx, ch - 6);
	lmt_ru->ru106[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							 RTW89_RU106,
							 ntx, ch - 2);
	lmt_ru->ru106[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							 RTW89_RU106,
							 ntx, ch + 2);
	lmt_ru->ru106[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							 RTW89_RU106,
							 ntx, ch + 6);
}
static void
rtw89_phy_fill_txpwr_limit_ru_160m_ax(struct rtw89_dev *rtwdev,
				      struct rtw89_txpwr_limit_ru_ax *lmt_ru,
				      u8 band, u8 ntx, u8 ch)
{
	static const int ofst[] = { -14, -10, -6, -2, 2, 6, 10, 14 };
	int i;

#if defined(__linux__)
	static_assert(ARRAY_SIZE(ofst) == RTW89_RU_SEC_NUM_AX);
#elif defined(__FreeBSD__)
	rtw89_static_assert(ARRAY_SIZE(ofst) == RTW89_RU_SEC_NUM_AX);
#endif
	for (i = 0; i < RTW89_RU_SEC_NUM_AX; i++) {
		lmt_ru->ru26[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
								RTW89_RU26,
								ntx,
								ch + ofst[i]);
		lmt_ru->ru52[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
								RTW89_RU52,
								ntx,
								ch + ofst[i]);
		lmt_ru->ru106[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
								 RTW89_RU106,
								 ntx,
								 ch + ofst[i]);
	}
}

static
void rtw89_phy_fill_txpwr_limit_ru_ax(struct rtw89_dev *rtwdev,
				      const struct rtw89_chan *chan,
				      struct rtw89_txpwr_limit_ru_ax *lmt_ru,
				      u8 ntx)
{
	u8 band = chan->band_type;
	u8 ch = chan->channel;
	u8 bw = chan->band_width;

	memset(lmt_ru, 0, sizeof(*lmt_ru));

	switch (bw) {
	case RTW89_CHANNEL_WIDTH_20:
		rtw89_phy_fill_txpwr_limit_ru_20m_ax(rtwdev, lmt_ru, band, ntx,
						     ch);
		break;
	case RTW89_CHANNEL_WIDTH_40:
		rtw89_phy_fill_txpwr_limit_ru_40m_ax(rtwdev, lmt_ru, band, ntx,
						     ch);
		break;
	case RTW89_CHANNEL_WIDTH_80:
		rtw89_phy_fill_txpwr_limit_ru_80m_ax(rtwdev, lmt_ru, band, ntx,
						     ch);
		break;
	case RTW89_CHANNEL_WIDTH_160:
		rtw89_phy_fill_txpwr_limit_ru_160m_ax(rtwdev, lmt_ru, band, ntx,
						      ch);
		break;
	}
}

static void rtw89_phy_set_txpwr_byrate_ax(struct rtw89_dev *rtwdev,
					  const struct rtw89_chan *chan,
					  enum rtw89_phy_idx phy_idx)
{
	u8 max_nss_num = rtwdev->chip->rf_path_num;
	static const u8 rs[] = {
		RTW89_RS_CCK,
		RTW89_RS_OFDM,
		RTW89_RS_MCS,
		RTW89_RS_HEDCM,
	};
	struct rtw89_rate_desc cur = {};
	u8 band = chan->band_type;
	u8 ch = chan->channel;
	u32 addr, val;
	s8 v[4] = {};
	u8 i;

	rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
		    "[TXPWR] set txpwr byrate with ch=%d\n", ch);

	BUILD_BUG_ON(rtw89_rs_idx_num_ax[RTW89_RS_CCK] % 4);
	BUILD_BUG_ON(rtw89_rs_idx_num_ax[RTW89_RS_OFDM] % 4);
	BUILD_BUG_ON(rtw89_rs_idx_num_ax[RTW89_RS_MCS] % 4);
	BUILD_BUG_ON(rtw89_rs_idx_num_ax[RTW89_RS_HEDCM] % 4);

	addr = R_AX_PWR_BY_RATE;
	for (cur.nss = 0; cur.nss < max_nss_num; cur.nss++) {
		for (i = 0; i < ARRAY_SIZE(rs); i++) {
			if (cur.nss >= rtw89_rs_nss_num_ax[rs[i]])
				continue;

			cur.rs = rs[i];
			for (cur.idx = 0; cur.idx < rtw89_rs_idx_num_ax[rs[i]];
			     cur.idx++) {
				v[cur.idx % 4] =
					rtw89_phy_read_txpwr_byrate(rtwdev,
								    band, 0,
								    &cur);

				if ((cur.idx + 1) % 4)
					continue;

				val = FIELD_PREP(GENMASK(7, 0), v[0]) |
				      FIELD_PREP(GENMASK(15, 8), v[1]) |
				      FIELD_PREP(GENMASK(23, 16), v[2]) |
				      FIELD_PREP(GENMASK(31, 24), v[3]);

				rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr,
							val);
				addr += 4;
			}
		}
	}
}
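/* Illustrative sketch, not from the original sources: the byrate registers
 * take four signed byte entries per 32-bit write, which is why the loop
 * above flushes v[0..3] on every fourth index. Distilled, the packing is:
 */
#if 0
static u32 example_pack_byrate(const s8 v[4])
{
	/* e.g. v = {2, -1, 0, 3} packs to 0x0300ff02 */
	return FIELD_PREP(GENMASK(7, 0), v[0]) |
	       FIELD_PREP(GENMASK(15, 8), v[1]) |
	       FIELD_PREP(GENMASK(23, 16), v[2]) |
	       FIELD_PREP(GENMASK(31, 24), v[3]);
}
#endif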
static
void rtw89_phy_set_txpwr_offset_ax(struct rtw89_dev *rtwdev,
				   const struct rtw89_chan *chan,
				   enum rtw89_phy_idx phy_idx)
{
	struct rtw89_rate_desc desc = {
		.nss = RTW89_NSS_1,
		.rs = RTW89_RS_OFFSET,
	};
	u8 band = chan->band_type;
	s8 v[RTW89_RATE_OFFSET_NUM_AX] = {};
	u32 val;

	rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr offset\n");

	for (desc.idx = 0; desc.idx < RTW89_RATE_OFFSET_NUM_AX; desc.idx++)
		v[desc.idx] = rtw89_phy_read_txpwr_byrate(rtwdev, band, 0, &desc);

	BUILD_BUG_ON(RTW89_RATE_OFFSET_NUM_AX != 5);
	val = FIELD_PREP(GENMASK(3, 0), v[0]) |
	      FIELD_PREP(GENMASK(7, 4), v[1]) |
	      FIELD_PREP(GENMASK(11, 8), v[2]) |
	      FIELD_PREP(GENMASK(15, 12), v[3]) |
	      FIELD_PREP(GENMASK(19, 16), v[4]);

	rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_AX_PWR_RATE_OFST_CTRL,
				     GENMASK(19, 0), val);
}

static void rtw89_phy_set_txpwr_limit_ax(struct rtw89_dev *rtwdev,
					 const struct rtw89_chan *chan,
					 enum rtw89_phy_idx phy_idx)
{
	u8 max_ntx_num = rtwdev->chip->rf_path_num;
	struct rtw89_txpwr_limit_ax lmt;
	u8 ch = chan->channel;
	u8 bw = chan->band_width;
	const s8 *ptr;
	u32 addr, val;
	u8 i, j;

	rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
		    "[TXPWR] set txpwr limit with ch=%d bw=%d\n", ch, bw);

	BUILD_BUG_ON(sizeof(struct rtw89_txpwr_limit_ax) !=
		     RTW89_TXPWR_LMT_PAGE_SIZE_AX);

	addr = R_AX_PWR_LMT;
	for (i = 0; i < max_ntx_num; i++) {
		rtw89_phy_fill_txpwr_limit_ax(rtwdev, chan, &lmt, i);

		ptr = (s8 *)&lmt;
		for (j = 0; j < RTW89_TXPWR_LMT_PAGE_SIZE_AX;
		     j += 4, addr += 4, ptr += 4) {
			val = FIELD_PREP(GENMASK(7, 0), ptr[0]) |
			      FIELD_PREP(GENMASK(15, 8), ptr[1]) |
			      FIELD_PREP(GENMASK(23, 16), ptr[2]) |
			      FIELD_PREP(GENMASK(31, 24), ptr[3]);

			rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
		}
	}
}

static void rtw89_phy_set_txpwr_limit_ru_ax(struct rtw89_dev *rtwdev,
					    const struct rtw89_chan *chan,
					    enum rtw89_phy_idx phy_idx)
{
	u8 max_ntx_num = rtwdev->chip->rf_path_num;
	struct rtw89_txpwr_limit_ru_ax lmt_ru;
	u8 ch = chan->channel;
	u8 bw = chan->band_width;
	const s8 *ptr;
	u32 addr, val;
	u8 i, j;

	rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
		    "[TXPWR] set txpwr limit ru with ch=%d bw=%d\n", ch, bw);

	BUILD_BUG_ON(sizeof(struct rtw89_txpwr_limit_ru_ax) !=
		     RTW89_TXPWR_LMT_RU_PAGE_SIZE_AX);

	addr = R_AX_PWR_RU_LMT;
	for (i = 0; i < max_ntx_num; i++) {
		rtw89_phy_fill_txpwr_limit_ru_ax(rtwdev, chan, &lmt_ru, i);

		ptr = (s8 *)&lmt_ru;
		for (j = 0; j < RTW89_TXPWR_LMT_RU_PAGE_SIZE_AX;
		     j += 4, addr += 4, ptr += 4) {
			val = FIELD_PREP(GENMASK(7, 0), ptr[0]) |
			      FIELD_PREP(GENMASK(15, 8), ptr[1]) |
			      FIELD_PREP(GENMASK(23, 16), ptr[2]) |
			      FIELD_PREP(GENMASK(31, 24), ptr[3]);

			rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
		}
	}
}

struct rtw89_phy_iter_ra_data {
	struct rtw89_dev *rtwdev;
	struct sk_buff *c2h;
};
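/* A rate adaptation (RA) C2H report describes a single peer; the iterator
 * below is run over every station link and matches on mac_id, so reports
 * for unrelated links fall through without side effects.
 */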
static void __rtw89_phy_c2h_ra_rpt_iter(struct rtw89_sta_link *rtwsta_link,
					struct ieee80211_link_sta *link_sta,
					struct rtw89_phy_iter_ra_data *ra_data)
{
	struct rtw89_dev *rtwdev = ra_data->rtwdev;
	const struct rtw89_c2h_ra_rpt *c2h =
		(const struct rtw89_c2h_ra_rpt *)ra_data->c2h->data;
	struct rtw89_ra_report *ra_report = &rtwsta_link->ra_report;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	bool format_v1 = chip->chip_gen == RTW89_CHIP_BE;
	u8 mode, rate, bw, giltf, mac_id;
	u16 legacy_bitrate;
	bool valid;
	u8 mcs = 0;
	u8 t;

	mac_id = le32_get_bits(c2h->w2, RTW89_C2H_RA_RPT_W2_MACID);
	if (mac_id != rtwsta_link->mac_id)
		return;

	rate = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_MCSNSS);
	bw = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_BW);
	giltf = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_GILTF);
	mode = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_MD_SEL);

	if (format_v1) {
		t = le32_get_bits(c2h->w2, RTW89_C2H_RA_RPT_W2_MCSNSS_B7);
		rate |= u8_encode_bits(t, BIT(7));
		t = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_BW_B2);
		bw |= u8_encode_bits(t, BIT(2));
		t = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_MD_SEL_B2);
		mode |= u8_encode_bits(t, BIT(2));
	}

	if (mode == RTW89_RA_RPT_MODE_LEGACY) {
		valid = rtw89_ra_report_to_bitrate(rtwdev, rate, &legacy_bitrate);
		if (!valid)
			return;
	}

	memset(&ra_report->txrate, 0, sizeof(ra_report->txrate));

	switch (mode) {
	case RTW89_RA_RPT_MODE_LEGACY:
		ra_report->txrate.legacy = legacy_bitrate;
		break;
	case RTW89_RA_RPT_MODE_HT:
		ra_report->txrate.flags |= RATE_INFO_FLAGS_MCS;
		if (RTW89_CHK_FW_FEATURE(OLD_HT_RA_FORMAT, &rtwdev->fw))
			rate = RTW89_MK_HT_RATE(FIELD_GET(RTW89_RA_RATE_MASK_NSS, rate),
						FIELD_GET(RTW89_RA_RATE_MASK_MCS, rate));
		else
			rate = FIELD_GET(RTW89_RA_RATE_MASK_HT_MCS, rate);
		ra_report->txrate.mcs = rate;
		if (giltf)
			ra_report->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		mcs = ra_report->txrate.mcs & 0x07;
		break;
	case RTW89_RA_RPT_MODE_VHT:
		ra_report->txrate.flags |= RATE_INFO_FLAGS_VHT_MCS;
		ra_report->txrate.mcs = format_v1 ?
			u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS_V1) :
			u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS);
		ra_report->txrate.nss = format_v1 ?
			u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS_V1) + 1 :
			u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS) + 1;
		if (giltf)
			ra_report->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		mcs = ra_report->txrate.mcs;
		break;
	case RTW89_RA_RPT_MODE_HE:
		ra_report->txrate.flags |= RATE_INFO_FLAGS_HE_MCS;
		ra_report->txrate.mcs = format_v1 ?
			u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS_V1) :
			u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS);
		ra_report->txrate.nss = format_v1 ?
			u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS_V1) + 1 :
			u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS) + 1;
		if (giltf == RTW89_GILTF_2XHE08 || giltf == RTW89_GILTF_1XHE08)
			ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_0_8;
		else if (giltf == RTW89_GILTF_2XHE16 || giltf == RTW89_GILTF_1XHE16)
			ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_1_6;
		else
			ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_3_2;
		mcs = ra_report->txrate.mcs;
		break;
	case RTW89_RA_RPT_MODE_EHT:
		ra_report->txrate.flags |= RATE_INFO_FLAGS_EHT_MCS;
		ra_report->txrate.mcs = u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS_V1);
		ra_report->txrate.nss = u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS_V1) + 1;
		if (giltf == RTW89_GILTF_2XHE08 || giltf == RTW89_GILTF_1XHE08)
			ra_report->txrate.eht_gi = NL80211_RATE_INFO_EHT_GI_0_8;
		else if (giltf == RTW89_GILTF_2XHE16 || giltf == RTW89_GILTF_1XHE16)
			ra_report->txrate.eht_gi = NL80211_RATE_INFO_EHT_GI_1_6;
		else
			ra_report->txrate.eht_gi = NL80211_RATE_INFO_EHT_GI_3_2;
		mcs = ra_report->txrate.mcs;
		break;
	}
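	/* Collapse mode/rate back into the hardware rate code below and size
	 * the AMSDU budget from the new bitrate: an MCS of 0..2 may still
	 * fall back to legacy rates, which get_max_amsdu_len() treats as
	 * "do not aggregate".
	 */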
	ra_report->txrate.bw = rtw89_hw_to_rate_info_bw(bw);
	ra_report->bit_rate = cfg80211_calculate_bitrate(&ra_report->txrate);
	ra_report->hw_rate = format_v1 ?
			     u16_encode_bits(mode, RTW89_HW_RATE_V1_MASK_MOD) |
			     u16_encode_bits(rate, RTW89_HW_RATE_V1_MASK_VAL) :
			     u16_encode_bits(mode, RTW89_HW_RATE_MASK_MOD) |
			     u16_encode_bits(rate, RTW89_HW_RATE_MASK_VAL);
	ra_report->might_fallback_legacy = mcs <= 2;
	link_sta->agg.max_rc_amsdu_len = get_max_amsdu_len(rtwdev, ra_report);
	rtwsta_link->max_agg_wait = link_sta->agg.max_rc_amsdu_len / 1500 - 1;
}

static void rtw89_phy_c2h_ra_rpt_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw89_phy_iter_ra_data *ra_data = (struct rtw89_phy_iter_ra_data *)data;
	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
	struct rtw89_sta_link *rtwsta_link;
	struct ieee80211_link_sta *link_sta;
	unsigned int link_id;

	rcu_read_lock();

	rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id) {
		link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, false);
		__rtw89_phy_c2h_ra_rpt_iter(rtwsta_link, link_sta, ra_data);
	}

	rcu_read_unlock();
}

static void
rtw89_phy_c2h_ra_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	struct rtw89_phy_iter_ra_data ra_data;

	ra_data.rtwdev = rtwdev;
	ra_data.c2h = c2h;
	ieee80211_iterate_stations_atomic(rtwdev->hw,
					  rtw89_phy_c2h_ra_rpt_iter,
					  &ra_data);
}

static
void (* const rtw89_phy_c2h_ra_handler[])(struct rtw89_dev *rtwdev,
					  struct sk_buff *c2h, u32 len) = {
	[RTW89_PHY_C2H_FUNC_STS_RPT] = rtw89_phy_c2h_ra_rpt,
	[RTW89_PHY_C2H_FUNC_MU_GPTBL_RPT] = NULL,
	[RTW89_PHY_C2H_FUNC_TXSTS] = NULL,
};

static void rtw89_phy_c2h_rfk_rpt_log(struct rtw89_dev *rtwdev,
				      enum rtw89_phy_c2h_rfk_log_func func,
				      void *content, u16 len)
{
	struct rtw89_c2h_rf_txgapk_rpt_log *txgapk;
	struct rtw89_c2h_rf_rxdck_rpt_log *rxdck;
	struct rtw89_c2h_rf_dack_rpt_log *dack;
	struct rtw89_c2h_rf_tssi_rpt_log *tssi;
	struct rtw89_c2h_rf_dpk_rpt_log *dpk;
	struct rtw89_c2h_rf_iqk_rpt_log *iqk;
	int i, j, k;

	switch (func) {
	case RTW89_PHY_C2H_RFK_LOG_FUNC_IQK:
		if (len != sizeof(*iqk))
			goto out;

		iqk = content;
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->is_iqk_init = %x\n", iqk->is_iqk_init);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->is_reload = %x\n", iqk->is_reload);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->is_nbiqk = %x\n", iqk->is_nbiqk);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->txiqk_en = %x\n", iqk->txiqk_en);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->rxiqk_en = %x\n", iqk->rxiqk_en);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->lok_en = %x\n", iqk->lok_en);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->iqk_xym_en = %x\n", iqk->iqk_xym_en);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->iqk_sram_en = %x\n", iqk->iqk_sram_en);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->iqk_fft_en = %x\n", iqk->iqk_fft_en);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->is_fw_iqk = %x\n", iqk->is_fw_iqk);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->is_iqk_enable = %x\n", iqk->is_iqk_enable);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->iqk_cfir_en = %x\n", iqk->iqk_cfir_en);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->thermal_rek_en = %x\n", iqk->thermal_rek_en);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->version = %x\n", iqk->version);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->phy = %x\n", iqk->phy);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->fwk_status = %x\n", iqk->fwk_status);

		for (i = 0; i < 2; i++) {
			rtw89_debug(rtwdev, RTW89_DBG_RFK,
				    "[IQK] ======== Path %x ========\n", i);
			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->iqk_band[%d] = %x\n",
				    i, iqk->iqk_band[i]);
			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->iqk_ch[%d] = %x\n",
				    i, iqk->iqk_ch[i]);
			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->iqk_bw[%d] = %x\n",
				    i, iqk->iqk_bw[i]);
			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->lok_idac[%d] = %x\n",
				    i, le32_to_cpu(iqk->lok_idac[i]));
			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->lok_vbuf[%d] = %x\n",
				    i, le32_to_cpu(iqk->lok_vbuf[i]));
			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->iqk_tx_fail[%d] = %x\n",
				    i, iqk->iqk_tx_fail[i]);
			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->iqk_rx_fail[%d] = %x\n",
				    i, iqk->iqk_rx_fail[i]);
			for (j = 0; j < 4; j++)
				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[IQK] iqk->rftxgain[%d][%d] = %x\n",
					    i, j, le32_to_cpu(iqk->rftxgain[i][j]));
			for (j = 0; j < 4; j++)
				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[IQK] iqk->tx_xym[%d][%d] = %x\n",
					    i, j, le32_to_cpu(iqk->tx_xym[i][j]));
			for (j = 0; j < 4; j++)
				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[IQK] iqk->rfrxgain[%d][%d] = %x\n",
					    i, j, le32_to_cpu(iqk->rfrxgain[i][j]));
			for (j = 0; j < 4; j++)
				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[IQK] iqk->rx_xym[%d][%d] = %x\n",
					    i, j, le32_to_cpu(iqk->rx_xym[i][j]));
		}
		return;
	case RTW89_PHY_C2H_RFK_LOG_FUNC_DPK:
		if (len != sizeof(*dpk))
			goto out;

		dpk = content;
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "DPK ver:%d idx:%2ph band:%2ph bw:%2ph ch:%2ph path:%2ph\n",
			    dpk->ver, dpk->idx, dpk->band, dpk->bw, dpk->ch, dpk->path_ok);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "DPK txagc:%2ph ther:%2ph gs:%2ph dc_i:%4ph dc_q:%4ph\n",
			    dpk->txagc, dpk->ther, dpk->gs, dpk->dc_i, dpk->dc_q);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "DPK corr_v:%2ph corr_i:%2ph to:%2ph ov:%2ph\n",
			    dpk->corr_val, dpk->corr_idx, dpk->is_timeout, dpk->rxbb_ov);
		return;
	case RTW89_PHY_C2H_RFK_LOG_FUNC_DACK:
		if (len != sizeof(*dack))
			goto out;

		dack = content;

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]FWDACK SUMMARY!!!!!\n");
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DACK]FWDACK ver = 0x%x, FWDACK rpt_ver = 0x%x, driver rpt_ver = 0x%x\n",
			    dack->fwdack_ver, dack->fwdack_info_ver, 0x2);

		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DACK]timeout code = [0x%x 0x%x 0x%x 0x%x 0x%x]\n",
			    dack->addck_timeout, dack->cdack_timeout, dack->dadck_timeout,
			    dack->adgaink_timeout, dack->msbk_timeout);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DACK]DACK fail = 0x%x\n", dack->dack_fail);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DACK]S0 WBADCK = [0x%x]\n", dack->wbdck_d[0]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DACK]S1 WBADCK = [0x%x]\n", dack->wbdck_d[1]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[DACK]DRCK = [0x%x]\n", dack->rck_d);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 CDACK ic = [0x%x, 0x%x]\n",
			    dack->cdack_d[0][0][0], dack->cdack_d[0][0][1]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 CDACK qc = [0x%x, 0x%x]\n",
			    dack->cdack_d[0][1][0], dack->cdack_d[0][1][1]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
"[DACK]S1 CDACK ic = [0x%x, 0x%x]\n", 3147 dack->cdack_d[1][0][0], dack->cdack_d[1][0][1]); 3148 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 CDACK qc = [0x%x, 0x%x]\n", 3149 dack->cdack_d[1][1][0], dack->cdack_d[1][1][1]); 3150 3151 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADC_DCK ic = [0x%x, 0x%x]\n", 3152 ((u32)dack->addck2_hd[0][0][0] << 8) | dack->addck2_ld[0][0][0], 3153 ((u32)dack->addck2_hd[0][0][1] << 8) | dack->addck2_ld[0][0][1]); 3154 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADC_DCK qc = [0x%x, 0x%x]\n", 3155 ((u32)dack->addck2_hd[0][1][0] << 8) | dack->addck2_ld[0][1][0], 3156 ((u32)dack->addck2_hd[0][1][1] << 8) | dack->addck2_ld[0][1][1]); 3157 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADC_DCK ic = [0x%x, 0x%x]\n", 3158 ((u32)dack->addck2_hd[1][0][0] << 8) | dack->addck2_ld[1][0][0], 3159 ((u32)dack->addck2_hd[1][0][1] << 8) | dack->addck2_ld[1][0][1]); 3160 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADC_DCK qc = [0x%x, 0x%x]\n", 3161 ((u32)dack->addck2_hd[1][1][0] << 8) | dack->addck2_ld[1][1][0], 3162 ((u32)dack->addck2_hd[1][1][1] << 8) | dack->addck2_ld[1][1][1]); 3163 3164 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADC_GAINK ic = 0x%x, qc = 0x%x\n", 3165 dack->adgaink_d[0][0], dack->adgaink_d[0][1]); 3166 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADC_GAINK ic = 0x%x, qc = 0x%x\n", 3167 dack->adgaink_d[1][0], dack->adgaink_d[1][1]); 3168 3169 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n", 3170 dack->dadck_d[0][0], dack->dadck_d[0][1]); 3171 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 DAC_DCK ic = 0x%x, qc = 0x%x\n", 3172 dack->dadck_d[1][0], dack->dadck_d[1][1]); 3173 3174 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 biask iqc = 0x%x\n", 3175 ((u32)dack->biask_hd[0][0] << 8) | dack->biask_ld[0][0]); 3176 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 biask iqc = 0x%x\n", 3177 ((u32)dack->biask_hd[1][0] << 8) | dack->biask_ld[1][0]); 3178 3179 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK ic:\n"); 3180 for (i = 0; i < 0x10; i++) 3181 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", 3182 dack->msbk_d[0][0][i]); 3183 3184 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK qc:\n"); 3185 for (i = 0; i < 0x10; i++) 3186 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", 3187 dack->msbk_d[0][1][i]); 3188 3189 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK ic:\n"); 3190 for (i = 0; i < 0x10; i++) 3191 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", 3192 dack->msbk_d[1][0][i]); 3193 3194 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK qc:\n"); 3195 for (i = 0; i < 0x10; i++) 3196 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", 3197 dack->msbk_d[1][1][i]); 3198 return; 3199 case RTW89_PHY_C2H_RFK_LOG_FUNC_RXDCK: 3200 if (len != sizeof(*rxdck)) 3201 goto out; 3202 3203 rxdck = content; 3204 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3205 "RXDCK ver:%d band:%2ph bw:%2ph ch:%2ph to:%2ph\n", 3206 rxdck->ver, rxdck->band, rxdck->bw, rxdck->ch, 3207 rxdck->timeout); 3208 return; 3209 case RTW89_PHY_C2H_RFK_LOG_FUNC_TSSI: 3210 if (len != sizeof(*tssi)) 3211 goto out; 3212 3213 tssi = content; 3214 for (i = 0; i < 2; i++) { 3215 for (j = 0; j < 2; j++) { 3216 for (k = 0; k < 4; k++) { 3217 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3218 "[TSSI] alignment_power_cw_h[%d][%d][%d]=%d\n", 3219 i, j, k, tssi->alignment_power_cw_h[i][j][k]); 3220 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3221 "[TSSI] alignment_power_cw_l[%d][%d][%d]=%d\n", 3222 i, j, k, tssi->alignment_power_cw_l[i][j][k]); 3223 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3224 "[TSSI] 
					rtw89_debug(rtwdev, RTW89_DBG_RFK,
						    "[TSSI] alignment_power[%d][%d][%d]=%d\n",
						    i, j, k, tssi->alignment_power[i][j][k]);
					rtw89_debug(rtwdev, RTW89_DBG_RFK,
						    "[TSSI] alignment_power_cw[%d][%d][%d]=%d\n",
						    i, j, k,
						    (tssi->alignment_power_cw_h[i][j][k] << 8) +
						    tssi->alignment_power_cw_l[i][j][k]);
				}

				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[TSSI] tssi_alimk_state[%d][%d]=%d\n",
					    i, j, tssi->tssi_alimk_state[i][j]);
				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[TSSI] default_txagc_offset[%d]=%d\n",
					    j, tssi->default_txagc_offset[0][j]);
			}
		}
		return;
	case RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK:
		if (len != sizeof(*txgapk))
			goto out;

		txgapk = content;
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TXGAPK]rpt r0x8010[0]=0x%x, r0x8010[1]=0x%x\n",
			    le32_to_cpu(txgapk->r0x8010[0]),
			    le32_to_cpu(txgapk->r0x8010[1]));
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt chk_id = %d\n",
			    txgapk->chk_id);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt chk_cnt = %d\n",
			    le32_to_cpu(txgapk->chk_cnt));
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt ver = 0x%x\n",
			    txgapk->ver);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt rsv1 = %d\n",
			    txgapk->rsv1);

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt track_d[0] = %*ph\n",
			    (int)sizeof(txgapk->track_d[0]), txgapk->track_d[0]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt power_d[0] = %*ph\n",
			    (int)sizeof(txgapk->power_d[0]), txgapk->power_d[0]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt track_d[1] = %*ph\n",
			    (int)sizeof(txgapk->track_d[1]), txgapk->track_d[1]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt power_d[1] = %*ph\n",
			    (int)sizeof(txgapk->power_d[1]), txgapk->power_d[1]);
		return;
	default:
		break;
	}

out:
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "unexpected RFK func %d report log with length %d\n", func, len);
}

static bool rtw89_phy_c2h_rfk_run_log(struct rtw89_dev *rtwdev,
				      enum rtw89_phy_c2h_rfk_log_func func,
				      void *content, u16 len)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	const struct rtw89_c2h_rf_run_log *log = content;
	const struct rtw89_fw_element_hdr *elm;
	u32 fmt_idx;
	u16 offset;

	if (sizeof(*log) != len)
		return false;

	if (!elm_info->rfk_log_fmt)
		return false;

	elm = elm_info->rfk_log_fmt->elm[func];
	fmt_idx = le32_to_cpu(log->fmt_idx);
	if (!elm || fmt_idx >= elm->u.rfk_log_fmt.nr)
		return false;

	offset = le16_to_cpu(elm->u.rfk_log_fmt.offset[fmt_idx]);
	if (offset == 0)
		return false;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, &elm->u.common.contents[offset],
		    le32_to_cpu(log->arg[0]), le32_to_cpu(log->arg[1]),
		    le32_to_cpu(log->arg[2]), le32_to_cpu(log->arg[3]));

	return true;
}

static void rtw89_phy_c2h_rfk_log(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
				  u32 len, enum rtw89_phy_c2h_rfk_log_func func,
				  const char *rfk_name)
{
	struct rtw89_c2h_hdr *c2h_hdr = (struct rtw89_c2h_hdr *)c2h->data;
	struct rtw89_c2h_rf_log_hdr *log_hdr;
#if defined(__linux__)
	void *log_ptr = c2h_hdr;
#elif defined(__FreeBSD__)
	u8 *log_ptr = (void *)c2h_hdr;
#endif
	u16 content_len;
	u16 chunk_len;
	bool handled;

	if (!rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK))
		return;

	log_ptr += sizeof(*c2h_hdr);
	len -= sizeof(*c2h_hdr);

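	/* The remaining payload is a sequence of length-prefixed chunks:
	 * each rtw89_c2h_rf_log_hdr announces its content length, so walk
	 * the buffer until a chunk would overrun the bytes left.
	 */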
	while (len > sizeof(*log_hdr)) {
#if defined(__linux__)
		log_hdr = log_ptr;
#elif defined(__FreeBSD__)
		log_hdr = (void *)log_ptr;
#endif
		content_len = le16_to_cpu(log_hdr->len);
		chunk_len = content_len + sizeof(*log_hdr);

		if (chunk_len > len)
			break;

		switch (log_hdr->type) {
		case RTW89_RF_RUN_LOG:
			handled = rtw89_phy_c2h_rfk_run_log(rtwdev, func,
							    log_hdr->content, content_len);
			if (handled)
				break;

			rtw89_debug(rtwdev, RTW89_DBG_RFK, "%s run: %*ph\n",
				    rfk_name, content_len, log_hdr->content);
			break;
		case RTW89_RF_RPT_LOG:
			rtw89_phy_c2h_rfk_rpt_log(rtwdev, func,
						  log_hdr->content, content_len);
			break;
		default:
			return;
		}

		log_ptr += chunk_len;
		len -= chunk_len;
	}
}

static void
rtw89_phy_c2h_rfk_log_iqk(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
			      RTW89_PHY_C2H_RFK_LOG_FUNC_IQK, "IQK");
}

static void
rtw89_phy_c2h_rfk_log_dpk(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
			      RTW89_PHY_C2H_RFK_LOG_FUNC_DPK, "DPK");
}

static void
rtw89_phy_c2h_rfk_log_dack(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
			      RTW89_PHY_C2H_RFK_LOG_FUNC_DACK, "DACK");
}

static void
rtw89_phy_c2h_rfk_log_rxdck(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
			      RTW89_PHY_C2H_RFK_LOG_FUNC_RXDCK, "RX_DCK");
}

static void
rtw89_phy_c2h_rfk_log_tssi(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
			      RTW89_PHY_C2H_RFK_LOG_FUNC_TSSI, "TSSI");
}

static void
rtw89_phy_c2h_rfk_log_txgapk(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
			      RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK, "TXGAPK");
}

static
void (* const rtw89_phy_c2h_rfk_log_handler[])(struct rtw89_dev *rtwdev,
					       struct sk_buff *c2h, u32 len) = {
	[RTW89_PHY_C2H_RFK_LOG_FUNC_IQK] = rtw89_phy_c2h_rfk_log_iqk,
	[RTW89_PHY_C2H_RFK_LOG_FUNC_DPK] = rtw89_phy_c2h_rfk_log_dpk,
	[RTW89_PHY_C2H_RFK_LOG_FUNC_DACK] = rtw89_phy_c2h_rfk_log_dack,
	[RTW89_PHY_C2H_RFK_LOG_FUNC_RXDCK] = rtw89_phy_c2h_rfk_log_rxdck,
	[RTW89_PHY_C2H_RFK_LOG_FUNC_TSSI] = rtw89_phy_c2h_rfk_log_tssi,
	[RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK] = rtw89_phy_c2h_rfk_log_txgapk,
};

static
void rtw89_phy_rfk_report_prep(struct rtw89_dev *rtwdev)
{
	struct rtw89_rfk_wait_info *wait = &rtwdev->rfk_wait;

	wait->state = RTW89_RFK_STATE_START;
	wait->start_time = ktime_get();
	reinit_completion(&wait->completion);
}

static
int rtw89_phy_rfk_report_wait(struct rtw89_dev *rtwdev, const char *rfk_name,
			      unsigned int ms)
{
	struct rtw89_rfk_wait_info *wait = &rtwdev->rfk_wait;
	unsigned long time_left;

	/* Since we can't receive C2H events during SER, use a fixed delay. */
	if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags)) {
		fsleep(1000 * ms / 2);
		goto out;
	}

	time_left = wait_for_completion_timeout(&wait->completion,
						msecs_to_jiffies(ms));
	if (time_left == 0) {
		rtw89_warn(rtwdev, "failed to wait RF %s\n", rfk_name);
		return -ETIMEDOUT;
	} else if (wait->state != RTW89_RFK_STATE_OK) {
		rtw89_warn(rtwdev, "failed to do RF %s result from state %d\n",
			   rfk_name, wait->state);
		return -EFAULT;
	}

out:
#if defined(__linux__)
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "RF %s takes %lld ms to complete\n",
		    rfk_name, ktime_ms_delta(ktime_get(), wait->start_time));
#elif defined(__FreeBSD__)
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "RF %s takes %jd ms to complete\n",
		    rfk_name, (intmax_t)ktime_ms_delta(ktime_get(), wait->start_time));
#endif

	return 0;
}

static void
rtw89_phy_c2h_rfk_report_state(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	const struct rtw89_c2h_rfk_report *report =
		(const struct rtw89_c2h_rfk_report *)c2h->data;
	struct rtw89_rfk_wait_info *wait = &rtwdev->rfk_wait;

	wait->state = report->state;
	wait->version = report->version;

	complete(&wait->completion);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "RFK report state %d with version %d (%*ph)\n",
		    wait->state, wait->version,
		    (int)(len - sizeof(report->hdr)), &report->state);
}

static void
rtw89_phy_c2h_rfk_log_tas_pwr(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
}

static
void (* const rtw89_phy_c2h_rfk_report_handler[])(struct rtw89_dev *rtwdev,
						  struct sk_buff *c2h, u32 len) = {
	[RTW89_PHY_C2H_RFK_REPORT_FUNC_STATE] = rtw89_phy_c2h_rfk_report_state,
	[RTW89_PHY_C2H_RFK_LOG_TAS_PWR] = rtw89_phy_c2h_rfk_log_tas_pwr,
};

bool rtw89_phy_c2h_chk_atomic(struct rtw89_dev *rtwdev, u8 class, u8 func)
{
	switch (class) {
	case RTW89_PHY_C2H_RFK_LOG:
		switch (func) {
		case RTW89_PHY_C2H_RFK_LOG_FUNC_IQK:
		case RTW89_PHY_C2H_RFK_LOG_FUNC_DPK:
		case RTW89_PHY_C2H_RFK_LOG_FUNC_DACK:
		case RTW89_PHY_C2H_RFK_LOG_FUNC_RXDCK:
		case RTW89_PHY_C2H_RFK_LOG_FUNC_TSSI:
		case RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK:
			return true;
		default:
			return false;
		}
	case RTW89_PHY_C2H_RFK_REPORT:
		switch (func) {
		case RTW89_PHY_C2H_RFK_REPORT_FUNC_STATE:
			return true;
		default:
			return false;
		}
	default:
		return false;
	}
}
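/* rtw89_phy_c2h_chk_atomic() tells the event receive path which PHY C2H
 * messages must be handled directly in atomic context rather than
 * deferred: the RFK logs and the RFK report state, the latter because the
 * *_and_wait helpers below block on the completion that
 * rtw89_phy_c2h_rfk_report_state() signals.
 */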
void rtw89_phy_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
			  u32 len, u8 class, u8 func)
{
	void (*handler)(struct rtw89_dev *rtwdev,
			struct sk_buff *c2h, u32 len) = NULL;

	switch (class) {
	case RTW89_PHY_C2H_CLASS_RA:
		if (func < RTW89_PHY_C2H_FUNC_RA_MAX)
			handler = rtw89_phy_c2h_ra_handler[func];
		break;
	case RTW89_PHY_C2H_RFK_LOG:
		if (func < ARRAY_SIZE(rtw89_phy_c2h_rfk_log_handler))
			handler = rtw89_phy_c2h_rfk_log_handler[func];
		break;
	case RTW89_PHY_C2H_RFK_REPORT:
		if (func < ARRAY_SIZE(rtw89_phy_c2h_rfk_report_handler))
			handler = rtw89_phy_c2h_rfk_report_handler[func];
		break;
	case RTW89_PHY_C2H_CLASS_DM:
		if (func == RTW89_PHY_C2H_DM_FUNC_LOWRT_RTY)
			return;
		fallthrough;
	default:
		rtw89_info(rtwdev, "PHY c2h class %d not support\n", class);
		return;
	}
	if (!handler) {
		rtw89_info(rtwdev, "PHY c2h class %d func %d not support\n", class,
			   func);
		return;
	}
	handler(rtwdev, skb, len);
}

int rtw89_phy_rfk_pre_ntfy_and_wait(struct rtw89_dev *rtwdev,
				    enum rtw89_phy_idx phy_idx,
				    unsigned int ms)
{
	int ret;

	rtw89_phy_rfk_report_prep(rtwdev);

	ret = rtw89_fw_h2c_rf_pre_ntfy(rtwdev, phy_idx);
	if (ret)
		return ret;

	return rtw89_phy_rfk_report_wait(rtwdev, "PRE_NTFY", ms);
}
EXPORT_SYMBOL(rtw89_phy_rfk_pre_ntfy_and_wait);

int rtw89_phy_rfk_tssi_and_wait(struct rtw89_dev *rtwdev,
				enum rtw89_phy_idx phy_idx,
				const struct rtw89_chan *chan,
				enum rtw89_tssi_mode tssi_mode,
				unsigned int ms)
{
	int ret;

	rtw89_phy_rfk_report_prep(rtwdev);

	ret = rtw89_fw_h2c_rf_tssi(rtwdev, phy_idx, chan, tssi_mode);
	if (ret)
		return ret;

	return rtw89_phy_rfk_report_wait(rtwdev, "TSSI", ms);
}
EXPORT_SYMBOL(rtw89_phy_rfk_tssi_and_wait);

int rtw89_phy_rfk_iqk_and_wait(struct rtw89_dev *rtwdev,
			       enum rtw89_phy_idx phy_idx,
			       const struct rtw89_chan *chan,
			       unsigned int ms)
{
	int ret;

	rtw89_phy_rfk_report_prep(rtwdev);

	ret = rtw89_fw_h2c_rf_iqk(rtwdev, phy_idx, chan);
	if (ret)
		return ret;

	return rtw89_phy_rfk_report_wait(rtwdev, "IQK", ms);
}
EXPORT_SYMBOL(rtw89_phy_rfk_iqk_and_wait);

int rtw89_phy_rfk_dpk_and_wait(struct rtw89_dev *rtwdev,
			       enum rtw89_phy_idx phy_idx,
			       const struct rtw89_chan *chan,
			       unsigned int ms)
{
	int ret;

	rtw89_phy_rfk_report_prep(rtwdev);

	ret = rtw89_fw_h2c_rf_dpk(rtwdev, phy_idx, chan);
	if (ret)
		return ret;

	return rtw89_phy_rfk_report_wait(rtwdev, "DPK", ms);
}
EXPORT_SYMBOL(rtw89_phy_rfk_dpk_and_wait);

int rtw89_phy_rfk_txgapk_and_wait(struct rtw89_dev *rtwdev,
				  enum rtw89_phy_idx phy_idx,
				  const struct rtw89_chan *chan,
				  unsigned int ms)
{
	int ret;

	rtw89_phy_rfk_report_prep(rtwdev);

	ret = rtw89_fw_h2c_rf_txgapk(rtwdev, phy_idx, chan);
	if (ret)
		return ret;

	return rtw89_phy_rfk_report_wait(rtwdev, "TXGAPK", ms);
}
EXPORT_SYMBOL(rtw89_phy_rfk_txgapk_and_wait);

int rtw89_phy_rfk_dack_and_wait(struct rtw89_dev *rtwdev,
				enum rtw89_phy_idx phy_idx,
				const struct rtw89_chan *chan,
				unsigned int ms)
{
	int ret;

	rtw89_phy_rfk_report_prep(rtwdev);

	ret = rtw89_fw_h2c_rf_dack(rtwdev, phy_idx, chan);
	if (ret)
		return ret;

	return rtw89_phy_rfk_report_wait(rtwdev, "DACK", ms);
}
EXPORT_SYMBOL(rtw89_phy_rfk_dack_and_wait);

int rtw89_phy_rfk_rxdck_and_wait(struct rtw89_dev *rtwdev,
				 enum rtw89_phy_idx phy_idx,
				 const struct rtw89_chan *chan,
				 bool is_chl_k, unsigned int ms)
{
	int ret;

	rtw89_phy_rfk_report_prep(rtwdev);

	ret = rtw89_fw_h2c_rf_rxdck(rtwdev, phy_idx, chan, is_chl_k);
	if (ret)
		return ret;

	return rtw89_phy_rfk_report_wait(rtwdev, "RX_DCK", ms);
}
EXPORT_SYMBOL(rtw89_phy_rfk_rxdck_and_wait);
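/* Illustrative sketch, not from the original sources: every helper above
 * follows the same prep -> H2C -> wait pattern, so a chip-specific RFK
 * sequence reduces to calls like the following. The millisecond budgets
 * are made-up example values and error handling is elided.
 */
#if 0
static void example_rfk_sequence(struct rtw89_dev *rtwdev,
				 enum rtw89_phy_idx phy_idx,
				 const struct rtw89_chan *chan)
{
	rtw89_phy_rfk_pre_ntfy_and_wait(rtwdev, phy_idx, 5);
	rtw89_phy_rfk_iqk_and_wait(rtwdev, phy_idx, chan, 150);
	rtw89_phy_rfk_dpk_and_wait(rtwdev, phy_idx, chan, 150);
}
#endif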
static u32 phy_tssi_get_cck_group(u8 ch)
{
	switch (ch) {
	case 1 ... 2:
		return 0;
	case 3 ... 5:
		return 1;
	case 6 ... 8:
		return 2;
	case 9 ... 11:
		return 3;
	case 12 ... 13:
		return 4;
	case 14:
		return 5;
	}

	return 0;
}

#define PHY_TSSI_EXTRA_GROUP_BIT BIT(31)
#define PHY_TSSI_EXTRA_GROUP(idx) (PHY_TSSI_EXTRA_GROUP_BIT | (idx))
#define PHY_IS_TSSI_EXTRA_GROUP(group) ((group) & PHY_TSSI_EXTRA_GROUP_BIT)
#define PHY_TSSI_EXTRA_GET_GROUP_IDX1(group) \
	((group) & ~PHY_TSSI_EXTRA_GROUP_BIT)
#define PHY_TSSI_EXTRA_GET_GROUP_IDX2(group) \
	(PHY_TSSI_EXTRA_GET_GROUP_IDX1(group) + 1)

static u32 phy_tssi_get_ofdm_group(u8 ch)
{
	switch (ch) {
	case 1 ... 2:
		return 0;
	case 3 ... 5:
		return 1;
	case 6 ... 8:
		return 2;
	case 9 ... 11:
		return 3;
	case 12 ... 14:
		return 4;
	case 36 ... 40:
		return 5;
	case 41 ... 43:
		return PHY_TSSI_EXTRA_GROUP(5);
	case 44 ... 48:
		return 6;
	case 49 ... 51:
		return PHY_TSSI_EXTRA_GROUP(6);
	case 52 ... 56:
		return 7;
	case 57 ... 59:
		return PHY_TSSI_EXTRA_GROUP(7);
	case 60 ... 64:
		return 8;
	case 100 ... 104:
		return 9;
	case 105 ... 107:
		return PHY_TSSI_EXTRA_GROUP(9);
	case 108 ... 112:
		return 10;
	case 113 ... 115:
		return PHY_TSSI_EXTRA_GROUP(10);
	case 116 ... 120:
		return 11;
	case 121 ... 123:
		return PHY_TSSI_EXTRA_GROUP(11);
	case 124 ... 128:
		return 12;
	case 129 ... 131:
		return PHY_TSSI_EXTRA_GROUP(12);
	case 132 ... 136:
		return 13;
	case 137 ... 139:
		return PHY_TSSI_EXTRA_GROUP(13);
	case 140 ... 144:
		return 14;
	case 149 ... 153:
		return 15;
	case 154 ... 156:
		return PHY_TSSI_EXTRA_GROUP(15);
	case 157 ... 161:
		return 16;
	case 162 ... 164:
		return PHY_TSSI_EXTRA_GROUP(16);
	case 165 ... 169:
		return 17;
	case 170 ... 172:
		return PHY_TSSI_EXTRA_GROUP(17);
	case 173 ... 177:
		return 18;
	}

	return 0;
}
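/* Channels that sit between two calibration groups (e.g. 41..43 above)
 * come back as PHY_TSSI_EXTRA_GROUP(idx): bit 31 marks the value so the
 * DE lookup later averages groups idx and idx + 1 instead of reading a
 * single entry.
 */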
static u32 phy_tssi_get_ofdm_group(u8 ch)
{
	switch (ch) {
	case 1 ... 2:
		return 0;
	case 3 ... 5:
		return 1;
	case 6 ... 8:
		return 2;
	case 9 ... 11:
		return 3;
	case 12 ... 14:
		return 4;
	case 36 ... 40:
		return 5;
	case 41 ... 43:
		return PHY_TSSI_EXTRA_GROUP(5);
	case 44 ... 48:
		return 6;
	case 49 ... 51:
		return PHY_TSSI_EXTRA_GROUP(6);
	case 52 ... 56:
		return 7;
	case 57 ... 59:
		return PHY_TSSI_EXTRA_GROUP(7);
	case 60 ... 64:
		return 8;
	case 100 ... 104:
		return 9;
	case 105 ... 107:
		return PHY_TSSI_EXTRA_GROUP(9);
	case 108 ... 112:
		return 10;
	case 113 ... 115:
		return PHY_TSSI_EXTRA_GROUP(10);
	case 116 ... 120:
		return 11;
	case 121 ... 123:
		return PHY_TSSI_EXTRA_GROUP(11);
	case 124 ... 128:
		return 12;
	case 129 ... 131:
		return PHY_TSSI_EXTRA_GROUP(12);
	case 132 ... 136:
		return 13;
	case 137 ... 139:
		return PHY_TSSI_EXTRA_GROUP(13);
	case 140 ... 144:
		return 14;
	case 149 ... 153:
		return 15;
	case 154 ... 156:
		return PHY_TSSI_EXTRA_GROUP(15);
	case 157 ... 161:
		return 16;
	case 162 ... 164:
		return PHY_TSSI_EXTRA_GROUP(16);
	case 165 ... 169:
		return 17;
	case 170 ... 172:
		return PHY_TSSI_EXTRA_GROUP(17);
	case 173 ... 177:
		return 18;
	}

	return 0;
}

static u32 phy_tssi_get_6g_ofdm_group(u8 ch)
{
	switch (ch) {
	case 1 ... 5:
		return 0;
	case 6 ... 8:
		return PHY_TSSI_EXTRA_GROUP(0);
	case 9 ... 13:
		return 1;
	case 14 ... 16:
		return PHY_TSSI_EXTRA_GROUP(1);
	case 17 ... 21:
		return 2;
	case 22 ... 24:
		return PHY_TSSI_EXTRA_GROUP(2);
	case 25 ... 29:
		return 3;
	case 33 ... 37:
		return 4;
	case 38 ... 40:
		return PHY_TSSI_EXTRA_GROUP(4);
	case 41 ... 45:
		return 5;
	case 46 ... 48:
		return PHY_TSSI_EXTRA_GROUP(5);
	case 49 ... 53:
		return 6;
	case 54 ... 56:
		return PHY_TSSI_EXTRA_GROUP(6);
	case 57 ... 61:
		return 7;
	case 65 ... 69:
		return 8;
	case 70 ... 72:
		return PHY_TSSI_EXTRA_GROUP(8);
	case 73 ... 77:
		return 9;
	case 78 ... 80:
		return PHY_TSSI_EXTRA_GROUP(9);
	case 81 ... 85:
		return 10;
	case 86 ... 88:
		return PHY_TSSI_EXTRA_GROUP(10);
	case 89 ... 93:
		return 11;
	case 97 ... 101:
		return 12;
	case 102 ... 104:
		return PHY_TSSI_EXTRA_GROUP(12);
	case 105 ... 109:
		return 13;
	case 110 ... 112:
		return PHY_TSSI_EXTRA_GROUP(13);
	case 113 ... 117:
		return 14;
	case 118 ... 120:
		return PHY_TSSI_EXTRA_GROUP(14);
	case 121 ... 125:
		return 15;
	case 129 ... 133:
		return 16;
	case 134 ... 136:
		return PHY_TSSI_EXTRA_GROUP(16);
	case 137 ... 141:
		return 17;
	case 142 ... 144:
		return PHY_TSSI_EXTRA_GROUP(17);
	case 145 ... 149:
		return 18;
	case 150 ... 152:
		return PHY_TSSI_EXTRA_GROUP(18);
	case 153 ... 157:
		return 19;
	case 161 ... 165:
		return 20;
	case 166 ... 168:
		return PHY_TSSI_EXTRA_GROUP(20);
	case 169 ... 173:
		return 21;
	case 174 ... 176:
		return PHY_TSSI_EXTRA_GROUP(21);
	case 177 ... 181:
		return 22;
	case 182 ... 184:
		return PHY_TSSI_EXTRA_GROUP(22);
	case 185 ... 189:
		return 23;
	case 193 ... 197:
		return 24;
	case 198 ... 200:
		return PHY_TSSI_EXTRA_GROUP(24);
	case 201 ... 205:
		return 25;
	case 206 ... 208:
		return PHY_TSSI_EXTRA_GROUP(25);
	case 209 ... 213:
		return 26;
	case 214 ... 216:
		return PHY_TSSI_EXTRA_GROUP(26);
	case 217 ... 221:
		return 27;
	case 225 ... 229:
		return 28;
	case 230 ... 232:
		return PHY_TSSI_EXTRA_GROUP(28);
	case 233 ... 237:
		return 29;
	case 238 ... 240:
		return PHY_TSSI_EXTRA_GROUP(29);
	case 241 ... 245:
		return 30;
	case 246 ... 248:
		return PHY_TSSI_EXTRA_GROUP(30);
	case 249 ... 253:
		return 31;
	}

	return 0;
}

static u32 phy_tssi_get_trim_group(u8 ch)
{
	switch (ch) {
	case 1 ... 8:
		return 0;
	case 9 ... 14:
		return 1;
	case 36 ... 48:
		return 2;
	case 49 ... 51:
		return PHY_TSSI_EXTRA_GROUP(2);
	case 52 ... 64:
		return 3;
	case 100 ... 112:
		return 4;
	case 113 ... 115:
		return PHY_TSSI_EXTRA_GROUP(4);
	case 116 ... 128:
		return 5;
	case 132 ... 144:
		return 6;
	case 149 ... 177:
		return 7;
	}

	return 0;
}

static u32 phy_tssi_get_6g_trim_group(u8 ch)
{
	switch (ch) {
	case 1 ... 13:
		return 0;
	case 14 ... 16:
		return PHY_TSSI_EXTRA_GROUP(0);
	case 17 ... 29:
		return 1;
	case 33 ... 45:
		return 2;
	case 46 ... 48:
		return PHY_TSSI_EXTRA_GROUP(2);
	case 49 ... 61:
		return 3;
	case 65 ... 77:
		return 4;
	case 78 ... 80:
		return PHY_TSSI_EXTRA_GROUP(4);
	case 81 ... 93:
		return 5;
	case 97 ... 109:
		return 6;
	case 110 ... 112:
		return PHY_TSSI_EXTRA_GROUP(6);
	case 113 ... 125:
		return 7;
	case 129 ... 141:
		return 8;
	case 142 ... 144:
		return PHY_TSSI_EXTRA_GROUP(8);
	case 145 ... 157:
		return 9;
	case 161 ... 173:
		return 10;
	case 174 ... 176:
		return PHY_TSSI_EXTRA_GROUP(10);
	case 177 ... 189:
		return 11;
	case 193 ... 205:
		return 12;
	case 206 ... 208:
		return PHY_TSSI_EXTRA_GROUP(12);
	case 209 ... 221:
		return 13;
	case 225 ... 237:
		return 14;
	case 238 ... 240:
		return PHY_TSSI_EXTRA_GROUP(14);
	case 241 ... 253:
		return 15;
	}

	return 0;
}
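
/* Look up the per-group OFDM DE for a channel; for an "extra" group the
 * result is the arithmetic mean of the two adjacent groups.
 */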
static s8 phy_tssi_get_ofdm_de(struct rtw89_dev *rtwdev,
			       enum rtw89_phy_idx phy,
			       const struct rtw89_chan *chan,
			       enum rtw89_rf_path path)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	enum rtw89_band band = chan->band_type;
	u8 ch = chan->channel;
	u32 gidx_1st;
	u32 gidx_2nd;
	s8 de_1st;
	s8 de_2nd;
	u32 gidx;
	s8 val;

	if (band == RTW89_BAND_6G)
		goto calc_6g;

	gidx = phy_tssi_get_ofdm_group(ch);

	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
		    "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n",
		    path, gidx);

	if (PHY_IS_TSSI_EXTRA_GROUP(gidx)) {
		gidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(gidx);
		gidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(gidx);
		de_1st = tssi_info->tssi_mcs[path][gidx_1st];
		de_2nd = tssi_info->tssi_mcs[path][gidx_2nd];
		val = (de_1st + de_2nd) / 2;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
			    path, val, de_1st, de_2nd);
	} else {
		val = tssi_info->tssi_mcs[path][gidx];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
	}

	return val;

calc_6g:
	gidx = phy_tssi_get_6g_ofdm_group(ch);

	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
		    "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n",
		    path, gidx);

	if (PHY_IS_TSSI_EXTRA_GROUP(gidx)) {
		gidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(gidx);
		gidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(gidx);
		de_1st = tssi_info->tssi_6g_mcs[path][gidx_1st];
		de_2nd = tssi_info->tssi_6g_mcs[path][gidx_2nd];
		val = (de_1st + de_2nd) / 2;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
			    path, val, de_1st, de_2nd);
	} else {
		val = tssi_info->tssi_6g_mcs[path][gidx];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
	}

	return val;
}

static s8 phy_tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev,
				    enum rtw89_phy_idx phy,
				    const struct rtw89_chan *chan,
				    enum rtw89_rf_path path)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	enum rtw89_band band = chan->band_type;
	u8 ch = chan->channel;
	u32 tgidx_1st;
	u32 tgidx_2nd;
	s8 tde_1st;
	s8 tde_2nd;
	u32 tgidx;
	s8 val;

	if (band == RTW89_BAND_6G)
		goto calc_6g;

	tgidx = phy_tssi_get_trim_group(ch);

	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
		    "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
		    path, tgidx);

	if (PHY_IS_TSSI_EXTRA_GROUP(tgidx)) {
		tgidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
		tgidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
		tde_1st = tssi_info->tssi_trim[path][tgidx_1st];
		tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd];
		val = (tde_1st + tde_2nd) / 2;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
			    path, val, tde_1st, tde_2nd);
	} else {
		val = tssi_info->tssi_trim[path][tgidx];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
			    path, val);
	}

	return val;

calc_6g:
	tgidx = phy_tssi_get_6g_trim_group(ch);

	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
		    "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
		    path, tgidx);

	if (PHY_IS_TSSI_EXTRA_GROUP(tgidx)) {
		tgidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
		tgidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
		tde_1st = tssi_info->tssi_trim_6g[path][tgidx_1st];
		tde_2nd = tssi_info->tssi_trim_6g[path][tgidx_2nd];
		val = (tde_1st + tde_2nd) / 2;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
			    path, val, tde_1st, tde_2nd);
	} else {
		val = tssi_info->tssi_trim_6g[path][tgidx];

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
			    path, val);
	}

	return val;
}
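
/* Combine the efuse-programmed DE with the trim offset per RF path and
 * pack the result into the TSSI H2C command; the sums are truncated to
 * their low byte via u32_get_bits(..., 0xff).
 */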
void rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(struct rtw89_dev *rtwdev,
					       enum rtw89_phy_idx phy,
					       const struct rtw89_chan *chan,
					       struct rtw89_h2c_rf_tssi *h2c)
{
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	u8 ch = chan->channel;
	s8 trim_de;
	s8 ofdm_de;
	s8 cck_de;
	u8 gidx;
	s8 val;
	int i;

	rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n",
		    phy, ch);

	for (i = RF_PATH_A; i <= RF_PATH_B; i++) {
		trim_de = phy_tssi_get_ofdm_trim_de(rtwdev, phy, chan, i);
		h2c->curr_tssi_trim_de[i] = trim_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d trim_de=0x%x\n", i, trim_de);

		gidx = phy_tssi_get_cck_group(ch);
		cck_de = tssi_info->tssi_cck[i][gidx];
		val = u32_get_bits(cck_de + trim_de, 0xff);

		h2c->curr_tssi_cck_de[i] = 0x0;
		h2c->curr_tssi_cck_de_20m[i] = val;
		h2c->curr_tssi_cck_de_40m[i] = val;
		h2c->curr_tssi_efuse_cck_de[i] = cck_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d cck_de=0x%x\n", i, cck_de);

		ofdm_de = phy_tssi_get_ofdm_de(rtwdev, phy, chan, i);
		val = u32_get_bits(ofdm_de + trim_de, 0xff);

		h2c->curr_tssi_ofdm_de[i] = 0x0;
		h2c->curr_tssi_ofdm_de_20m[i] = val;
		h2c->curr_tssi_ofdm_de_40m[i] = val;
		h2c->curr_tssi_ofdm_de_80m[i] = val;
		h2c->curr_tssi_ofdm_de_160m[i] = val;
		h2c->curr_tssi_ofdm_de_320m[i] = val;
		h2c->curr_tssi_efuse_ofdm_de[i] = ofdm_de;

		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "[TSSI][TRIM]: path=%d ofdm_de=0x%x\n", i, ofdm_de);
	}
}
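
/* Build the thermal compensation table handed to firmware.  thm_ofst[]
 * appears to be indexed by the signed 7-bit difference from the power-on
 * thermal value: entries 0..63 hold deltas for rising temperature and
 * entries 127..64 hold the negated deltas for falling temperature, before
 * being byte-swapped into ftable[].
 */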
void rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(struct rtw89_dev *rtwdev,
					      enum rtw89_phy_idx phy,
					      const struct rtw89_chan *chan,
					      struct rtw89_h2c_rf_tssi *h2c)
{
	struct rtw89_fw_txpwr_track_cfg *trk = rtwdev->fw.elm_info.txpwr_trk;
	struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
	const s8 *thm_up[RF_PATH_B + 1] = {};
	const s8 *thm_down[RF_PATH_B + 1] = {};
	u8 subband = chan->subband_type;
	s8 thm_ofst[128] = {0};
	u8 thermal;
	u8 path;
	u8 i, j;

	switch (subband) {
	default:
	case RTW89_CH_2G:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_P][0];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_N][0];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_P][0];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_N][0];
		break;
	case RTW89_CH_5G_BAND_1:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][0];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][0];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][0];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][0];
		break;
	case RTW89_CH_5G_BAND_3:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][1];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][1];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][1];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][1];
		break;
	case RTW89_CH_5G_BAND_4:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][2];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][2];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][2];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][2];
		break;
	case RTW89_CH_6G_BAND_IDX0:
	case RTW89_CH_6G_BAND_IDX1:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][0];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][0];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][0];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][0];
		break;
	case RTW89_CH_6G_BAND_IDX2:
	case RTW89_CH_6G_BAND_IDX3:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][1];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][1];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][1];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][1];
		break;
	case RTW89_CH_6G_BAND_IDX4:
	case RTW89_CH_6G_BAND_IDX5:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][2];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][2];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][2];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][2];
		break;
	case RTW89_CH_6G_BAND_IDX6:
	case RTW89_CH_6G_BAND_IDX7:
		thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][3];
		thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][3];
		thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][3];
		thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][3];
		break;
	}

	rtw89_debug(rtwdev, RTW89_DBG_TSSI,
		    "[TSSI] tmeter tbl on subband: %u\n", subband);

	for (path = RF_PATH_A; path <= RF_PATH_B; path++) {
		thermal = tssi_info->thermal[path];
		rtw89_debug(rtwdev, RTW89_DBG_TSSI,
			    "path: %u, pg thermal: 0x%x\n", path, thermal);

		if (thermal == 0xff) {
			h2c->pg_thermal[path] = 0x38;
			memset(h2c->ftable[path], 0, sizeof(h2c->ftable[path]));
			continue;
		}

		h2c->pg_thermal[path] = thermal;

		i = 0;
		for (j = 0; j < 64; j++)
			thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
				      thm_up[path][i++] :
				      thm_up[path][DELTA_SWINGIDX_SIZE - 1];

		i = 1;
		for (j = 127; j >= 64; j--)
			thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
				      -thm_down[path][i++] :
				      -thm_down[path][DELTA_SWINGIDX_SIZE - 1];

		for (i = 0; i < 128; i += 4) {
			h2c->ftable[path][i + 0] = thm_ofst[i + 3];
			h2c->ftable[path][i + 1] = thm_ofst[i + 2];
			h2c->ftable[path][i + 2] = thm_ofst[i + 1];
			h2c->ftable[path][i + 3] = thm_ofst[i + 0];

			rtw89_debug(rtwdev, RTW89_DBG_TSSI,
				    "thm ofst [%x]: %02x %02x %02x %02x\n",
				    i, thm_ofst[i], thm_ofst[i + 1],
				    thm_ofst[i + 2], thm_ofst[i + 3]);
		}
	}
}
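
/* CFO (carrier frequency offset) tracking steers the crystal capacitor
 * code (XCAP).  On RTL8852A/RTL8851B the cap is programmed through PHY
 * registers, on other chips through the XTAL SI interface.
 */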
static u8 rtw89_phy_cfo_get_xcap_reg(struct rtw89_dev *rtwdev, bool sc_xo)
{
	const struct rtw89_xtal_info *xtal = rtwdev->chip->xtal_info;
	u32 reg_mask;

	if (sc_xo)
		reg_mask = xtal->sc_xo_mask;
	else
		reg_mask = xtal->sc_xi_mask;

	return (u8)rtw89_read32_mask(rtwdev, xtal->xcap_reg, reg_mask);
}

static void rtw89_phy_cfo_set_xcap_reg(struct rtw89_dev *rtwdev, bool sc_xo,
				       u8 val)
{
	const struct rtw89_xtal_info *xtal = rtwdev->chip->xtal_info;
	u32 reg_mask;

	if (sc_xo)
		reg_mask = xtal->sc_xo_mask;
	else
		reg_mask = xtal->sc_xi_mask;

	rtw89_write32_mask(rtwdev, xtal->xcap_reg, reg_mask, val);
}

static void rtw89_phy_cfo_set_crystal_cap(struct rtw89_dev *rtwdev,
					  u8 crystal_cap, bool force)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u8 sc_xi_val, sc_xo_val;

	if (!force && cfo->crystal_cap == crystal_cap)
		return;
	if (chip->chip_id == RTL8852A || chip->chip_id == RTL8851B) {
		rtw89_phy_cfo_set_xcap_reg(rtwdev, true, crystal_cap);
		rtw89_phy_cfo_set_xcap_reg(rtwdev, false, crystal_cap);
		sc_xo_val = rtw89_phy_cfo_get_xcap_reg(rtwdev, true);
		sc_xi_val = rtw89_phy_cfo_get_xcap_reg(rtwdev, false);
	} else {
		rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XO,
					crystal_cap, XTAL_SC_XO_MASK);
		rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XI,
					crystal_cap, XTAL_SC_XI_MASK);
		rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XO, &sc_xo_val);
		rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XI, &sc_xi_val);
	}
	cfo->crystal_cap = sc_xi_val;
	cfo->x_cap_ofst = (s8)((int)cfo->crystal_cap - cfo->def_x_cap);

	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Set sc_xi=0x%x\n", sc_xi_val);
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Set sc_xo=0x%x\n", sc_xo_val);
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Get xcap_ofst=%d\n",
		    cfo->x_cap_ofst);
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Set xcap OK\n");
}

static void rtw89_phy_cfo_reset(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	u8 cap;

	cfo->def_x_cap = cfo->crystal_cap_default & B_AX_XTAL_SC_MASK;
	cfo->is_adjust = false;
	if (cfo->crystal_cap == cfo->def_x_cap)
		return;
	cap = cfo->crystal_cap;
	cap += (cap > cfo->def_x_cap ? -1 : 1);
	rtw89_phy_cfo_set_crystal_cap(rtwdev, cap, false);
	rtw89_debug(rtwdev, RTW89_DBG_CFO,
		    "(0x%x) approach to dflt_val=(0x%x)\n", cfo->crystal_cap,
		    cfo->def_x_cap);
}
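
/* Apply digital CFO compensation: rescale the averaged CFO (divide by
 * 625) into the step unit of the DCFO register, fold in the current
 * register value with the sign of the residual CFO, and write it back.
 * CBV cuts of RTL8852A need the value negated.
 */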
static void rtw89_dcfo_comp(struct rtw89_dev *rtwdev, s32 curr_cfo)
{
	const struct rtw89_reg_def *dcfo_comp = rtwdev->chip->dcfo_comp;
	bool is_linked = rtwdev->total_sta_assoc > 0;
	s32 cfo_avg_312;
	s32 dcfo_comp_val;
	int sign;

	if (rtwdev->chip->chip_id == RTL8922A)
		return;

	if (!is_linked) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "DCFO: is_linked=%d\n",
			    is_linked);
		return;
	}
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "DCFO: curr_cfo=%d\n", curr_cfo);
	if (curr_cfo == 0)
		return;
	dcfo_comp_val = rtw89_phy_read32_mask(rtwdev, R_DCFO, B_DCFO);
	sign = curr_cfo > 0 ? 1 : -1;
	cfo_avg_312 = curr_cfo / 625 + sign * dcfo_comp_val;
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "avg_cfo_312=%d step\n", cfo_avg_312);
	if (rtwdev->chip->chip_id == RTL8852A && rtwdev->hal.cv == CHIP_CBV)
		cfo_avg_312 = -cfo_avg_312;
	rtw89_phy_set_phy_regs(rtwdev, dcfo_comp->addr, dcfo_comp->mask,
			       cfo_avg_312);
}

static void rtw89_dcfo_comp_init(struct rtw89_dev *rtwdev)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_cfo_regs *cfo = phy->cfo;

	rtw89_phy_set_phy_regs(rtwdev, cfo->comp_seg0, cfo->valid_0_mask, 1);
	rtw89_phy_set_phy_regs(rtwdev, cfo->comp, cfo->weighting_mask, 8);

	if (chip->chip_gen == RTW89_CHIP_AX) {
		if (chip->cfo_hw_comp) {
			rtw89_write32_mask(rtwdev, R_AX_PWR_UL_CTRL2,
					   B_AX_PWR_UL_CFO_MASK, 0x6);
		} else {
			rtw89_phy_set_phy_regs(rtwdev, R_DCFO, B_DCFO, 1);
			rtw89_write32_clr(rtwdev, R_AX_PWR_UL_CTRL2,
					  B_AX_PWR_UL_CFO_MASK);
		}
	}
}

static void rtw89_phy_cfo_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	struct rtw89_efuse *efuse = &rtwdev->efuse;

	cfo->crystal_cap_default = efuse->xtal_cap & B_AX_XTAL_SC_MASK;
	cfo->crystal_cap = cfo->crystal_cap_default;
	cfo->def_x_cap = cfo->crystal_cap;
	cfo->x_cap_ub = min_t(int, cfo->def_x_cap + CFO_BOUND, 0x7f);
	cfo->x_cap_lb = max_t(int, cfo->def_x_cap - CFO_BOUND, 0x1);
	cfo->is_adjust = false;
	cfo->divergence_lock_en = false;
	cfo->x_cap_ofst = 0;
	cfo->lock_cnt = 0;
	cfo->rtw89_multi_cfo_mode = RTW89_TP_BASED_AVG_MODE;
	cfo->apply_compensation = false;
	cfo->residual_cfo_acc = 0;
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Default xcap=%0x\n",
		    cfo->crystal_cap_default);
	rtw89_phy_cfo_set_crystal_cap(rtwdev, cfo->crystal_cap_default, true);
	rtw89_dcfo_comp_init(rtwdev);
	cfo->cfo_timer_ms = 2000;
	cfo->cfo_trig_by_timer_en = false;
	cfo->phy_cfo_trk_cnt = 0;
	cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL;
	cfo->cfo_ul_ofdma_acc_mode = RTW89_CFO_UL_OFDMA_ACC_ENABLE;
}

static void rtw89_phy_cfo_crystal_cap_adjust(struct rtw89_dev *rtwdev,
					     s32 curr_cfo)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	int crystal_cap = cfo->crystal_cap;
	s32 cfo_abs = abs(curr_cfo);
	int sign;

	if (curr_cfo == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "curr_cfo=0\n");
		return;
	}
	if (!cfo->is_adjust) {
		if (cfo_abs > CFO_TRK_ENABLE_TH)
			cfo->is_adjust = true;
	} else {
		if (cfo_abs <= CFO_TRK_STOP_TH)
			cfo->is_adjust = false;
	}
	if (!cfo->is_adjust) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Stop CFO tracking\n");
		return;
	}
	sign = curr_cfo > 0 ? 1 : -1;
	if (cfo_abs > CFO_TRK_STOP_TH_4)
		crystal_cap += 3 * sign;
	else if (cfo_abs > CFO_TRK_STOP_TH_3)
		crystal_cap += 3 * sign;
	else if (cfo_abs > CFO_TRK_STOP_TH_2)
		crystal_cap += 1 * sign;
	else if (cfo_abs > CFO_TRK_STOP_TH_1)
		crystal_cap += 1 * sign;
	else
		return;

	crystal_cap = clamp(crystal_cap, 0, 127);
	rtw89_phy_cfo_set_crystal_cap(rtwdev, (u8)crystal_cap, false);
	rtw89_debug(rtwdev, RTW89_DBG_CFO,
		    "X_cap{Curr,Default}={0x%x,0x%x}\n",
		    cfo->crystal_cap, cfo->def_x_cap);
}
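
/* Single-station average: accumulate the CFO tail and packet count of the
 * only associated peer, deriving both the plain average and dcfo_avg
 * pre-scaled by the chip's dcfo_comp_sft.
 */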
static s32 rtw89_phy_average_cfo_calc(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	s32 cfo_khz_all = 0;
	s32 cfo_cnt_all = 0;
	s32 cfo_all_avg = 0;
	u8 i;

	if (rtwdev->total_sta_assoc != 1)
		return 0;
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "one_entry_only\n");
	for (i = 0; i < CFO_TRACK_MAX_USER; i++) {
		if (cfo->cfo_cnt[i] == 0)
			continue;
		cfo_khz_all += cfo->cfo_tail[i];
		cfo_cnt_all += cfo->cfo_cnt[i];
		cfo_all_avg = phy_div(cfo_khz_all, cfo_cnt_all);
		cfo->pre_cfo_avg[i] = cfo->cfo_avg[i];
		cfo->dcfo_avg = phy_div(cfo_khz_all << chip->dcfo_comp_sft,
					cfo_cnt_all);
	}
	rtw89_debug(rtwdev, RTW89_DBG_CFO,
		    "CFO track for macid = %d\n", i);
	rtw89_debug(rtwdev, RTW89_DBG_CFO,
		    "Total cfo=%dK, pkt_cnt=%d, avg_cfo=%dK\n",
		    cfo_khz_all, cfo_cnt_all, cfo_all_avg);
	return cfo_all_avg;
}

static s32 rtw89_phy_multi_sta_cfo_calc(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	struct rtw89_traffic_stats *stats = &rtwdev->stats;
	s32 target_cfo = 0;
	s32 cfo_khz_all = 0;
	s32 cfo_khz_all_tp_wgt = 0;
	s32 cfo_avg = 0;
	s32 max_cfo_lb = BIT(31);
	s32 min_cfo_ub = GENMASK(30, 0);
	u16 cfo_cnt_all = 0;
	u8 active_entry_cnt = 0;
	u8 sta_cnt = 0;
	u32 tp_all = 0;
	u8 i;
	u8 cfo_tol = 0;

	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Multi entry cfo_trk\n");
	if (cfo->rtw89_multi_cfo_mode == RTW89_PKT_BASED_AVG_MODE) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Pkt based avg mode\n");
		for (i = 0; i < CFO_TRACK_MAX_USER; i++) {
			if (cfo->cfo_cnt[i] == 0)
				continue;
			cfo_khz_all += cfo->cfo_tail[i];
			cfo_cnt_all += cfo->cfo_cnt[i];
			cfo_avg = phy_div(cfo_khz_all, (s32)cfo_cnt_all);
			rtw89_debug(rtwdev, RTW89_DBG_CFO,
				    "Msta cfo=%d, pkt_cnt=%d, avg_cfo=%d\n",
				    cfo_khz_all, cfo_cnt_all, cfo_avg);
			target_cfo = cfo_avg;
		}
	} else if (cfo->rtw89_multi_cfo_mode == RTW89_ENTRY_BASED_AVG_MODE) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Entry based avg mode\n");
		for (i = 0; i < CFO_TRACK_MAX_USER; i++) {
			if (cfo->cfo_cnt[i] == 0)
				continue;
			cfo->cfo_avg[i] = phy_div(cfo->cfo_tail[i],
						  (s32)cfo->cfo_cnt[i]);
			cfo_khz_all += cfo->cfo_avg[i];
			rtw89_debug(rtwdev, RTW89_DBG_CFO,
				    "Macid=%d, cfo_avg=%d\n", i,
				    cfo->cfo_avg[i]);
		}
		sta_cnt = rtwdev->total_sta_assoc;
		cfo_avg = phy_div(cfo_khz_all, (s32)sta_cnt);
		rtw89_debug(rtwdev, RTW89_DBG_CFO,
			    "Msta cfo_acc=%d, ent_cnt=%d, avg_cfo=%d\n",
			    cfo_khz_all, sta_cnt, cfo_avg);
		target_cfo = cfo_avg;
	} else if (cfo->rtw89_multi_cfo_mode == RTW89_TP_BASED_AVG_MODE) {
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "TP based avg mode\n");
		cfo_tol = cfo->sta_cfo_tolerance;
		for (i = 0; i < CFO_TRACK_MAX_USER; i++) {
			sta_cnt++;
			if (cfo->cfo_cnt[i] != 0) {
				cfo->cfo_avg[i] = phy_div(cfo->cfo_tail[i],
							  (s32)cfo->cfo_cnt[i]);
				active_entry_cnt++;
			} else {
				cfo->cfo_avg[i] = cfo->pre_cfo_avg[i];
			}
			max_cfo_lb = max(cfo->cfo_avg[i] - cfo_tol, max_cfo_lb);
			min_cfo_ub = min(cfo->cfo_avg[i] + cfo_tol, min_cfo_ub);
			cfo_khz_all += cfo->cfo_avg[i];
			/* need tp for each entry */
			rtw89_debug(rtwdev, RTW89_DBG_CFO,
				    "[%d] cfo_avg=%d, tp=tbd\n",
				    i, cfo->cfo_avg[i]);
			if (sta_cnt >= rtwdev->total_sta_assoc)
				break;
		}
		tp_all = stats->rx_throughput; /* need tp for each entry */
		cfo_avg = phy_div(cfo_khz_all_tp_wgt, (s32)tp_all);

		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Assoc sta cnt=%d\n",
			    sta_cnt);
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "Active sta cnt=%d\n",
			    active_entry_cnt);
		rtw89_debug(rtwdev, RTW89_DBG_CFO,
			    "Msta cfo with tp_wgt=%d, avg_cfo=%d\n",
			    cfo_khz_all_tp_wgt, cfo_avg);
		rtw89_debug(rtwdev, RTW89_DBG_CFO, "cfo_lb=%d,cfo_ub=%d\n",
			    max_cfo_lb, min_cfo_ub);
		if (max_cfo_lb <= min_cfo_ub) {
			rtw89_debug(rtwdev, RTW89_DBG_CFO,
				    "cfo win_size=%d\n",
				    min_cfo_ub - max_cfo_lb);
			target_cfo = clamp(cfo_avg, max_cfo_lb, min_cfo_ub);
		} else {
			rtw89_debug(rtwdev, RTW89_DBG_CFO,
				    "No intersection of cfo tolerance windows\n");
			target_cfo = phy_div(cfo_khz_all, (s32)sta_cnt);
		}
		for (i = 0; i < CFO_TRACK_MAX_USER; i++)
			cfo->pre_cfo_avg[i] = cfo->cfo_avg[i];
	}
	rtw89_debug(rtwdev, RTW89_DBG_CFO, "Target cfo=%d\n", target_cfo);
	return target_cfo;
}
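
/* Once a tracking decision is made, the per-macid accumulators are
 * cleared below so the next watchdog period starts from a clean slate.
 */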
avg_cfo=%d\n", 4556 cfo_khz_all, sta_cnt, cfo_avg); 4557 target_cfo = cfo_avg; 4558 } else if (cfo->rtw89_multi_cfo_mode == RTW89_TP_BASED_AVG_MODE) { 4559 rtw89_debug(rtwdev, RTW89_DBG_CFO, "TP based avg mode\n"); 4560 cfo_tol = cfo->sta_cfo_tolerance; 4561 for (i = 0; i < CFO_TRACK_MAX_USER; i++) { 4562 sta_cnt++; 4563 if (cfo->cfo_cnt[i] != 0) { 4564 cfo->cfo_avg[i] = phy_div(cfo->cfo_tail[i], 4565 (s32)cfo->cfo_cnt[i]); 4566 active_entry_cnt++; 4567 } else { 4568 cfo->cfo_avg[i] = cfo->pre_cfo_avg[i]; 4569 } 4570 max_cfo_lb = max(cfo->cfo_avg[i] - cfo_tol, max_cfo_lb); 4571 min_cfo_ub = min(cfo->cfo_avg[i] + cfo_tol, min_cfo_ub); 4572 cfo_khz_all += cfo->cfo_avg[i]; 4573 /* need tp for each entry */ 4574 rtw89_debug(rtwdev, RTW89_DBG_CFO, 4575 "[%d] cfo_avg=%d, tp=tbd\n", 4576 i, cfo->cfo_avg[i]); 4577 if (sta_cnt >= rtwdev->total_sta_assoc) 4578 break; 4579 } 4580 tp_all = stats->rx_throughput; /* need tp for each entry */ 4581 cfo_avg = phy_div(cfo_khz_all_tp_wgt, (s32)tp_all); 4582 4583 rtw89_debug(rtwdev, RTW89_DBG_CFO, "Assoc sta cnt=%d\n", 4584 sta_cnt); 4585 rtw89_debug(rtwdev, RTW89_DBG_CFO, "Active sta cnt=%d\n", 4586 active_entry_cnt); 4587 rtw89_debug(rtwdev, RTW89_DBG_CFO, 4588 "Msta cfo with tp_wgt=%d, avg_cfo=%d\n", 4589 cfo_khz_all_tp_wgt, cfo_avg); 4590 rtw89_debug(rtwdev, RTW89_DBG_CFO, "cfo_lb=%d,cfo_ub=%d\n", 4591 max_cfo_lb, min_cfo_ub); 4592 if (max_cfo_lb <= min_cfo_ub) { 4593 rtw89_debug(rtwdev, RTW89_DBG_CFO, 4594 "cfo win_size=%d\n", 4595 min_cfo_ub - max_cfo_lb); 4596 target_cfo = clamp(cfo_avg, max_cfo_lb, min_cfo_ub); 4597 } else { 4598 rtw89_debug(rtwdev, RTW89_DBG_CFO, 4599 "No intersection of cfo tolerance windows\n"); 4600 target_cfo = phy_div(cfo_khz_all, (s32)sta_cnt); 4601 } 4602 for (i = 0; i < CFO_TRACK_MAX_USER; i++) 4603 cfo->pre_cfo_avg[i] = cfo->cfo_avg[i]; 4604 } 4605 rtw89_debug(rtwdev, RTW89_DBG_CFO, "Target cfo=%d\n", target_cfo); 4606 return target_cfo; 4607 } 4608 4609 static void rtw89_phy_cfo_statistics_reset(struct rtw89_dev *rtwdev) 4610 { 4611 struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking; 4612 4613 memset(&cfo->cfo_tail, 0, sizeof(cfo->cfo_tail)); 4614 memset(&cfo->cfo_cnt, 0, sizeof(cfo->cfo_cnt)); 4615 cfo->packet_count = 0; 4616 cfo->packet_count_pre = 0; 4617 cfo->cfo_avg_pre = 0; 4618 } 4619 4620 static void rtw89_phy_cfo_dm(struct rtw89_dev *rtwdev) 4621 { 4622 struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking; 4623 s32 new_cfo = 0; 4624 bool x_cap_update = false; 4625 u8 pre_x_cap = cfo->crystal_cap; 4626 u8 dcfo_comp_sft = rtwdev->chip->dcfo_comp_sft; 4627 4628 cfo->dcfo_avg = 0; 4629 rtw89_debug(rtwdev, RTW89_DBG_CFO, "CFO:total_sta_assoc=%d\n", 4630 rtwdev->total_sta_assoc); 4631 if (rtwdev->total_sta_assoc == 0) { 4632 rtw89_phy_cfo_reset(rtwdev); 4633 return; 4634 } 4635 if (cfo->packet_count == 0) { 4636 rtw89_debug(rtwdev, RTW89_DBG_CFO, "Pkt cnt = 0\n"); 4637 return; 4638 } 4639 if (cfo->packet_count == cfo->packet_count_pre) { 4640 rtw89_debug(rtwdev, RTW89_DBG_CFO, "Pkt cnt doesn't change\n"); 4641 return; 4642 } 4643 if (rtwdev->total_sta_assoc == 1) 4644 new_cfo = rtw89_phy_average_cfo_calc(rtwdev); 4645 else 4646 new_cfo = rtw89_phy_multi_sta_cfo_calc(rtwdev); 4647 if (cfo->divergence_lock_en) { 4648 cfo->lock_cnt++; 4649 if (cfo->lock_cnt > CFO_PERIOD_CNT) { 4650 cfo->divergence_lock_en = false; 4651 cfo->lock_cnt = 0; 4652 } else { 4653 rtw89_phy_cfo_reset(rtwdev); 4654 } 4655 return; 4656 } 4657 if (cfo->crystal_cap >= cfo->x_cap_ub || 4658 cfo->crystal_cap <= cfo->x_cap_lb) { 4659 
void rtw89_phy_cfo_track_work(struct work_struct *work)
{
	struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
						cfo_track_work.work);
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;

	mutex_lock(&rtwdev->mutex);
	if (!cfo->cfo_trig_by_timer_en)
		goto out;
	rtw89_leave_ps_mode(rtwdev);
	rtw89_phy_cfo_dm(rtwdev);
	ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->cfo_track_work,
				     msecs_to_jiffies(cfo->cfo_timer_ms));
out:
	mutex_unlock(&rtwdev->mutex);
}

static void rtw89_phy_cfo_start_work(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;

	ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->cfo_track_work,
				     msecs_to_jiffies(cfo->cfo_timer_ms));
}

void rtw89_phy_cfo_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	struct rtw89_traffic_stats *stats = &rtwdev->stats;
	bool is_ul_ofdma = false, ofdma_acc_en = false;

	if (stats->rx_tf_periodic > CFO_TF_CNT_TH)
		is_ul_ofdma = true;
	if (cfo->cfo_ul_ofdma_acc_mode == RTW89_CFO_UL_OFDMA_ACC_ENABLE &&
	    is_ul_ofdma)
		ofdma_acc_en = true;

	switch (cfo->phy_cfo_status) {
	case RTW89_PHY_DCFO_STATE_NORMAL:
		if (stats->tx_throughput >= CFO_TP_UPPER) {
			cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_ENHANCE;
			cfo->cfo_trig_by_timer_en = true;
			cfo->cfo_timer_ms = CFO_COMP_PERIOD;
			rtw89_phy_cfo_start_work(rtwdev);
		}
		break;
	case RTW89_PHY_DCFO_STATE_ENHANCE:
		if (stats->tx_throughput <= CFO_TP_LOWER)
			cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL;
		else if (ofdma_acc_en &&
			 cfo->phy_cfo_trk_cnt >= CFO_PERIOD_CNT)
			cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_HOLD;
		else
			cfo->phy_cfo_trk_cnt++;

		if (cfo->phy_cfo_status == RTW89_PHY_DCFO_STATE_NORMAL) {
			cfo->phy_cfo_trk_cnt = 0;
			cfo->cfo_trig_by_timer_en = false;
		}
		break;
	case RTW89_PHY_DCFO_STATE_HOLD:
		if (stats->tx_throughput <= CFO_TP_LOWER) {
			cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL;
			cfo->phy_cfo_trk_cnt = 0;
			cfo->cfo_trig_by_timer_en = false;
		} else {
			cfo->phy_cfo_trk_cnt++;
		}
		break;
	default:
		cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL;
		cfo->phy_cfo_trk_cnt = 0;
		break;
	}
	rtw89_debug(rtwdev, RTW89_DBG_CFO,
		    "[CFO]WatchDog tp=%d,state=%d,timer_en=%d,trk_cnt=%d,thermal=%ld\n",
		    stats->tx_throughput, cfo->phy_cfo_status,
		    cfo->cfo_trig_by_timer_en, cfo->phy_cfo_trk_cnt,
		    ewma_thermal_read(&rtwdev->phystat.avg_thermal[0]));
	if (cfo->cfo_trig_by_timer_en)
		return;
	rtw89_phy_cfo_dm(rtwdev);
}
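
/* RX path hook: accumulate the raw CFO sample of every PPDU per mac_id
 * for the next tracking round.
 */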
void rtw89_phy_cfo_parse(struct rtw89_dev *rtwdev, s16 cfo_val,
			 struct rtw89_rx_phy_ppdu *phy_ppdu)
{
	struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
	u8 macid = phy_ppdu->mac_id;

	if (macid >= CFO_TRACK_MAX_USER) {
		rtw89_warn(rtwdev, "mac_id %d is out of range\n", macid);
		return;
	}

	cfo->cfo_tail[macid] += cfo_val;
	cfo->cfo_cnt[macid]++;
	cfo->packet_count++;
}

void rtw89_phy_ul_tb_assoc(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
						       rtwvif_link->chanctx_idx);
	struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info;

	if (!chip->ul_tb_waveform_ctrl)
		return;

	rtwvif_link->def_tri_idx =
		rtw89_phy_read32_mask(rtwdev, R_DCFO_OPT, B_TXSHAPE_TRIANGULAR_CFG);

	if (chip->chip_id == RTL8852B && rtwdev->hal.cv > CHIP_CBV)
		rtwvif_link->dyn_tb_bedge_en = false;
	else if (chan->band_type >= RTW89_BAND_5G &&
		 chan->band_width >= RTW89_CHANNEL_WIDTH_40)
		rtwvif_link->dyn_tb_bedge_en = true;
	else
		rtwvif_link->dyn_tb_bedge_en = false;

	rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
		    "[ULTB] def_if_bandedge=%d, def_tri_idx=%d\n",
		    ul_tb_info->def_if_bandedge, rtwvif_link->def_tri_idx);
	rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
		    "[ULTB] dyn_tb_bedge_en=%d, dyn_tb_tri_en=%d\n",
		    rtwvif_link->dyn_tb_bedge_en, ul_tb_info->dyn_tb_tri_en);
}

struct rtw89_phy_ul_tb_check_data {
	bool valid;
	bool high_tf_client;
	bool low_tf_client;
	bool dyn_tb_bedge_en;
	u8 def_tri_idx;
};

struct rtw89_phy_power_diff {
	u32 q_00;
	u32 q_11;
	u32 q_matrix_en;
	u32 ultb_1t_norm_160;
	u32 ultb_2t_norm_160;
	u32 com1_norm_1sts;
	u32 com2_resp_1sts_path;
};

static void rtw89_phy_ofdma_power_diff(struct rtw89_dev *rtwdev,
				       struct rtw89_vif_link *rtwvif_link)
{
	static const struct rtw89_phy_power_diff table[2] = {
		{0x0, 0x0, 0x0, 0x0, 0xf4, 0x3, 0x3},
		{0xb50, 0xb50, 0x1, 0xc, 0x0, 0x1, 0x1},
	};
	const struct rtw89_phy_power_diff *param;
	u32 reg;

	if (!rtwdev->chip->ul_tb_pwr_diff)
		return;

	if (rtwvif_link->pwr_diff_en == rtwvif_link->pre_pwr_diff_en) {
		rtwvif_link->pwr_diff_en = false;
		return;
	}

	rtwvif_link->pre_pwr_diff_en = rtwvif_link->pwr_diff_en;
	param = &table[rtwvif_link->pwr_diff_en];

	rtw89_phy_write32_mask(rtwdev, R_Q_MATRIX_00, B_Q_MATRIX_00_REAL,
			       param->q_00);
	rtw89_phy_write32_mask(rtwdev, R_Q_MATRIX_11, B_Q_MATRIX_11_REAL,
			       param->q_11);
	rtw89_phy_write32_mask(rtwdev, R_CUSTOMIZE_Q_MATRIX,
			       B_CUSTOMIZE_Q_MATRIX_EN, param->q_matrix_en);

	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_1T, rtwvif_link->mac_idx);
	rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_1T_NORM_BW160,
			   param->ultb_1t_norm_160);

	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_2T, rtwvif_link->mac_idx);
	rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_2T_NORM_BW160,
			   param->ultb_2t_norm_160);

	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PATH_COM1, rtwvif_link->mac_idx);
	rtw89_write32_mask(rtwdev, reg, B_AX_PATH_COM1_NORM_1STS,
			   param->com1_norm_1sts);

	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PATH_COM2, rtwvif_link->mac_idx);
	rtw89_write32_mask(rtwdev, reg, B_AX_PATH_COM2_RESP_1STS_PATH,
			   param->com2_resp_1sts_path);
}
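
/* Uplink trigger-based (UL TB) tuning: classify an associated station-mode
 * link by how many trigger frames were received in the last period, then
 * apply the waveform/power tweaks below.
 */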
static
void rtw89_phy_ul_tb_ctrl_check(struct rtw89_dev *rtwdev,
				struct rtw89_vif_link *rtwvif_link,
				struct rtw89_phy_ul_tb_check_data *ul_tb_data)
{
	struct rtw89_traffic_stats *stats = &rtwdev->stats;
	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);

	if (rtwvif_link->wifi_role != RTW89_WIFI_ROLE_STATION)
		return;

	if (!vif->cfg.assoc)
		return;

	if (rtwdev->chip->ul_tb_waveform_ctrl) {
		if (stats->rx_tf_periodic > UL_TB_TF_CNT_L2H_TH)
			ul_tb_data->high_tf_client = true;
		else if (stats->rx_tf_periodic < UL_TB_TF_CNT_H2L_TH)
			ul_tb_data->low_tf_client = true;

		ul_tb_data->valid = true;
		ul_tb_data->def_tri_idx = rtwvif_link->def_tri_idx;
		ul_tb_data->dyn_tb_bedge_en = rtwvif_link->dyn_tb_bedge_en;
	}

	rtw89_phy_ofdma_power_diff(rtwdev, rtwvif_link);
}

static void rtw89_phy_ul_tb_waveform_ctrl(struct rtw89_dev *rtwdev,
					  struct rtw89_phy_ul_tb_check_data *ul_tb_data)
{
	struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info;

	if (!rtwdev->chip->ul_tb_waveform_ctrl)
		return;

	if (ul_tb_data->dyn_tb_bedge_en) {
		if (ul_tb_data->high_tf_client) {
			rtw89_phy_write32_mask(rtwdev, R_BANDEDGE, B_BANDEDGE_EN, 0);
			rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
				    "[ULTB] Turn off if_bandedge\n");
		} else if (ul_tb_data->low_tf_client) {
			rtw89_phy_write32_mask(rtwdev, R_BANDEDGE, B_BANDEDGE_EN,
					       ul_tb_info->def_if_bandedge);
			rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
				    "[ULTB] Set to default if_bandedge = %d\n",
				    ul_tb_info->def_if_bandedge);
		}
	}

	if (ul_tb_info->dyn_tb_tri_en) {
		if (ul_tb_data->high_tf_client) {
			rtw89_phy_write32_mask(rtwdev, R_DCFO_OPT,
					       B_TXSHAPE_TRIANGULAR_CFG, 0);
			rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
				    "[ULTB] Turn off Tx triangle\n");
		} else if (ul_tb_data->low_tf_client) {
			rtw89_phy_write32_mask(rtwdev, R_DCFO_OPT,
					       B_TXSHAPE_TRIANGULAR_CFG,
					       ul_tb_data->def_tri_idx);
			rtw89_debug(rtwdev, RTW89_DBG_UL_TB,
				    "[ULTB] Set to default tx_shap_idx = %d\n",
				    ul_tb_data->def_tri_idx);
		}
	}
}

void rtw89_phy_ul_tb_ctrl_track(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_phy_ul_tb_check_data ul_tb_data = {};
	struct rtw89_vif_link *rtwvif_link;
	struct rtw89_vif *rtwvif;
	unsigned int link_id;

	if (!chip->ul_tb_waveform_ctrl && !chip->ul_tb_pwr_diff)
		return;

	if (rtwdev->total_sta_assoc != 1)
		return;

	rtw89_for_each_rtwvif(rtwdev, rtwvif)
		rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
			rtw89_phy_ul_tb_ctrl_check(rtwdev, rtwvif_link, &ul_tb_data);

	if (!ul_tb_data.valid)
		return;

	rtw89_phy_ul_tb_waveform_ctrl(rtwdev, &ul_tb_data);
}

static void rtw89_phy_ul_tb_info_init(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info;

	if (!chip->ul_tb_waveform_ctrl)
		return;

	ul_tb_info->dyn_tb_tri_en = true;
	ul_tb_info->def_if_bandedge =
		rtw89_phy_read32_mask(rtwdev, R_BANDEDGE, B_BANDEDGE_EN);
}

static
void rtw89_phy_antdiv_sts_instance_reset(struct rtw89_antdiv_stats *antdiv_sts)
{
	ewma_rssi_init(&antdiv_sts->cck_rssi_avg);
	ewma_rssi_init(&antdiv_sts->ofdm_rssi_avg);
	ewma_rssi_init(&antdiv_sts->non_legacy_rssi_avg);
	antdiv_sts->pkt_cnt_cck = 0;
	antdiv_sts->pkt_cnt_ofdm = 0;
	antdiv_sts->pkt_cnt_non_legacy = 0;
	antdiv_sts->evm = 0;
}
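
/* Antenna-diversity statistics are kept per rate class (CCK, legacy OFDM,
 * HT and above) so the antenna comparison can use the class that carried
 * most of the traffic.
 */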
static void rtw89_phy_antdiv_sts_instance_add(struct rtw89_dev *rtwdev,
					      struct rtw89_rx_phy_ppdu *phy_ppdu,
					      struct rtw89_antdiv_stats *stats)
{
	if (rtw89_get_data_rate_mode(rtwdev, phy_ppdu->rate) == DATA_RATE_MODE_NON_HT) {
		if (phy_ppdu->rate < RTW89_HW_RATE_OFDM6) {
			ewma_rssi_add(&stats->cck_rssi_avg, phy_ppdu->rssi_avg);
			stats->pkt_cnt_cck++;
		} else {
			ewma_rssi_add(&stats->ofdm_rssi_avg, phy_ppdu->rssi_avg);
			stats->pkt_cnt_ofdm++;
			stats->evm += phy_ppdu->ofdm.evm_min;
		}
	} else {
		ewma_rssi_add(&stats->non_legacy_rssi_avg, phy_ppdu->rssi_avg);
		stats->pkt_cnt_non_legacy++;
		stats->evm += phy_ppdu->ofdm.evm_min;
	}
}

static u8 rtw89_phy_antdiv_sts_instance_get_rssi(struct rtw89_antdiv_stats *stats)
{
	if (stats->pkt_cnt_non_legacy >= stats->pkt_cnt_cck &&
	    stats->pkt_cnt_non_legacy >= stats->pkt_cnt_ofdm)
		return ewma_rssi_read(&stats->non_legacy_rssi_avg);
	else if (stats->pkt_cnt_ofdm >= stats->pkt_cnt_cck &&
		 stats->pkt_cnt_ofdm >= stats->pkt_cnt_non_legacy)
		return ewma_rssi_read(&stats->ofdm_rssi_avg);
	else
		return ewma_rssi_read(&stats->cck_rssi_avg);
}

static u8 rtw89_phy_antdiv_sts_instance_get_evm(struct rtw89_antdiv_stats *stats)
{
	return phy_div(stats->evm, stats->pkt_cnt_non_legacy + stats->pkt_cnt_ofdm);
}

void rtw89_phy_antdiv_parse(struct rtw89_dev *rtwdev,
			    struct rtw89_rx_phy_ppdu *phy_ppdu)
{
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
	struct rtw89_hal *hal = &rtwdev->hal;

	if (!hal->ant_diversity || hal->ant_diversity_fixed)
		return;

	rtw89_phy_antdiv_sts_instance_add(rtwdev, phy_ppdu, &antdiv->target_stats);

	if (!antdiv->get_stats)
		return;

	if (hal->antenna_rx == RF_A)
		rtw89_phy_antdiv_sts_instance_add(rtwdev, phy_ppdu, &antdiv->main_stats);
	else if (hal->antenna_rx == RF_B)
		rtw89_phy_antdiv_sts_instance_add(rtwdev, phy_ppdu, &antdiv->aux_stats);
}

static void rtw89_phy_antdiv_reg_init(struct rtw89_dev *rtwdev)
{
	rtw89_phy_write32_idx(rtwdev, R_P0_TRSW, B_P0_ANT_TRAIN_EN,
			      0x0, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_TRSW, B_P0_TX_ANT_SEL,
			      0x0, RTW89_PHY_0);

	rtw89_phy_write32_idx(rtwdev, R_P0_ANT_SW, B_P0_TRSW_TX_EXTEND,
			      0x0, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANT_SW, B_P0_HW_ANTSW_DIS_BY_GNT_BT,
			      0x0, RTW89_PHY_0);

	rtw89_phy_write32_idx(rtwdev, R_P0_TRSW, B_P0_BT_FORCE_ANTIDX_EN,
			      0x0, RTW89_PHY_0);

	rtw89_phy_write32_idx(rtwdev, R_RFSW_CTRL_ANT0_BASE, B_RFSW_CTRL_ANT_MAPPING,
			      0x0100, RTW89_PHY_0);

	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_BTG_TRX,
			      0x1, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_HW_CTRL,
			      0x0, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_SW_2G,
			      0x0, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_SW_5G,
			      0x0, RTW89_PHY_0);
}

static void rtw89_phy_antdiv_sts_reset(struct rtw89_dev *rtwdev)
{
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;

	rtw89_phy_antdiv_sts_instance_reset(&antdiv->target_stats);
	rtw89_phy_antdiv_sts_instance_reset(&antdiv->main_stats);
	rtw89_phy_antdiv_sts_instance_reset(&antdiv->aux_stats);
}
static void rtw89_phy_antdiv_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
	struct rtw89_hal *hal = &rtwdev->hal;

	if (!hal->ant_diversity)
		return;

	antdiv->get_stats = false;
	antdiv->rssi_pre = 0;
	rtw89_phy_antdiv_sts_reset(rtwdev);
	rtw89_phy_antdiv_reg_init(rtwdev);
}

static void rtw89_phy_thermal_protect(struct rtw89_dev *rtwdev)
{
	struct rtw89_phy_stat *phystat = &rtwdev->phystat;
	struct rtw89_hal *hal = &rtwdev->hal;
	u8 th_max = phystat->last_thermal_max;
	u8 lv = hal->thermal_prot_lv;

	if (!hal->thermal_prot_th ||
	    (hal->disabled_dm_bitmap & BIT(RTW89_DM_THERMAL_PROTECT)))
		return;

	if (th_max > hal->thermal_prot_th && lv < RTW89_THERMAL_PROT_LV_MAX)
		lv++;
	else if (th_max < hal->thermal_prot_th - 2 && lv > 0)
		lv--;
	else
		return;

	hal->thermal_prot_lv = lv;

	rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, "thermal protection lv=%d\n", lv);

	rtw89_fw_h2c_tx_duty(rtwdev, hal->thermal_prot_lv);
}

static void rtw89_phy_stat_thermal_update(struct rtw89_dev *rtwdev)
{
	struct rtw89_phy_stat *phystat = &rtwdev->phystat;
	u8 th, th_max = 0;
	int i;

	for (i = 0; i < rtwdev->chip->rf_path_num; i++) {
		th = rtw89_chip_get_thermal(rtwdev, i);
		if (th)
			ewma_thermal_add(&phystat->avg_thermal[i], th);

		rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
			    "path(%d) thermal cur=%u avg=%ld", i, th,
			    ewma_thermal_read(&phystat->avg_thermal[i]));

		th_max = max(th_max, th);
	}

	phystat->last_thermal_max = th_max;
}

struct rtw89_phy_iter_rssi_data {
	struct rtw89_dev *rtwdev;
	struct rtw89_phy_ch_info *ch_info;
	bool rssi_changed;
};

static
void __rtw89_phy_stat_rssi_update_iter(struct rtw89_sta_link *rtwsta_link,
				       struct rtw89_phy_iter_rssi_data *rssi_data)
{
	struct rtw89_phy_ch_info *ch_info = rssi_data->ch_info;
	unsigned long rssi_curr;

	rssi_curr = ewma_rssi_read(&rtwsta_link->avg_rssi);

	if (rssi_curr < ch_info->rssi_min) {
		ch_info->rssi_min = rssi_curr;
		ch_info->rssi_min_macid = rtwsta_link->mac_id;
	}

	if (rtwsta_link->prev_rssi == 0) {
		rtwsta_link->prev_rssi = rssi_curr;
	} else if (abs((int)rtwsta_link->prev_rssi - (int)rssi_curr) >
		   (3 << RSSI_FACTOR)) {
		rtwsta_link->prev_rssi = rssi_curr;
		rssi_data->rssi_changed = true;
	}
}

static void rtw89_phy_stat_rssi_update_iter(void *data,
					    struct ieee80211_sta *sta)
{
	struct rtw89_phy_iter_rssi_data *rssi_data =
		(struct rtw89_phy_iter_rssi_data *)data;
	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
	struct rtw89_sta_link *rtwsta_link;
	unsigned int link_id;

	rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id)
		__rtw89_phy_stat_rssi_update_iter(rtwsta_link, rssi_data);
}

static void rtw89_phy_stat_rssi_update(struct rtw89_dev *rtwdev)
{
	struct rtw89_phy_iter_rssi_data rssi_data = {0};

	rssi_data.rtwdev = rtwdev;
	rssi_data.ch_info = &rtwdev->ch_info;
	rssi_data.ch_info->rssi_min = U8_MAX;
	ieee80211_iterate_stations_atomic(rtwdev->hw,
					  rtw89_phy_stat_rssi_update_iter,
					  &rssi_data);
	if (rssi_data.rssi_changed)
		rtw89_btc_ntfy_wl_sta(rtwdev);
}
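
/* Statistics bookkeeping: rtw89_phy_stat_init() seeds the averages once at
 * DM init; rtw89_phy_stat_track() then refreshes them from the periodic
 * watchdog and rotates the packet counters.
 */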
static void rtw89_phy_stat_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_phy_stat *phystat = &rtwdev->phystat;
	int i;

	for (i = 0; i < rtwdev->chip->rf_path_num; i++)
		ewma_thermal_init(&phystat->avg_thermal[i]);

	rtw89_phy_stat_thermal_update(rtwdev);

	memset(&phystat->cur_pkt_stat, 0, sizeof(phystat->cur_pkt_stat));
	memset(&phystat->last_pkt_stat, 0, sizeof(phystat->last_pkt_stat));

	ewma_rssi_init(&phystat->bcn_rssi);

	rtwdev->hal.thermal_prot_lv = 0;
}

void rtw89_phy_stat_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_phy_stat *phystat = &rtwdev->phystat;

	rtw89_phy_stat_thermal_update(rtwdev);
	rtw89_phy_thermal_protect(rtwdev);
	rtw89_phy_stat_rssi_update(rtwdev);

	phystat->last_pkt_stat = phystat->cur_pkt_stat;
	memset(&phystat->cur_pkt_stat, 0, sizeof(phystat->cur_pkt_stat));
}

static u16 rtw89_phy_ccx_us_to_idx(struct rtw89_dev *rtwdev, u32 time_us)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;

	return time_us >> (ilog2(CCX_US_BASE_RATIO) + env->ccx_unit_idx);
}

static u32 rtw89_phy_ccx_idx_to_us(struct rtw89_dev *rtwdev, u16 idx)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;

	return idx << (ilog2(CCX_US_BASE_RATIO) + env->ccx_unit_idx);
}

static void rtw89_phy_ccx_top_setting_init(struct rtw89_dev *rtwdev)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;

	env->ccx_manual_ctrl = false;
	env->ccx_ongoing = false;
	env->ccx_rac_lv = RTW89_RAC_RELEASE;
	env->ccx_period = 0;
	env->ccx_unit_idx = RTW89_CCX_32_US;

	rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->en_mask, 1);
	rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->trig_opt_mask, 1);
	rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 1);
	rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->edcca_opt_mask,
			       RTW89_CCX_EDCCA_BW20_0);
}

static u16 rtw89_phy_ccx_get_report(struct rtw89_dev *rtwdev, u16 report,
				    u16 score)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	u32 numer = 0;
	u16 ret = 0;

	numer = report * score + (env->ccx_period >> 1);
	if (env->ccx_period)
		ret = numer / env->ccx_period;

	return ret >= score ? score - 1 : ret;
}
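
/* rtw89_phy_ccx_get_report() rescales a raw busy counter into a
 * 0..score-1 fraction of the measurement period with round-to-nearest:
 * ret = (report * score + period / 2) / period.  For example, report=500
 * out of period=1000 with score=PERCENT yields 50.
 */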
static void rtw89_phy_ccx_ms_to_period_unit(struct rtw89_dev *rtwdev,
					    u16 time_ms, u32 *period,
					    u32 *unit_idx)
{
	u32 idx;
	u8 quotient;

	if (time_ms >= CCX_MAX_PERIOD)
		time_ms = CCX_MAX_PERIOD;

	quotient = CCX_MAX_PERIOD_UNIT * time_ms / CCX_MAX_PERIOD;

	if (quotient < 4)
		idx = RTW89_CCX_4_US;
	else if (quotient < 8)
		idx = RTW89_CCX_8_US;
	else if (quotient < 16)
		idx = RTW89_CCX_16_US;
	else
		idx = RTW89_CCX_32_US;

	*unit_idx = idx;
	*period = (time_ms * MS_TO_4US_RATIO) >> idx;

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "[Trigger Time] period:%d, unit_idx:%d\n",
		    *period, *unit_idx);
}

static void rtw89_phy_ccx_racing_release(struct rtw89_dev *rtwdev)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "lv:(%d)->(0)\n", env->ccx_rac_lv);

	env->ccx_ongoing = false;
	env->ccx_rac_lv = RTW89_RAC_RELEASE;
	env->ifs_clm_app = RTW89_IFS_CLM_BACKGROUND;
}
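
/* The four IFS_CLM histogram regions form a geometric ladder: region 1
 * tops out at ifs_th0_us and each following region is ifs_th_times wider,
 * with low[i] starting one index above high[i-1].
 */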
static bool rtw89_phy_ifs_clm_th_update_check(struct rtw89_dev *rtwdev,
					      struct rtw89_ccx_para_info *para)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	bool is_update = env->ifs_clm_app != para->ifs_clm_app;
	u8 i = 0;
	u16 *ifs_th_l = env->ifs_clm_th_l;
	u16 *ifs_th_h = env->ifs_clm_th_h;
	u32 ifs_th0_us = 0, ifs_th_times = 0;
	u32 ifs_th_h_us[RTW89_IFS_CLM_NUM] = {0};

	if (!is_update)
		goto ifs_update_finished;

	switch (para->ifs_clm_app) {
	case RTW89_IFS_CLM_INIT:
	case RTW89_IFS_CLM_BACKGROUND:
	case RTW89_IFS_CLM_ACS:
	case RTW89_IFS_CLM_DBG:
	case RTW89_IFS_CLM_DIG:
	case RTW89_IFS_CLM_TDMA_DIG:
		ifs_th0_us = IFS_CLM_TH0_UPPER;
		ifs_th_times = IFS_CLM_TH_MUL;
		break;
	case RTW89_IFS_CLM_DBG_MANUAL:
		ifs_th0_us = para->ifs_clm_manual_th0;
		ifs_th_times = para->ifs_clm_manual_th_times;
		break;
	default:
		break;
	}

	/* Set sampling threshold for 4 different regions, unit in idx_cnt.
	 * low[i] = high[i-1] + 1
	 * high[i] = high[i-1] * ifs_th_times
	 */
	ifs_th_l[IFS_CLM_TH_START_IDX] = 0;
	ifs_th_h_us[IFS_CLM_TH_START_IDX] = ifs_th0_us;
	ifs_th_h[IFS_CLM_TH_START_IDX] = rtw89_phy_ccx_us_to_idx(rtwdev,
								 ifs_th0_us);
	for (i = 1; i < RTW89_IFS_CLM_NUM; i++) {
		ifs_th_l[i] = ifs_th_h[i - 1] + 1;
		ifs_th_h_us[i] = ifs_th_h_us[i - 1] * ifs_th_times;
		ifs_th_h[i] = rtw89_phy_ccx_us_to_idx(rtwdev, ifs_th_h_us[i]);
	}

ifs_update_finished:
	if (!is_update)
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "No need to update IFS_TH\n");

	return is_update;
}

static void rtw89_phy_ifs_clm_set_th_reg(struct rtw89_dev *rtwdev)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;
	u8 i = 0;

	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_th_l_mask,
			       env->ifs_clm_th_l[0]);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_th_l_mask,
			       env->ifs_clm_th_l[1]);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_th_l_mask,
			       env->ifs_clm_th_l[2]);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_th_l_mask,
			       env->ifs_clm_th_l[3]);

	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_th_h_mask,
			       env->ifs_clm_th_h[0]);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_th_h_mask,
			       env->ifs_clm_th_h[1]);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_th_h_mask,
			       env->ifs_clm_th_h[2]);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_th_h_mask,
			       env->ifs_clm_th_h[3]);

	for (i = 0; i < RTW89_IFS_CLM_NUM; i++)
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "Update IFS_T%d_th{low, high} : {%d, %d}\n",
			    i + 1, env->ifs_clm_th_l[i], env->ifs_clm_th_h[i]);
}

static void rtw89_phy_ifs_clm_setting_init(struct rtw89_dev *rtwdev)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;
	struct rtw89_ccx_para_info para = {0};

	env->ifs_clm_app = RTW89_IFS_CLM_BACKGROUND;
	env->ifs_clm_mntr_time = 0;

	para.ifs_clm_app = RTW89_IFS_CLM_INIT;
	if (rtw89_phy_ifs_clm_th_update_check(rtwdev, &para))
		rtw89_phy_ifs_clm_set_th_reg(rtwdev);

	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_collect_en_mask, true);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_en_mask, true);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_en_mask, true);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_en_mask, true);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_en_mask, true);
}

static int rtw89_phy_ccx_racing_ctrl(struct rtw89_dev *rtwdev,
				     enum rtw89_env_racing_lv level)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	int ret = 0;

	if (level >= RTW89_RAC_MAX_NUM) {
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "[WARNING] Wrong LV=%d\n", level);
		return -EINVAL;
	}

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "ccx_ongoing=%d, level:(%d)->(%d)\n", env->ccx_ongoing,
		    env->ccx_rac_lv, level);

	if (env->ccx_ongoing) {
		if (level <= env->ccx_rac_lv)
			ret = -EINVAL;
		else
			env->ccx_ongoing = false;
	}

	if (ret == 0)
		env->ccx_rac_lv = level;

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "ccx racing success=%d\n",
		    !ret);

	return ret;
}
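
/* Restart a measurement round: pulse the counter-clear and measurement
 * trigger bits (write 0, then 1) so the hardware starts a fresh IFS_CLM
 * collection.
 */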
static void rtw89_phy_ccx_trigger(struct rtw89_dev *rtwdev)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;

	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_clm_cnt_clear_mask, 0);
	rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 0);
	rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_clm_cnt_clear_mask, 1);
	rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 1);

	env->ccx_ongoing = true;
}

static void rtw89_phy_ifs_clm_get_utility(struct rtw89_dev *rtwdev)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	u8 i = 0;
	u32 res = 0;

	env->ifs_clm_tx_ratio =
		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_tx, PERCENT);
	env->ifs_clm_edcca_excl_cca_ratio =
		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_edcca_excl_cca,
					 PERCENT);
	env->ifs_clm_cck_fa_ratio =
		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_cckfa, PERCENT);
	env->ifs_clm_ofdm_fa_ratio =
		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_ofdmfa, PERCENT);
	env->ifs_clm_cck_cca_excl_fa_ratio =
		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_cckcca_excl_fa,
					 PERCENT);
	env->ifs_clm_ofdm_cca_excl_fa_ratio =
		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_ofdmcca_excl_fa,
					 PERCENT);
	env->ifs_clm_cck_fa_permil =
		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_cckfa, PERMIL);
	env->ifs_clm_ofdm_fa_permil =
		rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_ofdmfa, PERMIL);

	for (i = 0; i < RTW89_IFS_CLM_NUM; i++) {
		if (env->ifs_clm_his[i] > ENV_MNTR_IFSCLM_HIS_MAX) {
			env->ifs_clm_ifs_avg[i] = ENV_MNTR_FAIL_DWORD;
		} else {
			env->ifs_clm_ifs_avg[i] =
				rtw89_phy_ccx_idx_to_us(rtwdev,
							env->ifs_clm_avg[i]);
		}

		res = rtw89_phy_ccx_idx_to_us(rtwdev, env->ifs_clm_cca[i]);
		res += env->ifs_clm_his[i] >> 1;
		if (env->ifs_clm_his[i])
			res /= env->ifs_clm_his[i];
		else
			res = 0;
		env->ifs_clm_cca_avg[i] = res;
	}

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM ratio {Tx, EDCCA_exclu_cca} = {%d, %d}\n",
		    env->ifs_clm_tx_ratio, env->ifs_clm_edcca_excl_cca_ratio);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM FA ratio {CCK, OFDM} = {%d, %d}\n",
		    env->ifs_clm_cck_fa_ratio, env->ifs_clm_ofdm_fa_ratio);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM FA permil {CCK, OFDM} = {%d, %d}\n",
		    env->ifs_clm_cck_fa_permil, env->ifs_clm_ofdm_fa_permil);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM CCA_exclu_FA ratio {CCK, OFDM} = {%d, %d}\n",
		    env->ifs_clm_cck_cca_excl_fa_ratio,
		    env->ifs_clm_ofdm_cca_excl_fa_ratio);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "Time:[his, ifs_avg(us), cca_avg(us)]\n");
	for (i = 0; i < RTW89_IFS_CLM_NUM; i++)
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "T%d:[%d, %d, %d]\n",
			    i + 1, env->ifs_clm_his[i], env->ifs_clm_ifs_avg[i],
			    env->ifs_clm_cca_avg[i]);
}

static bool rtw89_phy_ifs_clm_get_result(struct rtw89_dev *rtwdev)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;
	u8 i = 0;

	if (rtw89_phy_read32_mask(rtwdev, ccx->ifs_total_addr,
				  ccx->ifs_cnt_done_mask) == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "Get IFS_CLM report Fail\n");
		return false;
	}

	env->ifs_clm_tx =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_tx_cnt_addr,
				      ccx->ifs_clm_tx_cnt_msk);
	env->ifs_clm_edcca_excl_cca =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_tx_cnt_addr,
				      ccx->ifs_clm_edcca_excl_cca_fa_mask);
	env->ifs_clm_cckcca_excl_fa =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_cca_addr,
				      ccx->ifs_clm_cckcca_excl_fa_mask);
	env->ifs_clm_ofdmcca_excl_fa =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_cca_addr,
				      ccx->ifs_clm_ofdmcca_excl_fa_mask);
	env->ifs_clm_cckfa =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_fa_addr,
				      ccx->ifs_clm_cck_fa_mask);
	env->ifs_clm_ofdmfa =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_fa_addr,
				      ccx->ifs_clm_ofdm_fa_mask);

	env->ifs_clm_his[0] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr,
				      ccx->ifs_t1_his_mask);
	env->ifs_clm_his[1] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr,
				      ccx->ifs_t2_his_mask);
	env->ifs_clm_his[2] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr,
				      ccx->ifs_t3_his_mask);
	env->ifs_clm_his[3] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr,
				      ccx->ifs_t4_his_mask);

	env->ifs_clm_avg[0] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_l_addr,
				      ccx->ifs_t1_avg_mask);
	env->ifs_clm_avg[1] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_l_addr,
				      ccx->ifs_t2_avg_mask);
	env->ifs_clm_avg[2] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_h_addr,
				      ccx->ifs_t3_avg_mask);
	env->ifs_clm_avg[3] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_h_addr,
				      ccx->ifs_t4_avg_mask);

	env->ifs_clm_cca[0] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_l_addr,
				      ccx->ifs_t1_cca_mask);
	env->ifs_clm_cca[1] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_l_addr,
				      ccx->ifs_t2_cca_mask);
	env->ifs_clm_cca[2] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_h_addr,
				      ccx->ifs_t3_cca_mask);
	env->ifs_clm_cca[3] =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_h_addr,
				      ccx->ifs_t4_cca_mask);

	env->ifs_clm_total_ifs =
		rtw89_phy_read32_mask(rtwdev, ccx->ifs_total_addr,
				      ccx->ifs_total_mask);

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "IFS-CLM total_ifs = %d\n",
		    env->ifs_clm_total_ifs);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "{Tx, EDCCA_exclu_cca} = {%d, %d}\n",
		    env->ifs_clm_tx, env->ifs_clm_edcca_excl_cca);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM FA{CCK, OFDM} = {%d, %d}\n",
		    env->ifs_clm_cckfa, env->ifs_clm_ofdmfa);
	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "IFS-CLM CCA_exclu_FA{CCK, OFDM} = {%d, %d}\n",
		    env->ifs_clm_cckcca_excl_fa, env->ifs_clm_ofdmcca_excl_fa);

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "Time:[his, avg, cca]\n");
	for (i = 0; i < RTW89_IFS_CLM_NUM; i++)
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "T%d:[%d, %d, %d]\n", i + 1, env->ifs_clm_his[i],
			    env->ifs_clm_avg[i], env->ifs_clm_cca[i]);

	rtw89_phy_ifs_clm_get_utility(rtwdev);

	return true;
}
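
/* Program a new IFS_CLM measurement: validate the racing level, convert
 * the monitor time into a hardware period/unit pair, and refresh the
 * region thresholds if the application context changed.
 */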
static int rtw89_phy_ifs_clm_set(struct rtw89_dev *rtwdev,
				 struct rtw89_ccx_para_info *para)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;
	u32 period = 0;
	u32 unit_idx = 0;

	if (para->mntr_time == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "[WARN] MNTR_TIME is 0\n");
		return -EINVAL;
	}

	if (rtw89_phy_ccx_racing_ctrl(rtwdev, para->rac_lv))
		return -EINVAL;

	if (para->mntr_time != env->ifs_clm_mntr_time) {
		rtw89_phy_ccx_ms_to_period_unit(rtwdev, para->mntr_time,
						&period, &unit_idx);
		rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr,
				       ccx->ifs_clm_period_mask, period);
		rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr,
				       ccx->ifs_clm_cnt_unit_mask,
				       unit_idx);

		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "Update IFS-CLM time ((%d)) -> ((%d))\n",
			    env->ifs_clm_mntr_time, para->mntr_time);

		env->ifs_clm_mntr_time = para->mntr_time;
		env->ccx_period = (u16)period;
		env->ccx_unit_idx = (u8)unit_idx;
	}

	if (rtw89_phy_ifs_clm_th_update_check(rtwdev, para)) {
		env->ifs_clm_app = para->ifs_clm_app;
		rtw89_phy_ifs_clm_set_th_reg(rtwdev);
	}

	return 0;
}
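
/* Environment-monitor watchdog: harvest the previous IFS_CLM round,
 * release the racing lock, then re-arm a 1900 ms background measurement
 * and trigger it if the parameters were accepted.
 */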
static int rtw89_phy_ifs_clm_set(struct rtw89_dev *rtwdev,
				 struct rtw89_ccx_para_info *para)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;
	u32 period = 0;
	u32 unit_idx = 0;

	if (para->mntr_time == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "[WARN] MNTR_TIME is 0\n");
		return -EINVAL;
	}

	if (rtw89_phy_ccx_racing_ctrl(rtwdev, para->rac_lv))
		return -EINVAL;

	if (para->mntr_time != env->ifs_clm_mntr_time) {
		rtw89_phy_ccx_ms_to_period_unit(rtwdev, para->mntr_time,
						&period, &unit_idx);
		rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr,
				       ccx->ifs_clm_period_mask, period);
		rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr,
				       ccx->ifs_clm_cnt_unit_mask,
				       unit_idx);

		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "Update IFS-CLM time ((%d)) -> ((%d))\n",
			    env->ifs_clm_mntr_time, para->mntr_time);

		env->ifs_clm_mntr_time = para->mntr_time;
		env->ccx_period = (u16)period;
		env->ccx_unit_idx = (u8)unit_idx;
	}

	if (rtw89_phy_ifs_clm_th_update_check(rtwdev, para)) {
		env->ifs_clm_app = para->ifs_clm_app;
		rtw89_phy_ifs_clm_set_th_reg(rtwdev);
	}

	return 0;
}

void rtw89_phy_env_monitor_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	struct rtw89_ccx_para_info para = {0};
	u8 chk_result = RTW89_PHY_ENV_MON_CCX_FAIL;

	env->ccx_watchdog_result = RTW89_PHY_ENV_MON_CCX_FAIL;
	if (env->ccx_manual_ctrl) {
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "CCX in manual ctrl\n");
		return;
	}

	/* only ifs_clm for now */
	if (rtw89_phy_ifs_clm_get_result(rtwdev))
		env->ccx_watchdog_result |= RTW89_PHY_ENV_MON_IFS_CLM;

	rtw89_phy_ccx_racing_release(rtwdev);
	para.mntr_time = 1900;
	para.rac_lv = RTW89_RAC_LV_1;
	para.ifs_clm_app = RTW89_IFS_CLM_BACKGROUND;

	if (rtw89_phy_ifs_clm_set(rtwdev, &para) == 0)
		chk_result |= RTW89_PHY_ENV_MON_IFS_CLM;
	if (chk_result)
		rtw89_phy_ccx_trigger(rtwdev);

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "get_result=0x%x, chk_result:0x%x\n",
		    env->ccx_watchdog_result, chk_result);
}

static bool rtw89_physts_ie_page_valid(enum rtw89_phy_status_bitmap *ie_page)
{
	if (*ie_page >= RTW89_PHYSTS_BITMAP_NUM ||
	    *ie_page == RTW89_RSVD_9)
		return false;
	else if (*ie_page > RTW89_RSVD_9)
		*ie_page -= 1;

	return true;
}

static u32 rtw89_phy_get_ie_bitmap_addr(enum rtw89_phy_status_bitmap ie_page)
{
	static const u8 ie_page_shift = 2;

	return R_PHY_STS_BITMAP_ADDR_START + (ie_page << ie_page_shift);
}

static u32 rtw89_physts_get_ie_bitmap(struct rtw89_dev *rtwdev,
				      enum rtw89_phy_status_bitmap ie_page,
				      enum rtw89_phy_idx phy_idx)
{
	u32 addr;

	if (!rtw89_physts_ie_page_valid(&ie_page))
		return 0;

	addr = rtw89_phy_get_ie_bitmap_addr(ie_page);

	return rtw89_phy_read32_idx(rtwdev, addr, MASKDWORD, phy_idx);
}

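/* Each PHY-status IE page is a 32-bit enable bitmap located at
 * R_PHY_STS_BITMAP_ADDR_START + page * 4; writes on RTL8852A are
 * limited to the bits in B_PHY_STS_BITMAP_MSK_52A.
 */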
static void rtw89_physts_set_ie_bitmap(struct rtw89_dev *rtwdev,
				       enum rtw89_phy_status_bitmap ie_page,
				       u32 val, enum rtw89_phy_idx phy_idx)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 addr;

	if (!rtw89_physts_ie_page_valid(&ie_page))
		return;

	if (chip->chip_id == RTL8852A)
		val &= B_PHY_STS_BITMAP_MSK_52A;

	addr = rtw89_phy_get_ie_bitmap_addr(ie_page);
	rtw89_phy_write32_idx(rtwdev, addr, MASKDWORD, val, phy_idx);
}

static void rtw89_physts_enable_ie_bitmap(struct rtw89_dev *rtwdev,
					  enum rtw89_phy_status_bitmap bitmap,
					  enum rtw89_phy_status_ie_type ie,
					  bool enable, enum rtw89_phy_idx phy_idx)
{
	u32 val = rtw89_physts_get_ie_bitmap(rtwdev, bitmap, phy_idx);

	if (enable)
		val |= BIT(ie);
	else
		val &= ~BIT(ie);

	rtw89_physts_set_ie_bitmap(rtwdev, bitmap, val, phy_idx);
}

static void rtw89_physts_enable_fail_report(struct rtw89_dev *rtwdev,
					    bool enable,
					    enum rtw89_phy_idx phy_idx)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	const struct rtw89_physts_regs *physts = phy->physts;

	if (enable) {
		rtw89_phy_write32_idx_clr(rtwdev, physts->setting_addr,
					  physts->dis_trigger_fail_mask, phy_idx);
		rtw89_phy_write32_idx_clr(rtwdev, physts->setting_addr,
					  physts->dis_trigger_brk_mask, phy_idx);
	} else {
		rtw89_phy_write32_idx_set(rtwdev, physts->setting_addr,
					  physts->dis_trigger_fail_mask, phy_idx);
		rtw89_phy_write32_idx_set(rtwdev, physts->setting_addr,
					  physts->dis_trigger_brk_mask, phy_idx);
	}
}

static void __rtw89_physts_parsing_init(struct rtw89_dev *rtwdev,
					enum rtw89_phy_idx phy_idx)
{
	u8 i;

	rtw89_physts_enable_fail_report(rtwdev, false, phy_idx);

	for (i = 0; i < RTW89_PHYSTS_BITMAP_NUM; i++) {
		if (i >= RTW89_CCK_PKT)
			rtw89_physts_enable_ie_bitmap(rtwdev, i,
						      RTW89_PHYSTS_IE09_FTR_0,
						      true, phy_idx);
		if ((i >= RTW89_CCK_BRK && i <= RTW89_VHT_MU) ||
		    (i >= RTW89_RSVD_9 && i <= RTW89_CCK_PKT))
			continue;
		rtw89_physts_enable_ie_bitmap(rtwdev, i,
					      RTW89_PHYSTS_IE24_OFDM_TD_PATH_A,
					      true, phy_idx);
	}
	rtw89_physts_enable_ie_bitmap(rtwdev, RTW89_VHT_PKT,
				      RTW89_PHYSTS_IE13_DL_MU_DEF, true, phy_idx);
	rtw89_physts_enable_ie_bitmap(rtwdev, RTW89_HE_PKT,
				      RTW89_PHYSTS_IE13_DL_MU_DEF, true, phy_idx);

	/* force IE01 for channel index, only channel field is valid */
	rtw89_physts_enable_ie_bitmap(rtwdev, RTW89_CCK_PKT,
				      RTW89_PHYSTS_IE01_CMN_OFDM, true, phy_idx);
}

static void rtw89_physts_parsing_init(struct rtw89_dev *rtwdev)
{
	__rtw89_physts_parsing_init(rtwdev, RTW89_PHY_0);
	if (rtwdev->dbcc_en)
		__rtw89_physts_parsing_init(rtwdev, RTW89_PHY_1);
}

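/* Read one LNA/TIA gain table from hardware: each raw entry is a
 * signed 4-bit delta that is sign-extended and added onto a nominal
 * gain base which steps by DIG_GAIN per index.
 */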
static void rtw89_phy_dig_read_gain_table(struct rtw89_dev *rtwdev, int type)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_dig_info *dig = &rtwdev->dig;
	const struct rtw89_phy_dig_gain_cfg *cfg;
	const char *msg;
	u8 i;
	s8 gain_base;
	s8 *gain_arr;
	u32 tmp;

	switch (type) {
	case RTW89_DIG_GAIN_LNA_G:
		gain_arr = dig->lna_gain_g;
		gain_base = LNA0_GAIN;
		cfg = chip->dig_table->cfg_lna_g;
		msg = "lna_gain_g";
		break;
	case RTW89_DIG_GAIN_TIA_G:
		gain_arr = dig->tia_gain_g;
		gain_base = TIA0_GAIN_G;
		cfg = chip->dig_table->cfg_tia_g;
		msg = "tia_gain_g";
		break;
	case RTW89_DIG_GAIN_LNA_A:
		gain_arr = dig->lna_gain_a;
		gain_base = LNA0_GAIN;
		cfg = chip->dig_table->cfg_lna_a;
		msg = "lna_gain_a";
		break;
	case RTW89_DIG_GAIN_TIA_A:
		gain_arr = dig->tia_gain_a;
		gain_base = TIA0_GAIN_A;
		cfg = chip->dig_table->cfg_tia_a;
		msg = "tia_gain_a";
		break;
	default:
		return;
	}

	for (i = 0; i < cfg->size; i++) {
		tmp = rtw89_phy_read32_mask(rtwdev, cfg->table[i].addr,
					    cfg->table[i].mask);
		tmp >>= DIG_GAIN_SHIFT;
		gain_arr[i] = sign_extend32(tmp, U4_MAX_BIT) + gain_base;
		gain_base += DIG_GAIN;

		rtw89_debug(rtwdev, RTW89_DBG_DIG, "%s[%d]=%d\n",
			    msg, i, gain_arr[i]);
	}
}

static void rtw89_phy_dig_update_gain_para(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	u32 tmp;
	u8 i;

	if (!rtwdev->hal.support_igi)
		return;

	tmp = rtw89_phy_read32_mask(rtwdev, R_PATH0_IB_PKPW,
				    B_PATH0_IB_PKPW_MSK);
	dig->ib_pkpwr = sign_extend32(tmp >> DIG_GAIN_SHIFT, U8_MAX_BIT);
	dig->ib_pbk = rtw89_phy_read32_mask(rtwdev, R_PATH0_IB_PBK,
					    B_PATH0_IB_PBK_MSK);
	rtw89_debug(rtwdev, RTW89_DBG_DIG, "ib_pkpwr=%d, ib_pbk=%d\n",
		    dig->ib_pkpwr, dig->ib_pbk);

	for (i = RTW89_DIG_GAIN_LNA_G; i < RTW89_DIG_GAIN_MAX; i++)
		rtw89_phy_dig_read_gain_table(rtwdev, i);
}

static const u8 rssi_nolink = 22;
static const u8 igi_rssi_th[IGI_RSSI_TH_NUM] = {68, 84, 90, 98, 104};
static const u16 fa_th_2g[FA_TH_NUM] = {22, 44, 66, 88};
static const u16 fa_th_5g[FA_TH_NUM] = {4, 8, 12, 16};
static const u16 fa_th_nolink[FA_TH_NUM] = {196, 352, 440, 528};

static void rtw89_phy_dig_update_rssi_info(struct rtw89_dev *rtwdev)
{
	struct rtw89_phy_ch_info *ch_info = &rtwdev->ch_info;
	struct rtw89_dig_info *dig = &rtwdev->dig;
	bool is_linked = rtwdev->total_sta_assoc > 0;

	if (is_linked) {
		dig->igi_rssi = ch_info->rssi_min >> 1;
	} else {
		rtw89_debug(rtwdev, RTW89_DBG_DIG, "RSSI update : NO Link\n");
		dig->igi_rssi = rssi_nolink;
	}
}

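/* Select the band-dependent DIG inputs: G-band vs A-band gain tables
 * and the false-alarm threshold set, which falls back to the no-link
 * thresholds when no station is associated.
 */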
static void rtw89_phy_dig_update_para(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
	bool is_linked = rtwdev->total_sta_assoc > 0;
	const u16 *fa_th_src = NULL;

	switch (chan->band_type) {
	case RTW89_BAND_2G:
		dig->lna_gain = dig->lna_gain_g;
		dig->tia_gain = dig->tia_gain_g;
		fa_th_src = is_linked ? fa_th_2g : fa_th_nolink;
		dig->force_gaincode_idx_en = false;
		dig->dyn_pd_th_en = true;
		break;
	case RTW89_BAND_5G:
	default:
		dig->lna_gain = dig->lna_gain_a;
		dig->tia_gain = dig->tia_gain_a;
		fa_th_src = is_linked ? fa_th_5g : fa_th_nolink;
		dig->force_gaincode_idx_en = true;
		dig->dyn_pd_th_en = true;
		break;
	}
	memcpy(dig->fa_th, fa_th_src, sizeof(dig->fa_th));
	memcpy(dig->igi_rssi_th, igi_rssi_th, sizeof(dig->igi_rssi_th));
}

static const u8 pd_low_th_offset = 16, dynamic_igi_min = 0x20;
static const u8 igi_max_performance_mode = 0x5a;
static const u8 dynamic_pd_threshold_max;

static void rtw89_phy_dig_para_reset(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;

	dig->cur_gaincode.lna_idx = LNA_IDX_MAX;
	dig->cur_gaincode.tia_idx = TIA_IDX_MAX;
	dig->cur_gaincode.rxb_idx = RXB_IDX_MAX;
	dig->force_gaincode.lna_idx = LNA_IDX_MAX;
	dig->force_gaincode.tia_idx = TIA_IDX_MAX;
	dig->force_gaincode.rxb_idx = RXB_IDX_MAX;

	dig->dyn_igi_max = igi_max_performance_mode;
	dig->dyn_igi_min = dynamic_igi_min;
	dig->dyn_pd_th_max = dynamic_pd_threshold_max;
	dig->pd_low_th_ofst = pd_low_th_offset;
	dig->is_linked_pre = false;
}

static void rtw89_phy_dig_init(struct rtw89_dev *rtwdev)
{
	rtw89_phy_dig_update_gain_para(rtwdev);
	rtw89_phy_dig_reset(rtwdev);
}

static u8 rtw89_phy_dig_lna_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	u8 lna_idx;

	if (rssi < dig->igi_rssi_th[0])
		lna_idx = RTW89_DIG_GAIN_LNA_IDX6;
	else if (rssi < dig->igi_rssi_th[1])
		lna_idx = RTW89_DIG_GAIN_LNA_IDX5;
	else if (rssi < dig->igi_rssi_th[2])
		lna_idx = RTW89_DIG_GAIN_LNA_IDX4;
	else if (rssi < dig->igi_rssi_th[3])
		lna_idx = RTW89_DIG_GAIN_LNA_IDX3;
	else if (rssi < dig->igi_rssi_th[4])
		lna_idx = RTW89_DIG_GAIN_LNA_IDX2;
	else
		lna_idx = RTW89_DIG_GAIN_LNA_IDX1;

	return lna_idx;
}

static u8 rtw89_phy_dig_tia_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	u8 tia_idx;

	if (rssi < dig->igi_rssi_th[0])
		tia_idx = RTW89_DIG_GAIN_TIA_IDX1;
	else
		tia_idx = RTW89_DIG_GAIN_TIA_IDX0;

	return tia_idx;
}

#define IB_PBK_BASE 110
#define WB_RSSI_BASE 10
static u8 rtw89_phy_dig_rxb_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi,
					struct rtw89_agc_gaincode_set *set)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	s8 lna_gain = dig->lna_gain[set->lna_idx];
	s8 tia_gain = dig->tia_gain[set->tia_idx];
	s32 wb_rssi = rssi + lna_gain + tia_gain;
	s32 rxb_idx_tmp = IB_PBK_BASE + WB_RSSI_BASE;
	u8 rxb_idx;

	rxb_idx_tmp += dig->ib_pkpwr - dig->ib_pbk - wb_rssi;
	rxb_idx = clamp_t(s32, rxb_idx_tmp, RXB_IDX_MIN, RXB_IDX_MAX);

	rtw89_debug(rtwdev, RTW89_DBG_DIG, "wb_rssi=%03d, rxb_idx_tmp=%03d\n",
		    wb_rssi, rxb_idx_tmp);

	return rxb_idx;
}

static void rtw89_phy_dig_gaincode_by_rssi(struct rtw89_dev *rtwdev, u8 rssi,
					   struct rtw89_agc_gaincode_set *set)
{
	set->lna_idx = rtw89_phy_dig_lna_idx_by_rssi(rtwdev, rssi);
	set->tia_idx = rtw89_phy_dig_tia_idx_by_rssi(rtwdev, rssi);
	set->rxb_idx = rtw89_phy_dig_rxb_idx_by_rssi(rtwdev, rssi, set);

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "final_rssi=%03d, (lna,tia,rab)=(%d,%d,%02d)\n",
		    rssi, set->lna_idx, set->tia_idx, set->rxb_idx);
}

#define IGI_OFFSET_MAX 25
#define IGI_OFFSET_MUL 2

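/* Derive an IGI offset from the measured CCK+OFDM false-alarm ratio:
 * the sum is bucketed against the per-band fa_th[] table into a noisy
 * level, the offset grows by noisy_lv * IGI_OFFSET_MUL and is capped
 * at IGI_OFFSET_MAX; a quiet environment with a small previous offset
 * resets it to zero.
 */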
static void rtw89_phy_dig_igi_offset_by_env(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	enum rtw89_dig_noisy_level noisy_lv;
	u8 igi_offset = dig->fa_rssi_ofst;
	u16 fa_ratio = 0;

	fa_ratio = env->ifs_clm_cck_fa_permil + env->ifs_clm_ofdm_fa_permil;

	if (fa_ratio < dig->fa_th[0])
		noisy_lv = RTW89_DIG_NOISY_LEVEL0;
	else if (fa_ratio < dig->fa_th[1])
		noisy_lv = RTW89_DIG_NOISY_LEVEL1;
	else if (fa_ratio < dig->fa_th[2])
		noisy_lv = RTW89_DIG_NOISY_LEVEL2;
	else if (fa_ratio < dig->fa_th[3])
		noisy_lv = RTW89_DIG_NOISY_LEVEL3;
	else
		noisy_lv = RTW89_DIG_NOISY_LEVEL_MAX;

	if (noisy_lv == RTW89_DIG_NOISY_LEVEL0 && igi_offset < 2)
		igi_offset = 0;
	else
		igi_offset += noisy_lv * IGI_OFFSET_MUL;

	igi_offset = min_t(u8, igi_offset, IGI_OFFSET_MAX);
	dig->fa_rssi_ofst = igi_offset;

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "fa_th: [+6 (%d) +4 (%d) +2 (%d) 0 (%d) -2 ]\n",
		    dig->fa_th[3], dig->fa_th[2], dig->fa_th[1], dig->fa_th[0]);

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "fa(CCK,OFDM,ALL)=(%d,%d,%d)%%, noisy_lv=%d, ofst=%d\n",
		    env->ifs_clm_cck_fa_permil, env->ifs_clm_ofdm_fa_permil,
		    env->ifs_clm_cck_fa_permil + env->ifs_clm_ofdm_fa_permil,
		    noisy_lv, igi_offset);
}

static void rtw89_phy_dig_set_lna_idx(struct rtw89_dev *rtwdev, u8 lna_idx)
{
	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;

	rtw89_phy_write32_mask(rtwdev, dig_regs->p0_lna_init.addr,
			       dig_regs->p0_lna_init.mask, lna_idx);
	rtw89_phy_write32_mask(rtwdev, dig_regs->p1_lna_init.addr,
			       dig_regs->p1_lna_init.mask, lna_idx);
}

static void rtw89_phy_dig_set_tia_idx(struct rtw89_dev *rtwdev, u8 tia_idx)
{
	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;

	rtw89_phy_write32_mask(rtwdev, dig_regs->p0_tia_init.addr,
			       dig_regs->p0_tia_init.mask, tia_idx);
	rtw89_phy_write32_mask(rtwdev, dig_regs->p1_tia_init.addr,
			       dig_regs->p1_tia_init.mask, tia_idx);
}

static void rtw89_phy_dig_set_rxb_idx(struct rtw89_dev *rtwdev, u8 rxb_idx)
{
	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;

	rtw89_phy_write32_mask(rtwdev, dig_regs->p0_rxb_init.addr,
			       dig_regs->p0_rxb_init.mask, rxb_idx);
	rtw89_phy_write32_mask(rtwdev, dig_regs->p1_rxb_init.addr,
			       dig_regs->p1_rxb_init.mask, rxb_idx);
}

static void rtw89_phy_dig_set_igi_cr(struct rtw89_dev *rtwdev,
				     const struct rtw89_agc_gaincode_set set)
{
	if (!rtwdev->hal.support_igi)
		return;

	rtw89_phy_dig_set_lna_idx(rtwdev, set.lna_idx);
	rtw89_phy_dig_set_tia_idx(rtwdev, set.tia_idx);
	rtw89_phy_dig_set_rxb_idx(rtwdev, set.rxb_idx);

	rtw89_debug(rtwdev, RTW89_DBG_DIG, "Set (lna,tia,rxb)=((%d,%d,%02d))\n",
		    set.lna_idx, set.tia_idx, set.rxb_idx);
}

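/* Let the sub-band digital AGC follow the packet-based AGC result on
 * both the primary and secondary 20 MHz segments of each path; this is
 * turned on from rtw89_phy_dig() when the adjusted IGI exceeds
 * dyn_pd_th_max.
 */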
static void rtw89_phy_dig_sdagc_follow_pagc_config(struct rtw89_dev *rtwdev,
						   bool enable)
{
	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;

	rtw89_phy_write32_mask(rtwdev, dig_regs->p0_p20_pagcugc_en.addr,
			       dig_regs->p0_p20_pagcugc_en.mask, enable);
	rtw89_phy_write32_mask(rtwdev, dig_regs->p0_s20_pagcugc_en.addr,
			       dig_regs->p0_s20_pagcugc_en.mask, enable);
	rtw89_phy_write32_mask(rtwdev, dig_regs->p1_p20_pagcugc_en.addr,
			       dig_regs->p1_p20_pagcugc_en.mask, enable);
	rtw89_phy_write32_mask(rtwdev, dig_regs->p1_s20_pagcugc_en.addr,
			       dig_regs->p1_s20_pagcugc_en.mask, enable);

	rtw89_debug(rtwdev, RTW89_DBG_DIG, "sdagc_follow_pagc=%d\n", enable);
}

static void rtw89_phy_dig_config_igi(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;

	if (!rtwdev->hal.support_igi)
		return;

	if (dig->force_gaincode_idx_en) {
		rtw89_phy_dig_set_igi_cr(rtwdev, dig->force_gaincode);
		rtw89_debug(rtwdev, RTW89_DBG_DIG,
			    "Force gaincode index enabled.\n");
	} else {
		rtw89_phy_dig_gaincode_by_rssi(rtwdev, dig->igi_fa_rssi,
					       &dig->cur_gaincode);
		rtw89_phy_dig_set_igi_cr(rtwdev, dig->cur_gaincode);
	}
}

static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev, u8 rssi,
				    bool enable)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
	enum rtw89_bandwidth cbw = chan->band_width;
	struct rtw89_dig_info *dig = &rtwdev->dig;
	u8 final_rssi = 0, under_region = dig->pd_low_th_ofst;
	u8 ofdm_cca_th;
	s8 cck_cca_th;
	u32 pd_val = 0;

	if (rtwdev->chip->chip_gen == RTW89_CHIP_AX)
		under_region += PD_TH_SB_FLTR_CMP_VAL;

	switch (cbw) {
	case RTW89_CHANNEL_WIDTH_40:
		under_region += PD_TH_BW40_CMP_VAL;
		break;
	case RTW89_CHANNEL_WIDTH_80:
		under_region += PD_TH_BW80_CMP_VAL;
		break;
	case RTW89_CHANNEL_WIDTH_160:
		under_region += PD_TH_BW160_CMP_VAL;
		break;
	case RTW89_CHANNEL_WIDTH_20:
		fallthrough;
	default:
		under_region += PD_TH_BW20_CMP_VAL;
		break;
	}

	dig->dyn_pd_th_max = dig->igi_rssi;

	final_rssi = min_t(u8, rssi, dig->igi_rssi);
	ofdm_cca_th = clamp_t(u8, final_rssi, PD_TH_MIN_RSSI + under_region,
			      PD_TH_MAX_RSSI + under_region);

	if (enable) {
		pd_val = (ofdm_cca_th - under_region - PD_TH_MIN_RSSI) >> 1;
		rtw89_debug(rtwdev, RTW89_DBG_DIG,
			    "igi=%d, ofdm_ccaTH=%d, backoff=%d, PD_low=%d\n",
			    final_rssi, ofdm_cca_th, under_region, pd_val);
	} else {
		rtw89_debug(rtwdev, RTW89_DBG_DIG,
			    "Dynamic PD th disabled, Set PD_low_bd=0\n");
	}

	rtw89_phy_write32_mask(rtwdev, dig_regs->seg0_pd_reg,
			       dig_regs->pd_lower_bound_mask, pd_val);
	rtw89_phy_write32_mask(rtwdev, dig_regs->seg0_pd_reg,
			       dig_regs->pd_spatial_reuse_en, enable);

	if (!rtwdev->hal.support_cckpd)
		return;

	cck_cca_th = max_t(s8, final_rssi - under_region, CCKPD_TH_MIN_RSSI);
	pd_val = (u32)(cck_cca_th - IGI_RSSI_MAX);

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "igi=%d, cck_ccaTH=%d, backoff=%d, cck_PD_low=((%d))dB\n",
		    final_rssi, cck_cca_th, under_region, pd_val);

	rtw89_phy_write32_mask(rtwdev, dig_regs->bmode_pd_reg,
			       dig_regs->bmode_cca_rssi_limit_en, enable);
	rtw89_phy_write32_mask(rtwdev, dig_regs->bmode_pd_lower_bound_reg,
			       dig_regs->bmode_rssi_nocca_low_th_mask, pd_val);
}

void rtw89_phy_dig_reset(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;

	dig->bypass_dig = false;
	rtw89_phy_dig_para_reset(rtwdev);
	rtw89_phy_dig_set_igi_cr(rtwdev, dig->force_gaincode);
	rtw89_phy_dig_dyn_pd_th(rtwdev, rssi_nolink, false);
	rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, false);
	rtw89_phy_dig_update_para(rtwdev);
}

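/* Periodic DIG update: refresh the RSSI snapshot, re-derive the IGI
 * window and false-alarm offset, program the resulting gaincode, and
 * update the dynamic packet-detection threshold accordingly.
 */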
#define IGI_RSSI_MIN 10
#define ABS_IGI_MIN 0xc
void rtw89_phy_dig(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	bool is_linked = rtwdev->total_sta_assoc > 0;
	u8 igi_min;

	if (unlikely(dig->bypass_dig)) {
		dig->bypass_dig = false;
		return;
	}

	rtw89_phy_dig_update_rssi_info(rtwdev);

	if (!dig->is_linked_pre && is_linked) {
		rtw89_debug(rtwdev, RTW89_DBG_DIG, "First connected\n");
		rtw89_phy_dig_update_para(rtwdev);
		dig->igi_fa_rssi = dig->igi_rssi;
	} else if (dig->is_linked_pre && !is_linked) {
		rtw89_debug(rtwdev, RTW89_DBG_DIG, "First disconnected\n");
		rtw89_phy_dig_update_para(rtwdev);
		dig->igi_fa_rssi = dig->igi_rssi;
	}
	dig->is_linked_pre = is_linked;

	rtw89_phy_dig_igi_offset_by_env(rtwdev);

	igi_min = max_t(int, dig->igi_rssi - IGI_RSSI_MIN, 0);
	dig->dyn_igi_max = min(igi_min + IGI_OFFSET_MAX, igi_max_performance_mode);
	dig->dyn_igi_min = max(igi_min, ABS_IGI_MIN);

	if (dig->dyn_igi_max >= dig->dyn_igi_min) {
		dig->igi_fa_rssi += dig->fa_rssi_ofst;
		dig->igi_fa_rssi = clamp(dig->igi_fa_rssi, dig->dyn_igi_min,
					 dig->dyn_igi_max);
	} else {
		dig->igi_fa_rssi = dig->dyn_igi_max;
	}

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "rssi=%03d, dyn_joint(max,min)=(%d,%d), final_rssi=%d\n",
		    dig->igi_rssi, dig->dyn_igi_max, dig->dyn_igi_min,
		    dig->igi_fa_rssi);

	rtw89_phy_dig_config_igi(rtwdev);

	rtw89_phy_dig_dyn_pd_th(rtwdev, dig->igi_fa_rssi, dig->dyn_pd_th_en);

	if (dig->dyn_pd_th_en && dig->igi_fa_rssi > dig->dyn_pd_th_max)
		rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, true);
	else
		rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, false);
}

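/* TX path diversity: compare the EWMA RSSI of paths A and B for a
 * station and, when one path leads by more than
 * RTW89_TX_DIV_RSSI_RAW_TH, steer transmission onto it by updating the
 * CMAC table and the RF-mode muxes.
 */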
static void __rtw89_phy_tx_path_div_sta_iter(struct rtw89_dev *rtwdev,
					     struct rtw89_sta_link *rtwsta_link)
{
	struct rtw89_hal *hal = &rtwdev->hal;
	u8 rssi_a, rssi_b;
	u32 candidate;

	rssi_a = ewma_rssi_read(&rtwsta_link->rssi[RF_PATH_A]);
	rssi_b = ewma_rssi_read(&rtwsta_link->rssi[RF_PATH_B]);

	if (rssi_a > rssi_b + RTW89_TX_DIV_RSSI_RAW_TH)
		candidate = RF_A;
	else if (rssi_b > rssi_a + RTW89_TX_DIV_RSSI_RAW_TH)
		candidate = RF_B;
	else
		return;

	if (hal->antenna_tx == candidate)
		return;

	hal->antenna_tx = candidate;
	rtw89_fw_h2c_txpath_cmac_tbl(rtwdev, rtwsta_link);

	if (hal->antenna_tx == RF_A) {
		rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE, B_P0_RFMODE_MUX, 0x12);
		rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE, B_P1_RFMODE_MUX, 0x11);
	} else if (hal->antenna_tx == RF_B) {
		rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE, B_P0_RFMODE_MUX, 0x11);
		rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE, B_P1_RFMODE_MUX, 0x12);
	}
}

static void rtw89_phy_tx_path_div_sta_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
	struct rtw89_dev *rtwdev = rtwsta->rtwdev;
	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
	struct rtw89_vif_link *rtwvif_link;
	struct rtw89_sta_link *rtwsta_link;
	unsigned int link_id;
	bool *done = data;

	if (WARN(ieee80211_vif_is_mld(vif), "MLD mix path_div\n"))
		return;

	if (sta->tdls)
		return;

	if (*done)
		return;

	rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id) {
		rtwvif_link = rtwsta_link->rtwvif_link;
		if (rtwvif_link->wifi_role != RTW89_WIFI_ROLE_STATION)
			continue;

		*done = true;
		__rtw89_phy_tx_path_div_sta_iter(rtwdev, rtwsta_link);
		return;
	}
}

void rtw89_phy_tx_path_div_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_hal *hal = &rtwdev->hal;
	bool done = false;

	if (!hal->tx_path_diversity)
		return;

	ieee80211_iterate_stations_atomic(rtwdev->hw,
					  rtw89_phy_tx_path_div_sta_iter,
					  &done);
}

#define ANTDIV_MAIN 0
#define ANTDIV_AUX 1

static void rtw89_phy_antdiv_set_ant(struct rtw89_dev *rtwdev)
{
	struct rtw89_hal *hal = &rtwdev->hal;
	u8 default_ant, optional_ant;

	if (!hal->ant_diversity || hal->antenna_tx == 0)
		return;

	if (hal->antenna_tx == RF_B) {
		default_ant = ANTDIV_AUX;
		optional_ant = ANTDIV_MAIN;
	} else {
		default_ant = ANTDIV_MAIN;
		optional_ant = ANTDIV_AUX;
	}

	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_CGCS_CTRL,
			      default_ant, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_RX_ORI,
			      default_ant, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_RX_ALT,
			      optional_ant, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_TX_ORI,
			      default_ant, RTW89_PHY_0);
}

static void rtw89_phy_swap_hal_antenna(struct rtw89_dev *rtwdev)
{
	struct rtw89_hal *hal = &rtwdev->hal;

	hal->antenna_rx = hal->antenna_rx == RF_A ? RF_B : RF_A;
	hal->antenna_tx = hal->antenna_rx;
}

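/* Antenna diversity training alternates between gathering PHY-status
 * statistics on the current antenna (even rounds) and swapping to the
 * other one (odd rounds).  Once training completes, the decision step
 * compares the two antennas first by EVM and then by RSSI, and swaps
 * back to the original antenna when neither difference exceeds its
 * threshold.
 */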
static void rtw89_phy_antdiv_decision_state(struct rtw89_dev *rtwdev)
{
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
	struct rtw89_hal *hal = &rtwdev->hal;
	bool no_change = false;
	u8 main_rssi, aux_rssi;
	u8 main_evm, aux_evm;
	u32 candidate;

	antdiv->get_stats = false;
	antdiv->training_count = 0;

	main_rssi = rtw89_phy_antdiv_sts_instance_get_rssi(&antdiv->main_stats);
	main_evm = rtw89_phy_antdiv_sts_instance_get_evm(&antdiv->main_stats);
	aux_rssi = rtw89_phy_antdiv_sts_instance_get_rssi(&antdiv->aux_stats);
	aux_evm = rtw89_phy_antdiv_sts_instance_get_evm(&antdiv->aux_stats);

	if (main_evm > aux_evm + ANTDIV_EVM_DIFF_TH)
		candidate = RF_A;
	else if (aux_evm > main_evm + ANTDIV_EVM_DIFF_TH)
		candidate = RF_B;
	else if (main_rssi > aux_rssi + RTW89_TX_DIV_RSSI_RAW_TH)
		candidate = RF_A;
	else if (aux_rssi > main_rssi + RTW89_TX_DIV_RSSI_RAW_TH)
		candidate = RF_B;
	else
		no_change = true;

	if (no_change) {
		/* swap back from training antenna to original */
		rtw89_phy_swap_hal_antenna(rtwdev);
		return;
	}

	hal->antenna_tx = candidate;
	hal->antenna_rx = candidate;
}

static void rtw89_phy_antdiv_training_state(struct rtw89_dev *rtwdev)
{
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
	u64 state_period;

	if (antdiv->training_count % 2 == 0) {
		if (antdiv->training_count == 0)
			rtw89_phy_antdiv_sts_reset(rtwdev);

		antdiv->get_stats = true;
		state_period = msecs_to_jiffies(ANTDIV_TRAINNING_INTVL);
	} else {
		antdiv->get_stats = false;
		state_period = msecs_to_jiffies(ANTDIV_DELAY);

		rtw89_phy_swap_hal_antenna(rtwdev);
		rtw89_phy_antdiv_set_ant(rtwdev);
	}

	antdiv->training_count++;
	ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->antdiv_work,
				     state_period);
}

void rtw89_phy_antdiv_work(struct work_struct *work)
{
	struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
						antdiv_work.work);
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;

	mutex_lock(&rtwdev->mutex);

	if (antdiv->training_count <= ANTDIV_TRAINNING_CNT) {
		rtw89_phy_antdiv_training_state(rtwdev);
	} else {
		rtw89_phy_antdiv_decision_state(rtwdev);
		rtw89_phy_antdiv_set_ant(rtwdev);
	}

	mutex_unlock(&rtwdev->mutex);
}

void rtw89_phy_antdiv_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
	struct rtw89_hal *hal = &rtwdev->hal;
	u8 rssi, rssi_pre;

	if (!hal->ant_diversity || hal->ant_diversity_fixed)
		return;

	rssi = rtw89_phy_antdiv_sts_instance_get_rssi(&antdiv->target_stats);
	rssi_pre = antdiv->rssi_pre;
	antdiv->rssi_pre = rssi;
	rtw89_phy_antdiv_sts_instance_reset(&antdiv->target_stats);

	if (abs((int)rssi - (int)rssi_pre) < ANTDIV_RSSI_DIFF_TH)
		return;

	antdiv->training_count = 0;
	ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->antdiv_work, 0);
}

static void rtw89_phy_env_monitor_init(struct rtw89_dev *rtwdev)
{
	rtw89_phy_ccx_top_setting_init(rtwdev);
	rtw89_phy_ifs_clm_setting_init(rtwdev);
}

static void rtw89_phy_edcca_init(struct rtw89_dev *rtwdev)
{
	const struct rtw89_edcca_regs *edcca_regs = rtwdev->chip->edcca_regs;
	struct rtw89_edcca_bak *edcca_bak = &rtwdev->hal.edcca_bak;

	memset(edcca_bak, 0, sizeof(*edcca_bak));

	if (rtwdev->chip->chip_id == RTL8922A && rtwdev->hal.cv == CHIP_CAV) {
		rtw89_phy_set_phy_regs(rtwdev, R_TXGATING, B_TXGATING_EN, 0);
		rtw89_phy_set_phy_regs(rtwdev, R_CTLTOP, B_CTLTOP_VAL, 2);
		rtw89_phy_set_phy_regs(rtwdev, R_CTLTOP, B_CTLTOP_ON, 1);
		rtw89_phy_set_phy_regs(rtwdev, R_SPOOF_CG, B_SPOOF_CG_EN, 0);
		rtw89_phy_set_phy_regs(rtwdev, R_DFS_FFT_CG, B_DFS_CG_EN, 0);
		rtw89_phy_set_phy_regs(rtwdev, R_DFS_FFT_CG, B_DFS_FFT_EN, 0);
		rtw89_phy_set_phy_regs(rtwdev, R_SEGSND, B_SEGSND_EN, 0);
		rtw89_phy_set_phy_regs(rtwdev, R_SEGSND, B_SEGSND_EN, 1);
		rtw89_phy_set_phy_regs(rtwdev, R_DFS_FFT_CG, B_DFS_FFT_EN, 1);
	}

	rtw89_phy_write32_mask(rtwdev, edcca_regs->tx_collision_t2r_st,
			       edcca_regs->tx_collision_t2r_st_mask, 0x29);
}

void rtw89_phy_dm_init(struct rtw89_dev *rtwdev)
{
	rtw89_phy_stat_init(rtwdev);

	rtw89_chip_bb_sethw(rtwdev);

	rtw89_phy_env_monitor_init(rtwdev);
	rtw89_physts_parsing_init(rtwdev);
	rtw89_phy_dig_init(rtwdev);
	rtw89_phy_cfo_init(rtwdev);
	rtw89_phy_bb_wrap_init(rtwdev);
	rtw89_phy_edcca_init(rtwdev);
	rtw89_phy_ch_info_init(rtwdev);
	rtw89_phy_ul_tb_info_init(rtwdev);
	rtw89_phy_antdiv_init(rtwdev);
	rtw89_chip_rfe_gpio(rtwdev);
	rtw89_phy_antdiv_set_ant(rtwdev);

	rtw89_chip_rfk_hw_init(rtwdev);
	rtw89_phy_init_rf_nctl(rtwdev);
	rtw89_chip_rfk_init(rtwdev);
	rtw89_chip_set_txpwr_ctrl(rtwdev);
	rtw89_chip_power_trim(rtwdev);
	rtw89_chip_cfg_txrx_path(rtwdev);
}

void rtw89_phy_dm_reinit(struct rtw89_dev *rtwdev)
{
	rtw89_phy_env_monitor_init(rtwdev);
	rtw89_physts_parsing_init(rtwdev);
}

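/* Program the BSS color and station AID used for HE spatial reuse.
 * The values are read under RCU from the link's bss_conf and written
 * only for an associated HE interface.
 */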
void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev,
			     struct rtw89_vif_link *rtwvif_link)
{
	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_reg_def *bss_clr_vld = &chip->bss_clr_vld;
	enum rtw89_phy_idx phy_idx = rtwvif_link->phy_idx;
	struct ieee80211_bss_conf *bss_conf;
	u8 bss_color;

	rcu_read_lock();

	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
	if (!bss_conf->he_support || !vif->cfg.assoc) {
		rcu_read_unlock();
		return;
	}

	bss_color = bss_conf->he_bss_color.color;

	rcu_read_unlock();

	rtw89_phy_write32_idx(rtwdev, bss_clr_vld->addr, bss_clr_vld->mask, 0x1,
			      phy_idx);
	rtw89_phy_write32_idx(rtwdev, chip->bss_clr_map_reg, B_BSS_CLR_MAP_TGT,
			      bss_color, phy_idx);
	rtw89_phy_write32_idx(rtwdev, chip->bss_clr_map_reg, B_BSS_CLR_MAP_STAID,
			      vif->cfg.aid, phy_idx);
}

static bool rfk_chan_validate_desc(const struct rtw89_rfk_chan_desc *desc)
{
	return desc->ch != 0;
}

static bool rfk_chan_is_equivalent(const struct rtw89_rfk_chan_desc *desc,
				   const struct rtw89_chan *chan)
{
	if (!rfk_chan_validate_desc(desc))
		return false;

	if (desc->ch != chan->channel)
		return false;

	if (desc->has_band && desc->band != chan->band_type)
		return false;

	if (desc->has_bw && desc->bw != chan->band_width)
		return false;

	return true;
}

struct rfk_chan_iter_data {
	const struct rtw89_rfk_chan_desc desc;
	unsigned int found;
};

static int rfk_chan_iter_search(const struct rtw89_chan *chan, void *data)
{
	struct rfk_chan_iter_data *iter_data = data;

	if (rfk_chan_is_equivalent(&iter_data->desc, chan))
		iter_data->found++;

	return 0;
}

u8 rtw89_rfk_chan_lookup(struct rtw89_dev *rtwdev,
			 const struct rtw89_rfk_chan_desc *desc, u8 desc_nr,
			 const struct rtw89_chan *target_chan)
{
	int sel = -1;
	u8 i;

	for (i = 0; i < desc_nr; i++) {
		struct rfk_chan_iter_data iter_data = {
			.desc = desc[i],
		};

		if (rfk_chan_is_equivalent(&desc[i], target_chan))
			return i;

		rtw89_iterate_entity_chan(rtwdev, rfk_chan_iter_search, &iter_data);
		if (!iter_data.found && sel == -1)
			sel = i;
	}

	if (sel == -1) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "no idle rfk entry; force replace the first\n");
		sel = 0;
	}

	return sel;
}
EXPORT_SYMBOL(rtw89_rfk_chan_lookup);

static void
_rfk_write_rf(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	rtw89_write_rf(rtwdev, def->path, def->addr, def->mask, def->data);
}

static void
_rfk_write32_mask(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	rtw89_phy_write32_mask(rtwdev, def->addr, def->mask, def->data);
}

static void
_rfk_write32_set(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	rtw89_phy_write32_set(rtwdev, def->addr, def->mask);
}

static void
_rfk_write32_clr(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	rtw89_phy_write32_clr(rtwdev, def->addr, def->mask);
}

static void
_rfk_delay(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	udelay(def->data);
}

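/* Dispatch table for RF calibration (RFK) tables: each rtw89_reg5_def
 * entry carries a flag selecting one of the write/set/clear/delay
 * handlers above, and rtw89_rfk_parser() walks the table applying them
 * in order.
 */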
static void
(*_rfk_handler[])(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def) = {
	[RTW89_RFK_F_WRF] = _rfk_write_rf,
	[RTW89_RFK_F_WM] = _rfk_write32_mask,
	[RTW89_RFK_F_WS] = _rfk_write32_set,
	[RTW89_RFK_F_WC] = _rfk_write32_clr,
	[RTW89_RFK_F_DELAY] = _rfk_delay,
};

#if defined(__linux__)
static_assert(ARRAY_SIZE(_rfk_handler) == RTW89_RFK_F_NUM);
#elif defined(__FreeBSD__)
rtw89_static_assert(ARRAY_SIZE(_rfk_handler) == RTW89_RFK_F_NUM);
#endif

void
rtw89_rfk_parser(struct rtw89_dev *rtwdev, const struct rtw89_rfk_tbl *tbl)
{
	const struct rtw89_reg5_def *p = tbl->defs;
	const struct rtw89_reg5_def *end = tbl->defs + tbl->size;

	for (; p < end; p++)
		_rfk_handler[p->flag](rtwdev, p);
}
EXPORT_SYMBOL(rtw89_rfk_parser);

#define RTW89_TSSI_FAST_MODE_NUM 4

static const struct rtw89_reg_def rtw89_tssi_fastmode_regs_flat[RTW89_TSSI_FAST_MODE_NUM] = {
	{0xD934, 0xff0000},
	{0xD934, 0xff000000},
	{0xD938, 0xff},
	{0xD934, 0xff00},
};

static const struct rtw89_reg_def rtw89_tssi_fastmode_regs_level[RTW89_TSSI_FAST_MODE_NUM] = {
	{0xD930, 0xff0000},
	{0xD930, 0xff000000},
	{0xD934, 0xff},
	{0xD930, 0xff00},
};

static
void rtw89_phy_tssi_ctrl_set_fast_mode_cfg(struct rtw89_dev *rtwdev,
					   enum rtw89_mac_idx mac_idx,
					   enum rtw89_tssi_bandedge_cfg bandedge_cfg,
					   u32 val)
{
	const struct rtw89_reg_def *regs;
	u32 reg;
	int i;

	if (bandedge_cfg == RTW89_TSSI_BANDEDGE_FLAT)
		regs = rtw89_tssi_fastmode_regs_flat;
	else
		regs = rtw89_tssi_fastmode_regs_level;

	for (i = 0; i < RTW89_TSSI_FAST_MODE_NUM; i++) {
		reg = rtw89_mac_reg_by_idx(rtwdev, regs[i].addr, mac_idx);
		rtw89_write32_mask(rtwdev, reg, regs[i].mask, val);
	}
}

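/* Band-edge TSSI control: the per-sub-bandwidth values come from the
 * chip's tssi_dbw_table for the selected config (flat vs level), and
 * the 20 MHz entry is reused for the fast-mode registers.
 */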
static const struct rtw89_reg_def rtw89_tssi_bandedge_regs_flat[RTW89_TSSI_SBW_NUM] = {
	{0xD91C, 0xff000000},
	{0xD920, 0xff},
	{0xD920, 0xff00},
	{0xD920, 0xff0000},
	{0xD920, 0xff000000},
	{0xD924, 0xff},
	{0xD924, 0xff00},
	{0xD914, 0xff000000},
	{0xD918, 0xff},
	{0xD918, 0xff00},
	{0xD918, 0xff0000},
	{0xD918, 0xff000000},
	{0xD91C, 0xff},
	{0xD91C, 0xff00},
	{0xD91C, 0xff0000},
};

static const struct rtw89_reg_def rtw89_tssi_bandedge_regs_level[RTW89_TSSI_SBW_NUM] = {
	{0xD910, 0xff},
	{0xD910, 0xff00},
	{0xD910, 0xff0000},
	{0xD910, 0xff000000},
	{0xD914, 0xff},
	{0xD914, 0xff00},
	{0xD914, 0xff0000},
	{0xD908, 0xff},
	{0xD908, 0xff00},
	{0xD908, 0xff0000},
	{0xD908, 0xff000000},
	{0xD90C, 0xff},
	{0xD90C, 0xff00},
	{0xD90C, 0xff0000},
	{0xD90C, 0xff000000},
};

void rtw89_phy_tssi_ctrl_set_bandedge_cfg(struct rtw89_dev *rtwdev,
					  enum rtw89_mac_idx mac_idx,
					  enum rtw89_tssi_bandedge_cfg bandedge_cfg)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_reg_def *regs;
	const u32 *data;
	u32 reg;
	int i;

	if (bandedge_cfg >= RTW89_TSSI_CFG_NUM)
		return;

	if (bandedge_cfg == RTW89_TSSI_BANDEDGE_FLAT)
		regs = rtw89_tssi_bandedge_regs_flat;
	else
		regs = rtw89_tssi_bandedge_regs_level;

	data = chip->tssi_dbw_table->data[bandedge_cfg];

	for (i = 0; i < RTW89_TSSI_SBW_NUM; i++) {
		reg = rtw89_mac_reg_by_idx(rtwdev, regs[i].addr, mac_idx);
		rtw89_write32_mask(rtwdev, reg, regs[i].mask, data[i]);
	}

	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_BANDEDGE_CFG, mac_idx);
	rtw89_write32_mask(rtwdev, reg, B_AX_BANDEDGE_CFG_IDX_MASK, bandedge_cfg);

	rtw89_phy_tssi_ctrl_set_fast_mode_cfg(rtwdev, mac_idx, bandedge_cfg,
					      data[RTW89_TSSI_SBW20]);
}
EXPORT_SYMBOL(rtw89_phy_tssi_ctrl_set_bandedge_cfg);

static
const u8 rtw89_ch_base_table[16] = {1, 0xff,
				    36, 100, 132, 149, 0xff,
				    1, 33, 65, 97, 129, 161, 193, 225, 0xff};
#define RTW89_CH_BASE_IDX_2G 0
#define RTW89_CH_BASE_IDX_5G_FIRST 2
#define RTW89_CH_BASE_IDX_5G_LAST 5
#define RTW89_CH_BASE_IDX_6G_FIRST 7
#define RTW89_CH_BASE_IDX_6G_LAST 14

#define RTW89_CH_BASE_IDX_MASK GENMASK(7, 4)
#define RTW89_CH_OFFSET_MASK GENMASK(3, 0)

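/* A channel index packs a 4-bit base-table index and a 4-bit offset in
 * 2-channel steps from that base.  For example, 5 GHz channel 149 hits
 * base index 5 (base channel 149) and encodes as 0x50; decoding 0x50
 * yields 149 + (0 << 1) = channel 149 on the 5 GHz band.  2 GHz
 * channels are stored directly in the offset field.
 */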
u8 rtw89_encode_chan_idx(struct rtw89_dev *rtwdev, u8 central_ch, u8 band)
{
	u8 chan_idx;
	u8 last, first;
	u8 idx;

	switch (band) {
	case RTW89_BAND_2G:
		chan_idx = FIELD_PREP(RTW89_CH_BASE_IDX_MASK, RTW89_CH_BASE_IDX_2G) |
			   FIELD_PREP(RTW89_CH_OFFSET_MASK, central_ch);
		return chan_idx;
	case RTW89_BAND_5G:
		first = RTW89_CH_BASE_IDX_5G_FIRST;
		last = RTW89_CH_BASE_IDX_5G_LAST;
		break;
	case RTW89_BAND_6G:
		first = RTW89_CH_BASE_IDX_6G_FIRST;
		last = RTW89_CH_BASE_IDX_6G_LAST;
		break;
	default:
		rtw89_warn(rtwdev, "Unsupported band %d\n", band);
		return 0;
	}

	for (idx = last; idx >= first; idx--)
		if (central_ch >= rtw89_ch_base_table[idx])
			break;

	if (idx < first) {
		rtw89_warn(rtwdev, "Unknown band %d channel %d\n", band, central_ch);
		return 0;
	}

	chan_idx = FIELD_PREP(RTW89_CH_BASE_IDX_MASK, idx) |
		   FIELD_PREP(RTW89_CH_OFFSET_MASK,
			      (central_ch - rtw89_ch_base_table[idx]) >> 1);
	return chan_idx;
}
EXPORT_SYMBOL(rtw89_encode_chan_idx);

void rtw89_decode_chan_idx(struct rtw89_dev *rtwdev, u8 chan_idx,
			   u8 *ch, enum nl80211_band *band)
{
	u8 idx, offset;

	idx = FIELD_GET(RTW89_CH_BASE_IDX_MASK, chan_idx);
	offset = FIELD_GET(RTW89_CH_OFFSET_MASK, chan_idx);

	if (idx == RTW89_CH_BASE_IDX_2G) {
		*band = NL80211_BAND_2GHZ;
		*ch = offset;
		return;
	}

	*band = idx <= RTW89_CH_BASE_IDX_5G_LAST ? NL80211_BAND_5GHZ : NL80211_BAND_6GHZ;
	*ch = rtw89_ch_base_table[idx] + (offset << 1);
}
EXPORT_SYMBOL(rtw89_decode_chan_idx);

void rtw89_phy_config_edcca(struct rtw89_dev *rtwdev, bool scan)
{
	const struct rtw89_edcca_regs *edcca_regs = rtwdev->chip->edcca_regs;
	struct rtw89_edcca_bak *edcca_bak = &rtwdev->hal.edcca_bak;

	if (scan) {
		edcca_bak->a =
			rtw89_phy_read32_mask(rtwdev, edcca_regs->edcca_level,
					      edcca_regs->edcca_mask);
		edcca_bak->p =
			rtw89_phy_read32_mask(rtwdev, edcca_regs->edcca_level,
					      edcca_regs->edcca_p_mask);
		edcca_bak->ppdu =
			rtw89_phy_read32_mask(rtwdev, edcca_regs->ppdu_level,
					      edcca_regs->ppdu_mask);

		rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level,
				       edcca_regs->edcca_mask, EDCCA_MAX);
		rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level,
				       edcca_regs->edcca_p_mask, EDCCA_MAX);
		rtw89_phy_write32_mask(rtwdev, edcca_regs->ppdu_level,
				       edcca_regs->ppdu_mask, EDCCA_MAX);
	} else {
		rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level,
				       edcca_regs->edcca_mask,
				       edcca_bak->a);
		rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level,
				       edcca_regs->edcca_p_mask,
				       edcca_bak->p);
		rtw89_phy_write32_mask(rtwdev, edcca_regs->ppdu_level,
				       edcca_regs->ppdu_mask,
				       edcca_bak->ppdu);
	}
}

static void rtw89_phy_edcca_log(struct rtw89_dev *rtwdev)
{
	const struct rtw89_edcca_regs *edcca_regs = rtwdev->chip->edcca_regs;
	bool flag_fb, flag_p20, flag_s20, flag_s40, flag_s80;
	s8 pwdb_fb, pwdb_p20, pwdb_s20, pwdb_s40, pwdb_s80;
	u8 path, per20_bitmap;
	u8 pwdb[8];
	u32 tmp;

	if (!rtw89_debug_is_enabled(rtwdev, RTW89_DBG_EDCCA))
		return;

	if (rtwdev->chip->chip_id == RTL8922A)
		rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel_be,
				       edcca_regs->rpt_sel_be_mask, 0);

	rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel,
			       edcca_regs->rpt_sel_mask, 0);
	tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_b);
	path = u32_get_bits(tmp, B_EDCCA_RPT_B_PATH_MASK);
	flag_s80 = u32_get_bits(tmp, B_EDCCA_RPT_B_S80);
	flag_s40 = u32_get_bits(tmp, B_EDCCA_RPT_B_S40);
	flag_s20 = u32_get_bits(tmp, B_EDCCA_RPT_B_S20);
	flag_p20 = u32_get_bits(tmp, B_EDCCA_RPT_B_P20);
	flag_fb = u32_get_bits(tmp, B_EDCCA_RPT_B_FB);
	pwdb_s20 = u32_get_bits(tmp, MASKBYTE1);
	pwdb_p20 = u32_get_bits(tmp, MASKBYTE2);
	pwdb_fb = u32_get_bits(tmp, MASKBYTE3);

	rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel,
			       edcca_regs->rpt_sel_mask, 4);
	tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_b);
	pwdb_s80 = u32_get_bits(tmp, MASKBYTE1);
	pwdb_s40 = u32_get_bits(tmp, MASKBYTE2);

	per20_bitmap = rtw89_phy_read32_mask(rtwdev, edcca_regs->rpt_a,
					     MASKBYTE0);

	if (rtwdev->chip->chip_id == RTL8922A) {
		rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel_be,
				       edcca_regs->rpt_sel_be_mask, 4);
		tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_b);
		pwdb[0] = u32_get_bits(tmp, MASKBYTE3);
		pwdb[1] = u32_get_bits(tmp, MASKBYTE2);
		pwdb[2] = u32_get_bits(tmp, MASKBYTE1);
		pwdb[3] = u32_get_bits(tmp, MASKBYTE0);

		rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel_be,
				       edcca_regs->rpt_sel_be_mask, 5);
		tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_b);
		pwdb[4] = u32_get_bits(tmp, MASKBYTE3);
		pwdb[5] = u32_get_bits(tmp, MASKBYTE2);
		pwdb[6] = u32_get_bits(tmp, MASKBYTE1);
		pwdb[7] = u32_get_bits(tmp, MASKBYTE0);
	} else {
		rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel,
				       edcca_regs->rpt_sel_mask, 0);
		tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_a);
		pwdb[0] = u32_get_bits(tmp, MASKBYTE3);
		pwdb[1] = u32_get_bits(tmp, MASKBYTE2);

		rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel,
				       edcca_regs->rpt_sel_mask, 1);
		tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_a);
		pwdb[2] = u32_get_bits(tmp, MASKBYTE3);
		pwdb[3] = u32_get_bits(tmp, MASKBYTE2);

		rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel,
				       edcca_regs->rpt_sel_mask, 2);
		tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_a);
		pwdb[4] = u32_get_bits(tmp, MASKBYTE3);
		pwdb[5] = u32_get_bits(tmp, MASKBYTE2);

		rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel,
				       edcca_regs->rpt_sel_mask, 3);
		tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_a);
		pwdb[6] = u32_get_bits(tmp, MASKBYTE3);
		pwdb[7] = u32_get_bits(tmp, MASKBYTE2);
	}

	rtw89_debug(rtwdev, RTW89_DBG_EDCCA,
		    "[EDCCA]: edcca_bitmap = %04x\n", per20_bitmap);

	rtw89_debug(rtwdev, RTW89_DBG_EDCCA,
		    "[EDCCA]: pwdb per20{0,1,2,3,4,5,6,7} = {%d,%d,%d,%d,%d,%d,%d,%d}(dBm)\n",
		    pwdb[0], pwdb[1], pwdb[2], pwdb[3], pwdb[4], pwdb[5],
		    pwdb[6], pwdb[7]);

	rtw89_debug(rtwdev, RTW89_DBG_EDCCA,
		    "[EDCCA]: path=%d, flag {FB,p20,s20,s40,s80} = {%d,%d,%d,%d,%d}\n",
		    path, flag_fb, flag_p20, flag_s20, flag_s40, flag_s80);

	rtw89_debug(rtwdev, RTW89_DBG_EDCCA,
		    "[EDCCA]: pwdb {FB,p20,s20,s40,s80} = {%d,%d,%d,%d,%d}(dBm)\n",
		    pwdb_fb, pwdb_p20, pwdb_s20, pwdb_s40, pwdb_s80);
}

static u8 rtw89_phy_edcca_get_thre_by_rssi(struct rtw89_dev *rtwdev)
{
	struct rtw89_phy_ch_info *ch_info = &rtwdev->ch_info;
	bool is_linked = rtwdev->total_sta_assoc > 0;
	u8 rssi_min = ch_info->rssi_min >> 1;
	u8 edcca_thre;

	if (!is_linked) {
		edcca_thre = EDCCA_MAX;
	} else {
		edcca_thre = rssi_min - RSSI_UNIT_CONVER + EDCCA_UNIT_CONVER -
			     EDCCA_TH_REF;
		edcca_thre = max_t(u8, edcca_thre, EDCCA_TH_L2H_LB);
	}

	return edcca_thre;
}

void rtw89_phy_edcca_thre_calc(struct rtw89_dev *rtwdev)
{
	const struct rtw89_edcca_regs *edcca_regs = rtwdev->chip->edcca_regs;
	struct rtw89_edcca_bak *edcca_bak = &rtwdev->hal.edcca_bak;
	u8 th;

	th = rtw89_phy_edcca_get_thre_by_rssi(rtwdev);
	if (th == edcca_bak->th_old)
		return;

	edcca_bak->th_old = th;

	rtw89_debug(rtwdev, RTW89_DBG_EDCCA,
		    "[EDCCA]: Normal Mode, EDCCA_th = %d\n", th);

	rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level,
			       edcca_regs->edcca_mask, th);
	rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level,
			       edcca_regs->edcca_p_mask, th);
	rtw89_phy_write32_mask(rtwdev, edcca_regs->ppdu_level,
			       edcca_regs->ppdu_mask, th);
}

void rtw89_phy_edcca_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_hal *hal = &rtwdev->hal;

	if (hal->disabled_dm_bitmap & BIT(RTW89_DM_DYNAMIC_EDCCA))
		return;

	rtw89_phy_edcca_thre_calc(rtwdev);
	rtw89_phy_edcca_log(rtwdev);
}

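/* Map the current MLO/DBCC mode and PHY index to the RF path bitmap
 * that RF calibration should cover; the 2+0/0+2 single-RF modes
 * calibrate paths A/B from either PHY, while the dual-RF modes split
 * the path pairs between the two PHYs.
 */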
enum rtw89_rf_path_bit rtw89_phy_get_kpath(struct rtw89_dev *rtwdev,
					   enum rtw89_phy_idx phy_idx)
{
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[RFK] kpath dbcc_en: 0x%x, mode=0x%x, PHY%d\n",
		    rtwdev->dbcc_en, rtwdev->mlo_dbcc_mode, phy_idx);

	switch (rtwdev->mlo_dbcc_mode) {
	case MLO_1_PLUS_1_1RF:
		if (phy_idx == RTW89_PHY_0)
			return RF_A;
		else
			return RF_B;
	case MLO_1_PLUS_1_2RF:
		if (phy_idx == RTW89_PHY_0)
			return RF_A;
		else
			return RF_D;
	case MLO_0_PLUS_2_1RF:
	case MLO_2_PLUS_0_1RF:
		/* for both PHY 0/1 */
		return RF_AB;
	case MLO_0_PLUS_2_2RF:
	case MLO_2_PLUS_0_2RF:
	case MLO_2_PLUS_2_2RF:
	default:
		if (phy_idx == RTW89_PHY_0)
			return RF_AB;
		else
			return RF_CD;
	}
}
EXPORT_SYMBOL(rtw89_phy_get_kpath);

enum rtw89_rf_path rtw89_phy_get_syn_sel(struct rtw89_dev *rtwdev,
					 enum rtw89_phy_idx phy_idx)
{
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[RFK] kpath dbcc_en: 0x%x, mode=0x%x, PHY%d\n",
		    rtwdev->dbcc_en, rtwdev->mlo_dbcc_mode, phy_idx);

	switch (rtwdev->mlo_dbcc_mode) {
	case MLO_1_PLUS_1_1RF:
		if (phy_idx == RTW89_PHY_0)
			return RF_PATH_A;
		else
			return RF_PATH_B;
	case MLO_1_PLUS_1_2RF:
		if (phy_idx == RTW89_PHY_0)
			return RF_PATH_A;
		else
			return RF_PATH_D;
	case MLO_0_PLUS_2_1RF:
	case MLO_2_PLUS_0_1RF:
		if (phy_idx == RTW89_PHY_0)
			return RF_PATH_A;
		else
			return RF_PATH_B;
	case MLO_0_PLUS_2_2RF:
	case MLO_2_PLUS_0_2RF:
	case MLO_2_PLUS_2_2RF:
	default:
		if (phy_idx == RTW89_PHY_0)
			return RF_PATH_A;
		else
			return RF_PATH_C;
	}
}
EXPORT_SYMBOL(rtw89_phy_get_syn_sel);

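/* Register layout of the AX-generation PHY blocks (CCX environment
 * monitor, PHY-status parser and CFO tracking), hooked up via
 * rtw89_phy_gen_ax below and reached through rtwdev->chip->phy_def in
 * the common code.
 */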
static const struct rtw89_ccx_regs rtw89_ccx_regs_ax = {
	.setting_addr = R_CCX,
	.edcca_opt_mask = B_CCX_EDCCA_OPT_MSK,
	.measurement_trig_mask = B_MEASUREMENT_TRIG_MSK,
	.trig_opt_mask = B_CCX_TRIG_OPT_MSK,
	.en_mask = B_CCX_EN_MSK,
	.ifs_cnt_addr = R_IFS_COUNTER,
	.ifs_clm_period_mask = B_IFS_CLM_PERIOD_MSK,
	.ifs_clm_cnt_unit_mask = B_IFS_CLM_COUNTER_UNIT_MSK,
	.ifs_clm_cnt_clear_mask = B_IFS_COUNTER_CLR_MSK,
	.ifs_collect_en_mask = B_IFS_COLLECT_EN,
	.ifs_t1_addr = R_IFS_T1,
	.ifs_t1_th_h_mask = B_IFS_T1_TH_HIGH_MSK,
	.ifs_t1_en_mask = B_IFS_T1_EN_MSK,
	.ifs_t1_th_l_mask = B_IFS_T1_TH_LOW_MSK,
	.ifs_t2_addr = R_IFS_T2,
	.ifs_t2_th_h_mask = B_IFS_T2_TH_HIGH_MSK,
	.ifs_t2_en_mask = B_IFS_T2_EN_MSK,
	.ifs_t2_th_l_mask = B_IFS_T2_TH_LOW_MSK,
	.ifs_t3_addr = R_IFS_T3,
	.ifs_t3_th_h_mask = B_IFS_T3_TH_HIGH_MSK,
	.ifs_t3_en_mask = B_IFS_T3_EN_MSK,
	.ifs_t3_th_l_mask = B_IFS_T3_TH_LOW_MSK,
	.ifs_t4_addr = R_IFS_T4,
	.ifs_t4_th_h_mask = B_IFS_T4_TH_HIGH_MSK,
	.ifs_t4_en_mask = B_IFS_T4_EN_MSK,
	.ifs_t4_th_l_mask = B_IFS_T4_TH_LOW_MSK,
	.ifs_clm_tx_cnt_addr = R_IFS_CLM_TX_CNT,
	.ifs_clm_edcca_excl_cca_fa_mask = B_IFS_CLM_EDCCA_EXCLUDE_CCA_FA_MSK,
	.ifs_clm_tx_cnt_msk = B_IFS_CLM_TX_CNT_MSK,
	.ifs_clm_cca_addr = R_IFS_CLM_CCA,
	.ifs_clm_ofdmcca_excl_fa_mask = B_IFS_CLM_OFDMCCA_EXCLUDE_FA_MSK,
	.ifs_clm_cckcca_excl_fa_mask = B_IFS_CLM_CCKCCA_EXCLUDE_FA_MSK,
	.ifs_clm_fa_addr = R_IFS_CLM_FA,
	.ifs_clm_ofdm_fa_mask = B_IFS_CLM_OFDM_FA_MSK,
	.ifs_clm_cck_fa_mask = B_IFS_CLM_CCK_FA_MSK,
	.ifs_his_addr = R_IFS_HIS,
	.ifs_t4_his_mask = B_IFS_T4_HIS_MSK,
	.ifs_t3_his_mask = B_IFS_T3_HIS_MSK,
	.ifs_t2_his_mask = B_IFS_T2_HIS_MSK,
	.ifs_t1_his_mask = B_IFS_T1_HIS_MSK,
	.ifs_avg_l_addr = R_IFS_AVG_L,
	.ifs_t2_avg_mask = B_IFS_T2_AVG_MSK,
	.ifs_t1_avg_mask = B_IFS_T1_AVG_MSK,
	.ifs_avg_h_addr = R_IFS_AVG_H,
	.ifs_t4_avg_mask = B_IFS_T4_AVG_MSK,
	.ifs_t3_avg_mask = B_IFS_T3_AVG_MSK,
	.ifs_cca_l_addr = R_IFS_CCA_L,
	.ifs_t2_cca_mask = B_IFS_T2_CCA_MSK,
	.ifs_t1_cca_mask = B_IFS_T1_CCA_MSK,
	.ifs_cca_h_addr = R_IFS_CCA_H,
	.ifs_t4_cca_mask = B_IFS_T4_CCA_MSK,
	.ifs_t3_cca_mask = B_IFS_T3_CCA_MSK,
	.ifs_total_addr = R_IFSCNT,
	.ifs_cnt_done_mask = B_IFSCNT_DONE_MSK,
	.ifs_total_mask = B_IFSCNT_TOTAL_CNT_MSK,
};

static const struct rtw89_physts_regs rtw89_physts_regs_ax = {
	.setting_addr = R_PLCP_HISTOGRAM,
	.dis_trigger_fail_mask = B_STS_DIS_TRIG_BY_FAIL,
	.dis_trigger_brk_mask = B_STS_DIS_TRIG_BY_BRK,
};

static const struct rtw89_cfo_regs rtw89_cfo_regs_ax = {
	.comp = R_DCFO_WEIGHT,
	.weighting_mask = B_DCFO_WEIGHT_MSK,
	.comp_seg0 = R_DCFO_OPT,
	.valid_0_mask = B_DCFO_OPT_EN,
};

const struct rtw89_phy_gen_def rtw89_phy_gen_ax = {
	.cr_base = 0x10000,
	.ccx = &rtw89_ccx_regs_ax,
	.physts = &rtw89_physts_regs_ax,
	.cfo = &rtw89_cfo_regs_ax,
	.phy0_phy1_offset = rtw89_phy0_phy1_offset_ax,
	.config_bb_gain = rtw89_phy_config_bb_gain_ax,
	.preinit_rf_nctl = rtw89_phy_preinit_rf_nctl_ax,
	.bb_wrap_init = NULL,
	.ch_info_init = NULL,

	.set_txpwr_byrate = rtw89_phy_set_txpwr_byrate_ax,
	.set_txpwr_offset = rtw89_phy_set_txpwr_offset_ax,
	.set_txpwr_limit = rtw89_phy_set_txpwr_limit_ax,
	.set_txpwr_limit_ru = rtw89_phy_set_txpwr_limit_ru_ax,
};
EXPORT_SYMBOL(rtw89_phy_gen_ax);