// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2019-2020 Realtek Corporation
 */

#include "acpi.h"
#include "chan.h"
#include "coex.h"
#include "debug.h"
#include "fw.h"
#include "mac.h"
#include "phy.h"
#include "ps.h"
#include "reg.h"
#include "sar.h"
#include "txrx.h"
#include "util.h"

static u32 rtw89_phy0_phy1_offset(struct rtw89_dev *rtwdev, u32 addr)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;

	return phy->phy0_phy1_offset(rtwdev, addr);
}

static u16 get_max_amsdu_len(struct rtw89_dev *rtwdev,
			     const struct rtw89_ra_report *report)
{
	u32 bit_rate = report->bit_rate;

	/* lower than ofdm, do not aggregate */
	if (bit_rate < 550)
		return 1;

	/* avoid AMSDU for legacy rate */
	if (report->might_fallback_legacy)
		return 1;

	/* lower than 20M vht 2ss mcs8, make it small */
	if (bit_rate < 1800)
		return 1200;

	/* lower than 40M vht 2ss mcs9, make it medium */
	if (bit_rate < 4000)
		return 2600;

	/* not yet 80M vht 2ss mcs8/9, make it twice regular packet size */
	if (bit_rate < 7000)
		return 3500;

	return rtwdev->chip->max_amsdu_limit;
}

static u64 get_mcs_ra_mask(u16 mcs_map, u8 highest_mcs, u8 gap)
{
	u64 ra_mask = 0;
	u8 mcs_cap;
	int i, nss;

	for (i = 0, nss = 12; i < 4; i++, mcs_map >>= 2, nss += 12) {
		mcs_cap = mcs_map & 0x3;
		switch (mcs_cap) {
		case 2:
			ra_mask |= GENMASK_ULL(highest_mcs, 0) << nss;
			break;
		case 1:
			ra_mask |= GENMASK_ULL(highest_mcs - gap, 0) << nss;
			break;
		case 0:
			ra_mask |= GENMASK_ULL(highest_mcs - gap * 2, 0) << nss;
			break;
		default:
			break;
		}
	}

	return ra_mask;
}

static u64 get_he_ra_mask(struct ieee80211_link_sta *link_sta)
{
	struct ieee80211_sta_he_cap cap = link_sta->he_cap;
	u16 mcs_map;

	switch (link_sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		if (cap.he_cap_elem.phy_cap_info[0] &
		    IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
			mcs_map = le16_to_cpu(cap.he_mcs_nss_supp.rx_mcs_80p80);
		else
			mcs_map = le16_to_cpu(cap.he_mcs_nss_supp.rx_mcs_160);
		break;
	default:
		mcs_map = le16_to_cpu(cap.he_mcs_nss_supp.rx_mcs_80);
	}

	/* MCS11, MCS9, MCS7 */
	return get_mcs_ra_mask(mcs_map, 11, 2);
}

static u64 get_eht_mcs_ra_mask(u8 *max_nss, u8 start_mcs, u8 n_nss)
{
	u64 nss_mcs_shift;
	u64 nss_mcs_val;
	u64 mask = 0;
	int i, j;
	u8 nss;

	for (i = 0; i < n_nss; i++) {
		nss = u8_get_bits(max_nss[i], IEEE80211_EHT_MCS_NSS_RX);
		if (!nss)
			continue;

		nss_mcs_val = GENMASK_ULL(start_mcs + i * 2, 0);

		for (j = 0, nss_mcs_shift = 12; j < nss; j++, nss_mcs_shift += 16)
			mask |= nss_mcs_val << nss_mcs_shift;
	}

	return mask;
}
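
/* Worked example (illustrative, not part of the driver): the rate-mask
 * layout keeps bits [11:0] for legacy rates and get_mcs_ra_mask() packs
 * 12 bits per spatial stream above them (the EHT variant above uses a
 * 16-bit stride instead). For a VHT peer with mcs_map = 0xfffa (NSS1 and
 * NSS2 support MCS0-9, higher NSS unsupported):
 *   get_mcs_ra_mask(0xfffa, 9, 1)
 *	= GENMASK_ULL(9, 0) << 12 | GENMASK_ULL(9, 0) << 24
 *	= 0x3ff3ff000
 */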

static u64 get_eht_ra_mask(struct ieee80211_link_sta *link_sta)
{
	struct ieee80211_sta_eht_cap *eht_cap = &link_sta->eht_cap;
	struct ieee80211_eht_mcs_nss_supp_20mhz_only *mcs_nss_20mhz;
	struct ieee80211_eht_mcs_nss_supp_bw *mcs_nss;
	u8 *he_phy_cap = link_sta->he_cap.he_cap_elem.phy_cap_info;

	switch (link_sta->bandwidth) {
	case IEEE80211_STA_RX_BW_320:
		mcs_nss = &eht_cap->eht_mcs_nss_supp.bw._320;
		/* MCS 9, 11, 13 */
		return get_eht_mcs_ra_mask(mcs_nss->rx_tx_max_nss, 9, 3);
	case IEEE80211_STA_RX_BW_160:
		mcs_nss = &eht_cap->eht_mcs_nss_supp.bw._160;
		/* MCS 9, 11, 13 */
		return get_eht_mcs_ra_mask(mcs_nss->rx_tx_max_nss, 9, 3);
	case IEEE80211_STA_RX_BW_20:
		if (!(he_phy_cap[0] &
		      IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL)) {
			mcs_nss_20mhz = &eht_cap->eht_mcs_nss_supp.only_20mhz;
			/* MCS 7, 9, 11, 13 */
			return get_eht_mcs_ra_mask(mcs_nss_20mhz->rx_tx_max_nss, 7, 4);
		}
		fallthrough;
	case IEEE80211_STA_RX_BW_80:
	default:
		mcs_nss = &eht_cap->eht_mcs_nss_supp.bw._80;
		/* MCS 9, 11, 13 */
		return get_eht_mcs_ra_mask(mcs_nss->rx_tx_max_nss, 9, 3);
	}
}

#define RA_FLOOR_TABLE_SIZE	7
#define RA_FLOOR_UP_GAP		3
static u64 rtw89_phy_ra_mask_rssi(struct rtw89_dev *rtwdev, u8 rssi,
				  u8 ratr_state)
{
	u8 rssi_lv_t[RA_FLOOR_TABLE_SIZE] = {30, 44, 48, 52, 56, 60, 100};
	u8 rssi_lv = 0;
	u8 i;

	rssi >>= 1;
	for (i = 0; i < RA_FLOOR_TABLE_SIZE; i++) {
		if (i >= ratr_state)
			rssi_lv_t[i] += RA_FLOOR_UP_GAP;
		if (rssi < rssi_lv_t[i]) {
			rssi_lv = i;
			break;
		}
	}
	if (rssi_lv == 0)
		return 0xffffffffffffffffULL;
	else if (rssi_lv == 1)
		return 0xfffffffffffffff0ULL;
	else if (rssi_lv == 2)
		return 0xffffffffffffefe0ULL;
	else if (rssi_lv == 3)
		return 0xffffffffffffcfc0ULL;
	else if (rssi_lv == 4)
		return 0xffffffffffff8f80ULL;
	else if (rssi_lv >= 5)
		return 0xffffffffffff0f00ULL;

	return 0xffffffffffffffffULL;
}
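
/* Reading note (illustrative): the averaged RSSI is kept at double
 * resolution, so "rssi >>= 1" rescales it before comparing against the
 * floor table. With ratr_state == 0 every threshold is raised by
 * RA_FLOOR_UP_GAP, e.g. a rescaled reading of 50 falls in level 2 and
 * returns 0xffffffffffffefe0, clearing the lowest CCK/OFDM/MCS rate bits.
 */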

static u64 rtw89_phy_ra_mask_recover(u64 ra_mask, u64 ra_mask_bak)
{
	if ((ra_mask & ~(RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES)) == 0)
		ra_mask |= (ra_mask_bak & ~(RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES));

	if (ra_mask == 0)
		ra_mask |= (ra_mask_bak & (RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES));

	return ra_mask;
}

static u64 rtw89_phy_ra_mask_cfg(struct rtw89_dev *rtwdev,
				 struct rtw89_sta_link *rtwsta_link,
				 struct ieee80211_link_sta *link_sta,
				 const struct rtw89_chan *chan)
{
	struct cfg80211_bitrate_mask *mask = &rtwsta_link->mask;
	enum nl80211_band band;
	u64 cfg_mask;

	if (!rtwsta_link->use_cfg_mask)
		return -1;

	switch (chan->band_type) {
	case RTW89_BAND_2G:
		band = NL80211_BAND_2GHZ;
		cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_2GHZ].legacy,
					   RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES);
		break;
	case RTW89_BAND_5G:
		band = NL80211_BAND_5GHZ;
		cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_5GHZ].legacy,
					   RA_MASK_OFDM_RATES);
		break;
	case RTW89_BAND_6G:
		band = NL80211_BAND_6GHZ;
		cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_6GHZ].legacy,
					   RA_MASK_OFDM_RATES);
		break;
	default:
		rtw89_warn(rtwdev, "unhandled band type %d\n", chan->band_type);
		return -1;
	}

	if (link_sta->he_cap.has_he) {
		cfg_mask |= u64_encode_bits(mask->control[band].he_mcs[0],
					    RA_MASK_HE_1SS_RATES);
		cfg_mask |= u64_encode_bits(mask->control[band].he_mcs[1],
					    RA_MASK_HE_2SS_RATES);
	} else if (link_sta->vht_cap.vht_supported) {
		cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[0],
					    RA_MASK_VHT_1SS_RATES);
		cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[1],
					    RA_MASK_VHT_2SS_RATES);
	} else if (link_sta->ht_cap.ht_supported) {
		cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[0],
					    RA_MASK_HT_1SS_RATES);
		cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[1],
					    RA_MASK_HT_2SS_RATES);
	}

	return cfg_mask;
}

static const u64
rtw89_ra_mask_ht_rates[4] = {RA_MASK_HT_1SS_RATES, RA_MASK_HT_2SS_RATES,
			     RA_MASK_HT_3SS_RATES, RA_MASK_HT_4SS_RATES};
static const u64
rtw89_ra_mask_vht_rates[4] = {RA_MASK_VHT_1SS_RATES, RA_MASK_VHT_2SS_RATES,
			      RA_MASK_VHT_3SS_RATES, RA_MASK_VHT_4SS_RATES};
static const u64
rtw89_ra_mask_he_rates[4] = {RA_MASK_HE_1SS_RATES, RA_MASK_HE_2SS_RATES,
			     RA_MASK_HE_3SS_RATES, RA_MASK_HE_4SS_RATES};
static const u64
rtw89_ra_mask_eht_rates[4] = {RA_MASK_EHT_1SS_RATES, RA_MASK_EHT_2SS_RATES,
			      RA_MASK_EHT_3SS_RATES, RA_MASK_EHT_4SS_RATES};

static void rtw89_phy_ra_gi_ltf(struct rtw89_dev *rtwdev,
				struct rtw89_sta_link *rtwsta_link,
				struct ieee80211_link_sta *link_sta,
				const struct rtw89_chan *chan,
				bool *fix_giltf_en, u8 *fix_giltf)
{
	struct cfg80211_bitrate_mask *mask = &rtwsta_link->mask;
	u8 band = chan->band_type;
	enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
	u8 he_ltf = mask->control[nl_band].he_ltf;
	u8 he_gi = mask->control[nl_band].he_gi;

	*fix_giltf_en = true;

	if (rtwdev->chip->chip_id == RTL8852C &&
	    chan->band_width == RTW89_CHANNEL_WIDTH_160 &&
	    rtw89_sta_link_has_su_mu_4xhe08(link_sta))
		*fix_giltf = RTW89_GILTF_SGI_4XHE08;
	else
		*fix_giltf = RTW89_GILTF_2XHE08;

	if (!(rtwsta_link->use_cfg_mask && link_sta->he_cap.has_he))
		return;

	if (he_ltf == 2 && he_gi == 2) {
		*fix_giltf = RTW89_GILTF_LGI_4XHE32;
	} else if (he_ltf == 2 && he_gi == 0) {
		*fix_giltf = RTW89_GILTF_SGI_4XHE08;
	} else if (he_ltf == 1 && he_gi == 1) {
		*fix_giltf = RTW89_GILTF_2XHE16;
	} else if (he_ltf == 1 && he_gi == 0) {
		*fix_giltf = RTW89_GILTF_2XHE08;
	} else if (he_ltf == 0 && he_gi == 1) {
		*fix_giltf = RTW89_GILTF_1XHE16;
	} else if (he_ltf == 0 && he_gi == 0) {
		*fix_giltf = RTW89_GILTF_1XHE08;
	}
}
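
/* Reference for the mapping above (standard HE values, noted here for
 * readability): he_ltf 0/1/2 selects 1x/2x/4x HE-LTF and he_gi 0/1/2
 * selects 0.8/1.6/3.2 us guard interval, so e.g. (he_ltf, he_gi) = (1, 1)
 * means "2x LTF + 1.6 us GI", i.e. RTW89_GILTF_2XHE16.
 */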

static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
				    struct rtw89_vif_link *rtwvif_link,
				    struct rtw89_sta_link *rtwsta_link,
				    struct ieee80211_link_sta *link_sta,
				    bool p2p, bool csi)
{
	struct rtw89_phy_rate_pattern *rate_pattern = &rtwvif_link->rate_pattern;
	struct rtw89_ra_info *ra = &rtwsta_link->ra;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
						       rtwvif_link->chanctx_idx);
	const u64 *high_rate_masks = rtw89_ra_mask_ht_rates;
	u8 rssi = ewma_rssi_read(&rtwsta_link->avg_rssi);
	u64 ra_mask = 0;
	u64 ra_mask_bak;
	u8 mode = 0;
	u8 csi_mode = RTW89_RA_RPT_MODE_LEGACY;
	u8 bw_mode = 0;
	u8 stbc_en = 0;
	u8 ldpc_en = 0;
	u8 fix_giltf = 0;
	u8 i;
	bool sgi = false;
	bool fix_giltf_en = false;

	memset(ra, 0, sizeof(*ra));
	/* Set the ra mask from sta's capability */
	if (link_sta->eht_cap.has_eht) {
		mode |= RTW89_RA_MODE_EHT;
		ra_mask |= get_eht_ra_mask(link_sta);
		high_rate_masks = rtw89_ra_mask_eht_rates;
		rtw89_phy_ra_gi_ltf(rtwdev, rtwsta_link, link_sta,
				    chan, &fix_giltf_en, &fix_giltf);
	} else if (link_sta->he_cap.has_he) {
		mode |= RTW89_RA_MODE_HE;
		csi_mode = RTW89_RA_RPT_MODE_HE;
		ra_mask |= get_he_ra_mask(link_sta);
		high_rate_masks = rtw89_ra_mask_he_rates;
		if (link_sta->he_cap.he_cap_elem.phy_cap_info[2] &
		    IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
			stbc_en = 1;
		if (link_sta->he_cap.he_cap_elem.phy_cap_info[1] &
		    IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)
			ldpc_en = 1;
		rtw89_phy_ra_gi_ltf(rtwdev, rtwsta_link, link_sta,
				    chan, &fix_giltf_en, &fix_giltf);
	} else if (link_sta->vht_cap.vht_supported) {
		u16 mcs_map = le16_to_cpu(link_sta->vht_cap.vht_mcs.rx_mcs_map);

		mode |= RTW89_RA_MODE_VHT;
		csi_mode = RTW89_RA_RPT_MODE_VHT;
		/* MCS9 (non-20MHz), MCS8, MCS7 */
		if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20)
			ra_mask |= get_mcs_ra_mask(mcs_map, 8, 1);
		else
			ra_mask |= get_mcs_ra_mask(mcs_map, 9, 1);
		high_rate_masks = rtw89_ra_mask_vht_rates;
		if (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK)
			stbc_en = 1;
		if (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)
			ldpc_en = 1;
	} else if (link_sta->ht_cap.ht_supported) {
		mode |= RTW89_RA_MODE_HT;
		csi_mode = RTW89_RA_RPT_MODE_HT;
		ra_mask |= ((u64)link_sta->ht_cap.mcs.rx_mask[3] << 48) |
			   ((u64)link_sta->ht_cap.mcs.rx_mask[2] << 36) |
			   ((u64)link_sta->ht_cap.mcs.rx_mask[1] << 24) |
			   ((u64)link_sta->ht_cap.mcs.rx_mask[0] << 12);
		high_rate_masks = rtw89_ra_mask_ht_rates;
		if (link_sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
			stbc_en = 1;
		if (link_sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)
			ldpc_en = 1;
	}

	switch (chan->band_type) {
	case RTW89_BAND_2G:
		ra_mask |= link_sta->supp_rates[NL80211_BAND_2GHZ];
		if (link_sta->supp_rates[NL80211_BAND_2GHZ] & 0xf)
			mode |= RTW89_RA_MODE_CCK;
		if (link_sta->supp_rates[NL80211_BAND_2GHZ] & 0xff0)
			mode |= RTW89_RA_MODE_OFDM;
		break;
	case RTW89_BAND_5G:
		ra_mask |= (u64)link_sta->supp_rates[NL80211_BAND_5GHZ] << 4;
		mode |= RTW89_RA_MODE_OFDM;
		break;
	case RTW89_BAND_6G:
		ra_mask |= (u64)link_sta->supp_rates[NL80211_BAND_6GHZ] << 4;
		mode |= RTW89_RA_MODE_OFDM;
		break;
	default:
		rtw89_err(rtwdev, "Unknown band type\n");
		break;
	}

	ra_mask_bak = ra_mask;

	if (mode >= RTW89_RA_MODE_HT) {
		u64 mask = 0;

		for (i = 0; i < rtwdev->hal.tx_nss; i++)
			mask |= high_rate_masks[i];
		if (mode & RTW89_RA_MODE_OFDM)
			mask |= RA_MASK_SUBOFDM_RATES;
		if (mode & RTW89_RA_MODE_CCK)
			mask |= RA_MASK_SUBCCK_RATES;
		ra_mask &= mask;
	} else if (mode & RTW89_RA_MODE_OFDM) {
		ra_mask &= (RA_MASK_OFDM_RATES | RA_MASK_SUBCCK_RATES);
	}

	if (mode != RTW89_RA_MODE_CCK)
		ra_mask &= rtw89_phy_ra_mask_rssi(rtwdev, rssi, 0);

	ra_mask = rtw89_phy_ra_mask_recover(ra_mask, ra_mask_bak);
	ra_mask &= rtw89_phy_ra_mask_cfg(rtwdev, rtwsta_link, link_sta, chan);

	switch (link_sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		bw_mode = RTW89_CHANNEL_WIDTH_160;
		sgi = link_sta->vht_cap.vht_supported &&
		      (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160);
		break;
	case IEEE80211_STA_RX_BW_80:
		bw_mode = RTW89_CHANNEL_WIDTH_80;
		sgi = link_sta->vht_cap.vht_supported &&
		      (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80);
		break;
	case IEEE80211_STA_RX_BW_40:
		bw_mode = RTW89_CHANNEL_WIDTH_40;
		sgi = link_sta->ht_cap.ht_supported &&
		      (link_sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40);
		break;
	default:
		bw_mode = RTW89_CHANNEL_WIDTH_20;
		sgi = link_sta->ht_cap.ht_supported &&
		      (link_sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
		break;
	}

	if (link_sta->he_cap.he_cap_elem.phy_cap_info[3] &
	    IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM)
		ra->dcm_cap = 1;

	if (rate_pattern->enable && !p2p) {
		ra_mask = rtw89_phy_ra_mask_cfg(rtwdev, rtwsta_link, link_sta, chan);
		ra_mask &= rate_pattern->ra_mask;
		mode = rate_pattern->ra_mode;
	}

	ra->bw_cap = bw_mode;
	ra->er_cap = rtwsta_link->er_cap;
	ra->mode_ctrl = mode;
	ra->macid = rtwsta_link->mac_id;
	ra->stbc_cap = stbc_en;
	ra->ldpc_cap = ldpc_en;
	ra->ss_num = min(link_sta->rx_nss, rtwdev->hal.tx_nss) - 1;
	ra->en_sgi = sgi;
	ra->ra_mask = ra_mask;
	ra->fix_giltf_en = fix_giltf_en;
	ra->fix_giltf = fix_giltf;

	if (!csi)
		return;

	ra->fixed_csi_rate_en = false;
	ra->ra_csi_rate_en = true;
	ra->cr_tbl_sel = false;
	ra->band_num = rtwvif_link->phy_idx;
	ra->csi_bw = bw_mode;
	ra->csi_gi_ltf = RTW89_GILTF_LGI_4XHE32;
	ra->csi_mcs_ss_idx = 5;
	ra->csi_mode = csi_mode;
}
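
/* Ordering note (summary of the flow above): the mask starts from the
 * peer's HT/VHT/HE/EHT capability, adds the band's legacy rates, is
 * trimmed to our own tx_nss, floored by current RSSI, recovered if the
 * floor emptied it, and finally intersected with any user-configured
 * bitrate mask before being handed to firmware.
 */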

void rtw89_phy_ra_update_sta_link(struct rtw89_dev *rtwdev,
				  struct rtw89_sta_link *rtwsta_link,
				  u32 changed)
{
	struct rtw89_vif_link *rtwvif_link = rtwsta_link->rtwvif_link;
	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
	struct rtw89_ra_info *ra = &rtwsta_link->ra;
	struct ieee80211_link_sta *link_sta;

	rcu_read_lock();

	link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, false);
	rtw89_phy_ra_sta_update(rtwdev, rtwvif_link, rtwsta_link,
				link_sta, vif->p2p, false);

	rcu_read_unlock();

	if (changed & IEEE80211_RC_SUPP_RATES_CHANGED)
		ra->upd_mask = 1;
	if (changed & (IEEE80211_RC_BW_CHANGED | IEEE80211_RC_NSS_CHANGED))
		ra->upd_bw_nss_mask = 1;

	rtw89_debug(rtwdev, RTW89_DBG_RA,
		    "ra update: macid = %d, bw = %d, nss = %d, gi = %d %d",
		    ra->macid,
		    ra->bw_cap,
		    ra->ss_num,
		    ra->en_sgi,
		    ra->giltf);

	rtw89_fw_h2c_ra(rtwdev, ra, false);
}

void rtw89_phy_ra_update_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta,
			     u32 changed)
{
	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
	struct rtw89_sta_link *rtwsta_link;
	unsigned int link_id;

	rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id)
		rtw89_phy_ra_update_sta_link(rtwdev, rtwsta_link, changed);
}

static bool __check_rate_pattern(struct rtw89_phy_rate_pattern *next,
				 u16 rate_base, u64 ra_mask, u8 ra_mode,
				 u32 rate_ctrl, u32 ctrl_skip, bool force)
{
	u8 n, c;

	if (rate_ctrl == ctrl_skip)
		return true;

	n = hweight32(rate_ctrl);
	if (n == 0)
		return true;

	if (force && n != 1)
		return false;

	if (next->enable)
		return false;

	c = __fls(rate_ctrl);
	next->rate = rate_base + c;
	next->ra_mode = ra_mode;
	next->ra_mask = ra_mask;
	next->enable = true;

	return true;
}

#define RTW89_HW_RATE_BY_CHIP_GEN(rate) \
	{ \
		[RTW89_CHIP_AX] = RTW89_HW_RATE_ ## rate, \
		[RTW89_CHIP_BE] = RTW89_HW_RATE_V1_ ## rate, \
	}
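
/* Illustrative expansion: RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS1_MCS0) becomes
 *	{ [RTW89_CHIP_AX] = RTW89_HW_RATE_HE_NSS1_MCS0,
 *	  [RTW89_CHIP_BE] = RTW89_HW_RATE_V1_HE_NSS1_MCS0, }
 * so each table below can be indexed by the running chip generation.
 */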

static
void __rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
				  struct rtw89_vif_link *rtwvif_link,
				  const struct cfg80211_bitrate_mask *mask)
{
	struct ieee80211_supported_band *sband;
	struct rtw89_phy_rate_pattern next_pattern = {0};
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
						       rtwvif_link->chanctx_idx);
	static const u16 hw_rate_he[][RTW89_CHIP_GEN_NUM] = {
		RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS1_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS2_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS3_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(HE_NSS4_MCS0),
	};
	static const u16 hw_rate_vht[][RTW89_CHIP_GEN_NUM] = {
		RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS1_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS2_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS3_MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(VHT_NSS4_MCS0),
	};
	static const u16 hw_rate_ht[][RTW89_CHIP_GEN_NUM] = {
		RTW89_HW_RATE_BY_CHIP_GEN(MCS0),
		RTW89_HW_RATE_BY_CHIP_GEN(MCS8),
		RTW89_HW_RATE_BY_CHIP_GEN(MCS16),
		RTW89_HW_RATE_BY_CHIP_GEN(MCS24),
	};
	u8 band = chan->band_type;
	enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
	enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
	u8 tx_nss = rtwdev->hal.tx_nss;
	u8 i;

	for (i = 0; i < tx_nss; i++)
		if (!__check_rate_pattern(&next_pattern, hw_rate_he[i][chip_gen],
					  RA_MASK_HE_RATES, RTW89_RA_MODE_HE,
					  mask->control[nl_band].he_mcs[i],
					  0, true))
			goto out;

	for (i = 0; i < tx_nss; i++)
		if (!__check_rate_pattern(&next_pattern, hw_rate_vht[i][chip_gen],
					  RA_MASK_VHT_RATES, RTW89_RA_MODE_VHT,
					  mask->control[nl_band].vht_mcs[i],
					  0, true))
			goto out;

	for (i = 0; i < tx_nss; i++)
		if (!__check_rate_pattern(&next_pattern, hw_rate_ht[i][chip_gen],
					  RA_MASK_HT_RATES, RTW89_RA_MODE_HT,
					  mask->control[nl_band].ht_mcs[i],
					  0, true))
			goto out;

	/* legacy cannot be empty for nl80211_parse_tx_bitrate_mask, and
	 * requires at least one basic rate for ieee80211_set_bitrate_mask,
	 * so the decision just depends on whether all bitrates are set or not.
	 */
	sband = rtwdev->hw->wiphy->bands[nl_band];
	if (band == RTW89_BAND_2G) {
		if (!__check_rate_pattern(&next_pattern, RTW89_HW_RATE_CCK1,
					  RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES,
					  RTW89_RA_MODE_CCK | RTW89_RA_MODE_OFDM,
					  mask->control[nl_band].legacy,
					  BIT(sband->n_bitrates) - 1, false))
			goto out;
	} else {
		if (!__check_rate_pattern(&next_pattern, RTW89_HW_RATE_OFDM6,
					  RA_MASK_OFDM_RATES, RTW89_RA_MODE_OFDM,
					  mask->control[nl_band].legacy,
					  BIT(sband->n_bitrates) - 1, false))
			goto out;
	}

	if (!next_pattern.enable)
		goto out;

	rtwvif_link->rate_pattern = next_pattern;
	rtw89_debug(rtwdev, RTW89_DBG_RA,
		    "configure pattern: rate 0x%x, mask 0x%llx, mode 0x%x\n",
		    next_pattern.rate,
		    next_pattern.ra_mask,
		    next_pattern.ra_mode);
	return;

out:
	rtwvif_link->rate_pattern.enable = false;
	rtw89_debug(rtwdev, RTW89_DBG_RA, "unset rate pattern\n");
}

void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
				struct ieee80211_vif *vif,
				const struct cfg80211_bitrate_mask *mask)
{
	struct rtw89_vif *rtwvif = vif_to_rtwvif(vif);
	struct rtw89_vif_link *rtwvif_link;
	unsigned int link_id;

	rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
		__rtw89_phy_rate_pattern_vif(rtwdev, rtwvif_link, mask);
}
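
/* Behavior note: the HE/VHT/HT loops above pass force == true, so a fixed
 * rate is adopted only when a userspace bitrate mask narrows a rate class
 * to exactly one MCS; leaving several bits set abandons the fixed pattern
 * and falls back to the regular RA mask path.
 */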

static void rtw89_phy_ra_update_sta_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw89_dev *rtwdev = (struct rtw89_dev *)data;

	rtw89_phy_ra_update_sta(rtwdev, sta, IEEE80211_RC_SUPP_RATES_CHANGED);
}

void rtw89_phy_ra_update(struct rtw89_dev *rtwdev)
{
	ieee80211_iterate_stations_atomic(rtwdev->hw,
					  rtw89_phy_ra_update_sta_iter,
					  rtwdev);
}

void rtw89_phy_ra_assoc(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwsta_link)
{
	struct rtw89_vif_link *rtwvif_link = rtwsta_link->rtwvif_link;
	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
	struct rtw89_ra_info *ra = &rtwsta_link->ra;
	u8 rssi = ewma_rssi_read(&rtwsta_link->avg_rssi) >> RSSI_FACTOR;
	struct ieee80211_link_sta *link_sta;
	bool csi;

	rcu_read_lock();

	link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
	csi = rtw89_sta_has_beamformer_cap(link_sta);

	rtw89_phy_ra_sta_update(rtwdev, rtwvif_link, rtwsta_link,
				link_sta, vif->p2p, csi);

	rcu_read_unlock();

	if (rssi > 40)
		ra->init_rate_lv = 1;
	else if (rssi > 20)
		ra->init_rate_lv = 2;
	else if (rssi > 1)
		ra->init_rate_lv = 3;
	else
		ra->init_rate_lv = 0;
	ra->upd_all = 1;
	rtw89_debug(rtwdev, RTW89_DBG_RA,
		    "ra assoc: macid = %d, mode = %d, bw = %d, nss = %d, lv = %d",
		    ra->macid,
		    ra->mode_ctrl,
		    ra->bw_cap,
		    ra->ss_num,
		    ra->init_rate_lv);
	rtw89_debug(rtwdev, RTW89_DBG_RA,
		    "ra assoc: dcm = %d, er = %d, ldpc = %d, stbc = %d, gi = %d %d",
		    ra->dcm_cap,
		    ra->er_cap,
		    ra->ldpc_cap,
		    ra->stbc_cap,
		    ra->en_sgi,
		    ra->giltf);

	rtw89_fw_h2c_ra(rtwdev, ra, csi);
}

u8 rtw89_phy_get_txsc(struct rtw89_dev *rtwdev,
		      const struct rtw89_chan *chan,
		      enum rtw89_bandwidth dbw)
{
	enum rtw89_bandwidth cbw = chan->band_width;
	u8 pri_ch = chan->primary_channel;
	u8 central_ch = chan->channel;
	u8 txsc_idx = 0;
	u8 tmp = 0;

	if (cbw == dbw || cbw == RTW89_CHANNEL_WIDTH_20)
		return txsc_idx;

	switch (cbw) {
	case RTW89_CHANNEL_WIDTH_40:
		txsc_idx = pri_ch > central_ch ? 1 : 2;
		break;
	case RTW89_CHANNEL_WIDTH_80:
		if (dbw == RTW89_CHANNEL_WIDTH_20) {
			if (pri_ch > central_ch)
				txsc_idx = (pri_ch - central_ch) >> 1;
			else
				txsc_idx = ((central_ch - pri_ch) >> 1) + 1;
		} else {
			txsc_idx = pri_ch > central_ch ? 9 : 10;
		}
		break;
	case RTW89_CHANNEL_WIDTH_160:
		if (pri_ch > central_ch)
			tmp = (pri_ch - central_ch) >> 1;
		else
			tmp = ((central_ch - pri_ch) >> 1) + 1;

		if (dbw == RTW89_CHANNEL_WIDTH_20) {
			txsc_idx = tmp;
		} else if (dbw == RTW89_CHANNEL_WIDTH_40) {
			if (tmp == 1 || tmp == 3)
				txsc_idx = 9;
			else if (tmp == 5 || tmp == 7)
				txsc_idx = 11;
			else if (tmp == 2 || tmp == 4)
				txsc_idx = 10;
			else if (tmp == 6 || tmp == 8)
				txsc_idx = 12;
			else
				return 0xff;
		} else {
			txsc_idx = pri_ch > central_ch ? 13 : 14;
		}
		break;
	case RTW89_CHANNEL_WIDTH_80_80:
		if (dbw == RTW89_CHANNEL_WIDTH_20) {
			if (pri_ch > central_ch)
				txsc_idx = (10 - (pri_ch - central_ch)) >> 1;
			else
				txsc_idx = ((central_ch - pri_ch) >> 1) + 5;
		} else if (dbw == RTW89_CHANNEL_WIDTH_40) {
			txsc_idx = pri_ch > central_ch ? 10 : 12;
		} else {
			txsc_idx = 14;
		}
		break;
	default:
		break;
	}

	return txsc_idx;
}
EXPORT_SYMBOL(rtw89_phy_get_txsc);
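
/* Worked example (illustrative): an 80 MHz channel centered on ch 42 with
 * primary ch 36 and dbw == RTW89_CHANNEL_WIDTH_20 takes the pri_ch <
 * central_ch branch and yields txsc_idx = ((42 - 36) >> 1) + 1 = 4.
 */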

u8 rtw89_phy_get_txsb(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan,
		      enum rtw89_bandwidth dbw)
{
	enum rtw89_bandwidth cbw = chan->band_width;
	u8 pri_ch = chan->primary_channel;
	u8 central_ch = chan->channel;
	u8 txsb_idx = 0;

	if (cbw == dbw || cbw == RTW89_CHANNEL_WIDTH_20)
		return txsb_idx;

	switch (cbw) {
	case RTW89_CHANNEL_WIDTH_40:
		txsb_idx = pri_ch > central_ch ? 1 : 0;
		break;
	case RTW89_CHANNEL_WIDTH_80:
		if (dbw == RTW89_CHANNEL_WIDTH_20)
			txsb_idx = (pri_ch - central_ch + 6) / 4;
		else
			txsb_idx = pri_ch > central_ch ? 1 : 0;
		break;
	case RTW89_CHANNEL_WIDTH_160:
		if (dbw == RTW89_CHANNEL_WIDTH_20)
			txsb_idx = (pri_ch - central_ch + 14) / 4;
		else if (dbw == RTW89_CHANNEL_WIDTH_40)
			txsb_idx = (pri_ch - central_ch + 12) / 8;
		else
			txsb_idx = pri_ch > central_ch ? 1 : 0;
		break;
	case RTW89_CHANNEL_WIDTH_320:
		if (dbw == RTW89_CHANNEL_WIDTH_20)
			txsb_idx = (pri_ch - central_ch + 30) / 4;
		else if (dbw == RTW89_CHANNEL_WIDTH_40)
			txsb_idx = (pri_ch - central_ch + 28) / 8;
		else if (dbw == RTW89_CHANNEL_WIDTH_80)
			txsb_idx = (pri_ch - central_ch + 24) / 16;
		else
			txsb_idx = pri_ch > central_ch ? 1 : 0;
		break;
	default:
		break;
	}

	return txsb_idx;
}
EXPORT_SYMBOL(rtw89_phy_get_txsb);
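
/* Worked example (illustrative): unlike txsc, txsb counts subchannels
 * linearly from the low edge. For an 80 MHz channel centered on ch 42
 * with dbw == 20 MHz, primary ch 36 gives (36 - 42 + 6) / 4 = 0 (lowest
 * 20 MHz subband) and primary ch 48 gives (48 - 42 + 6) / 4 = 3.
 */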

static bool rtw89_phy_check_swsi_busy(struct rtw89_dev *rtwdev)
{
	return !!rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, B_SWSI_W_BUSY_V1) ||
	       !!rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, B_SWSI_R_BUSY_V1);
}

u32 rtw89_phy_read_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
		      u32 addr, u32 mask)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const u32 *base_addr = chip->rf_base_addr;
	u32 val, direct_addr;

	if (rf_path >= rtwdev->chip->rf_path_num) {
		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return INV_RF_DATA;
	}

	addr &= 0xff;
	direct_addr = base_addr[rf_path] + (addr << 2);
	mask &= RFREG_MASK;

	val = rtw89_phy_read32_mask(rtwdev, direct_addr, mask);

	return val;
}
EXPORT_SYMBOL(rtw89_phy_read_rf);

static u32 rtw89_phy_read_rf_a(struct rtw89_dev *rtwdev,
			       enum rtw89_rf_path rf_path, u32 addr, u32 mask)
{
	bool busy;
	bool done;
	u32 val;
	int ret;

	ret = read_poll_timeout_atomic(rtw89_phy_check_swsi_busy, busy, !busy,
				       1, 30, false, rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "read rf busy swsi\n");
		return INV_RF_DATA;
	}

	mask &= RFREG_MASK;

	val = FIELD_PREP(B_SWSI_READ_ADDR_PATH_V1, rf_path) |
	      FIELD_PREP(B_SWSI_READ_ADDR_ADDR_V1, addr);
	rtw89_phy_write32_mask(rtwdev, R_SWSI_READ_ADDR_V1, B_SWSI_READ_ADDR_V1, val);
	udelay(2);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, done, done, 1,
				       30, false, rtwdev, R_SWSI_V1,
				       B_SWSI_R_DATA_DONE_V1);
	if (ret) {
		rtw89_err(rtwdev, "read swsi busy\n");
		return INV_RF_DATA;
	}

	return rtw89_phy_read32_mask(rtwdev, R_SWSI_V1, mask);
}

u32 rtw89_phy_read_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
			 u32 addr, u32 mask)
{
	bool ad_sel = FIELD_GET(RTW89_RF_ADDR_ADSEL_MASK, addr);

	if (rf_path >= rtwdev->chip->rf_path_num) {
		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return INV_RF_DATA;
	}

	if (ad_sel)
		return rtw89_phy_read_rf(rtwdev, rf_path, addr, mask);
	else
		return rtw89_phy_read_rf_a(rtwdev, rf_path, addr, mask);
}
EXPORT_SYMBOL(rtw89_phy_read_rf_v1);

static u32 rtw89_phy_read_full_rf_v2_a(struct rtw89_dev *rtwdev,
				       enum rtw89_rf_path rf_path, u32 addr)
{
	static const u16 r_addr_ofst[2] = {0x2C24, 0x2D24};
	static const u16 addr_ofst[2] = {0x2ADC, 0x2BDC};
	bool busy, done;
	int ret;
	u32 val;

	rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_CTL_MASK, 0x1);
	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, busy, !busy,
				       1, 3800, false,
				       rtwdev, r_addr_ofst[rf_path], B_HWSI_VAL_BUSY);
	if (ret) {
		rtw89_warn(rtwdev, "poll HWSI is busy\n");
		return INV_RF_DATA;
	}

	rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_MASK, addr);
	rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_RD, 0x1);
	udelay(2);

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, done, done,
				       1, 3800, false,
				       rtwdev, r_addr_ofst[rf_path], B_HWSI_VAL_RDONE);
	if (ret) {
		rtw89_warn(rtwdev, "read HWSI is busy\n");
		val = INV_RF_DATA;
		goto out;
	}

	val = rtw89_phy_read32_mask(rtwdev, r_addr_ofst[rf_path], RFREG_MASK);
out:
	rtw89_phy_write32_mask(rtwdev, addr_ofst[rf_path], B_HWSI_ADD_POLL_MASK, 0);

	return val;
}

static u32 rtw89_phy_read_rf_v2_a(struct rtw89_dev *rtwdev,
				  enum rtw89_rf_path rf_path, u32 addr, u32 mask)
{
	u32 val;

	val = rtw89_phy_read_full_rf_v2_a(rtwdev, rf_path, addr);

	return (val & mask) >> __ffs(mask);
}

u32 rtw89_phy_read_rf_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
			 u32 addr, u32 mask)
{
	bool ad_sel = u32_get_bits(addr, RTW89_RF_ADDR_ADSEL_MASK);

	if (rf_path >= rtwdev->chip->rf_path_num) {
		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return INV_RF_DATA;
	}

	if (ad_sel)
		return rtw89_phy_read_rf(rtwdev, rf_path, addr, mask);
	else
		return rtw89_phy_read_rf_v2_a(rtwdev, rf_path, addr, mask);
}
EXPORT_SYMBOL(rtw89_phy_read_rf_v2);
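
/* Access-path note: RTW89_RF_ADDR_ADSEL_MASK in the register address
 * chooses between a memory-mapped "direct" access (rtw89_phy_read_rf /
 * rtw89_phy_write_rf) and an indirect serial interface (SWSI on v1,
 * HWSI on v2) that must be polled for busy/done, which is why the _a
 * variants poll before and after issuing the transaction.
 */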

bool rtw89_phy_write_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
			u32 addr, u32 mask, u32 data)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const u32 *base_addr = chip->rf_base_addr;
	u32 direct_addr;

	if (rf_path >= rtwdev->chip->rf_path_num) {
		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return false;
	}

	addr &= 0xff;
	direct_addr = base_addr[rf_path] + (addr << 2);
	mask &= RFREG_MASK;

	rtw89_phy_write32_mask(rtwdev, direct_addr, mask, data);

	/* delay to ensure writing properly */
	udelay(1);

	return true;
}
EXPORT_SYMBOL(rtw89_phy_write_rf);

static bool rtw89_phy_write_rf_a(struct rtw89_dev *rtwdev,
				 enum rtw89_rf_path rf_path, u32 addr, u32 mask,
				 u32 data)
{
	u8 bit_shift;
	u32 val;
	bool busy, b_msk_en = false;
	int ret;

	ret = read_poll_timeout_atomic(rtw89_phy_check_swsi_busy, busy, !busy,
				       1, 30, false, rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "write rf busy swsi\n");
		return false;
	}

	data &= RFREG_MASK;
	mask &= RFREG_MASK;

	if (mask != RFREG_MASK) {
		b_msk_en = true;
		rtw89_phy_write32_mask(rtwdev, R_SWSI_BIT_MASK_V1, RFREG_MASK,
				       mask);
		bit_shift = __ffs(mask);
		data = (data << bit_shift) & RFREG_MASK;
	}

	val = FIELD_PREP(B_SWSI_DATA_BIT_MASK_EN_V1, b_msk_en) |
	      FIELD_PREP(B_SWSI_DATA_PATH_V1, rf_path) |
	      FIELD_PREP(B_SWSI_DATA_ADDR_V1, addr) |
	      FIELD_PREP(B_SWSI_DATA_VAL_V1, data);

	rtw89_phy_write32_mask(rtwdev, R_SWSI_DATA_V1, MASKDWORD, val);

	return true;
}

bool rtw89_phy_write_rf_v1(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
			   u32 addr, u32 mask, u32 data)
{
	bool ad_sel = FIELD_GET(RTW89_RF_ADDR_ADSEL_MASK, addr);

	if (rf_path >= rtwdev->chip->rf_path_num) {
		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return false;
	}

	if (ad_sel)
		return rtw89_phy_write_rf(rtwdev, rf_path, addr, mask, data);
	else
		return rtw89_phy_write_rf_a(rtwdev, rf_path, addr, mask, data);
}
EXPORT_SYMBOL(rtw89_phy_write_rf_v1);

static
bool rtw89_phy_write_full_rf_v2_a(struct rtw89_dev *rtwdev,
				  enum rtw89_rf_path rf_path,
				  u32 addr, u32 data)
{
	static const u32 addr_is_idle[2] = {0x2C24, 0x2D24};
	static const u32 addr_ofst[2] = {0x2AE0, 0x2BE0};
	bool busy;
	u32 val;
	int ret;

	ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, busy, !busy,
				       1, 3800, false,
				       rtwdev, addr_is_idle[rf_path], BIT(29));
	if (ret) {
		rtw89_warn(rtwdev, "[%s] HWSI is busy\n", __func__);
		return false;
	}

	val = u32_encode_bits(addr, B_HWSI_DATA_ADDR) |
	      u32_encode_bits(data, B_HWSI_DATA_VAL);

	rtw89_phy_write32(rtwdev, addr_ofst[rf_path], val);

	return true;
}

static
bool rtw89_phy_write_rf_a_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
			     u32 addr, u32 mask, u32 data)
{
	u32 val;

	if (mask == RFREG_MASK) {
		val = data;
	} else {
		val = rtw89_phy_read_full_rf_v2_a(rtwdev, rf_path, addr);
		val &= ~mask;
		val |= (data << __ffs(mask)) & mask;
	}

	return rtw89_phy_write_full_rf_v2_a(rtwdev, rf_path, addr, val);
}

bool rtw89_phy_write_rf_v2(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
			   u32 addr, u32 mask, u32 data)
{
	bool ad_sel = u32_get_bits(addr, RTW89_RF_ADDR_ADSEL_MASK);

	if (rf_path >= rtwdev->chip->rf_path_num) {
		rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
		return false;
	}

	if (ad_sel)
		return rtw89_phy_write_rf(rtwdev, rf_path, addr, mask, data);
	else
		return rtw89_phy_write_rf_a_v2(rtwdev, rf_path, addr, mask, data);
}
EXPORT_SYMBOL(rtw89_phy_write_rf_v2);

static bool rtw89_chip_rf_v1(struct rtw89_dev *rtwdev)
{
	return rtwdev->chip->ops->write_rf == rtw89_phy_write_rf_v1;
}

static void __rtw89_phy_bb_reset(struct rtw89_dev *rtwdev,
				 enum rtw89_phy_idx phy_idx)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;

	chip->ops->bb_reset(rtwdev, phy_idx);
}

static void rtw89_phy_bb_reset(struct rtw89_dev *rtwdev)
{
	__rtw89_phy_bb_reset(rtwdev, RTW89_PHY_0);
	if (rtwdev->dbcc_en)
		__rtw89_phy_bb_reset(rtwdev, RTW89_PHY_1);
}

static void rtw89_phy_config_bb_reg(struct rtw89_dev *rtwdev,
				    const struct rtw89_reg2_def *reg,
				    enum rtw89_rf_path rf_path,
				    void *extra_data)
{
	u32 addr;

	if (reg->addr == 0xfe) {
		mdelay(50);
	} else if (reg->addr == 0xfd) {
		mdelay(5);
	} else if (reg->addr == 0xfc) {
		mdelay(1);
	} else if (reg->addr == 0xfb) {
		udelay(50);
	} else if (reg->addr == 0xfa) {
		udelay(5);
	} else if (reg->addr == 0xf9) {
		udelay(1);
	} else if (reg->data == BYPASS_CR_DATA) {
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "Bypass CR 0x%x\n", reg->addr);
	} else {
		addr = reg->addr;

		if ((uintptr_t)extra_data == RTW89_PHY_1)
			addr += rtw89_phy0_phy1_offset(rtwdev, reg->addr);

		rtw89_phy_write32(rtwdev, addr, reg->data);
	}
}

union rtw89_phy_bb_gain_arg {
	u32 addr;
	struct {
		union {
			u8 type;
			struct {
				u8 rxsc_start:4;
				u8 bw:4;
			};
		};
		u8 path;
		u8 gain_band;
		u8 cfg_type;
	};
} __packed;
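
/* Decoding example (illustrative): the union lets one table word carry
 * both the raw "address" and its packed meaning. On a little-endian host,
 * addr = 0x00010203 decodes as cfg_type = 0x00, gain_band = 0x01,
 * path = 0x02, and the low byte 0x03 reads either as type = 0x03 or as
 * rxsc_start = 3 with bw = 0.
 */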

static void
rtw89_phy_cfg_bb_gain_error(struct rtw89_dev *rtwdev,
			    union rtw89_phy_bb_gain_arg arg, u32 data)
{
	struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
	u8 type = arg.type;
	u8 path = arg.path;
	u8 gband = arg.gain_band;
	int i;

	switch (type) {
	case 0:
		for (i = 0; i < 4; i++, data >>= 8)
			gain->lna_gain[gband][path][i] = data & 0xff;
		break;
	case 1:
		for (i = 4; i < 7; i++, data >>= 8)
			gain->lna_gain[gband][path][i] = data & 0xff;
		break;
	case 2:
		for (i = 0; i < 2; i++, data >>= 8)
			gain->tia_gain[gband][path][i] = data & 0xff;
		break;
	default:
		rtw89_warn(rtwdev,
			   "bb gain error {0x%x:0x%x} with unknown type: %d\n",
			   arg.addr, data, type);
		break;
	}
}

enum rtw89_phy_bb_rxsc_start_idx {
	RTW89_BB_RXSC_START_IDX_FULL = 0,
	RTW89_BB_RXSC_START_IDX_20 = 1,
	RTW89_BB_RXSC_START_IDX_20_1 = 5,
	RTW89_BB_RXSC_START_IDX_40 = 9,
	RTW89_BB_RXSC_START_IDX_80 = 13,
};

static void
rtw89_phy_cfg_bb_rpl_ofst(struct rtw89_dev *rtwdev,
			  union rtw89_phy_bb_gain_arg arg, u32 data)
{
	struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
	u8 rxsc_start = arg.rxsc_start;
	u8 bw = arg.bw;
	u8 path = arg.path;
	u8 gband = arg.gain_band;
	u8 rxsc;
	s8 ofst;
	int i;

	switch (bw) {
	case RTW89_CHANNEL_WIDTH_20:
		gain->rpl_ofst_20[gband][path] = (s8)data;
		break;
	case RTW89_CHANNEL_WIDTH_40:
		if (rxsc_start == RTW89_BB_RXSC_START_IDX_FULL) {
			gain->rpl_ofst_40[gband][path][0] = (s8)data;
		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20) {
			for (i = 0; i < 2; i++, data >>= 8) {
				rxsc = RTW89_BB_RXSC_START_IDX_20 + i;
				ofst = (s8)(data & 0xff);
				gain->rpl_ofst_40[gband][path][rxsc] = ofst;
			}
		}
		break;
	case RTW89_CHANNEL_WIDTH_80:
		if (rxsc_start == RTW89_BB_RXSC_START_IDX_FULL) {
			gain->rpl_ofst_80[gband][path][0] = (s8)data;
		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20) {
			for (i = 0; i < 4; i++, data >>= 8) {
				rxsc = RTW89_BB_RXSC_START_IDX_20 + i;
				ofst = (s8)(data & 0xff);
				gain->rpl_ofst_80[gband][path][rxsc] = ofst;
			}
		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_40) {
			for (i = 0; i < 2; i++, data >>= 8) {
				rxsc = RTW89_BB_RXSC_START_IDX_40 + i;
				ofst = (s8)(data & 0xff);
				gain->rpl_ofst_80[gband][path][rxsc] = ofst;
			}
		}
		break;
	case RTW89_CHANNEL_WIDTH_160:
		if (rxsc_start == RTW89_BB_RXSC_START_IDX_FULL) {
			gain->rpl_ofst_160[gband][path][0] = (s8)data;
		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20) {
			for (i = 0; i < 4; i++, data >>= 8) {
				rxsc = RTW89_BB_RXSC_START_IDX_20 + i;
				ofst = (s8)(data & 0xff);
				gain->rpl_ofst_160[gband][path][rxsc] = ofst;
			}
		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_20_1) {
			for (i = 0; i < 4; i++, data >>= 8) {
				rxsc = RTW89_BB_RXSC_START_IDX_20_1 + i;
				ofst = (s8)(data & 0xff);
				gain->rpl_ofst_160[gband][path][rxsc] = ofst;
			}
		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_40) {
			for (i = 0; i < 4; i++, data >>= 8) {
				rxsc = RTW89_BB_RXSC_START_IDX_40 + i;
				ofst = (s8)(data & 0xff);
				gain->rpl_ofst_160[gband][path][rxsc] = ofst;
			}
		} else if (rxsc_start == RTW89_BB_RXSC_START_IDX_80) {
			for (i = 0; i < 2; i++, data >>= 8) {
				rxsc = RTW89_BB_RXSC_START_IDX_80 + i;
				ofst = (s8)(data & 0xff);
				gain->rpl_ofst_160[gband][path][rxsc] = ofst;
			}
		}
		break;
	default:
		rtw89_warn(rtwdev,
			   "bb rpl ofst {0x%x:0x%x} with unknown bw: %d\n",
			   arg.addr, data, bw);
		break;
	}
}

static void
rtw89_phy_cfg_bb_gain_bypass(struct rtw89_dev *rtwdev,
			     union rtw89_phy_bb_gain_arg arg, u32 data)
{
	struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
	u8 type = arg.type;
	u8 path = arg.path;
	u8 gband = arg.gain_band;
	int i;

	switch (type) {
	case 0:
		for (i = 0; i < 4; i++, data >>= 8)
			gain->lna_gain_bypass[gband][path][i] = data & 0xff;
		break;
	case 1:
		for (i = 4; i < 7; i++, data >>= 8)
			gain->lna_gain_bypass[gband][path][i] = data & 0xff;
		break;
	default:
		rtw89_warn(rtwdev,
			   "bb gain bypass {0x%x:0x%x} with unknown type: %d\n",
			   arg.addr, data, type);
		break;
	}
}

static void
rtw89_phy_cfg_bb_gain_op1db(struct rtw89_dev *rtwdev,
			    union rtw89_phy_bb_gain_arg arg, u32 data)
{
	struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
	u8 type = arg.type;
	u8 path = arg.path;
	u8 gband = arg.gain_band;
	int i;

	switch (type) {
	case 0:
		for (i = 0; i < 4; i++, data >>= 8)
			gain->lna_op1db[gband][path][i] = data & 0xff;
		break;
	case 1:
		for (i = 4; i < 7; i++, data >>= 8)
			gain->lna_op1db[gband][path][i] = data & 0xff;
		break;
	case 2:
		for (i = 0; i < 4; i++, data >>= 8)
			gain->tia_lna_op1db[gband][path][i] = data & 0xff;
		break;
	case 3:
		for (i = 4; i < 8; i++, data >>= 8)
			gain->tia_lna_op1db[gband][path][i] = data & 0xff;
		break;
	default:
		rtw89_warn(rtwdev,
			   "bb gain op1db {0x%x:0x%x} with unknown type: %d\n",
			   arg.addr, data, type);
		break;
	}
}

static void rtw89_phy_config_bb_gain_ax(struct rtw89_dev *rtwdev,
					const struct rtw89_reg2_def *reg,
					enum rtw89_rf_path rf_path,
					void *extra_data)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	union rtw89_phy_bb_gain_arg arg = { .addr = reg->addr };
	struct rtw89_efuse *efuse = &rtwdev->efuse;

	if (arg.gain_band >= RTW89_BB_GAIN_BAND_NR)
		return;

	if (arg.path >= chip->rf_path_num)
		return;

	if (arg.addr >= 0xf9 && arg.addr <= 0xfe) {
		rtw89_warn(rtwdev, "bb gain table with flow ctrl\n");
		return;
	}

	switch (arg.cfg_type) {
	case 0:
		rtw89_phy_cfg_bb_gain_error(rtwdev, arg, reg->data);
		break;
	case 1:
		rtw89_phy_cfg_bb_rpl_ofst(rtwdev, arg, reg->data);
		break;
	case 2:
		rtw89_phy_cfg_bb_gain_bypass(rtwdev, arg, reg->data);
		break;
	case 3:
		rtw89_phy_cfg_bb_gain_op1db(rtwdev, arg, reg->data);
		break;
	case 4:
		/* This cfg_type is only used by rfe_type >= 50 with eFEM */
		if (efuse->rfe_type < 50)
			break;
		fallthrough;
	default:
		rtw89_warn(rtwdev,
			   "bb gain {0x%x:0x%x} with unknown cfg type: %d\n",
			   arg.addr, reg->data, arg.cfg_type);
		break;
	}
}

static void
rtw89_phy_cofig_rf_reg_store(struct rtw89_dev *rtwdev,
			     const struct rtw89_reg2_def *reg,
			     enum rtw89_rf_path rf_path,
			     struct rtw89_fw_h2c_rf_reg_info *info)
{
	u16 idx = info->curr_idx % RTW89_H2C_RF_PAGE_SIZE;
	u8 page = info->curr_idx / RTW89_H2C_RF_PAGE_SIZE;

	if (page >= RTW89_H2C_RF_PAGE_NUM) {
		rtw89_warn(rtwdev, "RF parameters exceed size. path=%d, idx=%d",
			   rf_path, info->curr_idx);
		return;
	}

	info->rtw89_phy_config_rf_h2c[page][idx] =
		cpu_to_le32((reg->addr << 20) | reg->data);
	info->curr_idx++;
}

static int rtw89_phy_config_rf_reg_fw(struct rtw89_dev *rtwdev,
				      struct rtw89_fw_h2c_rf_reg_info *info)
{
	u16 remain = info->curr_idx;
	u16 len = 0;
	u8 i;
	int ret = 0;

	if (remain > RTW89_H2C_RF_PAGE_NUM * RTW89_H2C_RF_PAGE_SIZE) {
		rtw89_warn(rtwdev,
			   "rf reg h2c total len %d larger than %d\n",
			   remain, RTW89_H2C_RF_PAGE_NUM * RTW89_H2C_RF_PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	for (i = 0; i < RTW89_H2C_RF_PAGE_NUM && remain; i++, remain -= len) {
		len = remain > RTW89_H2C_RF_PAGE_SIZE ? RTW89_H2C_RF_PAGE_SIZE : remain;
		ret = rtw89_fw_h2c_rf_reg(rtwdev, info, len * 4, i);
		if (ret)
			goto out;
	}
out:
	info->curr_idx = 0;

	return ret;
}
path=%d, idx=%d", 1425 rf_path, info->curr_idx); 1426 return; 1427 } 1428 1429 info->rtw89_phy_config_rf_h2c[page][idx] = 1430 cpu_to_le32((reg->addr << 20) | reg->data); 1431 info->curr_idx++; 1432 } 1433 1434 static int rtw89_phy_config_rf_reg_fw(struct rtw89_dev *rtwdev, 1435 struct rtw89_fw_h2c_rf_reg_info *info) 1436 { 1437 u16 remain = info->curr_idx; 1438 u16 len = 0; 1439 u8 i; 1440 int ret = 0; 1441 1442 if (remain > RTW89_H2C_RF_PAGE_NUM * RTW89_H2C_RF_PAGE_SIZE) { 1443 rtw89_warn(rtwdev, 1444 "rf reg h2c total len %d larger than %d\n", 1445 remain, RTW89_H2C_RF_PAGE_NUM * RTW89_H2C_RF_PAGE_SIZE); 1446 ret = -EINVAL; 1447 goto out; 1448 } 1449 1450 for (i = 0; i < RTW89_H2C_RF_PAGE_NUM && remain; i++, remain -= len) { 1451 len = remain > RTW89_H2C_RF_PAGE_SIZE ? RTW89_H2C_RF_PAGE_SIZE : remain; 1452 ret = rtw89_fw_h2c_rf_reg(rtwdev, info, len * 4, i); 1453 if (ret) 1454 goto out; 1455 } 1456 out: 1457 info->curr_idx = 0; 1458 1459 return ret; 1460 } 1461 1462 static void rtw89_phy_config_rf_reg_noio(struct rtw89_dev *rtwdev, 1463 const struct rtw89_reg2_def *reg, 1464 enum rtw89_rf_path rf_path, 1465 void *extra_data) 1466 { 1467 u32 addr = reg->addr; 1468 1469 if (addr == 0xfe || addr == 0xfd || addr == 0xfc || addr == 0xfb || 1470 addr == 0xfa || addr == 0xf9) 1471 return; 1472 1473 if (rtw89_chip_rf_v1(rtwdev) && addr < 0x100) 1474 return; 1475 1476 rtw89_phy_cofig_rf_reg_store(rtwdev, reg, rf_path, 1477 (struct rtw89_fw_h2c_rf_reg_info *)extra_data); 1478 } 1479 1480 static void rtw89_phy_config_rf_reg(struct rtw89_dev *rtwdev, 1481 const struct rtw89_reg2_def *reg, 1482 enum rtw89_rf_path rf_path, 1483 void *extra_data) 1484 { 1485 if (reg->addr == 0xfe) { 1486 mdelay(50); 1487 } else if (reg->addr == 0xfd) { 1488 mdelay(5); 1489 } else if (reg->addr == 0xfc) { 1490 mdelay(1); 1491 } else if (reg->addr == 0xfb) { 1492 udelay(50); 1493 } else if (reg->addr == 0xfa) { 1494 udelay(5); 1495 } else if (reg->addr == 0xf9) { 1496 udelay(1); 1497 } else { 1498 rtw89_write_rf(rtwdev, rf_path, reg->addr, 0xfffff, reg->data); 1499 rtw89_phy_cofig_rf_reg_store(rtwdev, reg, rf_path, 1500 (struct rtw89_fw_h2c_rf_reg_info *)extra_data); 1501 } 1502 } 1503 1504 void rtw89_phy_config_rf_reg_v1(struct rtw89_dev *rtwdev, 1505 const struct rtw89_reg2_def *reg, 1506 enum rtw89_rf_path rf_path, 1507 void *extra_data) 1508 { 1509 rtw89_write_rf(rtwdev, rf_path, reg->addr, RFREG_MASK, reg->data); 1510 1511 if (reg->addr < 0x100) 1512 return; 1513 1514 rtw89_phy_cofig_rf_reg_store(rtwdev, reg, rf_path, 1515 (struct rtw89_fw_h2c_rf_reg_info *)extra_data); 1516 } 1517 EXPORT_SYMBOL(rtw89_phy_config_rf_reg_v1); 1518 1519 static int rtw89_phy_sel_headline(struct rtw89_dev *rtwdev, 1520 const struct rtw89_phy_table *table, 1521 u32 *headline_size, u32 *headline_idx, 1522 u8 rfe, u8 cv) 1523 { 1524 const struct rtw89_reg2_def *reg; 1525 u32 headline; 1526 u32 compare, target; 1527 u8 rfe_para, cv_para; 1528 u8 cv_max = 0; 1529 bool case_matched = false; 1530 u32 i; 1531 1532 for (i = 0; i < table->n_regs; i++) { 1533 reg = &table->regs[i]; 1534 headline = get_phy_headline(reg->addr); 1535 if (headline != PHY_HEADLINE_VALID) 1536 break; 1537 } 1538 *headline_size = i; 1539 if (*headline_size == 0) 1540 return 0; 1541 1542 /* case 1: RFE match, CV match */ 1543 compare = get_phy_compare(rfe, cv); 1544 for (i = 0; i < *headline_size; i++) { 1545 reg = &table->regs[i]; 1546 target = get_phy_target(reg->addr); 1547 if (target == compare) { 1548 *headline_idx = i; 1549 return 0; 1550 } 1551 } 1552 1553 /* case 

static void rtw89_phy_init_reg(struct rtw89_dev *rtwdev,
			       const struct rtw89_phy_table *table,
			       void (*config)(struct rtw89_dev *rtwdev,
					      const struct rtw89_reg2_def *reg,
					      enum rtw89_rf_path rf_path,
					      void *data),
			       void *extra_data)
{
	const struct rtw89_reg2_def *reg;
	enum rtw89_rf_path rf_path = table->rf_path;
	u8 rfe = rtwdev->efuse.rfe_type;
	u8 cv = rtwdev->hal.cv;
	u32 i;
	u32 headline_size = 0, headline_idx = 0;
	u32 target = 0, cfg_target;
	u8 cond;
	bool is_matched = true;
	bool target_found = false;
	int ret;

	ret = rtw89_phy_sel_headline(rtwdev, table, &headline_size,
				     &headline_idx, rfe, cv);
	if (ret) {
		rtw89_err(rtwdev, "invalid PHY package: %d/%d\n", rfe, cv);
		return;
	}

	cfg_target = get_phy_target(table->regs[headline_idx].addr);
	for (i = headline_size; i < table->n_regs; i++) {
		reg = &table->regs[i];
		cond = get_phy_cond(reg->addr);
		switch (cond) {
		case PHY_COND_BRANCH_IF:
		case PHY_COND_BRANCH_ELIF:
			target = get_phy_target(reg->addr);
			break;
		case PHY_COND_BRANCH_ELSE:
			is_matched = false;
			if (!target_found) {
				rtw89_warn(rtwdev, "failed to load CR %x/%x\n",
					   reg->addr, reg->data);
				return;
			}
			break;
		case PHY_COND_BRANCH_END:
			is_matched = true;
			target_found = false;
			break;
		case PHY_COND_CHECK:
			if (target_found) {
				is_matched = false;
				break;
			}

			if (target == cfg_target) {
				is_matched = true;
				target_found = true;
			} else {
				is_matched = false;
				target_found = false;
			}
			break;
		default:
			if (is_matched)
				config(rtwdev, reg, rf_path, extra_data);
			break;
		}
	}
}
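
/* Interpreter note: the loop above walks the table as a tiny conditional
 * script keyed on the headline target chosen earlier. IF/ELIF records a
 * candidate target, CHECK enables writes for the first matching branch,
 * ELSE stops writes (and flags an error if no branch ever matched), END
 * resets the branch state, and any other entry is a plain register write
 * executed only while is_matched is true.
 */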

void rtw89_phy_init_bb_reg(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_phy_table *bb_table;
	const struct rtw89_phy_table *bb_gain_table;

	bb_table = elm_info->bb_tbl ? elm_info->bb_tbl : chip->bb_table;
	rtw89_phy_init_reg(rtwdev, bb_table, rtw89_phy_config_bb_reg, NULL);
	if (rtwdev->dbcc_en)
		rtw89_phy_init_reg(rtwdev, bb_table, rtw89_phy_config_bb_reg,
				   (void *)RTW89_PHY_1);

	rtw89_chip_init_txpwr_unit(rtwdev);

	bb_gain_table = elm_info->bb_gain ? elm_info->bb_gain : chip->bb_gain_table;
	if (bb_gain_table)
		rtw89_phy_init_reg(rtwdev, bb_gain_table,
				   chip->phy_def->config_bb_gain, NULL);

	rtw89_phy_bb_reset(rtwdev);
}

static u32 rtw89_phy_nctl_poll(struct rtw89_dev *rtwdev)
{
	rtw89_phy_write32(rtwdev, 0x8080, 0x4);
	udelay(1);
	return rtw89_phy_read32(rtwdev, 0x8080);
}

void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev, bool noio)
{
	void (*config)(struct rtw89_dev *rtwdev, const struct rtw89_reg2_def *reg,
		       enum rtw89_rf_path rf_path, void *data);
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_phy_table *rf_table;
	struct rtw89_fw_h2c_rf_reg_info *rf_reg_info;
	u8 path;

	rf_reg_info = kzalloc(sizeof(*rf_reg_info), GFP_KERNEL);
	if (!rf_reg_info)
		return;

	for (path = RF_PATH_A; path < chip->rf_path_num; path++) {
		rf_table = elm_info->rf_radio[path] ?
			   elm_info->rf_radio[path] : chip->rf_table[path];
		rf_reg_info->rf_path = rf_table->rf_path;
		if (noio)
			config = rtw89_phy_config_rf_reg_noio;
		else
			config = rf_table->config ? rf_table->config :
				 rtw89_phy_config_rf_reg;
		rtw89_phy_init_reg(rtwdev, rf_table, config, (void *)rf_reg_info);
		if (rtw89_phy_config_rf_reg_fw(rtwdev, rf_reg_info))
			rtw89_warn(rtwdev, "rf path %d reg h2c config failed\n",
				   rf_reg_info->rf_path);
	}
	kfree(rf_reg_info);
}

static void rtw89_phy_preinit_rf_nctl_ax(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 val;
	int ret;

	/* IQK/DPK clock & reset */
	rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, 0x3);
	rtw89_phy_write32_set(rtwdev, R_GNT_BT_WGT_EN, 0x1);
	rtw89_phy_write32_set(rtwdev, R_P0_PATH_RST, 0x8000000);
	if (chip->chip_id != RTL8851B)
		rtw89_phy_write32_set(rtwdev, R_P1_PATH_RST, 0x8000000);
	if (chip->chip_id == RTL8852B || chip->chip_id == RTL8852BT)
		rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, 0x2);

	/* check 0x8080 */
	rtw89_phy_write32(rtwdev, R_NCTL_CFG, 0x8);

	ret = read_poll_timeout(rtw89_phy_nctl_poll, val, val == 0x4, 10,
				1000, false, rtwdev);
	if (ret)
		rtw89_err(rtwdev, "failed to poll nctl block\n");
}

static void rtw89_phy_init_rf_nctl(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_phy_table *nctl_table;

	rtw89_phy_preinit_rf_nctl(rtwdev);

	nctl_table = elm_info->rf_nctl ? elm_info->rf_nctl : chip->nctl_table;
	rtw89_phy_init_reg(rtwdev, nctl_table, rtw89_phy_config_bb_reg, NULL);

	if (chip->nctl_post_table)
		rtw89_rfk_parser(rtwdev, chip->nctl_post_table);
}

static u32 rtw89_phy0_phy1_offset_ax(struct rtw89_dev *rtwdev, u32 addr)
{
	u32 phy_page = addr >> 8;
	u32 ofst = 0;

	switch (phy_page) {
	case 0x6:
	case 0x7:
	case 0x8:
	case 0x9:
	case 0xa:
	case 0xb:
	case 0xc:
	case 0xd:
	case 0x19:
	case 0x1a:
	case 0x1b:
		ofst = 0x2000;
		break;
	default:
		/* warning case */
		ofst = 0;
		break;
	}

	if (phy_page >= 0x40 && phy_page <= 0x4f)
		ofst = 0x2000;

	return ofst;
}

void rtw89_phy_write32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
			   u32 data, enum rtw89_phy_idx phy_idx)
{
	if (rtwdev->dbcc_en && phy_idx == RTW89_PHY_1)
		addr += rtw89_phy0_phy1_offset(rtwdev, addr);
	rtw89_phy_write32_mask(rtwdev, addr, mask, data);
}
EXPORT_SYMBOL(rtw89_phy_write32_idx);

void rtw89_phy_write32_idx_set(struct rtw89_dev *rtwdev, u32 addr, u32 bits,
			       enum rtw89_phy_idx phy_idx)
{
	if (rtwdev->dbcc_en && phy_idx == RTW89_PHY_1)
		addr += rtw89_phy0_phy1_offset(rtwdev, addr);
	rtw89_phy_write32_set(rtwdev, addr, bits);
}
EXPORT_SYMBOL(rtw89_phy_write32_idx_set);

void rtw89_phy_write32_idx_clr(struct rtw89_dev *rtwdev, u32 addr, u32 bits,
			       enum rtw89_phy_idx phy_idx)
{
	if (rtwdev->dbcc_en && phy_idx == RTW89_PHY_1)
		addr += rtw89_phy0_phy1_offset(rtwdev, addr);
	rtw89_phy_write32_clr(rtwdev, addr, bits);
}
EXPORT_SYMBOL(rtw89_phy_write32_idx_clr);

u32 rtw89_phy_read32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
			 enum rtw89_phy_idx phy_idx)
{
	if (rtwdev->dbcc_en && phy_idx == RTW89_PHY_1)
		addr += rtw89_phy0_phy1_offset(rtwdev, addr);
	return rtw89_phy_read32_mask(rtwdev, addr, mask);
}
EXPORT_SYMBOL(rtw89_phy_read32_idx);

void rtw89_phy_set_phy_regs(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
			    u32 val)
{
	rtw89_phy_write32_idx(rtwdev, addr, mask, val, RTW89_PHY_0);

	if (!rtwdev->dbcc_en)
		return;

	rtw89_phy_write32_idx(rtwdev, addr, mask, val, RTW89_PHY_1);
}
EXPORT_SYMBOL(rtw89_phy_set_phy_regs);

void rtw89_phy_write_reg3_tbl(struct rtw89_dev *rtwdev,
			      const struct rtw89_phy_reg3_tbl *tbl)
{
	const struct rtw89_reg3_def *reg3;
	int i;

	for (i = 0; i < tbl->size; i++) {
		reg3 = &tbl->reg3[i];
		rtw89_phy_write32_mask(rtwdev, reg3->addr, reg3->mask, reg3->data);
	}
}
EXPORT_SYMBOL(rtw89_phy_write_reg3_tbl);

static u8 rtw89_phy_ant_gain_domain_to_regd(struct rtw89_dev *rtwdev, u8 ant_gain_regd)
{
	switch (ant_gain_regd) {
	case RTW89_ANT_GAIN_ETSI:
		return RTW89_ETSI;
	default:
		rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
			    "unknown antenna gain domain: %d\n",
			    ant_gain_regd);
		return RTW89_REGD_NUM;
	}
}

/* antenna gain in unit of 0.25 dBm */
#define RTW89_ANT_GAIN_2GHZ_MIN -8
#define RTW89_ANT_GAIN_2GHZ_MAX 14
#define RTW89_ANT_GAIN_5GHZ_MIN -8
#define RTW89_ANT_GAIN_5GHZ_MAX 20
#define RTW89_ANT_GAIN_6GHZ_MIN -8
#define RTW89_ANT_GAIN_6GHZ_MAX 20

#define RTW89_ANT_GAIN_REF_2GHZ 14
#define RTW89_ANT_GAIN_REF_5GHZ 20
#define RTW89_ANT_GAIN_REF_6GHZ 20
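
/* Computation example (illustrative): offsets below are stored as
 * "reference minus clamped gain" in 0.25 dB steps, so a reported 2 GHz
 * antenna gain of 10 (2.5 dB) becomes an offset of
 * RTW89_ANT_GAIN_REF_2GHZ - clamp(10, -8, 14) = 14 - 10 = 4.
 */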

void rtw89_phy_ant_gain_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_acpi_rtag_result res = {};
	u32 domain;
	int ret;
	u8 i, j;
	u8 regd;
	u8 val;

	if (!chip->support_ant_gain)
		return;

	ret = rtw89_acpi_evaluate_rtag(rtwdev, &res);
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
			    "acpi: cannot eval rtag: %d\n", ret);
		return;
	}

	if (res.revision != 0) {
		rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
			    "unknown rtag revision: %d\n", res.revision);
		return;
	}

	domain = get_unaligned_le32(&res.domain);

	for (i = 0; i < RTW89_ANT_GAIN_DOMAIN_NUM; i++) {
		if (!(domain & BIT(i)))
			continue;

		regd = rtw89_phy_ant_gain_domain_to_regd(rtwdev, i);
		if (regd >= RTW89_REGD_NUM)
			continue;
		ant_gain->regd_enabled |= BIT(regd);
	}

	for (i = 0; i < RTW89_ANT_GAIN_CHAIN_NUM; i++) {
		for (j = 0; j < RTW89_ANT_GAIN_SUBBAND_NR; j++) {
			val = res.ant_gain_table[i][j];
			switch (j) {
			default:
			case RTW89_ANT_GAIN_2GHZ_SUBBAND:
				val = RTW89_ANT_GAIN_REF_2GHZ -
				      clamp_t(s8, val,
					      RTW89_ANT_GAIN_2GHZ_MIN,
					      RTW89_ANT_GAIN_2GHZ_MAX);
				break;
			case RTW89_ANT_GAIN_5GHZ_SUBBAND_1:
			case RTW89_ANT_GAIN_5GHZ_SUBBAND_2:
			case RTW89_ANT_GAIN_5GHZ_SUBBAND_2E:
			case RTW89_ANT_GAIN_5GHZ_SUBBAND_3_4:
				val = RTW89_ANT_GAIN_REF_5GHZ -
				      clamp_t(s8, val,
					      RTW89_ANT_GAIN_5GHZ_MIN,
					      RTW89_ANT_GAIN_5GHZ_MAX);
				break;
			case RTW89_ANT_GAIN_6GHZ_SUBBAND_5_L:
			case RTW89_ANT_GAIN_6GHZ_SUBBAND_5_H:
			case RTW89_ANT_GAIN_6GHZ_SUBBAND_6:
			case RTW89_ANT_GAIN_6GHZ_SUBBAND_7_L:
			case RTW89_ANT_GAIN_6GHZ_SUBBAND_7_H:
			case RTW89_ANT_GAIN_6GHZ_SUBBAND_8:
				val = RTW89_ANT_GAIN_REF_6GHZ -
				      clamp_t(s8, val,
					      RTW89_ANT_GAIN_6GHZ_MIN,
					      RTW89_ANT_GAIN_6GHZ_MAX);
			}
			ant_gain->offset[i][j] = val;
		}
	}
}

static
enum rtw89_ant_gain_subband rtw89_phy_ant_gain_get_subband(struct rtw89_dev *rtwdev,
							    u32 center_freq)
{
	switch (center_freq) {
	default:
		rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
			    "center freq: %u to antenna gain subband is unhandled\n",
			    center_freq);
		fallthrough;
	case 2412 ... 2484:
		return RTW89_ANT_GAIN_2GHZ_SUBBAND;
	case 5180 ... 5240:
		return RTW89_ANT_GAIN_5GHZ_SUBBAND_1;
	case 5250 ... 5320:
		return RTW89_ANT_GAIN_5GHZ_SUBBAND_2;
	case 5500 ... 5720:
		return RTW89_ANT_GAIN_5GHZ_SUBBAND_2E;
	case 5745 ... 5885:
		return RTW89_ANT_GAIN_5GHZ_SUBBAND_3_4;
	case 5955 ... 6155:
		return RTW89_ANT_GAIN_6GHZ_SUBBAND_5_L;
	case 6175 ... 6415:
		return RTW89_ANT_GAIN_6GHZ_SUBBAND_5_H;
	case 6435 ... 6515:
		return RTW89_ANT_GAIN_6GHZ_SUBBAND_6;
	case 6535 ... 6695:
		return RTW89_ANT_GAIN_6GHZ_SUBBAND_7_L;
	case 6715 ... 6855:
		return RTW89_ANT_GAIN_6GHZ_SUBBAND_7_H;

	/* freq 6875 (ch 185, 20MHz) spans RTW89_ANT_GAIN_6GHZ_SUBBAND_7_H
	 * and RTW89_ANT_GAIN_6GHZ_SUBBAND_8, so directly describe it with
	 * struct rtw89_6ghz_span.
	 */

	case 6895 ... 7115:
		return RTW89_ANT_GAIN_6GHZ_SUBBAND_8;
	}
}

static s8 rtw89_phy_ant_gain_query(struct rtw89_dev *rtwdev,
				   enum rtw89_rf_path path, u32 center_freq)
{
	struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
	enum rtw89_ant_gain_subband subband_l, subband_h;
	const struct rtw89_6ghz_span *span;

	span = rtw89_get_6ghz_span(rtwdev, center_freq);

	if (span && RTW89_ANT_GAIN_SPAN_VALID(span)) {
		subband_l = span->ant_gain_subband_low;
		subband_h = span->ant_gain_subband_high;
	} else {
		subband_l = rtw89_phy_ant_gain_get_subband(rtwdev, center_freq);
		subband_h = subband_l;
	}

	rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
		    "center_freq %u: antenna gain subband {%u, %u}\n",
		    center_freq, subband_l, subband_h);

	return min(ant_gain->offset[path][subband_l],
		   ant_gain->offset[path][subband_h]);
}

static s8 rtw89_phy_ant_gain_offset(struct rtw89_dev *rtwdev, u8 band, u32 center_freq)
{
	struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u8 regd = rtw89_regd_get(rtwdev, band);
	s8 offset_patha, offset_pathb;

	if (!chip->support_ant_gain)
		return 0;

	if (!(ant_gain->regd_enabled & BIT(regd)))
		return 0;

	offset_patha = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_A, center_freq);
	offset_pathb = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_B, center_freq);

	return max(offset_patha, offset_pathb);
}

s16 rtw89_phy_ant_gain_pwr_offset(struct rtw89_dev *rtwdev,
				  const struct rtw89_chan *chan)
{
	struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
	u8 regd = rtw89_regd_get(rtwdev, chan->band_type);
	s8 offset_patha, offset_pathb;

	if (!(ant_gain->regd_enabled & BIT(regd)))
		return 0;

	offset_patha = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_A, chan->freq);
	offset_pathb = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_B, chan->freq);

	return rtw89_phy_txpwr_rf_to_bb(rtwdev, offset_patha - offset_pathb);
}
EXPORT_SYMBOL(rtw89_phy_ant_gain_pwr_offset);

void rtw89_print_ant_gain(struct seq_file *m, struct rtw89_dev *rtwdev,
			  const struct rtw89_chan *chan)
{
	struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u8 regd = rtw89_regd_get(rtwdev, chan->band_type);
	s8 offset_patha, offset_pathb;

	if (!chip->support_ant_gain || !(ant_gain->regd_enabled & BIT(regd))) {
		seq_puts(m, "no DAG is applied\n");
		return;
	}

	offset_patha = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_A, chan->freq);
	offset_pathb = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_B, chan->freq);

	seq_printf(m, "ChainA offset: %d dBm\n", offset_patha);
	seq_printf(m, "ChainB offset: %d dBm\n", offset_pathb);
}

static const u8 rtw89_rs_idx_num_ax[] = {
	[RTW89_RS_CCK] = RTW89_RATE_CCK_NUM,
	[RTW89_RS_OFDM] = RTW89_RATE_OFDM_NUM,
	[RTW89_RS_MCS] = RTW89_RATE_MCS_NUM_AX,
	[RTW89_RS_HEDCM] = RTW89_RATE_HEDCM_NUM,
	[RTW89_RS_OFFSET] = RTW89_RATE_OFFSET_NUM_AX,
};

static const u8 rtw89_rs_nss_num_ax[] = {
	[RTW89_RS_CCK] = 1,
	[RTW89_RS_OFDM] = 1,
	[RTW89_RS_MCS] = RTW89_NSS_NUM,
	[RTW89_RS_HEDCM] = RTW89_NSS_HEDCM_NUM,
	[RTW89_RS_OFFSET] = 1,
};

s8 *rtw89_phy_raw_byr_seek(struct rtw89_dev *rtwdev,
			   struct rtw89_txpwr_byrate *head,
			   const struct rtw89_rate_desc *desc)
{
	switch (desc->rs) {
	case RTW89_RS_CCK:
		return &head->cck[desc->idx];
	case RTW89_RS_OFDM:
		return &head->ofdm[desc->idx];
	case RTW89_RS_MCS:
		return &head->mcs[desc->ofdma][desc->nss][desc->idx];
	case RTW89_RS_HEDCM:
		return &head->hedcm[desc->ofdma][desc->nss][desc->idx];
	case RTW89_RS_OFFSET:
		return &head->offset[desc->idx];
	default:
		rtw89_warn(rtwdev, "unrecognized byr rs: %d\n", desc->rs);
		return &head->trap;
	}
}

void rtw89_phy_load_txpwr_byrate(struct rtw89_dev *rtwdev,
				 const struct rtw89_txpwr_table *tbl)
{
	const struct rtw89_txpwr_byrate_cfg *cfg = tbl->data;
	const struct rtw89_txpwr_byrate_cfg *end = cfg + tbl->size;
	struct rtw89_txpwr_byrate *byr_head;
	struct rtw89_rate_desc desc = {};
	s8 *byr;
	u32 data;
	u8 i;

	for (; cfg < end; cfg++) {
		byr_head = &rtwdev->byr[cfg->band][0];
		desc.rs = cfg->rs;
		desc.nss = cfg->nss;
		data = cfg->data;

		for (i = 0; i < cfg->len; i++, data >>= 8) {
			desc.idx = cfg->shf + i;
			byr = rtw89_phy_raw_byr_seek(rtwdev, byr_head, &desc);
			*byr = data & 0xff;
		}
	}
}
EXPORT_SYMBOL(rtw89_phy_load_txpwr_byrate);

static s8 rtw89_phy_txpwr_dbm_without_tolerance(s8 dbm)
{
	const u8 tssi_deviation_point = 0;
	const u8 tssi_max_deviation = 2;

	if (dbm <= tssi_deviation_point)
		dbm -= tssi_max_deviation;

	return dbm;
}

static s8 rtw89_phy_get_tpe_constraint(struct rtw89_dev *rtwdev, u8 band)
{
	struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
	const struct rtw89_reg_6ghz_tpe *tpe = &regulatory->reg_6ghz_tpe;
	s8 cstr = S8_MAX;

	if (band == RTW89_BAND_6G && tpe->valid)
		cstr = rtw89_phy_txpwr_dbm_without_tolerance(tpe->constraint);

	return rtw89_phy_txpwr_dbm_to_mac(rtwdev, cstr);
}

s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev, u8 band, u8 bw,
			       const struct rtw89_rate_desc *rate_desc)
{
	struct rtw89_txpwr_byrate *byr_head;
	s8 *byr;

	if (rate_desc->rs == RTW89_RS_CCK)
		band = RTW89_BAND_2G;

	byr_head = &rtwdev->byr[band][bw];
	byr = rtw89_phy_raw_byr_seek(rtwdev, byr_head, rate_desc);

	return rtw89_phy_txpwr_rf_to_mac(rtwdev, *byr);
}

static u8 rtw89_channel_6g_to_idx(struct rtw89_dev *rtwdev, u8 channel_6g)
{
	switch (channel_6g) {
	case 1 ... 29:
		return (channel_6g - 1) / 2;
	case 33 ... 61:
		return (channel_6g - 3) / 2;
	case 65 ... 93:
		return (channel_6g - 5) / 2;
	case 97 ... 125:
		return (channel_6g - 7) / 2;
	case 129 ... 157:
		return (channel_6g - 9) / 2;
	case 161 ... 189:
		return (channel_6g - 11) / 2;
	case 193 ... 221:
		return (channel_6g - 13) / 2;
	case 225 ... 253:
		return (channel_6g - 15) / 2;
	default:
		rtw89_warn(rtwdev, "unknown 6g channel: %d\n", channel_6g);
		return 0;
	}
}

static u8 rtw89_channel_to_idx(struct rtw89_dev *rtwdev, u8 band, u8 channel)
{
	if (band == RTW89_BAND_6G)
		return rtw89_channel_6g_to_idx(rtwdev, channel);

	switch (channel) {
	case 1 ... 14:
		return channel - 1;
	case 36 ... 64:
		return (channel - 36) / 2;
	case 100 ... 144:
		return ((channel - 100) / 2) + 15;
	case 149 ... 177:
		return ((channel - 149) / 2) + 38;
	default:
		rtw89_warn(rtwdev, "unknown channel: %d\n", channel);
		return 0;
	}
}
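
/*
 * Worked example (illustrative): 5 GHz channel 153 falls in the
 * 149...177 range, so its limit-table index is ((153 - 149) / 2) + 38
 * = 40; 6 GHz channel 37 falls in 33...61 and maps to (37 - 3) / 2 = 17.
 */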

s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
			      u8 bw, u8 ntx, u8 rs, u8 bf, u8 ch)
{
	const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms;
	const struct rtw89_txpwr_rule_2ghz *rule_2ghz = &rfe_parms->rule_2ghz;
	const struct rtw89_txpwr_rule_5ghz *rule_5ghz = &rfe_parms->rule_5ghz;
	const struct rtw89_txpwr_rule_6ghz *rule_6ghz = &rfe_parms->rule_6ghz;
	struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
	enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
	u32 freq = ieee80211_channel_to_frequency(ch, nl_band);
	u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch);
	u8 regd = rtw89_regd_get(rtwdev, band);
	u8 reg6 = regulatory->reg_6ghz_power;
	s8 lmt = 0, sar, offset;
	s8 cstr;

	switch (band) {
	case RTW89_BAND_2G:
		lmt = (*rule_2ghz->lmt)[bw][ntx][rs][bf][regd][ch_idx];
		if (lmt)
			break;

		lmt = (*rule_2ghz->lmt)[bw][ntx][rs][bf][RTW89_WW][ch_idx];
		break;
	case RTW89_BAND_5G:
		lmt = (*rule_5ghz->lmt)[bw][ntx][rs][bf][regd][ch_idx];
		if (lmt)
			break;

		lmt = (*rule_5ghz->lmt)[bw][ntx][rs][bf][RTW89_WW][ch_idx];
		break;
	case RTW89_BAND_6G:
		lmt = (*rule_6ghz->lmt)[bw][ntx][rs][bf][regd][reg6][ch_idx];
		if (lmt)
			break;

		lmt = (*rule_6ghz->lmt)[bw][ntx][rs][bf][RTW89_WW]
				       [RTW89_REG_6GHZ_POWER_DFLT]
				       [ch_idx];
		break;
	default:
		rtw89_warn(rtwdev, "unknown band type: %d\n", band);
		return 0;
	}

	offset = rtw89_phy_ant_gain_offset(rtwdev, band, freq);
	lmt = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt + offset);
	sar = rtw89_query_sar(rtwdev, freq);
	cstr = rtw89_phy_get_tpe_constraint(rtwdev, band);

	return min3(lmt, sar, cstr);
}
EXPORT_SYMBOL(rtw89_phy_read_txpwr_limit);

#define __fill_txpwr_limit_nonbf_bf(ptr, band, bw, ntx, rs, ch)		\
	do {								\
		u8 __i;							\
		for (__i = 0; __i < RTW89_BF_NUM; __i++)		\
			ptr[__i] = rtw89_phy_read_txpwr_limit(rtwdev,	\
							      band,	\
							      bw, ntx,	\
							      rs, __i,	\
							      (ch));	\
	} while (0)

static void rtw89_phy_fill_txpwr_limit_20m_ax(struct rtw89_dev *rtwdev,
					      struct rtw89_txpwr_limit_ax *lmt,
					      u8 band, u8 ntx, u8 ch)
{
	__fill_txpwr_limit_nonbf_bf(lmt->cck_20m, band, RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_CCK, ch);
	__fill_txpwr_limit_nonbf_bf(lmt->cck_40m, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_CCK, ch);
	__fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_OFDM, ch);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch);
}

static void rtw89_phy_fill_txpwr_limit_40m_ax(struct rtw89_dev *rtwdev,
					      struct rtw89_txpwr_limit_ax *lmt,
					      u8 band, u8 ntx, u8 ch, u8 pri_ch)
{
	__fill_txpwr_limit_nonbf_bf(lmt->cck_20m, band, RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_CCK, ch - 2);
	__fill_txpwr_limit_nonbf_bf(lmt->cck_40m, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_CCK, ch);
	__fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_OFDM, pri_ch);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch - 2);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch + 2);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], band,
				    RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch);
}
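
/*
 * Worked example (illustrative): for a 40 MHz channel centered on
 * ch 38 with primary ch 36, the 20 MHz MCS limits are sampled at the
 * two sub-channels 36 (ch - 2) and 40 (ch + 2), CCK at 36, OFDM at the
 * primary channel, and the 40 MHz MCS limit at the center itself.
 */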

static void rtw89_phy_fill_txpwr_limit_80m_ax(struct rtw89_dev *rtwdev,
					      struct rtw89_txpwr_limit_ax *lmt,
					      u8 band, u8 ntx, u8 ch, u8 pri_ch)
{
	s8 val_0p5_n[RTW89_BF_NUM];
	s8 val_0p5_p[RTW89_BF_NUM];
	u8 i;

	__fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_OFDM, pri_ch);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch - 6);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch - 2);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[2], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch + 2);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[3], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch + 6);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], band,
				    RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch - 4);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[1], band,
				    RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch + 4);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[0], band,
				    RTW89_CHANNEL_WIDTH_80,
				    ntx, RTW89_RS_MCS, ch);

	__fill_txpwr_limit_nonbf_bf(val_0p5_n, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch - 4);
	__fill_txpwr_limit_nonbf_bf(val_0p5_p, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch + 4);

	for (i = 0; i < RTW89_BF_NUM; i++)
		lmt->mcs_40m_0p5[i] = min_t(s8, val_0p5_n[i], val_0p5_p[i]);
}

static void rtw89_phy_fill_txpwr_limit_160m_ax(struct rtw89_dev *rtwdev,
					       struct rtw89_txpwr_limit_ax *lmt,
					       u8 band, u8 ntx, u8 ch, u8 pri_ch)
{
	s8 val_0p5_n[RTW89_BF_NUM];
	s8 val_0p5_p[RTW89_BF_NUM];
	s8 val_2p5_n[RTW89_BF_NUM];
	s8 val_2p5_p[RTW89_BF_NUM];
	u8 i;

	/* fill ofdm section */
	__fill_txpwr_limit_nonbf_bf(lmt->ofdm, band, RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_OFDM, pri_ch);

	/* fill mcs 20m section */
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch - 14);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch - 10);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[2], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch - 6);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[3], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch - 2);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[4], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch + 2);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[5], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch + 6);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[6], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch + 10);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[7], band,
				    RTW89_CHANNEL_WIDTH_20,
				    ntx, RTW89_RS_MCS, ch + 14);

	/* fill mcs 40m section */
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], band,
				    RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch - 12);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[1], band,
				    RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch - 4);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[2], band,
				    RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch + 4);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[3], band,
				    RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch + 12);

	/* fill mcs 80m section */
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[0], band,
				    RTW89_CHANNEL_WIDTH_80,
				    ntx, RTW89_RS_MCS, ch - 8);
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[1], band,
				    RTW89_CHANNEL_WIDTH_80,
				    ntx, RTW89_RS_MCS, ch + 8);

	/* fill mcs 160m section */
	__fill_txpwr_limit_nonbf_bf(lmt->mcs_160m, band,
				    RTW89_CHANNEL_WIDTH_160,
				    ntx, RTW89_RS_MCS, ch);

	/* fill mcs 40m 0p5 section */
	__fill_txpwr_limit_nonbf_bf(val_0p5_n, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch - 4);
	__fill_txpwr_limit_nonbf_bf(val_0p5_p, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch + 4);

	for (i = 0; i < RTW89_BF_NUM; i++)
		lmt->mcs_40m_0p5[i] = min_t(s8, val_0p5_n[i], val_0p5_p[i]);

	/* fill mcs 40m 2p5 section */
	__fill_txpwr_limit_nonbf_bf(val_2p5_n, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch - 8);
	__fill_txpwr_limit_nonbf_bf(val_2p5_p, band, RTW89_CHANNEL_WIDTH_40,
				    ntx, RTW89_RS_MCS, ch + 8);

	for (i = 0; i < RTW89_BF_NUM; i++)
		lmt->mcs_40m_2p5[i] = min_t(s8, val_2p5_n[i], val_2p5_p[i]);
}

static
void rtw89_phy_fill_txpwr_limit_ax(struct rtw89_dev *rtwdev,
				   const struct rtw89_chan *chan,
				   struct rtw89_txpwr_limit_ax *lmt,
				   u8 ntx)
{
	u8 band = chan->band_type;
	u8 pri_ch = chan->primary_channel;
	u8 ch = chan->channel;
	u8 bw = chan->band_width;

	memset(lmt, 0, sizeof(*lmt));

	switch (bw) {
	case RTW89_CHANNEL_WIDTH_20:
		rtw89_phy_fill_txpwr_limit_20m_ax(rtwdev, lmt, band, ntx, ch);
		break;
	case RTW89_CHANNEL_WIDTH_40:
		rtw89_phy_fill_txpwr_limit_40m_ax(rtwdev, lmt, band, ntx, ch,
						  pri_ch);
		break;
	case RTW89_CHANNEL_WIDTH_80:
		rtw89_phy_fill_txpwr_limit_80m_ax(rtwdev, lmt, band, ntx, ch,
						  pri_ch);
		break;
	case RTW89_CHANNEL_WIDTH_160:
		rtw89_phy_fill_txpwr_limit_160m_ax(rtwdev, lmt, band, ntx, ch,
						   pri_ch);
		break;
	}
}

s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band,
				 u8 ru, u8 ntx, u8 ch)
{
	const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms;
	const struct rtw89_txpwr_rule_2ghz *rule_2ghz = &rfe_parms->rule_2ghz;
	const struct rtw89_txpwr_rule_5ghz *rule_5ghz = &rfe_parms->rule_5ghz;
	const struct rtw89_txpwr_rule_6ghz *rule_6ghz = &rfe_parms->rule_6ghz;
	struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
	enum nl80211_band nl_band = rtw89_hw_to_nl80211_band(band);
	u32 freq = ieee80211_channel_to_frequency(ch, nl_band);
	u8 ch_idx = rtw89_channel_to_idx(rtwdev, band, ch);
	u8 regd = rtw89_regd_get(rtwdev, band);
	u8 reg6 = regulatory->reg_6ghz_power;
	s8 lmt_ru = 0, sar, offset;
	s8 cstr;

	switch (band) {
	case RTW89_BAND_2G:
		lmt_ru = (*rule_2ghz->lmt_ru)[ru][ntx][regd][ch_idx];
		if (lmt_ru)
			break;

		lmt_ru = (*rule_2ghz->lmt_ru)[ru][ntx][RTW89_WW][ch_idx];
		break;
	case RTW89_BAND_5G:
		lmt_ru = (*rule_5ghz->lmt_ru)[ru][ntx][regd][ch_idx];
		if (lmt_ru)
			break;

		lmt_ru = (*rule_5ghz->lmt_ru)[ru][ntx][RTW89_WW][ch_idx];
		break;
	case RTW89_BAND_6G:
		lmt_ru = (*rule_6ghz->lmt_ru)[ru][ntx][regd][reg6][ch_idx];
		if (lmt_ru)
			break;

		lmt_ru = (*rule_6ghz->lmt_ru)[ru][ntx][RTW89_WW]
					     [RTW89_REG_6GHZ_POWER_DFLT]
					     [ch_idx];
		break;
	default:
		rtw89_warn(rtwdev, "unknown band type: %d\n", band);
		return 0;
	}

	offset = rtw89_phy_ant_gain_offset(rtwdev, band, freq);
	lmt_ru = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt_ru + offset);
	sar = rtw89_query_sar(rtwdev, freq);
	cstr = rtw89_phy_get_tpe_constraint(rtwdev, band);

	return min3(lmt_ru, sar, cstr);
}

static void
rtw89_phy_fill_txpwr_limit_ru_20m_ax(struct rtw89_dev *rtwdev,
				     struct rtw89_txpwr_limit_ru_ax *lmt_ru,
				     u8 band, u8 ntx, u8 ch)
{
	lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU26,
							ntx, ch);
	lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU52,
							ntx, ch);
	lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							 RTW89_RU106,
							 ntx, ch);
}

static void
rtw89_phy_fill_txpwr_limit_ru_40m_ax(struct rtw89_dev *rtwdev,
				     struct rtw89_txpwr_limit_ru_ax *lmt_ru,
				     u8 band, u8 ntx, u8 ch)
{
	lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU26,
							ntx, ch - 2);
	lmt_ru->ru26[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU26,
							ntx, ch + 2);
	lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU52,
							ntx, ch - 2);
	lmt_ru->ru52[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU52,
							ntx, ch + 2);
	lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							 RTW89_RU106,
							 ntx, ch - 2);
	lmt_ru->ru106[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							 RTW89_RU106,
							 ntx, ch + 2);
}

static void
rtw89_phy_fill_txpwr_limit_ru_80m_ax(struct rtw89_dev *rtwdev,
				     struct rtw89_txpwr_limit_ru_ax *lmt_ru,
				     u8 band, u8 ntx, u8 ch)
{
	lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU26,
							ntx, ch - 6);
	lmt_ru->ru26[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU26,
							ntx, ch - 2);
	lmt_ru->ru26[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU26,
							ntx, ch + 2);
	lmt_ru->ru26[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU26,
							ntx, ch + 6);
	lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU52,
							ntx, ch - 6);
	lmt_ru->ru52[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU52,
							ntx, ch - 2);
	lmt_ru->ru52[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU52,
							ntx, ch + 2);
	lmt_ru->ru52[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							RTW89_RU52,
							ntx, ch + 6);
	lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							 RTW89_RU106,
							 ntx, ch - 6);
	lmt_ru->ru106[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							 RTW89_RU106,
							 ntx, ch - 2);
	lmt_ru->ru106[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							 RTW89_RU106,
							 ntx, ch + 2);
	lmt_ru->ru106[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
							 RTW89_RU106,
							 ntx, ch + 6);
}

static void
rtw89_phy_fill_txpwr_limit_ru_160m_ax(struct rtw89_dev *rtwdev,
				      struct rtw89_txpwr_limit_ru_ax *lmt_ru,
				      u8 band, u8 ntx, u8 ch)
{
	static const int ofst[] = { -14, -10, -6, -2, 2, 6, 10, 14 };
	int i;

	static_assert(ARRAY_SIZE(ofst) == RTW89_RU_SEC_NUM_AX);
	for (i = 0; i < RTW89_RU_SEC_NUM_AX; i++) {
		lmt_ru->ru26[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
								RTW89_RU26,
								ntx,
								ch + ofst[i]);
		lmt_ru->ru52[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
								RTW89_RU52,
								ntx,
								ch + ofst[i]);
		lmt_ru->ru106[i] = rtw89_phy_read_txpwr_limit_ru(rtwdev, band,
								 RTW89_RU106,
								 ntx,
								 ch + ofst[i]);
	}
}

static
void rtw89_phy_fill_txpwr_limit_ru_ax(struct rtw89_dev *rtwdev,
				      const struct rtw89_chan *chan,
				      struct rtw89_txpwr_limit_ru_ax *lmt_ru,
				      u8 ntx)
{
	u8 band = chan->band_type;
	u8 ch = chan->channel;
	u8 bw = chan->band_width;

	memset(lmt_ru, 0, sizeof(*lmt_ru));

	switch (bw) {
	case RTW89_CHANNEL_WIDTH_20:
		rtw89_phy_fill_txpwr_limit_ru_20m_ax(rtwdev, lmt_ru, band, ntx,
						     ch);
		break;
	case RTW89_CHANNEL_WIDTH_40:
		rtw89_phy_fill_txpwr_limit_ru_40m_ax(rtwdev, lmt_ru, band, ntx,
						     ch);
		break;
	case RTW89_CHANNEL_WIDTH_80:
		rtw89_phy_fill_txpwr_limit_ru_80m_ax(rtwdev, lmt_ru, band, ntx,
						     ch);
		break;
	case RTW89_CHANNEL_WIDTH_160:
		rtw89_phy_fill_txpwr_limit_ru_160m_ax(rtwdev, lmt_ru, band, ntx,
						      ch);
		break;
	}
}

static void rtw89_phy_set_txpwr_byrate_ax(struct rtw89_dev *rtwdev,
					  const struct rtw89_chan *chan,
					  enum rtw89_phy_idx phy_idx)
{
	u8 max_nss_num = rtwdev->chip->rf_path_num;
	static const u8 rs[] = {
		RTW89_RS_CCK,
		RTW89_RS_OFDM,
		RTW89_RS_MCS,
		RTW89_RS_HEDCM,
	};
	struct rtw89_rate_desc cur = {};
	u8 band = chan->band_type;
	u8 ch = chan->channel;
	u32 addr, val;
	s8 v[4] = {};
	u8 i;

	rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
		    "[TXPWR] set txpwr byrate with ch=%d\n", ch);

	BUILD_BUG_ON(rtw89_rs_idx_num_ax[RTW89_RS_CCK] % 4);
	BUILD_BUG_ON(rtw89_rs_idx_num_ax[RTW89_RS_OFDM] % 4);
	BUILD_BUG_ON(rtw89_rs_idx_num_ax[RTW89_RS_MCS] % 4);
	BUILD_BUG_ON(rtw89_rs_idx_num_ax[RTW89_RS_HEDCM] % 4);

	addr = R_AX_PWR_BY_RATE;
	for (cur.nss = 0; cur.nss < max_nss_num; cur.nss++) {
		for (i = 0; i < ARRAY_SIZE(rs); i++) {
			if (cur.nss >= rtw89_rs_nss_num_ax[rs[i]])
				continue;

			cur.rs = rs[i];
			for (cur.idx = 0; cur.idx < rtw89_rs_idx_num_ax[rs[i]];
			     cur.idx++) {
				v[cur.idx % 4] =
					rtw89_phy_read_txpwr_byrate(rtwdev,
								    band, 0,
								    &cur);

				if ((cur.idx + 1) % 4)
					continue;

				val = FIELD_PREP(GENMASK(7, 0), v[0]) |
				      FIELD_PREP(GENMASK(15, 8), v[1]) |
				      FIELD_PREP(GENMASK(23, 16), v[2]) |
				      FIELD_PREP(GENMASK(31, 24), v[3]);

				rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr,
							val);
				addr += 4;
			}
		}
	}
}
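
/*
 * Worked example (illustrative): with by-rate entries v[0..3] of
 * 0x10, 0x12, 0x14, 0x16, the packed register word is 0x16141210,
 * i.e. entry 0 in bits 7:0 up to entry 3 in bits 31:24, so one
 * rtw89_mac_txpwr_write32() covers four consecutive rates.
 */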

static
void rtw89_phy_set_txpwr_offset_ax(struct rtw89_dev *rtwdev,
				   const struct rtw89_chan *chan,
				   enum rtw89_phy_idx phy_idx)
{
	struct rtw89_rate_desc desc = {
		.nss = RTW89_NSS_1,
		.rs = RTW89_RS_OFFSET,
	};
	u8 band = chan->band_type;
	s8 v[RTW89_RATE_OFFSET_NUM_AX] = {};
	u32 val;

	rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr offset\n");

	for (desc.idx = 0; desc.idx < RTW89_RATE_OFFSET_NUM_AX; desc.idx++)
		v[desc.idx] = rtw89_phy_read_txpwr_byrate(rtwdev, band, 0, &desc);

	BUILD_BUG_ON(RTW89_RATE_OFFSET_NUM_AX != 5);
	val = FIELD_PREP(GENMASK(3, 0), v[0]) |
	      FIELD_PREP(GENMASK(7, 4), v[1]) |
	      FIELD_PREP(GENMASK(11, 8), v[2]) |
	      FIELD_PREP(GENMASK(15, 12), v[3]) |
	      FIELD_PREP(GENMASK(19, 16), v[4]);

	rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_AX_PWR_RATE_OFST_CTRL,
				     GENMASK(19, 0), val);
}

static void rtw89_phy_set_txpwr_limit_ax(struct rtw89_dev *rtwdev,
					 const struct rtw89_chan *chan,
					 enum rtw89_phy_idx phy_idx)
{
	u8 max_ntx_num = rtwdev->chip->rf_path_num;
	struct rtw89_txpwr_limit_ax lmt;
	u8 ch = chan->channel;
	u8 bw = chan->band_width;
	const s8 *ptr;
	u32 addr, val;
	u8 i, j;

	rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
		    "[TXPWR] set txpwr limit with ch=%d bw=%d\n", ch, bw);

	BUILD_BUG_ON(sizeof(struct rtw89_txpwr_limit_ax) !=
		     RTW89_TXPWR_LMT_PAGE_SIZE_AX);

	addr = R_AX_PWR_LMT;
	for (i = 0; i < max_ntx_num; i++) {
		rtw89_phy_fill_txpwr_limit_ax(rtwdev, chan, &lmt, i);

		ptr = (s8 *)&lmt;
		for (j = 0; j < RTW89_TXPWR_LMT_PAGE_SIZE_AX;
		     j += 4, addr += 4, ptr += 4) {
			val = FIELD_PREP(GENMASK(7, 0), ptr[0]) |
			      FIELD_PREP(GENMASK(15, 8), ptr[1]) |
			      FIELD_PREP(GENMASK(23, 16), ptr[2]) |
			      FIELD_PREP(GENMASK(31, 24), ptr[3]);

			rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
		}
	}
}

static void rtw89_phy_set_txpwr_limit_ru_ax(struct rtw89_dev *rtwdev,
					    const struct rtw89_chan *chan,
					    enum rtw89_phy_idx phy_idx)
{
	u8 max_ntx_num = rtwdev->chip->rf_path_num;
	struct rtw89_txpwr_limit_ru_ax lmt_ru;
	u8 ch = chan->channel;
	u8 bw = chan->band_width;
	const s8 *ptr;
	u32 addr, val;
	u8 i, j;

	rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
		    "[TXPWR] set txpwr limit ru with ch=%d bw=%d\n", ch, bw);

	BUILD_BUG_ON(sizeof(struct rtw89_txpwr_limit_ru_ax) !=
		     RTW89_TXPWR_LMT_RU_PAGE_SIZE_AX);

	addr = R_AX_PWR_RU_LMT;
	for (i = 0; i < max_ntx_num; i++) {
		rtw89_phy_fill_txpwr_limit_ru_ax(rtwdev, chan, &lmt_ru, i);

		ptr = (s8 *)&lmt_ru;
		for (j = 0; j < RTW89_TXPWR_LMT_RU_PAGE_SIZE_AX;
		     j += 4, addr += 4, ptr += 4) {
			val = FIELD_PREP(GENMASK(7, 0), ptr[0]) |
			      FIELD_PREP(GENMASK(15, 8), ptr[1]) |
			      FIELD_PREP(GENMASK(23, 16), ptr[2]) |
			      FIELD_PREP(GENMASK(31, 24), ptr[3]);

			rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
		}
	}
}
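
/*
 * Illustrative note (not from the original source): the two limit
 * writers above rely on the BUILD_BUG_ON() size checks so the limit
 * struct can be streamed as a flat s8 array, four entries per 32-bit
 * register, starting from R_AX_PWR_LMT / R_AX_PWR_RU_LMT respectively.
 */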

struct rtw89_phy_iter_ra_data {
	struct rtw89_dev *rtwdev;
	struct sk_buff *c2h;
};

static void __rtw89_phy_c2h_ra_rpt_iter(struct rtw89_sta_link *rtwsta_link,
					struct ieee80211_link_sta *link_sta,
					struct rtw89_phy_iter_ra_data *ra_data)
{
	struct rtw89_dev *rtwdev = ra_data->rtwdev;
	const struct rtw89_c2h_ra_rpt *c2h =
		(const struct rtw89_c2h_ra_rpt *)ra_data->c2h->data;
	struct rtw89_ra_report *ra_report = &rtwsta_link->ra_report;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	bool format_v1 = chip->chip_gen == RTW89_CHIP_BE;
	u8 mode, rate, bw, giltf, mac_id;
	u16 legacy_bitrate;
	bool valid;
	u8 mcs = 0;
	u8 t;

	mac_id = le32_get_bits(c2h->w2, RTW89_C2H_RA_RPT_W2_MACID);
	if (mac_id != rtwsta_link->mac_id)
		return;

	rate = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_MCSNSS);
	bw = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_BW);
	giltf = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_GILTF);
	mode = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_MD_SEL);

	if (format_v1) {
		t = le32_get_bits(c2h->w2, RTW89_C2H_RA_RPT_W2_MCSNSS_B7);
		rate |= u8_encode_bits(t, BIT(7));
		t = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_BW_B2);
		bw |= u8_encode_bits(t, BIT(2));
		t = le32_get_bits(c2h->w3, RTW89_C2H_RA_RPT_W3_MD_SEL_B2);
		mode |= u8_encode_bits(t, BIT(2));
	}

	if (mode == RTW89_RA_RPT_MODE_LEGACY) {
		valid = rtw89_ra_report_to_bitrate(rtwdev, rate, &legacy_bitrate);
		if (!valid)
			return;
	}

	memset(&ra_report->txrate, 0, sizeof(ra_report->txrate));

	switch (mode) {
	case RTW89_RA_RPT_MODE_LEGACY:
		ra_report->txrate.legacy = legacy_bitrate;
		break;
	case RTW89_RA_RPT_MODE_HT:
		ra_report->txrate.flags |= RATE_INFO_FLAGS_MCS;
		if (RTW89_CHK_FW_FEATURE(OLD_HT_RA_FORMAT, &rtwdev->fw))
			rate = RTW89_MK_HT_RATE(FIELD_GET(RTW89_RA_RATE_MASK_NSS, rate),
						FIELD_GET(RTW89_RA_RATE_MASK_MCS, rate));
		else
			rate = FIELD_GET(RTW89_RA_RATE_MASK_HT_MCS, rate);
		ra_report->txrate.mcs = rate;
		if (giltf)
			ra_report->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		mcs = ra_report->txrate.mcs & 0x07;
		break;
	case RTW89_RA_RPT_MODE_VHT:
		ra_report->txrate.flags |= RATE_INFO_FLAGS_VHT_MCS;
		ra_report->txrate.mcs = format_v1 ?
			u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS_V1) :
			u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS);
		ra_report->txrate.nss = format_v1 ?
			u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS_V1) + 1 :
			u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS) + 1;
		if (giltf)
			ra_report->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		mcs = ra_report->txrate.mcs;
		break;
	case RTW89_RA_RPT_MODE_HE:
		ra_report->txrate.flags |= RATE_INFO_FLAGS_HE_MCS;
		ra_report->txrate.mcs = format_v1 ?
			u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS_V1) :
			u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS);
		ra_report->txrate.nss = format_v1 ?
			u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS_V1) + 1 :
			u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS) + 1;
		if (giltf == RTW89_GILTF_2XHE08 || giltf == RTW89_GILTF_1XHE08)
			ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_0_8;
		else if (giltf == RTW89_GILTF_2XHE16 || giltf == RTW89_GILTF_1XHE16)
			ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_1_6;
		else
			ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_3_2;
		mcs = ra_report->txrate.mcs;
		break;
	case RTW89_RA_RPT_MODE_EHT:
		ra_report->txrate.flags |= RATE_INFO_FLAGS_EHT_MCS;
		ra_report->txrate.mcs = u8_get_bits(rate, RTW89_RA_RATE_MASK_MCS_V1);
		ra_report->txrate.nss = u8_get_bits(rate, RTW89_RA_RATE_MASK_NSS_V1) + 1;
		if (giltf == RTW89_GILTF_2XHE08 || giltf == RTW89_GILTF_1XHE08)
			ra_report->txrate.eht_gi = NL80211_RATE_INFO_EHT_GI_0_8;
		else if (giltf == RTW89_GILTF_2XHE16 || giltf == RTW89_GILTF_1XHE16)
			ra_report->txrate.eht_gi = NL80211_RATE_INFO_EHT_GI_1_6;
		else
			ra_report->txrate.eht_gi = NL80211_RATE_INFO_EHT_GI_3_2;
		mcs = ra_report->txrate.mcs;
		break;
	}

	ra_report->txrate.bw = rtw89_hw_to_rate_info_bw(bw);
	ra_report->bit_rate = cfg80211_calculate_bitrate(&ra_report->txrate);
	ra_report->hw_rate = format_v1 ?
			     u16_encode_bits(mode, RTW89_HW_RATE_V1_MASK_MOD) |
			     u16_encode_bits(rate, RTW89_HW_RATE_V1_MASK_VAL) :
			     u16_encode_bits(mode, RTW89_HW_RATE_MASK_MOD) |
			     u16_encode_bits(rate, RTW89_HW_RATE_MASK_VAL);
	ra_report->might_fallback_legacy = mcs <= 2;
	link_sta->agg.max_rc_amsdu_len = get_max_amsdu_len(rtwdev, ra_report);
	rtwsta_link->max_agg_wait = link_sta->agg.max_rc_amsdu_len / 1500 - 1;
}
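
/*
 * Illustrative note (not from the original source): "rate" above is
 * the raw firmware MCS/NSS composite; on RTW89_CHIP_BE parts the V1
 * layout widens it with the MCSNSS_B7, BW_B2 and MD_SEL_B2 extension
 * bits, and NSS is reported 0-based, hence the "+ 1" when filling
 * txrate.nss.
 */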

static void rtw89_phy_c2h_ra_rpt_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw89_phy_iter_ra_data *ra_data = (struct rtw89_phy_iter_ra_data *)data;
	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
	struct rtw89_sta_link *rtwsta_link;
	struct ieee80211_link_sta *link_sta;
	unsigned int link_id;

	rcu_read_lock();

	rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id) {
		link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, false);
		__rtw89_phy_c2h_ra_rpt_iter(rtwsta_link, link_sta, ra_data);
	}

	rcu_read_unlock();
}

static void
rtw89_phy_c2h_ra_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	struct rtw89_phy_iter_ra_data ra_data;

	ra_data.rtwdev = rtwdev;
	ra_data.c2h = c2h;
	ieee80211_iterate_stations_atomic(rtwdev->hw,
					  rtw89_phy_c2h_ra_rpt_iter,
					  &ra_data);
}

static
void (* const rtw89_phy_c2h_ra_handler[])(struct rtw89_dev *rtwdev,
					  struct sk_buff *c2h, u32 len) = {
	[RTW89_PHY_C2H_FUNC_STS_RPT] = rtw89_phy_c2h_ra_rpt,
	[RTW89_PHY_C2H_FUNC_MU_GPTBL_RPT] = NULL,
	[RTW89_PHY_C2H_FUNC_TXSTS] = NULL,
};

static void rtw89_phy_c2h_rfk_rpt_log(struct rtw89_dev *rtwdev,
				      enum rtw89_phy_c2h_rfk_log_func func,
				      void *content, u16 len)
{
	struct rtw89_c2h_rf_txgapk_rpt_log *txgapk;
	struct rtw89_c2h_rf_rxdck_rpt_log *rxdck;
	struct rtw89_c2h_rf_dack_rpt_log *dack;
	struct rtw89_c2h_rf_tssi_rpt_log *tssi;
	struct rtw89_c2h_rf_dpk_rpt_log *dpk;
	struct rtw89_c2h_rf_iqk_rpt_log *iqk;
	int i, j, k;

	switch (func) {
	case RTW89_PHY_C2H_RFK_LOG_FUNC_IQK:
		if (len != sizeof(*iqk))
			goto out;

		iqk = content;
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->is_iqk_init = %x\n", iqk->is_iqk_init);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->is_reload = %x\n", iqk->is_reload);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->is_nbiqk = %x\n", iqk->is_nbiqk);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->txiqk_en = %x\n", iqk->txiqk_en);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->rxiqk_en = %x\n", iqk->rxiqk_en);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->lok_en = %x\n", iqk->lok_en);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->iqk_xym_en = %x\n", iqk->iqk_xym_en);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->iqk_sram_en = %x\n", iqk->iqk_sram_en);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->iqk_fft_en = %x\n", iqk->iqk_fft_en);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->is_fw_iqk = %x\n", iqk->is_fw_iqk);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->is_iqk_enable = %x\n", iqk->is_iqk_enable);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->iqk_cfir_en = %x\n", iqk->iqk_cfir_en);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->thermal_rek_en = %x\n", iqk->thermal_rek_en);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->version = %x\n", iqk->version);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->phy = %x\n", iqk->phy);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[IQK] iqk->fwk_status = %x\n", iqk->fwk_status);

		for (i = 0; i < 2; i++) {
			rtw89_debug(rtwdev, RTW89_DBG_RFK,
				    "[IQK] ======== Path %x ========\n", i);
			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->iqk_band[%d] = %x\n",
				    i, iqk->iqk_band[i]);
			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->iqk_ch[%d] = %x\n",
				    i, iqk->iqk_ch[i]);
			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->iqk_bw[%d] = %x\n",
				    i, iqk->iqk_bw[i]);
			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->lok_idac[%d] = %x\n",
				    i, le32_to_cpu(iqk->lok_idac[i]));
			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->lok_vbuf[%d] = %x\n",
				    i, le32_to_cpu(iqk->lok_vbuf[i]));
			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->iqk_tx_fail[%d] = %x\n",
				    i, iqk->iqk_tx_fail[i]);
			rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] iqk->iqk_rx_fail[%d] = %x\n",
				    i, iqk->iqk_rx_fail[i]);
			for (j = 0; j < 4; j++)
				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[IQK] iqk->rftxgain[%d][%d] = %x\n",
					    i, j, le32_to_cpu(iqk->rftxgain[i][j]));
			for (j = 0; j < 4; j++)
				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[IQK] iqk->tx_xym[%d][%d] = %x\n",
					    i, j, le32_to_cpu(iqk->tx_xym[i][j]));
			for (j = 0; j < 4; j++)
				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[IQK] iqk->rfrxgain[%d][%d] = %x\n",
					    i, j, le32_to_cpu(iqk->rfrxgain[i][j]));
			for (j = 0; j < 4; j++)
				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[IQK] iqk->rx_xym[%d][%d] = %x\n",
					    i, j, le32_to_cpu(iqk->rx_xym[i][j]));
		}
		return;
	case RTW89_PHY_C2H_RFK_LOG_FUNC_DPK:
		if (len != sizeof(*dpk))
			goto out;

		dpk = content;
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "DPK ver:%d idx:%2ph band:%2ph bw:%2ph ch:%2ph path:%2ph\n",
			    dpk->ver, dpk->idx, dpk->band, dpk->bw, dpk->ch, dpk->path_ok);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "DPK txagc:%2ph ther:%2ph gs:%2ph dc_i:%4ph dc_q:%4ph\n",
			    dpk->txagc, dpk->ther, dpk->gs, dpk->dc_i, dpk->dc_q);
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "DPK corr_v:%2ph corr_i:%2ph to:%2ph ov:%2ph\n",
			    dpk->corr_val, dpk->corr_idx, dpk->is_timeout, dpk->rxbb_ov);
		return;
"[DACK]S1 CDACK ic = [0x%x, 0x%x]\n", 3124 dack->cdack_d[1][0][0], dack->cdack_d[1][0][1]); 3125 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 CDACK qc = [0x%x, 0x%x]\n", 3126 dack->cdack_d[1][1][0], dack->cdack_d[1][1][1]); 3127 3128 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADC_DCK ic = [0x%x, 0x%x]\n", 3129 ((u32)dack->addck2_hd[0][0][0] << 8) | dack->addck2_ld[0][0][0], 3130 ((u32)dack->addck2_hd[0][0][1] << 8) | dack->addck2_ld[0][0][1]); 3131 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADC_DCK qc = [0x%x, 0x%x]\n", 3132 ((u32)dack->addck2_hd[0][1][0] << 8) | dack->addck2_ld[0][1][0], 3133 ((u32)dack->addck2_hd[0][1][1] << 8) | dack->addck2_ld[0][1][1]); 3134 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADC_DCK ic = [0x%x, 0x%x]\n", 3135 ((u32)dack->addck2_hd[1][0][0] << 8) | dack->addck2_ld[1][0][0], 3136 ((u32)dack->addck2_hd[1][0][1] << 8) | dack->addck2_ld[1][0][1]); 3137 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADC_DCK qc = [0x%x, 0x%x]\n", 3138 ((u32)dack->addck2_hd[1][1][0] << 8) | dack->addck2_ld[1][1][0], 3139 ((u32)dack->addck2_hd[1][1][1] << 8) | dack->addck2_ld[1][1][1]); 3140 3141 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADC_GAINK ic = 0x%x, qc = 0x%x\n", 3142 dack->adgaink_d[0][0], dack->adgaink_d[0][1]); 3143 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADC_GAINK ic = 0x%x, qc = 0x%x\n", 3144 dack->adgaink_d[1][0], dack->adgaink_d[1][1]); 3145 3146 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n", 3147 dack->dadck_d[0][0], dack->dadck_d[0][1]); 3148 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 DAC_DCK ic = 0x%x, qc = 0x%x\n", 3149 dack->dadck_d[1][0], dack->dadck_d[1][1]); 3150 3151 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 biask iqc = 0x%x\n", 3152 ((u32)dack->biask_hd[0][0] << 8) | dack->biask_ld[0][0]); 3153 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 biask iqc = 0x%x\n", 3154 ((u32)dack->biask_hd[1][0] << 8) | dack->biask_ld[1][0]); 3155 3156 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK ic:\n"); 3157 for (i = 0; i < 0x10; i++) 3158 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", 3159 dack->msbk_d[0][0][i]); 3160 3161 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK qc:\n"); 3162 for (i = 0; i < 0x10; i++) 3163 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", 3164 dack->msbk_d[0][1][i]); 3165 3166 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK ic:\n"); 3167 for (i = 0; i < 0x10; i++) 3168 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", 3169 dack->msbk_d[1][0][i]); 3170 3171 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK qc:\n"); 3172 for (i = 0; i < 0x10; i++) 3173 rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", 3174 dack->msbk_d[1][1][i]); 3175 return; 3176 case RTW89_PHY_C2H_RFK_LOG_FUNC_RXDCK: 3177 if (len != sizeof(*rxdck)) 3178 goto out; 3179 3180 rxdck = content; 3181 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3182 "RXDCK ver:%d band:%2ph bw:%2ph ch:%2ph to:%2ph\n", 3183 rxdck->ver, rxdck->band, rxdck->bw, rxdck->ch, 3184 rxdck->timeout); 3185 return; 3186 case RTW89_PHY_C2H_RFK_LOG_FUNC_TSSI: 3187 if (len != sizeof(*tssi)) 3188 goto out; 3189 3190 tssi = content; 3191 for (i = 0; i < 2; i++) { 3192 for (j = 0; j < 2; j++) { 3193 for (k = 0; k < 4; k++) { 3194 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3195 "[TSSI] alignment_power_cw_h[%d][%d][%d]=%d\n", 3196 i, j, k, tssi->alignment_power_cw_h[i][j][k]); 3197 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3198 "[TSSI] alignment_power_cw_l[%d][%d][%d]=%d\n", 3199 i, j, k, tssi->alignment_power_cw_l[i][j][k]); 3200 rtw89_debug(rtwdev, RTW89_DBG_RFK, 3201 "[TSSI] 
	case RTW89_PHY_C2H_RFK_LOG_FUNC_TSSI:
		if (len != sizeof(*tssi))
			goto out;

		tssi = content;
		for (i = 0; i < 2; i++) {
			for (j = 0; j < 2; j++) {
				for (k = 0; k < 4; k++) {
					rtw89_debug(rtwdev, RTW89_DBG_RFK,
						    "[TSSI] alignment_power_cw_h[%d][%d][%d]=%d\n",
						    i, j, k, tssi->alignment_power_cw_h[i][j][k]);
					rtw89_debug(rtwdev, RTW89_DBG_RFK,
						    "[TSSI] alignment_power_cw_l[%d][%d][%d]=%d\n",
						    i, j, k, tssi->alignment_power_cw_l[i][j][k]);
					rtw89_debug(rtwdev, RTW89_DBG_RFK,
						    "[TSSI] alignment_power[%d][%d][%d]=%d\n",
						    i, j, k, tssi->alignment_power[i][j][k]);
					rtw89_debug(rtwdev, RTW89_DBG_RFK,
						    "[TSSI] alignment_power_cw[%d][%d][%d]=%d\n",
						    i, j, k,
						    (tssi->alignment_power_cw_h[i][j][k] << 8) +
						    tssi->alignment_power_cw_l[i][j][k]);
				}

				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[TSSI] tssi_alimk_state[%d][%d]=%d\n",
					    i, j, tssi->tssi_alimk_state[i][j]);
				rtw89_debug(rtwdev, RTW89_DBG_RFK,
					    "[TSSI] default_txagc_offset[%d]=%d\n",
					    j, tssi->default_txagc_offset[0][j]);
			}
		}
		return;
	case RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK:
		if (len != sizeof(*txgapk))
			goto out;

		txgapk = content;
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "[TXGAPK]rpt r0x8010[0]=0x%x, r0x8010[1]=0x%x\n",
			    le32_to_cpu(txgapk->r0x8010[0]),
			    le32_to_cpu(txgapk->r0x8010[1]));
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt chk_id = %d\n",
			    txgapk->chk_id);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt chk_cnt = %d\n",
			    le32_to_cpu(txgapk->chk_cnt));
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt ver = 0x%x\n",
			    txgapk->ver);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt rsv1 = %d\n",
			    txgapk->rsv1);

		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt track_d[0] = %*ph\n",
			    (int)sizeof(txgapk->track_d[0]), txgapk->track_d[0]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt power_d[0] = %*ph\n",
			    (int)sizeof(txgapk->power_d[0]), txgapk->power_d[0]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt track_d[1] = %*ph\n",
			    (int)sizeof(txgapk->track_d[1]), txgapk->track_d[1]);
		rtw89_debug(rtwdev, RTW89_DBG_RFK, "[TXGAPK]rpt power_d[1] = %*ph\n",
			    (int)sizeof(txgapk->power_d[1]), txgapk->power_d[1]);
		return;
	default:
		break;
	}

out:
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "unexpected RFK func %d report log with length %d\n", func, len);
}

static bool rtw89_phy_c2h_rfk_run_log(struct rtw89_dev *rtwdev,
				      enum rtw89_phy_c2h_rfk_log_func func,
				      void *content, u16 len)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	const struct rtw89_c2h_rf_run_log *log = content;
	const struct rtw89_fw_element_hdr *elm;
	u32 fmt_idx;
	u16 offset;

	if (sizeof(*log) != len)
		return false;

	if (!elm_info->rfk_log_fmt)
		return false;

	elm = elm_info->rfk_log_fmt->elm[func];
	fmt_idx = le32_to_cpu(log->fmt_idx);
	if (!elm || fmt_idx >= elm->u.rfk_log_fmt.nr)
		return false;

	offset = le16_to_cpu(elm->u.rfk_log_fmt.offset[fmt_idx]);
	if (offset == 0)
		return false;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, &elm->u.common.contents[offset],
		    le32_to_cpu(log->arg[0]), le32_to_cpu(log->arg[1]),
		    le32_to_cpu(log->arg[2]), le32_to_cpu(log->arg[3]));

	return true;
}
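
/*
 * Illustrative note (not from the original source): a RUN log carries
 * only a format index plus four u32 arguments. rtw89_phy_c2h_rfk_run_log()
 * resolves fmt_idx through the firmware-file rfk_log_fmt element to a
 * printf-style format string, so the bulky text lives in the firmware
 * file rather than in every C2H event.
 */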

static void rtw89_phy_c2h_rfk_log(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
				  u32 len, enum rtw89_phy_c2h_rfk_log_func func,
				  const char *rfk_name)
{
	struct rtw89_c2h_hdr *c2h_hdr = (struct rtw89_c2h_hdr *)c2h->data;
	struct rtw89_c2h_rf_log_hdr *log_hdr;
	void *log_ptr = c2h_hdr;
	u16 content_len;
	u16 chunk_len;
	bool handled;

	if (!rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK))
		return;

	log_ptr += sizeof(*c2h_hdr);
	len -= sizeof(*c2h_hdr);

	while (len > sizeof(*log_hdr)) {
		log_hdr = log_ptr;
		content_len = le16_to_cpu(log_hdr->len);
		chunk_len = content_len + sizeof(*log_hdr);

		if (chunk_len > len)
			break;

		switch (log_hdr->type) {
		case RTW89_RF_RUN_LOG:
			handled = rtw89_phy_c2h_rfk_run_log(rtwdev, func,
							    log_hdr->content, content_len);
			if (handled)
				break;

			rtw89_debug(rtwdev, RTW89_DBG_RFK, "%s run: %*ph\n",
				    rfk_name, content_len, log_hdr->content);
			break;
		case RTW89_RF_RPT_LOG:
			rtw89_phy_c2h_rfk_rpt_log(rtwdev, func,
						  log_hdr->content, content_len);
			break;
		default:
			return;
		}

		log_ptr += chunk_len;
		len -= chunk_len;
	}
}
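
/*
 * Illustrative walk-through (not from the original source): the C2H
 * payload after the header is a sequence of chunks, each a
 * rtw89_c2h_rf_log_hdr followed by "len" bytes of content. The loop
 * above consumes chunk by chunk and stops early when the remaining
 * bytes cannot hold a complete chunk, so a truncated tail is dropped
 * rather than parsed.
 */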

static void
rtw89_phy_c2h_rfk_log_iqk(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
			      RTW89_PHY_C2H_RFK_LOG_FUNC_IQK, "IQK");
}

static void
rtw89_phy_c2h_rfk_log_dpk(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
			      RTW89_PHY_C2H_RFK_LOG_FUNC_DPK, "DPK");
}

static void
rtw89_phy_c2h_rfk_log_dack(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
			      RTW89_PHY_C2H_RFK_LOG_FUNC_DACK, "DACK");
}

static void
rtw89_phy_c2h_rfk_log_rxdck(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
			      RTW89_PHY_C2H_RFK_LOG_FUNC_RXDCK, "RX_DCK");
}

static void
rtw89_phy_c2h_rfk_log_tssi(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
			      RTW89_PHY_C2H_RFK_LOG_FUNC_TSSI, "TSSI");
}

static void
rtw89_phy_c2h_rfk_log_txgapk(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	rtw89_phy_c2h_rfk_log(rtwdev, c2h, len,
			      RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK, "TXGAPK");
}

static
void (* const rtw89_phy_c2h_rfk_log_handler[])(struct rtw89_dev *rtwdev,
					       struct sk_buff *c2h, u32 len) = {
	[RTW89_PHY_C2H_RFK_LOG_FUNC_IQK] = rtw89_phy_c2h_rfk_log_iqk,
	[RTW89_PHY_C2H_RFK_LOG_FUNC_DPK] = rtw89_phy_c2h_rfk_log_dpk,
	[RTW89_PHY_C2H_RFK_LOG_FUNC_DACK] = rtw89_phy_c2h_rfk_log_dack,
	[RTW89_PHY_C2H_RFK_LOG_FUNC_RXDCK] = rtw89_phy_c2h_rfk_log_rxdck,
	[RTW89_PHY_C2H_RFK_LOG_FUNC_TSSI] = rtw89_phy_c2h_rfk_log_tssi,
	[RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK] = rtw89_phy_c2h_rfk_log_txgapk,
};

static
void rtw89_phy_rfk_report_prep(struct rtw89_dev *rtwdev)
{
	struct rtw89_rfk_wait_info *wait = &rtwdev->rfk_wait;

	wait->state = RTW89_RFK_STATE_START;
	wait->start_time = ktime_get();
	reinit_completion(&wait->completion);
}

static
int rtw89_phy_rfk_report_wait(struct rtw89_dev *rtwdev, const char *rfk_name,
			      unsigned int ms)
{
	struct rtw89_rfk_wait_info *wait = &rtwdev->rfk_wait;
	unsigned long time_left;

	/* Since we can't receive C2H event during SER, use a fixed delay. */
	if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags)) {
		fsleep(1000 * ms / 2);
		goto out;
	}

	time_left = wait_for_completion_timeout(&wait->completion,
						msecs_to_jiffies(ms));
	if (time_left == 0) {
		rtw89_warn(rtwdev, "failed to wait RF %s\n", rfk_name);
		return -ETIMEDOUT;
	} else if (wait->state != RTW89_RFK_STATE_OK) {
		rtw89_warn(rtwdev, "failed to do RF %s result from state %d\n",
			   rfk_name, wait->state);
		return -EFAULT;
	}

out:
	rtw89_debug(rtwdev, RTW89_DBG_RFK, "RF %s takes %lld ms to complete\n",
		    rfk_name, ktime_ms_delta(ktime_get(), wait->start_time));

	return 0;
}

static void
rtw89_phy_c2h_rfk_report_state(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	const struct rtw89_c2h_rfk_report *report =
		(const struct rtw89_c2h_rfk_report *)c2h->data;
	struct rtw89_rfk_wait_info *wait = &rtwdev->rfk_wait;

	wait->state = report->state;
	wait->version = report->version;

	complete(&wait->completion);

	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "RFK report state %d with version %d (%*ph)\n",
		    wait->state, wait->version,
		    (int)(len - sizeof(report->hdr)), &report->state);
}

static void
rtw89_phy_c2h_rfk_log_tas_pwr(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
}

static
void (* const rtw89_phy_c2h_rfk_report_handler[])(struct rtw89_dev *rtwdev,
						  struct sk_buff *c2h, u32 len) = {
	[RTW89_PHY_C2H_RFK_REPORT_FUNC_STATE] = rtw89_phy_c2h_rfk_report_state,
	[RTW89_PHY_C2H_RFK_LOG_TAS_PWR] = rtw89_phy_c2h_rfk_log_tas_pwr,
};

bool rtw89_phy_c2h_chk_atomic(struct rtw89_dev *rtwdev, u8 class, u8 func)
{
	switch (class) {
	case RTW89_PHY_C2H_RFK_LOG:
		switch (func) {
		case RTW89_PHY_C2H_RFK_LOG_FUNC_IQK:
		case RTW89_PHY_C2H_RFK_LOG_FUNC_DPK:
		case RTW89_PHY_C2H_RFK_LOG_FUNC_DACK:
		case RTW89_PHY_C2H_RFK_LOG_FUNC_RXDCK:
		case RTW89_PHY_C2H_RFK_LOG_FUNC_TSSI:
		case RTW89_PHY_C2H_RFK_LOG_FUNC_TXGAPK:
			return true;
		default:
			return false;
		}
	case RTW89_PHY_C2H_RFK_REPORT:
		switch (func) {
		case RTW89_PHY_C2H_RFK_REPORT_FUNC_STATE:
			return true;
		default:
			return false;
		}
	default:
		return false;
	}
}

void rtw89_phy_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
			  u32 len, u8 class, u8 func)
{
	void (*handler)(struct rtw89_dev *rtwdev,
			struct sk_buff *c2h, u32 len) = NULL;

	switch (class) {
	case RTW89_PHY_C2H_CLASS_RA:
		if (func < RTW89_PHY_C2H_FUNC_RA_MAX)
			handler = rtw89_phy_c2h_ra_handler[func];
		break;
	case RTW89_PHY_C2H_RFK_LOG:
		if (func < ARRAY_SIZE(rtw89_phy_c2h_rfk_log_handler))
			handler = rtw89_phy_c2h_rfk_log_handler[func];
		break;
	case RTW89_PHY_C2H_RFK_REPORT:
		if (func < ARRAY_SIZE(rtw89_phy_c2h_rfk_report_handler))
			handler = rtw89_phy_c2h_rfk_report_handler[func];
		break;
	case RTW89_PHY_C2H_CLASS_DM:
		if (func == RTW89_PHY_C2H_DM_FUNC_LOWRT_RTY)
			return;
		fallthrough;
	default:
		rtw89_info(rtwdev, "PHY c2h class %d not support\n", class);
		return;
	}
	if (!handler) {
		rtw89_info(rtwdev, "PHY c2h class %d func %d not support\n", class,
			   func);
		return;
	}
	handler(rtwdev, skb, len);
}
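
/*
 * Illustrative note (not from the original source): the *_and_wait
 * helpers below all follow the same pattern: rtw89_phy_rfk_report_prep()
 * arms rtwdev->rfk_wait, one H2C command kicks the calibration off in
 * firmware, and rtw89_phy_rfk_report_wait() blocks until the RFK report
 * C2H completes the waiter or the "ms" timeout expires.
 */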

int rtw89_phy_rfk_pre_ntfy_and_wait(struct rtw89_dev *rtwdev,
				    enum rtw89_phy_idx phy_idx,
				    unsigned int ms)
{
	int ret;

	rtw89_phy_rfk_report_prep(rtwdev);

	ret = rtw89_fw_h2c_rf_pre_ntfy(rtwdev, phy_idx);
	if (ret)
		return ret;

	return rtw89_phy_rfk_report_wait(rtwdev, "PRE_NTFY", ms);
}
EXPORT_SYMBOL(rtw89_phy_rfk_pre_ntfy_and_wait);

int rtw89_phy_rfk_tssi_and_wait(struct rtw89_dev *rtwdev,
				enum rtw89_phy_idx phy_idx,
				const struct rtw89_chan *chan,
				enum rtw89_tssi_mode tssi_mode,
				unsigned int ms)
{
	int ret;

	rtw89_phy_rfk_report_prep(rtwdev);

	ret = rtw89_fw_h2c_rf_tssi(rtwdev, phy_idx, chan, tssi_mode);
	if (ret)
		return ret;

	return rtw89_phy_rfk_report_wait(rtwdev, "TSSI", ms);
}
EXPORT_SYMBOL(rtw89_phy_rfk_tssi_and_wait);

int rtw89_phy_rfk_iqk_and_wait(struct rtw89_dev *rtwdev,
			       enum rtw89_phy_idx phy_idx,
			       const struct rtw89_chan *chan,
			       unsigned int ms)
{
	int ret;

	rtw89_phy_rfk_report_prep(rtwdev);

	ret = rtw89_fw_h2c_rf_iqk(rtwdev, phy_idx, chan);
	if (ret)
		return ret;

	return rtw89_phy_rfk_report_wait(rtwdev, "IQK", ms);
}
EXPORT_SYMBOL(rtw89_phy_rfk_iqk_and_wait);

int rtw89_phy_rfk_dpk_and_wait(struct rtw89_dev *rtwdev,
			       enum rtw89_phy_idx phy_idx,
			       const struct rtw89_chan *chan,
			       unsigned int ms)
{
	int ret;

	rtw89_phy_rfk_report_prep(rtwdev);

	ret = rtw89_fw_h2c_rf_dpk(rtwdev, phy_idx, chan);
	if (ret)
		return ret;

	return rtw89_phy_rfk_report_wait(rtwdev, "DPK", ms);
}
EXPORT_SYMBOL(rtw89_phy_rfk_dpk_and_wait);

int rtw89_phy_rfk_txgapk_and_wait(struct rtw89_dev *rtwdev,
				  enum rtw89_phy_idx phy_idx,
				  const struct rtw89_chan *chan,
				  unsigned int ms)
{
	int ret;

	rtw89_phy_rfk_report_prep(rtwdev);

	ret = rtw89_fw_h2c_rf_txgapk(rtwdev, phy_idx, chan);
	if (ret)
		return ret;

	return rtw89_phy_rfk_report_wait(rtwdev, "TXGAPK", ms);
}
EXPORT_SYMBOL(rtw89_phy_rfk_txgapk_and_wait);

int rtw89_phy_rfk_dack_and_wait(struct rtw89_dev *rtwdev,
				enum rtw89_phy_idx phy_idx,
				const struct rtw89_chan *chan,
				unsigned int ms)
{
	int ret;

	rtw89_phy_rfk_report_prep(rtwdev);

	ret = rtw89_fw_h2c_rf_dack(rtwdev, phy_idx, chan);
	if (ret)
		return ret;

	return rtw89_phy_rfk_report_wait(rtwdev, "DACK", ms);
}
EXPORT_SYMBOL(rtw89_phy_rfk_dack_and_wait);

int rtw89_phy_rfk_rxdck_and_wait(struct rtw89_dev *rtwdev,
				 enum rtw89_phy_idx phy_idx,
				 const struct rtw89_chan *chan,
				 bool is_chl_k, unsigned int ms)
{
	int ret;

	rtw89_phy_rfk_report_prep(rtwdev);

	ret = rtw89_fw_h2c_rf_rxdck(rtwdev, phy_idx, chan, is_chl_k);
	if (ret)
		return ret;

	return rtw89_phy_rfk_report_wait(rtwdev, "RX_DCK", ms);
}
EXPORT_SYMBOL(rtw89_phy_rfk_rxdck_and_wait);

static u32 phy_tssi_get_cck_group(u8 ch)
{
	switch (ch) {
	case 1 ... 2:
		return 0;
	case 3 ... 5:
		return 1;
	case 6 ... 8:
		return 2;
	case 9 ... 11:
		return 3;
	case 12 ... 13:
		return 4;
	case 14:
		return 5;
	}

	return 0;
}

#define PHY_TSSI_EXTRA_GROUP_BIT BIT(31)
#define PHY_TSSI_EXTRA_GROUP(idx) (PHY_TSSI_EXTRA_GROUP_BIT | (idx))
#define PHY_IS_TSSI_EXTRA_GROUP(group) ((group) & PHY_TSSI_EXTRA_GROUP_BIT)
#define PHY_TSSI_EXTRA_GET_GROUP_IDX1(group) \
	((group) & ~PHY_TSSI_EXTRA_GROUP_BIT)
#define PHY_TSSI_EXTRA_GET_GROUP_IDX2(group) \
	(PHY_TSSI_EXTRA_GET_GROUP_IDX1(group) + 1)

static u32 phy_tssi_get_ofdm_group(u8 ch)
{
	switch (ch) {
	case 1 ... 2:
		return 0;
	case 3 ... 5:
		return 1;
	case 6 ... 8:
		return 2;
	case 9 ... 11:
		return 3;
	case 12 ... 14:
		return 4;
	case 36 ... 40:
		return 5;
	case 41 ... 43:
		return PHY_TSSI_EXTRA_GROUP(5);
	case 44 ... 48:
		return 6;
	case 49 ... 51:
		return PHY_TSSI_EXTRA_GROUP(6);
	case 52 ... 56:
		return 7;
	case 57 ... 59:
		return PHY_TSSI_EXTRA_GROUP(7);
	case 60 ... 64:
		return 8;
	case 100 ... 104:
		return 9;
	case 105 ... 107:
		return PHY_TSSI_EXTRA_GROUP(9);
	case 108 ... 112:
		return 10;
	case 113 ... 115:
		return PHY_TSSI_EXTRA_GROUP(10);
	case 116 ... 120:
		return 11;
	case 121 ... 123:
		return PHY_TSSI_EXTRA_GROUP(11);
	case 124 ... 128:
		return 12;
	case 129 ... 131:
		return PHY_TSSI_EXTRA_GROUP(12);
	case 132 ... 136:
		return 13;
	case 137 ... 139:
		return PHY_TSSI_EXTRA_GROUP(13);
	case 140 ... 144:
		return 14;
	case 149 ... 153:
		return 15;
	case 154 ... 156:
		return PHY_TSSI_EXTRA_GROUP(15);
	case 157 ... 161:
		return 16;
	case 162 ... 164:
		return PHY_TSSI_EXTRA_GROUP(16);
	case 165 ... 169:
		return 17;
	case 170 ... 172:
		return PHY_TSSI_EXTRA_GROUP(17);
	case 173 ... 177:
		return 18;
	}

	return 0;
}
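
/*
 * Worked example (illustrative): channel 42 maps to
 * PHY_TSSI_EXTRA_GROUP(5), i.e. it straddles groups 5 and 6, so
 * phy_tssi_get_ofdm_de() further below averages tssi_mcs[path][5] and
 * tssi_mcs[path][6] instead of reading a single group entry.
 */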
149: 3804 return 18; 3805 case 150 ... 152: 3806 return PHY_TSSI_EXTRA_GROUP(18); 3807 case 153 ... 157: 3808 return 19; 3809 case 161 ... 165: 3810 return 20; 3811 case 166 ... 168: 3812 return PHY_TSSI_EXTRA_GROUP(20); 3813 case 169 ... 173: 3814 return 21; 3815 case 174 ... 176: 3816 return PHY_TSSI_EXTRA_GROUP(21); 3817 case 177 ... 181: 3818 return 22; 3819 case 182 ... 184: 3820 return PHY_TSSI_EXTRA_GROUP(22); 3821 case 185 ... 189: 3822 return 23; 3823 case 193 ... 197: 3824 return 24; 3825 case 198 ... 200: 3826 return PHY_TSSI_EXTRA_GROUP(24); 3827 case 201 ... 205: 3828 return 25; 3829 case 206 ... 208: 3830 return PHY_TSSI_EXTRA_GROUP(25); 3831 case 209 ... 213: 3832 return 26; 3833 case 214 ... 216: 3834 return PHY_TSSI_EXTRA_GROUP(26); 3835 case 217 ... 221: 3836 return 27; 3837 case 225 ... 229: 3838 return 28; 3839 case 230 ... 232: 3840 return PHY_TSSI_EXTRA_GROUP(28); 3841 case 233 ... 237: 3842 return 29; 3843 case 238 ... 240: 3844 return PHY_TSSI_EXTRA_GROUP(29); 3845 case 241 ... 245: 3846 return 30; 3847 case 246 ... 248: 3848 return PHY_TSSI_EXTRA_GROUP(30); 3849 case 249 ... 253: 3850 return 31; 3851 } 3852 3853 return 0; 3854 } 3855 3856 static u32 phy_tssi_get_trim_group(u8 ch) 3857 { 3858 switch (ch) { 3859 case 1 ... 8: 3860 return 0; 3861 case 9 ... 14: 3862 return 1; 3863 case 36 ... 48: 3864 return 2; 3865 case 49 ... 51: 3866 return PHY_TSSI_EXTRA_GROUP(2); 3867 case 52 ... 64: 3868 return 3; 3869 case 100 ... 112: 3870 return 4; 3871 case 113 ... 115: 3872 return PHY_TSSI_EXTRA_GROUP(4); 3873 case 116 ... 128: 3874 return 5; 3875 case 132 ... 144: 3876 return 6; 3877 case 149 ... 177: 3878 return 7; 3879 } 3880 3881 return 0; 3882 } 3883 3884 static u32 phy_tssi_get_6g_trim_group(u8 ch) 3885 { 3886 switch (ch) { 3887 case 1 ... 13: 3888 return 0; 3889 case 14 ... 16: 3890 return PHY_TSSI_EXTRA_GROUP(0); 3891 case 17 ... 29: 3892 return 1; 3893 case 33 ... 45: 3894 return 2; 3895 case 46 ... 48: 3896 return PHY_TSSI_EXTRA_GROUP(2); 3897 case 49 ... 61: 3898 return 3; 3899 case 65 ... 77: 3900 return 4; 3901 case 78 ... 80: 3902 return PHY_TSSI_EXTRA_GROUP(4); 3903 case 81 ... 93: 3904 return 5; 3905 case 97 ... 109: 3906 return 6; 3907 case 110 ... 112: 3908 return PHY_TSSI_EXTRA_GROUP(6); 3909 case 113 ... 125: 3910 return 7; 3911 case 129 ... 141: 3912 return 8; 3913 case 142 ... 144: 3914 return PHY_TSSI_EXTRA_GROUP(8); 3915 case 145 ... 157: 3916 return 9; 3917 case 161 ... 173: 3918 return 10; 3919 case 174 ... 176: 3920 return PHY_TSSI_EXTRA_GROUP(10); 3921 case 177 ... 189: 3922 return 11; 3923 case 193 ... 205: 3924 return 12; 3925 case 206 ... 208: 3926 return PHY_TSSI_EXTRA_GROUP(12); 3927 case 209 ... 221: 3928 return 13; 3929 case 225 ... 237: 3930 return 14; 3931 case 238 ... 240: 3932 return PHY_TSSI_EXTRA_GROUP(14); 3933 case 241 ... 
253: 3934 return 15; 3935 } 3936 3937 return 0; 3938 } 3939 3940 static s8 phy_tssi_get_ofdm_de(struct rtw89_dev *rtwdev, 3941 enum rtw89_phy_idx phy, 3942 const struct rtw89_chan *chan, 3943 enum rtw89_rf_path path) 3944 { 3945 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; 3946 enum rtw89_band band = chan->band_type; 3947 u8 ch = chan->channel; 3948 u32 gidx_1st; 3949 u32 gidx_2nd; 3950 s8 de_1st; 3951 s8 de_2nd; 3952 u32 gidx; 3953 s8 val; 3954 3955 if (band == RTW89_BAND_6G) 3956 goto calc_6g; 3957 3958 gidx = phy_tssi_get_ofdm_group(ch); 3959 3960 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3961 "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n", 3962 path, gidx); 3963 3964 if (PHY_IS_TSSI_EXTRA_GROUP(gidx)) { 3965 gidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(gidx); 3966 gidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(gidx); 3967 de_1st = tssi_info->tssi_mcs[path][gidx_1st]; 3968 de_2nd = tssi_info->tssi_mcs[path][gidx_2nd]; 3969 val = (de_1st + de_2nd) / 2; 3970 3971 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3972 "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n", 3973 path, val, de_1st, de_2nd); 3974 } else { 3975 val = tssi_info->tssi_mcs[path][gidx]; 3976 3977 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3978 "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val); 3979 } 3980 3981 return val; 3982 3983 calc_6g: 3984 gidx = phy_tssi_get_6g_ofdm_group(ch); 3985 3986 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3987 "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n", 3988 path, gidx); 3989 3990 if (PHY_IS_TSSI_EXTRA_GROUP(gidx)) { 3991 gidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(gidx); 3992 gidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(gidx); 3993 de_1st = tssi_info->tssi_6g_mcs[path][gidx_1st]; 3994 de_2nd = tssi_info->tssi_6g_mcs[path][gidx_2nd]; 3995 val = (de_1st + de_2nd) / 2; 3996 3997 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 3998 "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n", 3999 path, val, de_1st, de_2nd); 4000 } else { 4001 val = tssi_info->tssi_6g_mcs[path][gidx]; 4002 4003 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 4004 "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val); 4005 } 4006 4007 return val; 4008 } 4009 4010 static s8 phy_tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, 4011 enum rtw89_phy_idx phy, 4012 const struct rtw89_chan *chan, 4013 enum rtw89_rf_path path) 4014 { 4015 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; 4016 enum rtw89_band band = chan->band_type; 4017 u8 ch = chan->channel; 4018 u32 tgidx_1st; 4019 u32 tgidx_2nd; 4020 s8 tde_1st; 4021 s8 tde_2nd; 4022 u32 tgidx; 4023 s8 val; 4024 4025 if (band == RTW89_BAND_6G) 4026 goto calc_6g; 4027 4028 tgidx = phy_tssi_get_trim_group(ch); 4029 4030 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 4031 "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n", 4032 path, tgidx); 4033 4034 if (PHY_IS_TSSI_EXTRA_GROUP(tgidx)) { 4035 tgidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(tgidx); 4036 tgidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(tgidx); 4037 tde_1st = tssi_info->tssi_trim[path][tgidx_1st]; 4038 tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd]; 4039 val = (tde_1st + tde_2nd) / 2; 4040 4041 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 4042 "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n", 4043 path, val, tde_1st, tde_2nd); 4044 } else { 4045 val = tssi_info->tssi_trim[path][tgidx]; 4046 4047 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 4048 "[TSSI][TRIM]: path=%d mcs trim_de=%d\n", 4049 path, val); 4050 } 4051 4052 return val; 4053 4054 calc_6g: 4055 tgidx = phy_tssi_get_6g_trim_group(ch); 4056 4057 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 4058 "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n", 4059 path, tgidx); 4060 4061 if 
(PHY_IS_TSSI_EXTRA_GROUP(tgidx)) { 4062 tgidx_1st = PHY_TSSI_EXTRA_GET_GROUP_IDX1(tgidx); 4063 tgidx_2nd = PHY_TSSI_EXTRA_GET_GROUP_IDX2(tgidx); 4064 tde_1st = tssi_info->tssi_trim_6g[path][tgidx_1st]; 4065 tde_2nd = tssi_info->tssi_trim_6g[path][tgidx_2nd]; 4066 val = (tde_1st + tde_2nd) / 2; 4067 4068 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 4069 "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n", 4070 path, val, tde_1st, tde_2nd); 4071 } else { 4072 val = tssi_info->tssi_trim_6g[path][tgidx]; 4073 4074 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 4075 "[TSSI][TRIM]: path=%d mcs trim_de=%d\n", 4076 path, val); 4077 } 4078 4079 return val; 4080 } 4081 4082 void rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(struct rtw89_dev *rtwdev, 4083 enum rtw89_phy_idx phy, 4084 const struct rtw89_chan *chan, 4085 struct rtw89_h2c_rf_tssi *h2c) 4086 { 4087 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; 4088 u8 ch = chan->channel; 4089 s8 trim_de; 4090 s8 ofdm_de; 4091 s8 cck_de; 4092 u8 gidx; 4093 s8 val; 4094 int i; 4095 4096 rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n", 4097 phy, ch); 4098 4099 for (i = RF_PATH_A; i <= RF_PATH_B; i++) { 4100 trim_de = phy_tssi_get_ofdm_trim_de(rtwdev, phy, chan, i); 4101 h2c->curr_tssi_trim_de[i] = trim_de; 4102 4103 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 4104 "[TSSI][TRIM]: path=%d trim_de=0x%x\n", i, trim_de); 4105 4106 gidx = phy_tssi_get_cck_group(ch); 4107 cck_de = tssi_info->tssi_cck[i][gidx]; 4108 val = u32_get_bits(cck_de + trim_de, 0xff); 4109 4110 h2c->curr_tssi_cck_de[i] = 0x0; 4111 h2c->curr_tssi_cck_de_20m[i] = val; 4112 h2c->curr_tssi_cck_de_40m[i] = val; 4113 h2c->curr_tssi_efuse_cck_de[i] = cck_de; 4114 4115 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 4116 "[TSSI][TRIM]: path=%d cck_de=0x%x\n", i, cck_de); 4117 4118 ofdm_de = phy_tssi_get_ofdm_de(rtwdev, phy, chan, i); 4119 val = u32_get_bits(ofdm_de + trim_de, 0xff); 4120 4121 h2c->curr_tssi_ofdm_de[i] = 0x0; 4122 h2c->curr_tssi_ofdm_de_20m[i] = val; 4123 h2c->curr_tssi_ofdm_de_40m[i] = val; 4124 h2c->curr_tssi_ofdm_de_80m[i] = val; 4125 h2c->curr_tssi_ofdm_de_160m[i] = val; 4126 h2c->curr_tssi_ofdm_de_320m[i] = val; 4127 h2c->curr_tssi_efuse_ofdm_de[i] = ofdm_de; 4128 4129 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 4130 "[TSSI][TRIM]: path=%d ofdm_de=0x%x\n", i, ofdm_de); 4131 } 4132 } 4133 4134 void rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(struct rtw89_dev *rtwdev, 4135 enum rtw89_phy_idx phy, 4136 const struct rtw89_chan *chan, 4137 struct rtw89_h2c_rf_tssi *h2c) 4138 { 4139 struct rtw89_fw_txpwr_track_cfg *trk = rtwdev->fw.elm_info.txpwr_trk; 4140 struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; 4141 const s8 *thm_up[RF_PATH_B + 1] = {}; 4142 const s8 *thm_down[RF_PATH_B + 1] = {}; 4143 u8 subband = chan->subband_type; 4144 s8 thm_ofst[128] = {0}; 4145 u8 thermal; 4146 u8 path; 4147 u8 i, j; 4148 4149 switch (subband) { 4150 default: 4151 case RTW89_CH_2G: 4152 thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_P][0]; 4153 thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_N][0]; 4154 thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_P][0]; 4155 thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_N][0]; 4156 break; 4157 case RTW89_CH_5G_BAND_1: 4158 thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][0]; 4159 thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][0]; 4160 thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][0]; 4161 thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][0]; 4162 break; 4163 case 
RTW89_CH_5G_BAND_3: 4164 thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][1]; 4165 thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][1]; 4166 thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][1]; 4167 thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][1]; 4168 break; 4169 case RTW89_CH_5G_BAND_4: 4170 thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][2]; 4171 thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][2]; 4172 thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][2]; 4173 thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][2]; 4174 break; 4175 case RTW89_CH_6G_BAND_IDX0: 4176 case RTW89_CH_6G_BAND_IDX1: 4177 thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][0]; 4178 thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][0]; 4179 thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][0]; 4180 thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][0]; 4181 break; 4182 case RTW89_CH_6G_BAND_IDX2: 4183 case RTW89_CH_6G_BAND_IDX3: 4184 thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][1]; 4185 thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][1]; 4186 thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][1]; 4187 thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][1]; 4188 break; 4189 case RTW89_CH_6G_BAND_IDX4: 4190 case RTW89_CH_6G_BAND_IDX5: 4191 thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][2]; 4192 thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][2]; 4193 thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][2]; 4194 thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][2]; 4195 break; 4196 case RTW89_CH_6G_BAND_IDX6: 4197 case RTW89_CH_6G_BAND_IDX7: 4198 thm_up[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_P][3]; 4199 thm_down[RF_PATH_A] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GA_N][3]; 4200 thm_up[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_P][3]; 4201 thm_down[RF_PATH_B] = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_6GB_N][3]; 4202 break; 4203 } 4204 4205 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 4206 "[TSSI] tmeter tbl on subband: %u\n", subband); 4207 4208 for (path = RF_PATH_A; path <= RF_PATH_B; path++) { 4209 thermal = tssi_info->thermal[path]; 4210 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 4211 "path: %u, pg thermal: 0x%x\n", path, thermal); 4212 4213 if (thermal == 0xff) { 4214 h2c->pg_thermal[path] = 0x38; 4215 memset(h2c->ftable[path], 0, sizeof(h2c->ftable[path])); 4216 continue; 4217 } 4218 4219 h2c->pg_thermal[path] = thermal; 4220 4221 i = 0; 4222 for (j = 0; j < 64; j++) 4223 thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ? 4224 thm_up[path][i++] : 4225 thm_up[path][DELTA_SWINGIDX_SIZE - 1]; 4226 4227 i = 1; 4228 for (j = 127; j >= 64; j--) 4229 thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ? 
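/* indices 127..64 are filled with the negated "thermal down" deltas,
 * mirroring the "thermal up" half written by the loop above
 */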
4230 -thm_down[path][i++] : 4231 -thm_down[path][DELTA_SWINGIDX_SIZE - 1]; 4232 4233 for (i = 0; i < 128; i += 4) { 4234 h2c->ftable[path][i + 0] = thm_ofst[i + 3]; 4235 h2c->ftable[path][i + 1] = thm_ofst[i + 2]; 4236 h2c->ftable[path][i + 2] = thm_ofst[i + 1]; 4237 h2c->ftable[path][i + 3] = thm_ofst[i + 0]; 4238 4239 rtw89_debug(rtwdev, RTW89_DBG_TSSI, 4240 "thm ofst [%x]: %02x %02x %02x %02x\n", 4241 i, thm_ofst[i], thm_ofst[i + 1], 4242 thm_ofst[i + 2], thm_ofst[i + 3]); 4243 } 4244 } 4245 } 4246 4247 static u8 rtw89_phy_cfo_get_xcap_reg(struct rtw89_dev *rtwdev, bool sc_xo) 4248 { 4249 const struct rtw89_xtal_info *xtal = rtwdev->chip->xtal_info; 4250 u32 reg_mask; 4251 4252 if (sc_xo) 4253 reg_mask = xtal->sc_xo_mask; 4254 else 4255 reg_mask = xtal->sc_xi_mask; 4256 4257 return (u8)rtw89_read32_mask(rtwdev, xtal->xcap_reg, reg_mask); 4258 } 4259 4260 static void rtw89_phy_cfo_set_xcap_reg(struct rtw89_dev *rtwdev, bool sc_xo, 4261 u8 val) 4262 { 4263 const struct rtw89_xtal_info *xtal = rtwdev->chip->xtal_info; 4264 u32 reg_mask; 4265 4266 if (sc_xo) 4267 reg_mask = xtal->sc_xo_mask; 4268 else 4269 reg_mask = xtal->sc_xi_mask; 4270 4271 rtw89_write32_mask(rtwdev, xtal->xcap_reg, reg_mask, val); 4272 } 4273 4274 static void rtw89_phy_cfo_set_crystal_cap(struct rtw89_dev *rtwdev, 4275 u8 crystal_cap, bool force) 4276 { 4277 struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking; 4278 const struct rtw89_chip_info *chip = rtwdev->chip; 4279 u8 sc_xi_val, sc_xo_val; 4280 4281 if (!force && cfo->crystal_cap == crystal_cap) 4282 return; 4283 if (chip->chip_id == RTL8852A || chip->chip_id == RTL8851B) { 4284 rtw89_phy_cfo_set_xcap_reg(rtwdev, true, crystal_cap); 4285 rtw89_phy_cfo_set_xcap_reg(rtwdev, false, crystal_cap); 4286 sc_xo_val = rtw89_phy_cfo_get_xcap_reg(rtwdev, true); 4287 sc_xi_val = rtw89_phy_cfo_get_xcap_reg(rtwdev, false); 4288 } else { 4289 rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XO, 4290 crystal_cap, XTAL_SC_XO_MASK); 4291 rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XI, 4292 crystal_cap, XTAL_SC_XI_MASK); 4293 rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XO, &sc_xo_val); 4294 rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_XTAL_SC_XI, &sc_xi_val); 4295 } 4296 cfo->crystal_cap = sc_xi_val; 4297 cfo->x_cap_ofst = (s8)((int)cfo->crystal_cap - cfo->def_x_cap); 4298 4299 rtw89_debug(rtwdev, RTW89_DBG_CFO, "Set sc_xi=0x%x\n", sc_xi_val); 4300 rtw89_debug(rtwdev, RTW89_DBG_CFO, "Set sc_xo=0x%x\n", sc_xo_val); 4301 rtw89_debug(rtwdev, RTW89_DBG_CFO, "Get xcap_ofst=%d\n", 4302 cfo->x_cap_ofst); 4303 rtw89_debug(rtwdev, RTW89_DBG_CFO, "Set xcap OK\n"); 4304 } 4305 4306 static void rtw89_phy_cfo_reset(struct rtw89_dev *rtwdev) 4307 { 4308 struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking; 4309 u8 cap; 4310 4311 cfo->def_x_cap = cfo->crystal_cap_default & B_AX_XTAL_SC_MASK; 4312 cfo->is_adjust = false; 4313 if (cfo->crystal_cap == cfo->def_x_cap) 4314 return; 4315 cap = cfo->crystal_cap; 4316 cap += (cap > cfo->def_x_cap ? 
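/* nudge the cap a single step back toward the default value */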
-1 : 1); 4317 rtw89_phy_cfo_set_crystal_cap(rtwdev, cap, false); 4318 rtw89_debug(rtwdev, RTW89_DBG_CFO, 4319 "(0x%x) approach to dflt_val=(0x%x)\n", cfo->crystal_cap, 4320 cfo->def_x_cap); 4321 } 4322 4323 static void rtw89_dcfo_comp(struct rtw89_dev *rtwdev, s32 curr_cfo) 4324 { 4325 const struct rtw89_reg_def *dcfo_comp = rtwdev->chip->dcfo_comp; 4326 bool is_linked = rtwdev->total_sta_assoc > 0; 4327 s32 cfo_avg_312; 4328 s32 dcfo_comp_val; 4329 int sign; 4330 4331 if (rtwdev->chip->chip_id == RTL8922A) 4332 return; 4333 4334 if (!is_linked) { 4335 rtw89_debug(rtwdev, RTW89_DBG_CFO, "DCFO: is_linked=%d\n", 4336 is_linked); 4337 return; 4338 } 4339 rtw89_debug(rtwdev, RTW89_DBG_CFO, "DCFO: curr_cfo=%d\n", curr_cfo); 4340 if (curr_cfo == 0) 4341 return; 4342 dcfo_comp_val = rtw89_phy_read32_mask(rtwdev, R_DCFO, B_DCFO); 4343 sign = curr_cfo > 0 ? 1 : -1; 4344 cfo_avg_312 = curr_cfo / 625 + sign * dcfo_comp_val; 4345 rtw89_debug(rtwdev, RTW89_DBG_CFO, "avg_cfo_312=%d step\n", cfo_avg_312); 4346 if (rtwdev->chip->chip_id == RTL8852A && rtwdev->hal.cv == CHIP_CBV) 4347 cfo_avg_312 = -cfo_avg_312; 4348 rtw89_phy_set_phy_regs(rtwdev, dcfo_comp->addr, dcfo_comp->mask, 4349 cfo_avg_312); 4350 } 4351 4352 static void rtw89_dcfo_comp_init(struct rtw89_dev *rtwdev) 4353 { 4354 const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; 4355 const struct rtw89_chip_info *chip = rtwdev->chip; 4356 const struct rtw89_cfo_regs *cfo = phy->cfo; 4357 4358 rtw89_phy_set_phy_regs(rtwdev, cfo->comp_seg0, cfo->valid_0_mask, 1); 4359 rtw89_phy_set_phy_regs(rtwdev, cfo->comp, cfo->weighting_mask, 8); 4360 4361 if (chip->chip_gen == RTW89_CHIP_AX) { 4362 if (chip->cfo_hw_comp) { 4363 rtw89_write32_mask(rtwdev, R_AX_PWR_UL_CTRL2, 4364 B_AX_PWR_UL_CFO_MASK, 0x6); 4365 } else { 4366 rtw89_phy_set_phy_regs(rtwdev, R_DCFO, B_DCFO, 1); 4367 rtw89_write32_clr(rtwdev, R_AX_PWR_UL_CTRL2, 4368 B_AX_PWR_UL_CFO_MASK); 4369 } 4370 } 4371 } 4372 4373 static void rtw89_phy_cfo_init(struct rtw89_dev *rtwdev) 4374 { 4375 struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking; 4376 struct rtw89_efuse *efuse = &rtwdev->efuse; 4377 4378 cfo->crystal_cap_default = efuse->xtal_cap & B_AX_XTAL_SC_MASK; 4379 cfo->crystal_cap = cfo->crystal_cap_default; 4380 cfo->def_x_cap = cfo->crystal_cap; 4381 cfo->x_cap_ub = min_t(int, cfo->def_x_cap + CFO_BOUND, 0x7f); 4382 cfo->x_cap_lb = max_t(int, cfo->def_x_cap - CFO_BOUND, 0x1); 4383 cfo->is_adjust = false; 4384 cfo->divergence_lock_en = false; 4385 cfo->x_cap_ofst = 0; 4386 cfo->lock_cnt = 0; 4387 cfo->rtw89_multi_cfo_mode = RTW89_TP_BASED_AVG_MODE; 4388 cfo->apply_compensation = false; 4389 cfo->residual_cfo_acc = 0; 4390 rtw89_debug(rtwdev, RTW89_DBG_CFO, "Default xcap=%0x\n", 4391 cfo->crystal_cap_default); 4392 rtw89_phy_cfo_set_crystal_cap(rtwdev, cfo->crystal_cap_default, true); 4393 rtw89_dcfo_comp_init(rtwdev); 4394 cfo->cfo_timer_ms = 2000; 4395 cfo->cfo_trig_by_timer_en = false; 4396 cfo->phy_cfo_trk_cnt = 0; 4397 cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL; 4398 cfo->cfo_ul_ofdma_acc_mode = RTW89_CFO_UL_OFDMA_ACC_ENABLE; 4399 } 4400 4401 static void rtw89_phy_cfo_crystal_cap_adjust(struct rtw89_dev *rtwdev, 4402 s32 curr_cfo) 4403 { 4404 struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking; 4405 int crystal_cap = cfo->crystal_cap; 4406 s32 cfo_abs = abs(curr_cfo); 4407 int sign; 4408 4409 if (curr_cfo == 0) { 4410 rtw89_debug(rtwdev, RTW89_DBG_CFO, "curr_cfo=0\n"); 4411 return; 4412 } 4413 if (!cfo->is_adjust) { 4414 if (cfo_abs > CFO_TRK_ENABLE_TH) 4415 
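/* hysteresis: adjustment kicks in above CFO_TRK_ENABLE_TH and is
 * released again only once the CFO falls to CFO_TRK_STOP_TH or below
 */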
cfo->is_adjust = true; 4416 } else { 4417 if (cfo_abs <= CFO_TRK_STOP_TH) 4418 cfo->is_adjust = false; 4419 } 4420 if (!cfo->is_adjust) { 4421 rtw89_debug(rtwdev, RTW89_DBG_CFO, "Stop CFO tracking\n"); 4422 return; 4423 } 4424 sign = curr_cfo > 0 ? 1 : -1; 4425 if (cfo_abs > CFO_TRK_STOP_TH_4) 4426 crystal_cap += 7 * sign; 4427 else if (cfo_abs > CFO_TRK_STOP_TH_3) 4428 crystal_cap += 5 * sign; 4429 else if (cfo_abs > CFO_TRK_STOP_TH_2) 4430 crystal_cap += 3 * sign; 4431 else if (cfo_abs > CFO_TRK_STOP_TH_1) 4432 crystal_cap += 1 * sign; 4433 else 4434 return; 4435 4436 crystal_cap = clamp(crystal_cap, 0, 127); 4437 rtw89_phy_cfo_set_crystal_cap(rtwdev, (u8)crystal_cap, false); 4438 rtw89_debug(rtwdev, RTW89_DBG_CFO, 4439 "X_cap{Curr,Default}={0x%x,0x%x}\n", 4440 cfo->crystal_cap, cfo->def_x_cap); 4441 } 4442 4443 static s32 rtw89_phy_average_cfo_calc(struct rtw89_dev *rtwdev) 4444 { 4445 const struct rtw89_chip_info *chip = rtwdev->chip; 4446 struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking; 4447 s32 cfo_khz_all = 0; 4448 s32 cfo_cnt_all = 0; 4449 s32 cfo_all_avg = 0; 4450 u8 i; 4451 4452 if (rtwdev->total_sta_assoc != 1) 4453 return 0; 4454 rtw89_debug(rtwdev, RTW89_DBG_CFO, "one_entry_only\n"); 4455 for (i = 0; i < CFO_TRACK_MAX_USER; i++) { 4456 if (cfo->cfo_cnt[i] == 0) 4457 continue; 4458 cfo_khz_all += cfo->cfo_tail[i]; 4459 cfo_cnt_all += cfo->cfo_cnt[i]; 4460 cfo_all_avg = phy_div(cfo_khz_all, cfo_cnt_all); 4461 cfo->pre_cfo_avg[i] = cfo->cfo_avg[i]; 4462 cfo->dcfo_avg = phy_div(cfo_khz_all << chip->dcfo_comp_sft, 4463 cfo_cnt_all); 4464 } 4465 rtw89_debug(rtwdev, RTW89_DBG_CFO, 4466 "CFO track for macid = %d\n", i); 4467 rtw89_debug(rtwdev, RTW89_DBG_CFO, 4468 "Total cfo=%dK, pkt_cnt=%d, avg_cfo=%dK\n", 4469 cfo_khz_all, cfo_cnt_all, cfo_all_avg); 4470 return cfo_all_avg; 4471 } 4472 4473 static s32 rtw89_phy_multi_sta_cfo_calc(struct rtw89_dev *rtwdev) 4474 { 4475 struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking; 4476 struct rtw89_traffic_stats *stats = &rtwdev->stats; 4477 s32 target_cfo = 0; 4478 s32 cfo_khz_all = 0; 4479 s32 cfo_khz_all_tp_wgt = 0; 4480 s32 cfo_avg = 0; 4481 s32 max_cfo_lb = BIT(31); 4482 s32 min_cfo_ub = GENMASK(30, 0); 4483 u16 cfo_cnt_all = 0; 4484 u8 active_entry_cnt = 0; 4485 u8 sta_cnt = 0; 4486 u32 tp_all = 0; 4487 u8 i; 4488 u8 cfo_tol = 0; 4489 4490 rtw89_debug(rtwdev, RTW89_DBG_CFO, "Multi entry cfo_trk\n"); 4491 if (cfo->rtw89_multi_cfo_mode == RTW89_PKT_BASED_AVG_MODE) { 4492 rtw89_debug(rtwdev, RTW89_DBG_CFO, "Pkt based avg mode\n"); 4493 for (i = 0; i < CFO_TRACK_MAX_USER; i++) { 4494 if (cfo->cfo_cnt[i] == 0) 4495 continue; 4496 cfo_khz_all += cfo->cfo_tail[i]; 4497 cfo_cnt_all += cfo->cfo_cnt[i]; 4498 cfo_avg = phy_div(cfo_khz_all, (s32)cfo_cnt_all); 4499 rtw89_debug(rtwdev, RTW89_DBG_CFO, 4500 "Msta cfo=%d, pkt_cnt=%d, avg_cfo=%d\n", 4501 cfo_khz_all, cfo_cnt_all, cfo_avg); 4502 target_cfo = cfo_avg; 4503 } 4504 } else if (cfo->rtw89_multi_cfo_mode == RTW89_ENTRY_BASED_AVG_MODE) { 4505 rtw89_debug(rtwdev, RTW89_DBG_CFO, "Entry based avg mode\n"); 4506 for (i = 0; i < CFO_TRACK_MAX_USER; i++) { 4507 if (cfo->cfo_cnt[i] == 0) 4508 continue; 4509 cfo->cfo_avg[i] = phy_div(cfo->cfo_tail[i], 4510 (s32)cfo->cfo_cnt[i]); 4511 cfo_khz_all += cfo->cfo_avg[i]; 4512 rtw89_debug(rtwdev, RTW89_DBG_CFO, 4513 "Macid=%d, cfo_avg=%d\n", i, 4514 cfo->cfo_avg[i]); 4515 } 4516 sta_cnt = rtwdev->total_sta_assoc; 4517 cfo_avg = phy_div(cfo_khz_all, (s32)sta_cnt); 4518 rtw89_debug(rtwdev, RTW89_DBG_CFO, 4519 "Msta cfo_acc=%d, ent_cnt=%d, 
avg_cfo=%d\n", 4520 cfo_khz_all, sta_cnt, cfo_avg); 4521 target_cfo = cfo_avg; 4522 } else if (cfo->rtw89_multi_cfo_mode == RTW89_TP_BASED_AVG_MODE) { 4523 rtw89_debug(rtwdev, RTW89_DBG_CFO, "TP based avg mode\n"); 4524 cfo_tol = cfo->sta_cfo_tolerance; 4525 for (i = 0; i < CFO_TRACK_MAX_USER; i++) { 4526 sta_cnt++; 4527 if (cfo->cfo_cnt[i] != 0) { 4528 cfo->cfo_avg[i] = phy_div(cfo->cfo_tail[i], 4529 (s32)cfo->cfo_cnt[i]); 4530 active_entry_cnt++; 4531 } else { 4532 cfo->cfo_avg[i] = cfo->pre_cfo_avg[i]; 4533 } 4534 max_cfo_lb = max(cfo->cfo_avg[i] - cfo_tol, max_cfo_lb); 4535 min_cfo_ub = min(cfo->cfo_avg[i] + cfo_tol, min_cfo_ub); 4536 cfo_khz_all += cfo->cfo_avg[i]; 4537 /* need tp for each entry */ 4538 rtw89_debug(rtwdev, RTW89_DBG_CFO, 4539 "[%d] cfo_avg=%d, tp=tbd\n", 4540 i, cfo->cfo_avg[i]); 4541 if (sta_cnt >= rtwdev->total_sta_assoc) 4542 break; 4543 } 4544 tp_all = stats->rx_throughput; /* need tp for each entry */ 4545 cfo_avg = phy_div(cfo_khz_all_tp_wgt, (s32)tp_all); 4546 4547 rtw89_debug(rtwdev, RTW89_DBG_CFO, "Assoc sta cnt=%d\n", 4548 sta_cnt); 4549 rtw89_debug(rtwdev, RTW89_DBG_CFO, "Active sta cnt=%d\n", 4550 active_entry_cnt); 4551 rtw89_debug(rtwdev, RTW89_DBG_CFO, 4552 "Msta cfo with tp_wgt=%d, avg_cfo=%d\n", 4553 cfo_khz_all_tp_wgt, cfo_avg); 4554 rtw89_debug(rtwdev, RTW89_DBG_CFO, "cfo_lb=%d,cfo_ub=%d\n", 4555 max_cfo_lb, min_cfo_ub); 4556 if (max_cfo_lb <= min_cfo_ub) { 4557 rtw89_debug(rtwdev, RTW89_DBG_CFO, 4558 "cfo win_size=%d\n", 4559 min_cfo_ub - max_cfo_lb); 4560 target_cfo = clamp(cfo_avg, max_cfo_lb, min_cfo_ub); 4561 } else { 4562 rtw89_debug(rtwdev, RTW89_DBG_CFO, 4563 "No intersection of cfo tolerance windows\n"); 4564 target_cfo = phy_div(cfo_khz_all, (s32)sta_cnt); 4565 } 4566 for (i = 0; i < CFO_TRACK_MAX_USER; i++) 4567 cfo->pre_cfo_avg[i] = cfo->cfo_avg[i]; 4568 } 4569 rtw89_debug(rtwdev, RTW89_DBG_CFO, "Target cfo=%d\n", target_cfo); 4570 return target_cfo; 4571 } 4572 4573 static void rtw89_phy_cfo_statistics_reset(struct rtw89_dev *rtwdev) 4574 { 4575 struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking; 4576 4577 memset(&cfo->cfo_tail, 0, sizeof(cfo->cfo_tail)); 4578 memset(&cfo->cfo_cnt, 0, sizeof(cfo->cfo_cnt)); 4579 cfo->packet_count = 0; 4580 cfo->packet_count_pre = 0; 4581 cfo->cfo_avg_pre = 0; 4582 } 4583 4584 static void rtw89_phy_cfo_dm(struct rtw89_dev *rtwdev) 4585 { 4586 struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking; 4587 s32 new_cfo = 0; 4588 bool x_cap_update = false; 4589 u8 pre_x_cap = cfo->crystal_cap; 4590 u8 dcfo_comp_sft = rtwdev->chip->dcfo_comp_sft; 4591 4592 cfo->dcfo_avg = 0; 4593 rtw89_debug(rtwdev, RTW89_DBG_CFO, "CFO:total_sta_assoc=%d\n", 4594 rtwdev->total_sta_assoc); 4595 if (rtwdev->total_sta_assoc == 0) { 4596 rtw89_phy_cfo_reset(rtwdev); 4597 return; 4598 } 4599 if (cfo->packet_count == 0) { 4600 rtw89_debug(rtwdev, RTW89_DBG_CFO, "Pkt cnt = 0\n"); 4601 return; 4602 } 4603 if (cfo->packet_count == cfo->packet_count_pre) { 4604 rtw89_debug(rtwdev, RTW89_DBG_CFO, "Pkt cnt doesn't change\n"); 4605 return; 4606 } 4607 if (rtwdev->total_sta_assoc == 1) 4608 new_cfo = rtw89_phy_average_cfo_calc(rtwdev); 4609 else 4610 new_cfo = rtw89_phy_multi_sta_cfo_calc(rtwdev); 4611 if (cfo->divergence_lock_en) { 4612 cfo->lock_cnt++; 4613 if (cfo->lock_cnt > CFO_PERIOD_CNT) { 4614 cfo->divergence_lock_en = false; 4615 cfo->lock_cnt = 0; 4616 } else { 4617 rtw89_phy_cfo_reset(rtwdev); 4618 } 4619 return; 4620 } 4621 if (cfo->crystal_cap >= cfo->x_cap_ub || 4622 cfo->crystal_cap <= cfo->x_cap_lb) { 4623 
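/* the cap ran into a tuning bound; lock tracking for a while and let
 * rtw89_phy_cfo_reset() walk it back toward the default
 */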
cfo->divergence_lock_en = true; 4624 rtw89_phy_cfo_reset(rtwdev); 4625 return; 4626 } 4627 4628 rtw89_phy_cfo_crystal_cap_adjust(rtwdev, new_cfo); 4629 cfo->cfo_avg_pre = new_cfo; 4630 cfo->dcfo_avg_pre = cfo->dcfo_avg; 4631 x_cap_update = cfo->crystal_cap != pre_x_cap; 4632 rtw89_debug(rtwdev, RTW89_DBG_CFO, "Xcap_up=%d\n", x_cap_update); 4633 rtw89_debug(rtwdev, RTW89_DBG_CFO, "Xcap: D:%x C:%x->%x, ofst=%d\n", 4634 cfo->def_x_cap, pre_x_cap, cfo->crystal_cap, 4635 cfo->x_cap_ofst); 4636 if (x_cap_update) { 4637 if (cfo->dcfo_avg > 0) 4638 cfo->dcfo_avg -= CFO_SW_COMP_FINE_TUNE << dcfo_comp_sft; 4639 else 4640 cfo->dcfo_avg += CFO_SW_COMP_FINE_TUNE << dcfo_comp_sft; 4641 } 4642 rtw89_dcfo_comp(rtwdev, cfo->dcfo_avg); 4643 rtw89_phy_cfo_statistics_reset(rtwdev); 4644 } 4645 4646 void rtw89_phy_cfo_track_work(struct work_struct *work) 4647 { 4648 struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, 4649 cfo_track_work.work); 4650 struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking; 4651 4652 mutex_lock(&rtwdev->mutex); 4653 if (!cfo->cfo_trig_by_timer_en) 4654 goto out; 4655 rtw89_leave_ps_mode(rtwdev); 4656 rtw89_phy_cfo_dm(rtwdev); 4657 ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->cfo_track_work, 4658 msecs_to_jiffies(cfo->cfo_timer_ms)); 4659 out: 4660 mutex_unlock(&rtwdev->mutex); 4661 } 4662 4663 static void rtw89_phy_cfo_start_work(struct rtw89_dev *rtwdev) 4664 { 4665 struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking; 4666 4667 ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->cfo_track_work, 4668 msecs_to_jiffies(cfo->cfo_timer_ms)); 4669 } 4670 4671 void rtw89_phy_cfo_track(struct rtw89_dev *rtwdev) 4672 { 4673 struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking; 4674 struct rtw89_traffic_stats *stats = &rtwdev->stats; 4675 bool is_ul_ofdma = false, ofdma_acc_en = false; 4676 4677 if (stats->rx_tf_periodic > CFO_TF_CNT_TH) 4678 is_ul_ofdma = true; 4679 if (cfo->cfo_ul_ofdma_acc_mode == RTW89_CFO_UL_OFDMA_ACC_ENABLE && 4680 is_ul_ofdma) 4681 ofdma_acc_en = true; 4682 4683 switch (cfo->phy_cfo_status) { 4684 case RTW89_PHY_DCFO_STATE_NORMAL: 4685 if (stats->tx_throughput >= CFO_TP_UPPER) { 4686 cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_ENHANCE; 4687 cfo->cfo_trig_by_timer_en = true; 4688 cfo->cfo_timer_ms = CFO_COMP_PERIOD; 4689 rtw89_phy_cfo_start_work(rtwdev); 4690 } 4691 break; 4692 case RTW89_PHY_DCFO_STATE_ENHANCE: 4693 if (stats->tx_throughput <= CFO_TP_LOWER) 4694 cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL; 4695 else if (ofdma_acc_en && 4696 cfo->phy_cfo_trk_cnt >= CFO_PERIOD_CNT) 4697 cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_HOLD; 4698 else 4699 cfo->phy_cfo_trk_cnt++; 4700 4701 if (cfo->phy_cfo_status == RTW89_PHY_DCFO_STATE_NORMAL) { 4702 cfo->phy_cfo_trk_cnt = 0; 4703 cfo->cfo_trig_by_timer_en = false; 4704 } 4705 break; 4706 case RTW89_PHY_DCFO_STATE_HOLD: 4707 if (stats->tx_throughput <= CFO_TP_LOWER) { 4708 cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL; 4709 cfo->phy_cfo_trk_cnt = 0; 4710 cfo->cfo_trig_by_timer_en = false; 4711 } else { 4712 cfo->phy_cfo_trk_cnt++; 4713 } 4714 break; 4715 default: 4716 cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL; 4717 cfo->phy_cfo_trk_cnt = 0; 4718 break; 4719 } 4720 rtw89_debug(rtwdev, RTW89_DBG_CFO, 4721 "[CFO]WatchDog tp=%d,state=%d,timer_en=%d,trk_cnt=%d,thermal=%ld\n", 4722 stats->tx_throughput, cfo->phy_cfo_status, 4723 cfo->cfo_trig_by_timer_en, cfo->phy_cfo_trk_cnt, 4724 ewma_thermal_read(&rtwdev->phystat.avg_thermal[0])); 4725 if (cfo->cfo_trig_by_timer_en) 4726 
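/* the delayed cfo_track_work invokes rtw89_phy_cfo_dm() periodically instead */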
return; 4727 rtw89_phy_cfo_dm(rtwdev); 4728 } 4729 4730 void rtw89_phy_cfo_parse(struct rtw89_dev *rtwdev, s16 cfo_val, 4731 struct rtw89_rx_phy_ppdu *phy_ppdu) 4732 { 4733 struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking; 4734 u8 macid = phy_ppdu->mac_id; 4735 4736 if (macid >= CFO_TRACK_MAX_USER) { 4737 rtw89_warn(rtwdev, "mac_id %d is out of range\n", macid); 4738 return; 4739 } 4740 4741 cfo->cfo_tail[macid] += cfo_val; 4742 cfo->cfo_cnt[macid]++; 4743 cfo->packet_count++; 4744 } 4745 4746 void rtw89_phy_ul_tb_assoc(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link) 4747 { 4748 const struct rtw89_chip_info *chip = rtwdev->chip; 4749 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 4750 rtwvif_link->chanctx_idx); 4751 struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info; 4752 4753 if (!chip->ul_tb_waveform_ctrl) 4754 return; 4755 4756 rtwvif_link->def_tri_idx = 4757 rtw89_phy_read32_mask(rtwdev, R_DCFO_OPT, B_TXSHAPE_TRIANGULAR_CFG); 4758 4759 if (chip->chip_id == RTL8852B && rtwdev->hal.cv > CHIP_CBV) 4760 rtwvif_link->dyn_tb_bedge_en = false; 4761 else if (chan->band_type >= RTW89_BAND_5G && 4762 chan->band_width >= RTW89_CHANNEL_WIDTH_40) 4763 rtwvif_link->dyn_tb_bedge_en = true; 4764 else 4765 rtwvif_link->dyn_tb_bedge_en = false; 4766 4767 rtw89_debug(rtwdev, RTW89_DBG_UL_TB, 4768 "[ULTB] def_if_bandedge=%d, def_tri_idx=%d\n", 4769 ul_tb_info->def_if_bandedge, rtwvif_link->def_tri_idx); 4770 rtw89_debug(rtwdev, RTW89_DBG_UL_TB, 4771 "[ULTB] dyn_tb_bedge_en=%d, dyn_tb_tri_en=%d\n", 4772 rtwvif_link->dyn_tb_bedge_en, ul_tb_info->dyn_tb_tri_en); 4773 } 4774 4775 struct rtw89_phy_ul_tb_check_data { 4776 bool valid; 4777 bool high_tf_client; 4778 bool low_tf_client; 4779 bool dyn_tb_bedge_en; 4780 u8 def_tri_idx; 4781 }; 4782 4783 struct rtw89_phy_power_diff { 4784 u32 q_00; 4785 u32 q_11; 4786 u32 q_matrix_en; 4787 u32 ultb_1t_norm_160; 4788 u32 ultb_2t_norm_160; 4789 u32 com1_norm_1sts; 4790 u32 com2_resp_1sts_path; 4791 }; 4792 4793 static void rtw89_phy_ofdma_power_diff(struct rtw89_dev *rtwdev, 4794 struct rtw89_vif_link *rtwvif_link) 4795 { 4796 static const struct rtw89_phy_power_diff table[2] = { 4797 {0x0, 0x0, 0x0, 0x0, 0xf4, 0x3, 0x3}, 4798 {0xb50, 0xb50, 0x1, 0xc, 0x0, 0x1, 0x1}, 4799 }; 4800 const struct rtw89_phy_power_diff *param; 4801 u32 reg; 4802 4803 if (!rtwdev->chip->ul_tb_pwr_diff) 4804 return; 4805 4806 if (rtwvif_link->pwr_diff_en == rtwvif_link->pre_pwr_diff_en) { 4807 rtwvif_link->pwr_diff_en = false; 4808 return; 4809 } 4810 4811 rtwvif_link->pre_pwr_diff_en = rtwvif_link->pwr_diff_en; 4812 param = &table[rtwvif_link->pwr_diff_en]; 4813 4814 rtw89_phy_write32_mask(rtwdev, R_Q_MATRIX_00, B_Q_MATRIX_00_REAL, 4815 param->q_00); 4816 rtw89_phy_write32_mask(rtwdev, R_Q_MATRIX_11, B_Q_MATRIX_11_REAL, 4817 param->q_11); 4818 rtw89_phy_write32_mask(rtwdev, R_CUSTOMIZE_Q_MATRIX, 4819 B_CUSTOMIZE_Q_MATRIX_EN, param->q_matrix_en); 4820 4821 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_1T, rtwvif_link->mac_idx); 4822 rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_1T_NORM_BW160, 4823 param->ultb_1t_norm_160); 4824 4825 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_2T, rtwvif_link->mac_idx); 4826 rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_2T_NORM_BW160, 4827 param->ultb_2t_norm_160); 4828 4829 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PATH_COM1, rtwvif_link->mac_idx); 4830 rtw89_write32_mask(rtwdev, reg, B_AX_PATH_COM1_NORM_1STS, 4831 param->com1_norm_1sts); 4832 4833 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PATH_COM2, 
rtwvif_link->mac_idx); 4834 rtw89_write32_mask(rtwdev, reg, B_AX_PATH_COM2_RESP_1STS_PATH, 4835 param->com2_resp_1sts_path); 4836 } 4837 4838 static 4839 void rtw89_phy_ul_tb_ctrl_check(struct rtw89_dev *rtwdev, 4840 struct rtw89_vif_link *rtwvif_link, 4841 struct rtw89_phy_ul_tb_check_data *ul_tb_data) 4842 { 4843 struct rtw89_traffic_stats *stats = &rtwdev->stats; 4844 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 4845 4846 if (rtwvif_link->wifi_role != RTW89_WIFI_ROLE_STATION) 4847 return; 4848 4849 if (!vif->cfg.assoc) 4850 return; 4851 4852 if (rtwdev->chip->ul_tb_waveform_ctrl) { 4853 if (stats->rx_tf_periodic > UL_TB_TF_CNT_L2H_TH) 4854 ul_tb_data->high_tf_client = true; 4855 else if (stats->rx_tf_periodic < UL_TB_TF_CNT_H2L_TH) 4856 ul_tb_data->low_tf_client = true; 4857 4858 ul_tb_data->valid = true; 4859 ul_tb_data->def_tri_idx = rtwvif_link->def_tri_idx; 4860 ul_tb_data->dyn_tb_bedge_en = rtwvif_link->dyn_tb_bedge_en; 4861 } 4862 4863 rtw89_phy_ofdma_power_diff(rtwdev, rtwvif_link); 4864 } 4865 4866 static void rtw89_phy_ul_tb_waveform_ctrl(struct rtw89_dev *rtwdev, 4867 struct rtw89_phy_ul_tb_check_data *ul_tb_data) 4868 { 4869 struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info; 4870 4871 if (!rtwdev->chip->ul_tb_waveform_ctrl) 4872 return; 4873 4874 if (ul_tb_data->dyn_tb_bedge_en) { 4875 if (ul_tb_data->high_tf_client) { 4876 rtw89_phy_write32_mask(rtwdev, R_BANDEDGE, B_BANDEDGE_EN, 0); 4877 rtw89_debug(rtwdev, RTW89_DBG_UL_TB, 4878 "[ULTB] Turn off if_bandedge\n"); 4879 } else if (ul_tb_data->low_tf_client) { 4880 rtw89_phy_write32_mask(rtwdev, R_BANDEDGE, B_BANDEDGE_EN, 4881 ul_tb_info->def_if_bandedge); 4882 rtw89_debug(rtwdev, RTW89_DBG_UL_TB, 4883 "[ULTB] Set to default if_bandedge = %d\n", 4884 ul_tb_info->def_if_bandedge); 4885 } 4886 } 4887 4888 if (ul_tb_info->dyn_tb_tri_en) { 4889 if (ul_tb_data->high_tf_client) { 4890 rtw89_phy_write32_mask(rtwdev, R_DCFO_OPT, 4891 B_TXSHAPE_TRIANGULAR_CFG, 0); 4892 rtw89_debug(rtwdev, RTW89_DBG_UL_TB, 4893 "[ULTB] Turn off Tx triangle\n"); 4894 } else if (ul_tb_data->low_tf_client) { 4895 rtw89_phy_write32_mask(rtwdev, R_DCFO_OPT, 4896 B_TXSHAPE_TRIANGULAR_CFG, 4897 ul_tb_data->def_tri_idx); 4898 rtw89_debug(rtwdev, RTW89_DBG_UL_TB, 4899 "[ULTB] Set to default tx_shap_idx = %d\n", 4900 ul_tb_data->def_tri_idx); 4901 } 4902 } 4903 } 4904 4905 void rtw89_phy_ul_tb_ctrl_track(struct rtw89_dev *rtwdev) 4906 { 4907 const struct rtw89_chip_info *chip = rtwdev->chip; 4908 struct rtw89_phy_ul_tb_check_data ul_tb_data = {}; 4909 struct rtw89_vif_link *rtwvif_link; 4910 struct rtw89_vif *rtwvif; 4911 unsigned int link_id; 4912 4913 if (!chip->ul_tb_waveform_ctrl && !chip->ul_tb_pwr_diff) 4914 return; 4915 4916 if (rtwdev->total_sta_assoc != 1) 4917 return; 4918 4919 rtw89_for_each_rtwvif(rtwdev, rtwvif) 4920 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) 4921 rtw89_phy_ul_tb_ctrl_check(rtwdev, rtwvif_link, &ul_tb_data); 4922 4923 if (!ul_tb_data.valid) 4924 return; 4925 4926 rtw89_phy_ul_tb_waveform_ctrl(rtwdev, &ul_tb_data); 4927 } 4928 4929 static void rtw89_phy_ul_tb_info_init(struct rtw89_dev *rtwdev) 4930 { 4931 const struct rtw89_chip_info *chip = rtwdev->chip; 4932 struct rtw89_phy_ul_tb_info *ul_tb_info = &rtwdev->ul_tb_info; 4933 4934 if (!chip->ul_tb_waveform_ctrl) 4935 return; 4936 4937 ul_tb_info->dyn_tb_tri_en = true; 4938 ul_tb_info->def_if_bandedge = 4939 rtw89_phy_read32_mask(rtwdev, R_BANDEDGE, B_BANDEDGE_EN); 4940 } 4941 4942 static 4943 void rtw89_phy_antdiv_sts_instance_reset(struct 
rtw89_antdiv_stats *antdiv_sts) 4944 { 4945 ewma_rssi_init(&antdiv_sts->cck_rssi_avg); 4946 ewma_rssi_init(&antdiv_sts->ofdm_rssi_avg); 4947 ewma_rssi_init(&antdiv_sts->non_legacy_rssi_avg); 4948 antdiv_sts->pkt_cnt_cck = 0; 4949 antdiv_sts->pkt_cnt_ofdm = 0; 4950 antdiv_sts->pkt_cnt_non_legacy = 0; 4951 antdiv_sts->evm = 0; 4952 } 4953 4954 static void rtw89_phy_antdiv_sts_instance_add(struct rtw89_dev *rtwdev, 4955 struct rtw89_rx_phy_ppdu *phy_ppdu, 4956 struct rtw89_antdiv_stats *stats) 4957 { 4958 if (rtw89_get_data_rate_mode(rtwdev, phy_ppdu->rate) == DATA_RATE_MODE_NON_HT) { 4959 if (phy_ppdu->rate < RTW89_HW_RATE_OFDM6) { 4960 ewma_rssi_add(&stats->cck_rssi_avg, phy_ppdu->rssi_avg); 4961 stats->pkt_cnt_cck++; 4962 } else { 4963 ewma_rssi_add(&stats->ofdm_rssi_avg, phy_ppdu->rssi_avg); 4964 stats->pkt_cnt_ofdm++; 4965 stats->evm += phy_ppdu->ofdm.evm_min; 4966 } 4967 } else { 4968 ewma_rssi_add(&stats->non_legacy_rssi_avg, phy_ppdu->rssi_avg); 4969 stats->pkt_cnt_non_legacy++; 4970 stats->evm += phy_ppdu->ofdm.evm_min; 4971 } 4972 } 4973 4974 static u8 rtw89_phy_antdiv_sts_instance_get_rssi(struct rtw89_antdiv_stats *stats) 4975 { 4976 if (stats->pkt_cnt_non_legacy >= stats->pkt_cnt_cck && 4977 stats->pkt_cnt_non_legacy >= stats->pkt_cnt_ofdm) 4978 return ewma_rssi_read(&stats->non_legacy_rssi_avg); 4979 else if (stats->pkt_cnt_ofdm >= stats->pkt_cnt_cck && 4980 stats->pkt_cnt_ofdm >= stats->pkt_cnt_non_legacy) 4981 return ewma_rssi_read(&stats->ofdm_rssi_avg); 4982 else 4983 return ewma_rssi_read(&stats->cck_rssi_avg); 4984 } 4985 4986 static u8 rtw89_phy_antdiv_sts_instance_get_evm(struct rtw89_antdiv_stats *stats) 4987 { 4988 return phy_div(stats->evm, stats->pkt_cnt_non_legacy + stats->pkt_cnt_ofdm); 4989 } 4990 4991 void rtw89_phy_antdiv_parse(struct rtw89_dev *rtwdev, 4992 struct rtw89_rx_phy_ppdu *phy_ppdu) 4993 { 4994 struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv; 4995 struct rtw89_hal *hal = &rtwdev->hal; 4996 4997 if (!hal->ant_diversity || hal->ant_diversity_fixed) 4998 return; 4999 5000 rtw89_phy_antdiv_sts_instance_add(rtwdev, phy_ppdu, &antdiv->target_stats); 5001 5002 if (!antdiv->get_stats) 5003 return; 5004 5005 if (hal->antenna_rx == RF_A) 5006 rtw89_phy_antdiv_sts_instance_add(rtwdev, phy_ppdu, &antdiv->main_stats); 5007 else if (hal->antenna_rx == RF_B) 5008 rtw89_phy_antdiv_sts_instance_add(rtwdev, phy_ppdu, &antdiv->aux_stats); 5009 } 5010 5011 static void rtw89_phy_antdiv_reg_init(struct rtw89_dev *rtwdev) 5012 { 5013 rtw89_phy_write32_idx(rtwdev, R_P0_TRSW, B_P0_ANT_TRAIN_EN, 5014 0x0, RTW89_PHY_0); 5015 rtw89_phy_write32_idx(rtwdev, R_P0_TRSW, B_P0_TX_ANT_SEL, 5016 0x0, RTW89_PHY_0); 5017 5018 rtw89_phy_write32_idx(rtwdev, R_P0_ANT_SW, B_P0_TRSW_TX_EXTEND, 5019 0x0, RTW89_PHY_0); 5020 rtw89_phy_write32_idx(rtwdev, R_P0_ANT_SW, B_P0_HW_ANTSW_DIS_BY_GNT_BT, 5021 0x0, RTW89_PHY_0); 5022 5023 rtw89_phy_write32_idx(rtwdev, R_P0_TRSW, B_P0_BT_FORCE_ANTIDX_EN, 5024 0x0, RTW89_PHY_0); 5025 5026 rtw89_phy_write32_idx(rtwdev, R_RFSW_CTRL_ANT0_BASE, B_RFSW_CTRL_ANT_MAPPING, 5027 0x0100, RTW89_PHY_0); 5028 5029 rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_BTG_TRX, 5030 0x1, RTW89_PHY_0); 5031 rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_HW_CTRL, 5032 0x0, RTW89_PHY_0); 5033 rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_SW_2G, 5034 0x0, RTW89_PHY_0); 5035 rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_SW_5G, 5036 0x0, RTW89_PHY_0); 5037 } 5038 5039 static void rtw89_phy_antdiv_sts_reset(struct rtw89_dev *rtwdev) 5040 { 5041 
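/* restart EWMA RSSI/EVM collection for the target, main and aux instances */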
struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv; 5042 5043 rtw89_phy_antdiv_sts_instance_reset(&antdiv->target_stats); 5044 rtw89_phy_antdiv_sts_instance_reset(&antdiv->main_stats); 5045 rtw89_phy_antdiv_sts_instance_reset(&antdiv->aux_stats); 5046 } 5047 5048 static void rtw89_phy_antdiv_init(struct rtw89_dev *rtwdev) 5049 { 5050 struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv; 5051 struct rtw89_hal *hal = &rtwdev->hal; 5052 5053 if (!hal->ant_diversity) 5054 return; 5055 5056 antdiv->get_stats = false; 5057 antdiv->rssi_pre = 0; 5058 rtw89_phy_antdiv_sts_reset(rtwdev); 5059 rtw89_phy_antdiv_reg_init(rtwdev); 5060 } 5061 5062 static void rtw89_phy_thermal_protect(struct rtw89_dev *rtwdev) 5063 { 5064 struct rtw89_phy_stat *phystat = &rtwdev->phystat; 5065 struct rtw89_hal *hal = &rtwdev->hal; 5066 u8 th_max = phystat->last_thermal_max; 5067 u8 lv = hal->thermal_prot_lv; 5068 5069 if (!hal->thermal_prot_th || 5070 (hal->disabled_dm_bitmap & BIT(RTW89_DM_THERMAL_PROTECT))) 5071 return; 5072 5073 if (th_max > hal->thermal_prot_th && lv < RTW89_THERMAL_PROT_LV_MAX) 5074 lv++; 5075 else if (th_max < hal->thermal_prot_th - 2 && lv > 0) 5076 lv--; 5077 else 5078 return; 5079 5080 hal->thermal_prot_lv = lv; 5081 5082 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, "thermal protection lv=%d\n", lv); 5083 5084 rtw89_fw_h2c_tx_duty(rtwdev, hal->thermal_prot_lv); 5085 } 5086 5087 static void rtw89_phy_stat_thermal_update(struct rtw89_dev *rtwdev) 5088 { 5089 struct rtw89_phy_stat *phystat = &rtwdev->phystat; 5090 u8 th, th_max = 0; 5091 int i; 5092 5093 for (i = 0; i < rtwdev->chip->rf_path_num; i++) { 5094 th = rtw89_chip_get_thermal(rtwdev, i); 5095 if (th) 5096 ewma_thermal_add(&phystat->avg_thermal[i], th); 5097 5098 rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, 5099 "path(%d) thermal cur=%u avg=%ld", i, th, 5100 ewma_thermal_read(&phystat->avg_thermal[i])); 5101 5102 th_max = max(th_max, th); 5103 } 5104 5105 phystat->last_thermal_max = th_max; 5106 } 5107 5108 struct rtw89_phy_iter_rssi_data { 5109 struct rtw89_dev *rtwdev; 5110 struct rtw89_phy_ch_info *ch_info; 5111 bool rssi_changed; 5112 }; 5113 5114 static 5115 void __rtw89_phy_stat_rssi_update_iter(struct rtw89_sta_link *rtwsta_link, 5116 struct rtw89_phy_iter_rssi_data *rssi_data) 5117 { 5118 struct rtw89_phy_ch_info *ch_info = rssi_data->ch_info; 5119 unsigned long rssi_curr; 5120 5121 rssi_curr = ewma_rssi_read(&rtwsta_link->avg_rssi); 5122 5123 if (rssi_curr < ch_info->rssi_min) { 5124 ch_info->rssi_min = rssi_curr; 5125 ch_info->rssi_min_macid = rtwsta_link->mac_id; 5126 } 5127 5128 if (rtwsta_link->prev_rssi == 0) { 5129 rtwsta_link->prev_rssi = rssi_curr; 5130 } else if (abs((int)rtwsta_link->prev_rssi - (int)rssi_curr) > 5131 (3 << RSSI_FACTOR)) { 5132 rtwsta_link->prev_rssi = rssi_curr; 5133 rssi_data->rssi_changed = true; 5134 } 5135 } 5136 5137 static void rtw89_phy_stat_rssi_update_iter(void *data, 5138 struct ieee80211_sta *sta) 5139 { 5140 struct rtw89_phy_iter_rssi_data *rssi_data = 5141 (struct rtw89_phy_iter_rssi_data *)data; 5142 struct rtw89_sta *rtwsta = sta_to_rtwsta(sta); 5143 struct rtw89_sta_link *rtwsta_link; 5144 unsigned int link_id; 5145 5146 rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id) 5147 __rtw89_phy_stat_rssi_update_iter(rtwsta_link, rssi_data); 5148 } 5149 5150 static void rtw89_phy_stat_rssi_update(struct rtw89_dev *rtwdev) 5151 { 5152 struct rtw89_phy_iter_rssi_data rssi_data = {0}; 5153 5154 rssi_data.rtwdev = rtwdev; 5155 rssi_data.ch_info = &rtwdev->ch_info; 5156 rssi_data.ch_info->rssi_min = 
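/* reset so the station iteration below recomputes the minimum */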
U8_MAX; 5157 ieee80211_iterate_stations_atomic(rtwdev->hw, 5158 rtw89_phy_stat_rssi_update_iter, 5159 &rssi_data); 5160 if (rssi_data.rssi_changed) 5161 rtw89_btc_ntfy_wl_sta(rtwdev); 5162 } 5163 5164 static void rtw89_phy_stat_init(struct rtw89_dev *rtwdev) 5165 { 5166 struct rtw89_phy_stat *phystat = &rtwdev->phystat; 5167 int i; 5168 5169 for (i = 0; i < rtwdev->chip->rf_path_num; i++) 5170 ewma_thermal_init(&phystat->avg_thermal[i]); 5171 5172 rtw89_phy_stat_thermal_update(rtwdev); 5173 5174 memset(&phystat->cur_pkt_stat, 0, sizeof(phystat->cur_pkt_stat)); 5175 memset(&phystat->last_pkt_stat, 0, sizeof(phystat->last_pkt_stat)); 5176 5177 ewma_rssi_init(&phystat->bcn_rssi); 5178 5179 rtwdev->hal.thermal_prot_lv = 0; 5180 } 5181 5182 void rtw89_phy_stat_track(struct rtw89_dev *rtwdev) 5183 { 5184 struct rtw89_phy_stat *phystat = &rtwdev->phystat; 5185 5186 rtw89_phy_stat_thermal_update(rtwdev); 5187 rtw89_phy_thermal_protect(rtwdev); 5188 rtw89_phy_stat_rssi_update(rtwdev); 5189 5190 phystat->last_pkt_stat = phystat->cur_pkt_stat; 5191 memset(&phystat->cur_pkt_stat, 0, sizeof(phystat->cur_pkt_stat)); 5192 } 5193 5194 static u16 rtw89_phy_ccx_us_to_idx(struct rtw89_dev *rtwdev, u32 time_us) 5195 { 5196 struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; 5197 5198 return time_us >> (ilog2(CCX_US_BASE_RATIO) + env->ccx_unit_idx); 5199 } 5200 5201 static u32 rtw89_phy_ccx_idx_to_us(struct rtw89_dev *rtwdev, u16 idx) 5202 { 5203 struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; 5204 5205 return idx << (ilog2(CCX_US_BASE_RATIO) + env->ccx_unit_idx); 5206 } 5207 5208 static void rtw89_phy_ccx_top_setting_init(struct rtw89_dev *rtwdev) 5209 { 5210 const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; 5211 struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; 5212 const struct rtw89_ccx_regs *ccx = phy->ccx; 5213 5214 env->ccx_manual_ctrl = false; 5215 env->ccx_ongoing = false; 5216 env->ccx_rac_lv = RTW89_RAC_RELEASE; 5217 env->ccx_period = 0; 5218 env->ccx_unit_idx = RTW89_CCX_32_US; 5219 5220 rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->en_mask, 1); 5221 rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->trig_opt_mask, 1); 5222 rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 1); 5223 rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->edcca_opt_mask, 5224 RTW89_CCX_EDCCA_BW20_0); 5225 } 5226 5227 static u16 rtw89_phy_ccx_get_report(struct rtw89_dev *rtwdev, u16 report, 5228 u16 score) 5229 { 5230 struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; 5231 u32 numer = 0; 5232 u16 ret = 0; 5233 5234 numer = report * score + (env->ccx_period >> 1); 5235 if (env->ccx_period) 5236 ret = numer / env->ccx_period; 5237 5238 return ret >= score ? 
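/* saturate at score - 1 so a ratio never reports the full score */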
score - 1 : ret; 5239 } 5240 5241 static void rtw89_phy_ccx_ms_to_period_unit(struct rtw89_dev *rtwdev, 5242 u16 time_ms, u32 *period, 5243 u32 *unit_idx) 5244 { 5245 u32 idx; 5246 u8 quotient; 5247 5248 if (time_ms >= CCX_MAX_PERIOD) 5249 time_ms = CCX_MAX_PERIOD; 5250 5251 quotient = CCX_MAX_PERIOD_UNIT * time_ms / CCX_MAX_PERIOD; 5252 5253 if (quotient < 4) 5254 idx = RTW89_CCX_4_US; 5255 else if (quotient < 8) 5256 idx = RTW89_CCX_8_US; 5257 else if (quotient < 16) 5258 idx = RTW89_CCX_16_US; 5259 else 5260 idx = RTW89_CCX_32_US; 5261 5262 *unit_idx = idx; 5263 *period = (time_ms * MS_TO_4US_RATIO) >> idx; 5264 5265 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, 5266 "[Trigger Time] period:%d, unit_idx:%d\n", 5267 *period, *unit_idx); 5268 } 5269 5270 static void rtw89_phy_ccx_racing_release(struct rtw89_dev *rtwdev) 5271 { 5272 struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; 5273 5274 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, 5275 "lv:(%d)->(0)\n", env->ccx_rac_lv); 5276 5277 env->ccx_ongoing = false; 5278 env->ccx_rac_lv = RTW89_RAC_RELEASE; 5279 env->ifs_clm_app = RTW89_IFS_CLM_BACKGROUND; 5280 } 5281 5282 static bool rtw89_phy_ifs_clm_th_update_check(struct rtw89_dev *rtwdev, 5283 struct rtw89_ccx_para_info *para) 5284 { 5285 struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; 5286 bool is_update = env->ifs_clm_app != para->ifs_clm_app; 5287 u8 i = 0; 5288 u16 *ifs_th_l = env->ifs_clm_th_l; 5289 u16 *ifs_th_h = env->ifs_clm_th_h; 5290 u32 ifs_th0_us = 0, ifs_th_times = 0; 5291 u32 ifs_th_h_us[RTW89_IFS_CLM_NUM] = {0}; 5292 5293 if (!is_update) 5294 goto ifs_update_finished; 5295 5296 switch (para->ifs_clm_app) { 5297 case RTW89_IFS_CLM_INIT: 5298 case RTW89_IFS_CLM_BACKGROUND: 5299 case RTW89_IFS_CLM_ACS: 5300 case RTW89_IFS_CLM_DBG: 5301 case RTW89_IFS_CLM_DIG: 5302 case RTW89_IFS_CLM_TDMA_DIG: 5303 ifs_th0_us = IFS_CLM_TH0_UPPER; 5304 ifs_th_times = IFS_CLM_TH_MUL; 5305 break; 5306 case RTW89_IFS_CLM_DBG_MANUAL: 5307 ifs_th0_us = para->ifs_clm_manual_th0; 5308 ifs_th_times = para->ifs_clm_manual_th_times; 5309 break; 5310 default: 5311 break; 5312 } 5313 5314 /* Set sampling threshold for 4 different regions, unit in idx_cnt. 
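 * e.g. starting from ifs_th0_us = t with multiplier m, the four high
 * bounds below work out to t, t*m, t*m^2 and t*m^3 us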
5315 * low[i] = high[i-1] + 1 5316 * high[i] = high[i-1] * ifs_th_times 5317 */ 5318 ifs_th_l[IFS_CLM_TH_START_IDX] = 0; 5319 ifs_th_h_us[IFS_CLM_TH_START_IDX] = ifs_th0_us; 5320 ifs_th_h[IFS_CLM_TH_START_IDX] = rtw89_phy_ccx_us_to_idx(rtwdev, 5321 ifs_th0_us); 5322 for (i = 1; i < RTW89_IFS_CLM_NUM; i++) { 5323 ifs_th_l[i] = ifs_th_h[i - 1] + 1; 5324 ifs_th_h_us[i] = ifs_th_h_us[i - 1] * ifs_th_times; 5325 ifs_th_h[i] = rtw89_phy_ccx_us_to_idx(rtwdev, ifs_th_h_us[i]); 5326 } 5327 5328 ifs_update_finished: 5329 if (!is_update) 5330 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, 5331 "No need to update IFS_TH\n"); 5332 5333 return is_update; 5334 } 5335 5336 static void rtw89_phy_ifs_clm_set_th_reg(struct rtw89_dev *rtwdev) 5337 { 5338 const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; 5339 struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; 5340 const struct rtw89_ccx_regs *ccx = phy->ccx; 5341 u8 i = 0; 5342 5343 rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_th_l_mask, 5344 env->ifs_clm_th_l[0]); 5345 rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_th_l_mask, 5346 env->ifs_clm_th_l[1]); 5347 rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_th_l_mask, 5348 env->ifs_clm_th_l[2]); 5349 rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_th_l_mask, 5350 env->ifs_clm_th_l[3]); 5351 5352 rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_th_h_mask, 5353 env->ifs_clm_th_h[0]); 5354 rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_th_h_mask, 5355 env->ifs_clm_th_h[1]); 5356 rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_th_h_mask, 5357 env->ifs_clm_th_h[2]); 5358 rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_th_h_mask, 5359 env->ifs_clm_th_h[3]); 5360 5361 for (i = 0; i < RTW89_IFS_CLM_NUM; i++) 5362 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, 5363 "Update IFS_T%d_th{low, high} : {%d, %d}\n", 5364 i + 1, env->ifs_clm_th_l[i], env->ifs_clm_th_h[i]); 5365 } 5366 5367 static void rtw89_phy_ifs_clm_setting_init(struct rtw89_dev *rtwdev) 5368 { 5369 const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; 5370 struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; 5371 const struct rtw89_ccx_regs *ccx = phy->ccx; 5372 struct rtw89_ccx_para_info para = {0}; 5373 5374 env->ifs_clm_app = RTW89_IFS_CLM_BACKGROUND; 5375 env->ifs_clm_mntr_time = 0; 5376 5377 para.ifs_clm_app = RTW89_IFS_CLM_INIT; 5378 if (rtw89_phy_ifs_clm_th_update_check(rtwdev, ¶)) 5379 rtw89_phy_ifs_clm_set_th_reg(rtwdev); 5380 5381 rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_collect_en_mask, true); 5382 rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_en_mask, true); 5383 rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_en_mask, true); 5384 rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_en_mask, true); 5385 rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_en_mask, true); 5386 } 5387 5388 static int rtw89_phy_ccx_racing_ctrl(struct rtw89_dev *rtwdev, 5389 enum rtw89_env_racing_lv level) 5390 { 5391 struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; 5392 int ret = 0; 5393 5394 if (level >= RTW89_RAC_MAX_NUM) { 5395 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, 5396 "[WARNING] Wrong LV=%d\n", level); 5397 return -EINVAL; 5398 } 5399 5400 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, 5401 "ccx_ongoing=%d, level:(%d)->(%d)\n", env->ccx_ongoing, 5402 env->ccx_rac_lv, level); 5403 5404 if (env->ccx_ongoing) { 5405 if (level <= env->ccx_rac_lv) 5406 ret = -EINVAL; 5407 
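/* an equal or lower level cannot preempt an ongoing measurement */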
else 5408 env->ccx_ongoing = false; 5409 } 5410 5411 if (ret == 0) 5412 env->ccx_rac_lv = level; 5413 5414 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "ccx racing success=%d\n", 5415 !ret); 5416 5417 return ret; 5418 } 5419 5420 static void rtw89_phy_ccx_trigger(struct rtw89_dev *rtwdev) 5421 { 5422 const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; 5423 struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; 5424 const struct rtw89_ccx_regs *ccx = phy->ccx; 5425 5426 rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_clm_cnt_clear_mask, 0); 5427 rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 0); 5428 rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_clm_cnt_clear_mask, 1); 5429 rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 1); 5430 5431 env->ccx_ongoing = true; 5432 } 5433 5434 static void rtw89_phy_ifs_clm_get_utility(struct rtw89_dev *rtwdev) 5435 { 5436 struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; 5437 u8 i = 0; 5438 u32 res = 0; 5439 5440 env->ifs_clm_tx_ratio = 5441 rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_tx, PERCENT); 5442 env->ifs_clm_edcca_excl_cca_ratio = 5443 rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_edcca_excl_cca, 5444 PERCENT); 5445 env->ifs_clm_cck_fa_ratio = 5446 rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_cckfa, PERCENT); 5447 env->ifs_clm_ofdm_fa_ratio = 5448 rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_ofdmfa, PERCENT); 5449 env->ifs_clm_cck_cca_excl_fa_ratio = 5450 rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_cckcca_excl_fa, 5451 PERCENT); 5452 env->ifs_clm_ofdm_cca_excl_fa_ratio = 5453 rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_ofdmcca_excl_fa, 5454 PERCENT); 5455 env->ifs_clm_cck_fa_permil = 5456 rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_cckfa, PERMIL); 5457 env->ifs_clm_ofdm_fa_permil = 5458 rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_ofdmfa, PERMIL); 5459 5460 for (i = 0; i < RTW89_IFS_CLM_NUM; i++) { 5461 if (env->ifs_clm_his[i] > ENV_MNTR_IFSCLM_HIS_MAX) { 5462 env->ifs_clm_ifs_avg[i] = ENV_MNTR_FAIL_DWORD; 5463 } else { 5464 env->ifs_clm_ifs_avg[i] = 5465 rtw89_phy_ccx_idx_to_us(rtwdev, 5466 env->ifs_clm_avg[i]); 5467 } 5468 5469 res = rtw89_phy_ccx_idx_to_us(rtwdev, env->ifs_clm_cca[i]); 5470 res += env->ifs_clm_his[i] >> 1; 5471 if (env->ifs_clm_his[i]) 5472 res /= env->ifs_clm_his[i]; 5473 else 5474 res = 0; 5475 env->ifs_clm_cca_avg[i] = res; 5476 } 5477 5478 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, 5479 "IFS-CLM ratio {Tx, EDCCA_exclu_cca} = {%d, %d}\n", 5480 env->ifs_clm_tx_ratio, env->ifs_clm_edcca_excl_cca_ratio); 5481 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, 5482 "IFS-CLM FA ratio {CCK, OFDM} = {%d, %d}\n", 5483 env->ifs_clm_cck_fa_ratio, env->ifs_clm_ofdm_fa_ratio); 5484 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, 5485 "IFS-CLM FA permil {CCK, OFDM} = {%d, %d}\n", 5486 env->ifs_clm_cck_fa_permil, env->ifs_clm_ofdm_fa_permil); 5487 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, 5488 "IFS-CLM CCA_exclu_FA ratio {CCK, OFDM} = {%d, %d}\n", 5489 env->ifs_clm_cck_cca_excl_fa_ratio, 5490 env->ifs_clm_ofdm_cca_excl_fa_ratio); 5491 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, 5492 "Time:[his, ifs_avg(us), cca_avg(us)]\n"); 5493 for (i = 0; i < RTW89_IFS_CLM_NUM; i++) 5494 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "T%d:[%d, %d, %d]\n", 5495 i + 1, env->ifs_clm_his[i], env->ifs_clm_ifs_avg[i], 5496 env->ifs_clm_cca_avg[i]); 5497 } 5498 5499 static bool rtw89_phy_ifs_clm_get_result(struct rtw89_dev *rtwdev) 5500 { 5501 const struct rtw89_phy_gen_def *phy 
= rtwdev->chip->phy_def; 5502 struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; 5503 const struct rtw89_ccx_regs *ccx = phy->ccx; 5504 u8 i = 0; 5505 5506 if (rtw89_phy_read32_mask(rtwdev, ccx->ifs_total_addr, 5507 ccx->ifs_cnt_done_mask) == 0) { 5508 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, 5509 "Get IFS_CLM report Fail\n"); 5510 return false; 5511 } 5512 5513 env->ifs_clm_tx = 5514 rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_tx_cnt_addr, 5515 ccx->ifs_clm_tx_cnt_msk); 5516 env->ifs_clm_edcca_excl_cca = 5517 rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_tx_cnt_addr, 5518 ccx->ifs_clm_edcca_excl_cca_fa_mask); 5519 env->ifs_clm_cckcca_excl_fa = 5520 rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_cca_addr, 5521 ccx->ifs_clm_cckcca_excl_fa_mask); 5522 env->ifs_clm_ofdmcca_excl_fa = 5523 rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_cca_addr, 5524 ccx->ifs_clm_ofdmcca_excl_fa_mask); 5525 env->ifs_clm_cckfa = 5526 rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_fa_addr, 5527 ccx->ifs_clm_cck_fa_mask); 5528 env->ifs_clm_ofdmfa = 5529 rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_fa_addr, 5530 ccx->ifs_clm_ofdm_fa_mask); 5531 5532 env->ifs_clm_his[0] = 5533 rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr, 5534 ccx->ifs_t1_his_mask); 5535 env->ifs_clm_his[1] = 5536 rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr, 5537 ccx->ifs_t2_his_mask); 5538 env->ifs_clm_his[2] = 5539 rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr, 5540 ccx->ifs_t3_his_mask); 5541 env->ifs_clm_his[3] = 5542 rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr, 5543 ccx->ifs_t4_his_mask); 5544 5545 env->ifs_clm_avg[0] = 5546 rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_l_addr, 5547 ccx->ifs_t1_avg_mask); 5548 env->ifs_clm_avg[1] = 5549 rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_l_addr, 5550 ccx->ifs_t2_avg_mask); 5551 env->ifs_clm_avg[2] = 5552 rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_h_addr, 5553 ccx->ifs_t3_avg_mask); 5554 env->ifs_clm_avg[3] = 5555 rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_h_addr, 5556 ccx->ifs_t4_avg_mask); 5557 5558 env->ifs_clm_cca[0] = 5559 rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_l_addr, 5560 ccx->ifs_t1_cca_mask); 5561 env->ifs_clm_cca[1] = 5562 rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_l_addr, 5563 ccx->ifs_t2_cca_mask); 5564 env->ifs_clm_cca[2] = 5565 rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_h_addr, 5566 ccx->ifs_t3_cca_mask); 5567 env->ifs_clm_cca[3] = 5568 rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_h_addr, 5569 ccx->ifs_t4_cca_mask); 5570 5571 env->ifs_clm_total_ifs = 5572 rtw89_phy_read32_mask(rtwdev, ccx->ifs_total_addr, 5573 ccx->ifs_total_mask); 5574 5575 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "IFS-CLM total_ifs = %d\n", 5576 env->ifs_clm_total_ifs); 5577 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, 5578 "{Tx, EDCCA_exclu_cca} = {%d, %d}\n", 5579 env->ifs_clm_tx, env->ifs_clm_edcca_excl_cca); 5580 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, 5581 "IFS-CLM FA{CCK, OFDM} = {%d, %d}\n", 5582 env->ifs_clm_cckfa, env->ifs_clm_ofdmfa); 5583 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, 5584 "IFS-CLM CCA_exclu_FA{CCK, OFDM} = {%d, %d}\n", 5585 env->ifs_clm_cckcca_excl_fa, env->ifs_clm_ofdmcca_excl_fa); 5586 5587 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "Time:[his, avg, cca]\n"); 5588 for (i = 0; i < RTW89_IFS_CLM_NUM; i++) 5589 rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, 5590 "T%d:[%d, %d, %d]\n", i + 1, env->ifs_clm_his[i], 5591 env->ifs_clm_avg[i], env->ifs_clm_cca[i]); 5592 5593 rtw89_phy_ifs_clm_get_utility(rtwdev); 5594 5595 return true; 5596 } 5597 5598 static int rtw89_phy_ifs_clm_set(struct rtw89_dev *rtwdev, 5599 
static int rtw89_phy_ifs_clm_set(struct rtw89_dev *rtwdev,
				 struct rtw89_ccx_para_info *para)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	const struct rtw89_ccx_regs *ccx = phy->ccx;
	u32 period = 0;
	u32 unit_idx = 0;

	if (para->mntr_time == 0) {
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "[WARN] MNTR_TIME is 0\n");
		return -EINVAL;
	}

	if (rtw89_phy_ccx_racing_ctrl(rtwdev, para->rac_lv))
		return -EINVAL;

	if (para->mntr_time != env->ifs_clm_mntr_time) {
		rtw89_phy_ccx_ms_to_period_unit(rtwdev, para->mntr_time,
						&period, &unit_idx);
		rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr,
				       ccx->ifs_clm_period_mask, period);
		rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr,
				       ccx->ifs_clm_cnt_unit_mask,
				       unit_idx);

		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "Update IFS-CLM time ((%d)) -> ((%d))\n",
			    env->ifs_clm_mntr_time, para->mntr_time);

		env->ifs_clm_mntr_time = para->mntr_time;
		env->ccx_period = (u16)period;
		env->ccx_unit_idx = (u8)unit_idx;
	}

	if (rtw89_phy_ifs_clm_th_update_check(rtwdev, para)) {
		env->ifs_clm_app = para->ifs_clm_app;
		rtw89_phy_ifs_clm_set_th_reg(rtwdev);
	}

	return 0;
}

void rtw89_phy_env_monitor_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	struct rtw89_ccx_para_info para = {0};
	u8 chk_result = RTW89_PHY_ENV_MON_CCX_FAIL;

	env->ccx_watchdog_result = RTW89_PHY_ENV_MON_CCX_FAIL;
	if (env->ccx_manual_ctrl) {
		rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
			    "CCX in manual ctrl\n");
		return;
	}

	/* only ifs_clm for now */
	if (rtw89_phy_ifs_clm_get_result(rtwdev))
		env->ccx_watchdog_result |= RTW89_PHY_ENV_MON_IFS_CLM;

	rtw89_phy_ccx_racing_release(rtwdev);
	para.mntr_time = 1900;
	para.rac_lv = RTW89_RAC_LV_1;
	para.ifs_clm_app = RTW89_IFS_CLM_BACKGROUND;

	if (rtw89_phy_ifs_clm_set(rtwdev, &para) == 0)
		chk_result |= RTW89_PHY_ENV_MON_IFS_CLM;
	if (chk_result)
		rtw89_phy_ccx_trigger(rtwdev);

	rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
		    "get_result=0x%x, chk_result:0x%x\n",
		    env->ccx_watchdog_result, chk_result);
}

static bool rtw89_physts_ie_page_valid(enum rtw89_phy_status_bitmap *ie_page)
{
	if (*ie_page >= RTW89_PHYSTS_BITMAP_NUM ||
	    *ie_page == RTW89_RSVD_9)
		return false;
	else if (*ie_page > RTW89_RSVD_9)
		*ie_page -= 1;

	return true;
}

static u32 rtw89_phy_get_ie_bitmap_addr(enum rtw89_phy_status_bitmap ie_page)
{
	static const u8 ie_page_shift = 2;

	return R_PHY_STS_BITMAP_ADDR_START + (ie_page << ie_page_shift);
}

static u32 rtw89_physts_get_ie_bitmap(struct rtw89_dev *rtwdev,
				      enum rtw89_phy_status_bitmap ie_page,
				      enum rtw89_phy_idx phy_idx)
{
	u32 addr;

	if (!rtw89_physts_ie_page_valid(&ie_page))
		return 0;

	addr = rtw89_phy_get_ie_bitmap_addr(ie_page);

	return rtw89_phy_read32_idx(rtwdev, addr, MASKDWORD, phy_idx);
}
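/* The IE bitmap helpers below do a read-modify-write of the per-page IE
 * enable bitmap. Pages above RTW89_RSVD_9 are remapped down by one register
 * slot, and RTL8852A only accepts the bits in B_PHY_STS_BITMAP_MSK_52A.
 */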
static void rtw89_physts_set_ie_bitmap(struct rtw89_dev *rtwdev,
				       enum rtw89_phy_status_bitmap ie_page,
				       u32 val, enum rtw89_phy_idx phy_idx)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 addr;

	if (!rtw89_physts_ie_page_valid(&ie_page))
		return;

	if (chip->chip_id == RTL8852A)
		val &= B_PHY_STS_BITMAP_MSK_52A;

	addr = rtw89_phy_get_ie_bitmap_addr(ie_page);
	rtw89_phy_write32_idx(rtwdev, addr, MASKDWORD, val, phy_idx);
}

static void rtw89_physts_enable_ie_bitmap(struct rtw89_dev *rtwdev,
					  enum rtw89_phy_status_bitmap bitmap,
					  enum rtw89_phy_status_ie_type ie,
					  bool enable, enum rtw89_phy_idx phy_idx)
{
	u32 val = rtw89_physts_get_ie_bitmap(rtwdev, bitmap, phy_idx);

	if (enable)
		val |= BIT(ie);
	else
		val &= ~BIT(ie);

	rtw89_physts_set_ie_bitmap(rtwdev, bitmap, val, phy_idx);
}

static void rtw89_physts_enable_fail_report(struct rtw89_dev *rtwdev,
					    bool enable,
					    enum rtw89_phy_idx phy_idx)
{
	const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def;
	const struct rtw89_physts_regs *physts = phy->physts;

	if (enable) {
		rtw89_phy_write32_idx_clr(rtwdev, physts->setting_addr,
					  physts->dis_trigger_fail_mask, phy_idx);
		rtw89_phy_write32_idx_clr(rtwdev, physts->setting_addr,
					  physts->dis_trigger_brk_mask, phy_idx);
	} else {
		rtw89_phy_write32_idx_set(rtwdev, physts->setting_addr,
					  physts->dis_trigger_fail_mask, phy_idx);
		rtw89_phy_write32_idx_set(rtwdev, physts->setting_addr,
					  physts->dis_trigger_brk_mask, phy_idx);
	}
}

static void __rtw89_physts_parsing_init(struct rtw89_dev *rtwdev,
					enum rtw89_phy_idx phy_idx)
{
	u8 i;

	rtw89_physts_enable_fail_report(rtwdev, false, phy_idx);

	for (i = 0; i < RTW89_PHYSTS_BITMAP_NUM; i++) {
		if (i >= RTW89_CCK_PKT)
			rtw89_physts_enable_ie_bitmap(rtwdev, i,
						      RTW89_PHYSTS_IE09_FTR_0,
						      true, phy_idx);
		if ((i >= RTW89_CCK_BRK && i <= RTW89_VHT_MU) ||
		    (i >= RTW89_RSVD_9 && i <= RTW89_CCK_PKT))
			continue;
		rtw89_physts_enable_ie_bitmap(rtwdev, i,
					      RTW89_PHYSTS_IE24_OFDM_TD_PATH_A,
					      true, phy_idx);
	}
	rtw89_physts_enable_ie_bitmap(rtwdev, RTW89_VHT_PKT,
				      RTW89_PHYSTS_IE13_DL_MU_DEF, true, phy_idx);
	rtw89_physts_enable_ie_bitmap(rtwdev, RTW89_HE_PKT,
				      RTW89_PHYSTS_IE13_DL_MU_DEF, true, phy_idx);

	/* force IE01 for channel index, only channel field is valid */
	rtw89_physts_enable_ie_bitmap(rtwdev, RTW89_CCK_PKT,
				      RTW89_PHYSTS_IE01_CMN_OFDM, true, phy_idx);
}

static void rtw89_physts_parsing_init(struct rtw89_dev *rtwdev)
{
	__rtw89_physts_parsing_init(rtwdev, RTW89_PHY_0);
	if (rtwdev->dbcc_en)
		__rtw89_physts_parsing_init(rtwdev, RTW89_PHY_1);
}
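/* DIG gain tables: each register field holds a 4-bit signed step relative
 * to a per-index base (LNA0_GAIN/TIA0_GAIN_*), so readback sign-extends
 * the field and accumulates DIG_GAIN per index to get the per-index gain.
 */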
static void rtw89_phy_dig_read_gain_table(struct rtw89_dev *rtwdev, int type)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_dig_info *dig = &rtwdev->dig;
	const struct rtw89_phy_dig_gain_cfg *cfg;
	const char *msg;
	u8 i;
	s8 gain_base;
	s8 *gain_arr;
	u32 tmp;

	switch (type) {
	case RTW89_DIG_GAIN_LNA_G:
		gain_arr = dig->lna_gain_g;
		gain_base = LNA0_GAIN;
		cfg = chip->dig_table->cfg_lna_g;
		msg = "lna_gain_g";
		break;
	case RTW89_DIG_GAIN_TIA_G:
		gain_arr = dig->tia_gain_g;
		gain_base = TIA0_GAIN_G;
		cfg = chip->dig_table->cfg_tia_g;
		msg = "tia_gain_g";
		break;
	case RTW89_DIG_GAIN_LNA_A:
		gain_arr = dig->lna_gain_a;
		gain_base = LNA0_GAIN;
		cfg = chip->dig_table->cfg_lna_a;
		msg = "lna_gain_a";
		break;
	case RTW89_DIG_GAIN_TIA_A:
		gain_arr = dig->tia_gain_a;
		gain_base = TIA0_GAIN_A;
		cfg = chip->dig_table->cfg_tia_a;
		msg = "tia_gain_a";
		break;
	default:
		return;
	}

	for (i = 0; i < cfg->size; i++) {
		tmp = rtw89_phy_read32_mask(rtwdev, cfg->table[i].addr,
					    cfg->table[i].mask);
		tmp >>= DIG_GAIN_SHIFT;
		gain_arr[i] = sign_extend32(tmp, U4_MAX_BIT) + gain_base;
		gain_base += DIG_GAIN;

		rtw89_debug(rtwdev, RTW89_DBG_DIG, "%s[%d]=%d\n",
			    msg, i, gain_arr[i]);
	}
}

static void rtw89_phy_dig_update_gain_para(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	u32 tmp;
	u8 i;

	if (!rtwdev->hal.support_igi)
		return;

	tmp = rtw89_phy_read32_mask(rtwdev, R_PATH0_IB_PKPW,
				    B_PATH0_IB_PKPW_MSK);
	dig->ib_pkpwr = sign_extend32(tmp >> DIG_GAIN_SHIFT, U8_MAX_BIT);
	dig->ib_pbk = rtw89_phy_read32_mask(rtwdev, R_PATH0_IB_PBK,
					    B_PATH0_IB_PBK_MSK);
	rtw89_debug(rtwdev, RTW89_DBG_DIG, "ib_pkpwr=%d, ib_pbk=%d\n",
		    dig->ib_pkpwr, dig->ib_pbk);

	for (i = RTW89_DIG_GAIN_LNA_G; i < RTW89_DIG_GAIN_MAX; i++)
		rtw89_phy_dig_read_gain_table(rtwdev, i);
}

static const u8 rssi_nolink = 22;
static const u8 igi_rssi_th[IGI_RSSI_TH_NUM] = {68, 84, 90, 98, 104};
static const u16 fa_th_2g[FA_TH_NUM] = {22, 44, 66, 88};
static const u16 fa_th_5g[FA_TH_NUM] = {4, 8, 12, 16};
static const u16 fa_th_nolink[FA_TH_NUM] = {196, 352, 440, 528};

static void rtw89_phy_dig_update_rssi_info(struct rtw89_dev *rtwdev)
{
	struct rtw89_phy_ch_info *ch_info = &rtwdev->ch_info;
	struct rtw89_dig_info *dig = &rtwdev->dig;
	bool is_linked = rtwdev->total_sta_assoc > 0;

	if (is_linked) {
		dig->igi_rssi = ch_info->rssi_min >> 1;
	} else {
		rtw89_debug(rtwdev, RTW89_DBG_DIG, "RSSI update : NO Link\n");
		dig->igi_rssi = rssi_nolink;
	}
}
static void rtw89_phy_dig_update_para(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
	bool is_linked = rtwdev->total_sta_assoc > 0;
	const u16 *fa_th_src = NULL;

	switch (chan->band_type) {
	case RTW89_BAND_2G:
		dig->lna_gain = dig->lna_gain_g;
		dig->tia_gain = dig->tia_gain_g;
		fa_th_src = is_linked ? fa_th_2g : fa_th_nolink;
		dig->force_gaincode_idx_en = false;
		dig->dyn_pd_th_en = true;
		break;
	case RTW89_BAND_5G:
	default:
		dig->lna_gain = dig->lna_gain_a;
		dig->tia_gain = dig->tia_gain_a;
		fa_th_src = is_linked ? fa_th_5g : fa_th_nolink;
		dig->force_gaincode_idx_en = true;
		dig->dyn_pd_th_en = true;
		break;
	}
	memcpy(dig->fa_th, fa_th_src, sizeof(dig->fa_th));
	memcpy(dig->igi_rssi_th, igi_rssi_th, sizeof(dig->igi_rssi_th));
}

static const u8 pd_low_th_offset = 16, dynamic_igi_min = 0x20;
static const u8 igi_max_performance_mode = 0x5a;
static const u8 dynamic_pd_threshold_max;

static void rtw89_phy_dig_para_reset(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;

	dig->cur_gaincode.lna_idx = LNA_IDX_MAX;
	dig->cur_gaincode.tia_idx = TIA_IDX_MAX;
	dig->cur_gaincode.rxb_idx = RXB_IDX_MAX;
	dig->force_gaincode.lna_idx = LNA_IDX_MAX;
	dig->force_gaincode.tia_idx = TIA_IDX_MAX;
	dig->force_gaincode.rxb_idx = RXB_IDX_MAX;

	dig->dyn_igi_max = igi_max_performance_mode;
	dig->dyn_igi_min = dynamic_igi_min;
	dig->dyn_pd_th_max = dynamic_pd_threshold_max;
	dig->pd_low_th_ofst = pd_low_th_offset;
	dig->is_linked_pre = false;
}

static void rtw89_phy_dig_init(struct rtw89_dev *rtwdev)
{
	rtw89_phy_dig_update_gain_para(rtwdev);
	rtw89_phy_dig_reset(rtwdev);
}

static u8 rtw89_phy_dig_lna_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	u8 lna_idx;

	if (rssi < dig->igi_rssi_th[0])
		lna_idx = RTW89_DIG_GAIN_LNA_IDX6;
	else if (rssi < dig->igi_rssi_th[1])
		lna_idx = RTW89_DIG_GAIN_LNA_IDX5;
	else if (rssi < dig->igi_rssi_th[2])
		lna_idx = RTW89_DIG_GAIN_LNA_IDX4;
	else if (rssi < dig->igi_rssi_th[3])
		lna_idx = RTW89_DIG_GAIN_LNA_IDX3;
	else if (rssi < dig->igi_rssi_th[4])
		lna_idx = RTW89_DIG_GAIN_LNA_IDX2;
	else
		lna_idx = RTW89_DIG_GAIN_LNA_IDX1;

	return lna_idx;
}

static u8 rtw89_phy_dig_tia_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	u8 tia_idx;

	if (rssi < dig->igi_rssi_th[0])
		tia_idx = RTW89_DIG_GAIN_TIA_IDX1;
	else
		tia_idx = RTW89_DIG_GAIN_TIA_IDX0;

	return tia_idx;
}

#define IB_PBK_BASE 110
#define WB_RSSI_BASE 10
static u8 rtw89_phy_dig_rxb_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi,
					struct rtw89_agc_gaincode_set *set)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	s8 lna_gain = dig->lna_gain[set->lna_idx];
	s8 tia_gain = dig->tia_gain[set->tia_idx];
	s32 wb_rssi = rssi + lna_gain + tia_gain;
	s32 rxb_idx_tmp = IB_PBK_BASE + WB_RSSI_BASE;
	u8 rxb_idx;

	rxb_idx_tmp += dig->ib_pkpwr - dig->ib_pbk - wb_rssi;
	rxb_idx = clamp_t(s32, rxb_idx_tmp, RXB_IDX_MIN, RXB_IDX_MAX);

	rtw89_debug(rtwdev, RTW89_DBG_DIG, "wb_rssi=%03d, rxb_idx_tmp=%03d\n",
		    wb_rssi, rxb_idx_tmp);

	return rxb_idx;
}

static void rtw89_phy_dig_gaincode_by_rssi(struct rtw89_dev *rtwdev, u8 rssi,
					   struct rtw89_agc_gaincode_set *set)
{
	set->lna_idx = rtw89_phy_dig_lna_idx_by_rssi(rtwdev, rssi);
	set->tia_idx = rtw89_phy_dig_tia_idx_by_rssi(rtwdev, rssi);
	set->rxb_idx = rtw89_phy_dig_rxb_idx_by_rssi(rtwdev, rssi, set);

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "final_rssi=%03d, (lna,tia,rab)=(%d,%d,%02d)\n",
		    rssi, set->lna_idx, set->tia_idx, set->rxb_idx);
}

#define IGI_OFFSET_MAX 25
#define IGI_OFFSET_MUL 2
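/* Map the IFS-CLM false-alarm ratio (CCK + OFDM permil counts) onto a noisy
 * level, then raise the IGI offset by IGI_OFFSET_MUL per level, capped at
 * IGI_OFFSET_MAX. A quiet environment (level 0, offset < 2) resets to 0.
 */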
static void rtw89_phy_dig_igi_offset_by_env(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
	enum rtw89_dig_noisy_level noisy_lv;
	u8 igi_offset = dig->fa_rssi_ofst;
	u16 fa_ratio = 0;

	fa_ratio = env->ifs_clm_cck_fa_permil + env->ifs_clm_ofdm_fa_permil;

	if (fa_ratio < dig->fa_th[0])
		noisy_lv = RTW89_DIG_NOISY_LEVEL0;
	else if (fa_ratio < dig->fa_th[1])
		noisy_lv = RTW89_DIG_NOISY_LEVEL1;
	else if (fa_ratio < dig->fa_th[2])
		noisy_lv = RTW89_DIG_NOISY_LEVEL2;
	else if (fa_ratio < dig->fa_th[3])
		noisy_lv = RTW89_DIG_NOISY_LEVEL3;
	else
		noisy_lv = RTW89_DIG_NOISY_LEVEL_MAX;

	if (noisy_lv == RTW89_DIG_NOISY_LEVEL0 && igi_offset < 2)
		igi_offset = 0;
	else
		igi_offset += noisy_lv * IGI_OFFSET_MUL;

	igi_offset = min_t(u8, igi_offset, IGI_OFFSET_MAX);
	dig->fa_rssi_ofst = igi_offset;

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "fa_th: [+6 (%d) +4 (%d) +2 (%d) 0 (%d) -2 ]\n",
		    dig->fa_th[3], dig->fa_th[2], dig->fa_th[1], dig->fa_th[0]);

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "fa(CCK,OFDM,ALL)=(%d,%d,%d)%%, noisy_lv=%d, ofst=%d\n",
		    env->ifs_clm_cck_fa_permil, env->ifs_clm_ofdm_fa_permil,
		    env->ifs_clm_cck_fa_permil + env->ifs_clm_ofdm_fa_permil,
		    noisy_lv, igi_offset);
}

static void rtw89_phy_dig_set_lna_idx(struct rtw89_dev *rtwdev, u8 lna_idx)
{
	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;

	rtw89_phy_write32_mask(rtwdev, dig_regs->p0_lna_init.addr,
			       dig_regs->p0_lna_init.mask, lna_idx);
	rtw89_phy_write32_mask(rtwdev, dig_regs->p1_lna_init.addr,
			       dig_regs->p1_lna_init.mask, lna_idx);
}

static void rtw89_phy_dig_set_tia_idx(struct rtw89_dev *rtwdev, u8 tia_idx)
{
	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;

	rtw89_phy_write32_mask(rtwdev, dig_regs->p0_tia_init.addr,
			       dig_regs->p0_tia_init.mask, tia_idx);
	rtw89_phy_write32_mask(rtwdev, dig_regs->p1_tia_init.addr,
			       dig_regs->p1_tia_init.mask, tia_idx);
}

static void rtw89_phy_dig_set_rxb_idx(struct rtw89_dev *rtwdev, u8 rxb_idx)
{
	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;

	rtw89_phy_write32_mask(rtwdev, dig_regs->p0_rxb_init.addr,
			       dig_regs->p0_rxb_init.mask, rxb_idx);
	rtw89_phy_write32_mask(rtwdev, dig_regs->p1_rxb_init.addr,
			       dig_regs->p1_rxb_init.mask, rxb_idx);
}

static void rtw89_phy_dig_set_igi_cr(struct rtw89_dev *rtwdev,
				     const struct rtw89_agc_gaincode_set set)
{
	if (!rtwdev->hal.support_igi)
		return;

	rtw89_phy_dig_set_lna_idx(rtwdev, set.lna_idx);
	rtw89_phy_dig_set_tia_idx(rtwdev, set.tia_idx);
	rtw89_phy_dig_set_rxb_idx(rtwdev, set.rxb_idx);

	rtw89_debug(rtwdev, RTW89_DBG_DIG, "Set (lna,tia,rxb)=((%d,%d,%02d))\n",
		    set.lna_idx, set.tia_idx, set.rxb_idx);
}
static void rtw89_phy_dig_sdagc_follow_pagc_config(struct rtw89_dev *rtwdev,
						   bool enable)
{
	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;

	rtw89_phy_write32_mask(rtwdev, dig_regs->p0_p20_pagcugc_en.addr,
			       dig_regs->p0_p20_pagcugc_en.mask, enable);
	rtw89_phy_write32_mask(rtwdev, dig_regs->p0_s20_pagcugc_en.addr,
			       dig_regs->p0_s20_pagcugc_en.mask, enable);
	rtw89_phy_write32_mask(rtwdev, dig_regs->p1_p20_pagcugc_en.addr,
			       dig_regs->p1_p20_pagcugc_en.mask, enable);
	rtw89_phy_write32_mask(rtwdev, dig_regs->p1_s20_pagcugc_en.addr,
			       dig_regs->p1_s20_pagcugc_en.mask, enable);

	rtw89_debug(rtwdev, RTW89_DBG_DIG, "sdagc_follow_pagc=%d\n", enable);
}

static void rtw89_phy_dig_config_igi(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;

	if (!rtwdev->hal.support_igi)
		return;

	if (dig->force_gaincode_idx_en) {
		rtw89_phy_dig_set_igi_cr(rtwdev, dig->force_gaincode);
		rtw89_debug(rtwdev, RTW89_DBG_DIG,
			    "Force gaincode index enabled.\n");
	} else {
		rtw89_phy_dig_gaincode_by_rssi(rtwdev, dig->igi_fa_rssi,
					       &dig->cur_gaincode);
		rtw89_phy_dig_set_igi_cr(rtwdev, dig->cur_gaincode);
	}
}

static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev, u8 rssi,
				    bool enable)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0);
	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
	enum rtw89_bandwidth cbw = chan->band_width;
	struct rtw89_dig_info *dig = &rtwdev->dig;
	u8 final_rssi = 0, under_region = dig->pd_low_th_ofst;
	u8 ofdm_cca_th;
	s8 cck_cca_th;
	u32 pd_val = 0;

	if (rtwdev->chip->chip_gen == RTW89_CHIP_AX)
		under_region += PD_TH_SB_FLTR_CMP_VAL;

	switch (cbw) {
	case RTW89_CHANNEL_WIDTH_40:
		under_region += PD_TH_BW40_CMP_VAL;
		break;
	case RTW89_CHANNEL_WIDTH_80:
		under_region += PD_TH_BW80_CMP_VAL;
		break;
	case RTW89_CHANNEL_WIDTH_160:
		under_region += PD_TH_BW160_CMP_VAL;
		break;
	case RTW89_CHANNEL_WIDTH_20:
		fallthrough;
	default:
		under_region += PD_TH_BW20_CMP_VAL;
		break;
	}

	dig->dyn_pd_th_max = dig->igi_rssi;

	final_rssi = min_t(u8, rssi, dig->igi_rssi);
	ofdm_cca_th = clamp_t(u8, final_rssi, PD_TH_MIN_RSSI + under_region,
			      PD_TH_MAX_RSSI + under_region);

	if (enable) {
		pd_val = (ofdm_cca_th - under_region - PD_TH_MIN_RSSI) >> 1;
		rtw89_debug(rtwdev, RTW89_DBG_DIG,
			    "igi=%d, ofdm_ccaTH=%d, backoff=%d, PD_low=%d\n",
			    final_rssi, ofdm_cca_th, under_region, pd_val);
	} else {
		rtw89_debug(rtwdev, RTW89_DBG_DIG,
			    "Dynamic PD th disabled, Set PD_low_bd=0\n");
	}

	rtw89_phy_write32_mask(rtwdev, dig_regs->seg0_pd_reg,
			       dig_regs->pd_lower_bound_mask, pd_val);
	rtw89_phy_write32_mask(rtwdev, dig_regs->seg0_pd_reg,
			       dig_regs->pd_spatial_reuse_en, enable);

	if (!rtwdev->hal.support_cckpd)
		return;

	cck_cca_th = max_t(s8, final_rssi - under_region, CCKPD_TH_MIN_RSSI);
	pd_val = (u32)(cck_cca_th - IGI_RSSI_MAX);

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "igi=%d, cck_ccaTH=%d, backoff=%d, cck_PD_low=((%d))dB\n",
		    final_rssi, cck_cca_th, under_region, pd_val);

	rtw89_phy_write32_mask(rtwdev, dig_regs->bmode_pd_reg,
			       dig_regs->bmode_cca_rssi_limit_en, enable);
	rtw89_phy_write32_mask(rtwdev, dig_regs->bmode_pd_lower_bound_reg,
			       dig_regs->bmode_rssi_nocca_low_th_mask, pd_val);
}

void rtw89_phy_dig_reset(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;

	dig->bypass_dig = false;
	rtw89_phy_dig_para_reset(rtwdev);
	rtw89_phy_dig_set_igi_cr(rtwdev, dig->force_gaincode);
	rtw89_phy_dig_dyn_pd_th(rtwdev, rssi_nolink, false);
	rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, false);
	rtw89_phy_dig_update_para(rtwdev);
}
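/* Main DIG watchdog: refresh RSSI, re-derive parameters on link state
 * change, add the environment-based offset, clamp the IGI between the
 * dynamic min/max, then program the gain code and PD thresholds.
 */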
#define IGI_RSSI_MIN 10
#define ABS_IGI_MIN 0xc
void rtw89_phy_dig(struct rtw89_dev *rtwdev)
{
	struct rtw89_dig_info *dig = &rtwdev->dig;
	bool is_linked = rtwdev->total_sta_assoc > 0;
	u8 igi_min;

	if (unlikely(dig->bypass_dig)) {
		dig->bypass_dig = false;
		return;
	}

	rtw89_phy_dig_update_rssi_info(rtwdev);

	if (!dig->is_linked_pre && is_linked) {
		rtw89_debug(rtwdev, RTW89_DBG_DIG, "First connected\n");
		rtw89_phy_dig_update_para(rtwdev);
		dig->igi_fa_rssi = dig->igi_rssi;
	} else if (dig->is_linked_pre && !is_linked) {
		rtw89_debug(rtwdev, RTW89_DBG_DIG, "First disconnected\n");
		rtw89_phy_dig_update_para(rtwdev);
		dig->igi_fa_rssi = dig->igi_rssi;
	}
	dig->is_linked_pre = is_linked;

	rtw89_phy_dig_igi_offset_by_env(rtwdev);

	igi_min = max_t(int, dig->igi_rssi - IGI_RSSI_MIN, 0);
	dig->dyn_igi_max = min(igi_min + IGI_OFFSET_MAX, igi_max_performance_mode);
	dig->dyn_igi_min = max(igi_min, ABS_IGI_MIN);

	if (dig->dyn_igi_max >= dig->dyn_igi_min) {
		dig->igi_fa_rssi += dig->fa_rssi_ofst;
		dig->igi_fa_rssi = clamp(dig->igi_fa_rssi, dig->dyn_igi_min,
					 dig->dyn_igi_max);
	} else {
		dig->igi_fa_rssi = dig->dyn_igi_max;
	}

	rtw89_debug(rtwdev, RTW89_DBG_DIG,
		    "rssi=%03d, dyn_joint(max,min)=(%d,%d), final_rssi=%d\n",
		    dig->igi_rssi, dig->dyn_igi_max, dig->dyn_igi_min,
		    dig->igi_fa_rssi);

	rtw89_phy_dig_config_igi(rtwdev);

	rtw89_phy_dig_dyn_pd_th(rtwdev, dig->igi_fa_rssi, dig->dyn_pd_th_en);

	if (dig->dyn_pd_th_en && dig->igi_fa_rssi > dig->dyn_pd_th_max)
		rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, true);
	else
		rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, false);
}

static void __rtw89_phy_tx_path_div_sta_iter(struct rtw89_dev *rtwdev,
					     struct rtw89_sta_link *rtwsta_link)
{
	struct rtw89_hal *hal = &rtwdev->hal;
	u8 rssi_a, rssi_b;
	u32 candidate;

	rssi_a = ewma_rssi_read(&rtwsta_link->rssi[RF_PATH_A]);
	rssi_b = ewma_rssi_read(&rtwsta_link->rssi[RF_PATH_B]);

	if (rssi_a > rssi_b + RTW89_TX_DIV_RSSI_RAW_TH)
		candidate = RF_A;
	else if (rssi_b > rssi_a + RTW89_TX_DIV_RSSI_RAW_TH)
		candidate = RF_B;
	else
		return;

	if (hal->antenna_tx == candidate)
		return;

	hal->antenna_tx = candidate;
	rtw89_fw_h2c_txpath_cmac_tbl(rtwdev, rtwsta_link);

	if (hal->antenna_tx == RF_A) {
		rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE, B_P0_RFMODE_MUX, 0x12);
		rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE, B_P1_RFMODE_MUX, 0x11);
	} else if (hal->antenna_tx == RF_B) {
		rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE, B_P0_RFMODE_MUX, 0x11);
		rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE, B_P1_RFMODE_MUX, 0x12);
	}
}
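/* Station iterator for TX path diversity: warn on MLD vifs, skip TDLS
 * peers, act on the first station-role link found, and stop after one
 * update per round via the done flag.
 */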
static void rtw89_phy_tx_path_div_sta_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw89_sta *rtwsta = sta_to_rtwsta(sta);
	struct rtw89_dev *rtwdev = rtwsta->rtwdev;
	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
	struct rtw89_vif_link *rtwvif_link;
	struct rtw89_sta_link *rtwsta_link;
	unsigned int link_id;
	bool *done = data;

	if (WARN(ieee80211_vif_is_mld(vif), "MLD mix path_div\n"))
		return;

	if (sta->tdls)
		return;

	if (*done)
		return;

	rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id) {
		rtwvif_link = rtwsta_link->rtwvif_link;
		if (rtwvif_link->wifi_role != RTW89_WIFI_ROLE_STATION)
			continue;

		*done = true;
		__rtw89_phy_tx_path_div_sta_iter(rtwdev, rtwsta_link);
		return;
	}
}

void rtw89_phy_tx_path_div_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_hal *hal = &rtwdev->hal;
	bool done = false;

	if (!hal->tx_path_diversity)
		return;

	ieee80211_iterate_stations_atomic(rtwdev->hw,
					  rtw89_phy_tx_path_div_sta_iter,
					  &done);
}

#define ANTDIV_MAIN 0
#define ANTDIV_AUX 1

static void rtw89_phy_antdiv_set_ant(struct rtw89_dev *rtwdev)
{
	struct rtw89_hal *hal = &rtwdev->hal;
	u8 default_ant, optional_ant;

	if (!hal->ant_diversity || hal->antenna_tx == 0)
		return;

	if (hal->antenna_tx == RF_B) {
		default_ant = ANTDIV_AUX;
		optional_ant = ANTDIV_MAIN;
	} else {
		default_ant = ANTDIV_MAIN;
		optional_ant = ANTDIV_AUX;
	}

	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_CGCS_CTRL,
			      default_ant, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_RX_ORI,
			      default_ant, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_RX_ALT,
			      optional_ant, RTW89_PHY_0);
	rtw89_phy_write32_idx(rtwdev, R_P0_ANTSEL, B_P0_ANTSEL_TX_ORI,
			      default_ant, RTW89_PHY_0);
}

static void rtw89_phy_swap_hal_antenna(struct rtw89_dev *rtwdev)
{
	struct rtw89_hal *hal = &rtwdev->hal;

	hal->antenna_rx = hal->antenna_rx == RF_A ? RF_B : RF_A;
	hal->antenna_tx = hal->antenna_rx;
}

static void rtw89_phy_antdiv_decision_state(struct rtw89_dev *rtwdev)
{
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
	struct rtw89_hal *hal = &rtwdev->hal;
	bool no_change = false;
	u8 main_rssi, aux_rssi;
	u8 main_evm, aux_evm;
	u32 candidate;

	antdiv->get_stats = false;
	antdiv->training_count = 0;

	main_rssi = rtw89_phy_antdiv_sts_instance_get_rssi(&antdiv->main_stats);
	main_evm = rtw89_phy_antdiv_sts_instance_get_evm(&antdiv->main_stats);
	aux_rssi = rtw89_phy_antdiv_sts_instance_get_rssi(&antdiv->aux_stats);
	aux_evm = rtw89_phy_antdiv_sts_instance_get_evm(&antdiv->aux_stats);

	if (main_evm > aux_evm + ANTDIV_EVM_DIFF_TH)
		candidate = RF_A;
	else if (aux_evm > main_evm + ANTDIV_EVM_DIFF_TH)
		candidate = RF_B;
	else if (main_rssi > aux_rssi + RTW89_TX_DIV_RSSI_RAW_TH)
		candidate = RF_A;
	else if (aux_rssi > main_rssi + RTW89_TX_DIV_RSSI_RAW_TH)
		candidate = RF_B;
	else
		no_change = true;

	if (no_change) {
		/* swap back from training antenna to original */
		rtw89_phy_swap_hal_antenna(rtwdev);
		return;
	}

	hal->antenna_tx = candidate;
	hal->antenna_rx = candidate;
}
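/* Antenna diversity training alternates between collecting statistics on
 * the current antenna and swapping to the other one for ANTDIV_TRAINNING_CNT
 * iterations, after which the decision state compares EVM first and RSSI
 * second.
 */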
static void rtw89_phy_antdiv_training_state(struct rtw89_dev *rtwdev)
{
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
	u64 state_period;

	if (antdiv->training_count % 2 == 0) {
		if (antdiv->training_count == 0)
			rtw89_phy_antdiv_sts_reset(rtwdev);

		antdiv->get_stats = true;
		state_period = msecs_to_jiffies(ANTDIV_TRAINNING_INTVL);
	} else {
		antdiv->get_stats = false;
		state_period = msecs_to_jiffies(ANTDIV_DELAY);

		rtw89_phy_swap_hal_antenna(rtwdev);
		rtw89_phy_antdiv_set_ant(rtwdev);
	}

	antdiv->training_count++;
	ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->antdiv_work,
				     state_period);
}

void rtw89_phy_antdiv_work(struct work_struct *work)
{
	struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
						antdiv_work.work);
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;

	mutex_lock(&rtwdev->mutex);

	if (antdiv->training_count <= ANTDIV_TRAINNING_CNT) {
		rtw89_phy_antdiv_training_state(rtwdev);
	} else {
		rtw89_phy_antdiv_decision_state(rtwdev);
		rtw89_phy_antdiv_set_ant(rtwdev);
	}

	mutex_unlock(&rtwdev->mutex);
}

void rtw89_phy_antdiv_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv;
	struct rtw89_hal *hal = &rtwdev->hal;
	u8 rssi, rssi_pre;

	if (!hal->ant_diversity || hal->ant_diversity_fixed)
		return;

	rssi = rtw89_phy_antdiv_sts_instance_get_rssi(&antdiv->target_stats);
	rssi_pre = antdiv->rssi_pre;
	antdiv->rssi_pre = rssi;
	rtw89_phy_antdiv_sts_instance_reset(&antdiv->target_stats);

	if (abs((int)rssi - (int)rssi_pre) < ANTDIV_RSSI_DIFF_TH)
		return;

	antdiv->training_count = 0;
	ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->antdiv_work, 0);
}

static void rtw89_phy_env_monitor_init(struct rtw89_dev *rtwdev)
{
	rtw89_phy_ccx_top_setting_init(rtwdev);
	rtw89_phy_ifs_clm_setting_init(rtwdev);
}

static void rtw89_phy_edcca_init(struct rtw89_dev *rtwdev)
{
	const struct rtw89_edcca_regs *edcca_regs = rtwdev->chip->edcca_regs;
	struct rtw89_edcca_bak *edcca_bak = &rtwdev->hal.edcca_bak;

	memset(edcca_bak, 0, sizeof(*edcca_bak));

	if (rtwdev->chip->chip_id == RTL8922A && rtwdev->hal.cv == CHIP_CAV) {
		rtw89_phy_set_phy_regs(rtwdev, R_TXGATING, B_TXGATING_EN, 0);
		rtw89_phy_set_phy_regs(rtwdev, R_CTLTOP, B_CTLTOP_VAL, 2);
		rtw89_phy_set_phy_regs(rtwdev, R_CTLTOP, B_CTLTOP_ON, 1);
		rtw89_phy_set_phy_regs(rtwdev, R_SPOOF_CG, B_SPOOF_CG_EN, 0);
		rtw89_phy_set_phy_regs(rtwdev, R_DFS_FFT_CG, B_DFS_CG_EN, 0);
		rtw89_phy_set_phy_regs(rtwdev, R_DFS_FFT_CG, B_DFS_FFT_EN, 0);
		rtw89_phy_set_phy_regs(rtwdev, R_SEGSND, B_SEGSND_EN, 0);
		rtw89_phy_set_phy_regs(rtwdev, R_SEGSND, B_SEGSND_EN, 1);
		rtw89_phy_set_phy_regs(rtwdev, R_DFS_FFT_CG, B_DFS_FFT_EN, 1);
	}

	rtw89_phy_write32_mask(rtwdev, edcca_regs->tx_collision_t2r_st,
			       edcca_regs->tx_collision_t2r_st_mask, 0x29);
}

void rtw89_phy_dm_init(struct rtw89_dev *rtwdev)
{
	rtw89_phy_stat_init(rtwdev);

	rtw89_chip_bb_sethw(rtwdev);

	rtw89_phy_env_monitor_init(rtwdev);
	rtw89_physts_parsing_init(rtwdev);
	rtw89_phy_dig_init(rtwdev);
	rtw89_phy_cfo_init(rtwdev);
	rtw89_phy_bb_wrap_init(rtwdev);
	rtw89_phy_edcca_init(rtwdev);
	rtw89_phy_ch_info_init(rtwdev);
	rtw89_phy_ul_tb_info_init(rtwdev);
	rtw89_phy_antdiv_init(rtwdev);
	rtw89_chip_rfe_gpio(rtwdev);
	rtw89_phy_antdiv_set_ant(rtwdev);

	rtw89_chip_rfk_hw_init(rtwdev);
	rtw89_phy_init_rf_nctl(rtwdev);
	rtw89_chip_rfk_init(rtwdev);
	rtw89_chip_set_txpwr_ctrl(rtwdev);
	rtw89_chip_power_trim(rtwdev);
	rtw89_chip_cfg_txrx_path(rtwdev);
}

void rtw89_phy_dm_reinit(struct rtw89_dev *rtwdev)
{
	rtw89_phy_env_monitor_init(rtwdev);
	rtw89_physts_parsing_init(rtwdev);
}
void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev,
			     struct rtw89_vif_link *rtwvif_link)
{
	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_reg_def *bss_clr_vld = &chip->bss_clr_vld;
	enum rtw89_phy_idx phy_idx = rtwvif_link->phy_idx;
	struct ieee80211_bss_conf *bss_conf;
	u8 bss_color;

	rcu_read_lock();

	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
	if (!bss_conf->he_support || !vif->cfg.assoc) {
		rcu_read_unlock();
		return;
	}

	bss_color = bss_conf->he_bss_color.color;

	rcu_read_unlock();

	rtw89_phy_write32_idx(rtwdev, bss_clr_vld->addr, bss_clr_vld->mask, 0x1,
			      phy_idx);
	rtw89_phy_write32_idx(rtwdev, chip->bss_clr_map_reg, B_BSS_CLR_MAP_TGT,
			      bss_color, phy_idx);
	rtw89_phy_write32_idx(rtwdev, chip->bss_clr_map_reg, B_BSS_CLR_MAP_STAID,
			      vif->cfg.aid, phy_idx);
}

static bool rfk_chan_validate_desc(const struct rtw89_rfk_chan_desc *desc)
{
	return desc->ch != 0;
}

static bool rfk_chan_is_equivalent(const struct rtw89_rfk_chan_desc *desc,
				   const struct rtw89_chan *chan)
{
	if (!rfk_chan_validate_desc(desc))
		return false;

	if (desc->ch != chan->channel)
		return false;

	if (desc->has_band && desc->band != chan->band_type)
		return false;

	if (desc->has_bw && desc->bw != chan->band_width)
		return false;

	return true;
}

struct rfk_chan_iter_data {
	const struct rtw89_rfk_chan_desc desc;
	unsigned int found;
};

static int rfk_chan_iter_search(const struct rtw89_chan *chan, void *data)
{
	struct rfk_chan_iter_data *iter_data = data;

	if (rfk_chan_is_equivalent(&iter_data->desc, chan))
		iter_data->found++;

	return 0;
}

u8 rtw89_rfk_chan_lookup(struct rtw89_dev *rtwdev,
			 const struct rtw89_rfk_chan_desc *desc, u8 desc_nr,
			 const struct rtw89_chan *target_chan)
{
	int sel = -1;
	u8 i;

	for (i = 0; i < desc_nr; i++) {
		struct rfk_chan_iter_data iter_data = {
			.desc = desc[i],
		};

		if (rfk_chan_is_equivalent(&desc[i], target_chan))
			return i;

		rtw89_iterate_entity_chan(rtwdev, rfk_chan_iter_search, &iter_data);
		if (!iter_data.found && sel == -1)
			sel = i;
	}

	if (sel == -1) {
		rtw89_debug(rtwdev, RTW89_DBG_RFK,
			    "no idle rfk entry; force replace the first\n");
		sel = 0;
	}

	return sel;
}
EXPORT_SYMBOL(rtw89_rfk_chan_lookup);

static void
_rfk_write_rf(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	rtw89_write_rf(rtwdev, def->path, def->addr, def->mask, def->data);
}

static void
_rfk_write32_mask(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	rtw89_phy_write32_mask(rtwdev, def->addr, def->mask, def->data);
}

static void
_rfk_write32_set(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	rtw89_phy_write32_set(rtwdev, def->addr, def->mask);
}

static void
_rfk_write32_clr(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	rtw89_phy_write32_clr(rtwdev, def->addr, def->mask);
}

static void
_rfk_delay(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
	udelay(def->data);
}
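/* RFK tables are parsed by dispatching each entry's flag through this
 * handler array; the static_assert keeps the array in lockstep with
 * RTW89_RFK_F_NUM when new op types are added.
 */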
static void
(*_rfk_handler[])(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def) = {
	[RTW89_RFK_F_WRF] = _rfk_write_rf,
	[RTW89_RFK_F_WM] = _rfk_write32_mask,
	[RTW89_RFK_F_WS] = _rfk_write32_set,
	[RTW89_RFK_F_WC] = _rfk_write32_clr,
	[RTW89_RFK_F_DELAY] = _rfk_delay,
};

static_assert(ARRAY_SIZE(_rfk_handler) == RTW89_RFK_F_NUM);

void
rtw89_rfk_parser(struct rtw89_dev *rtwdev, const struct rtw89_rfk_tbl *tbl)
{
	const struct rtw89_reg5_def *p = tbl->defs;
	const struct rtw89_reg5_def *end = tbl->defs + tbl->size;

	for (; p < end; p++)
		_rfk_handler[p->flag](rtwdev, p);
}
EXPORT_SYMBOL(rtw89_rfk_parser);

#define RTW89_TSSI_FAST_MODE_NUM 4

static const struct rtw89_reg_def rtw89_tssi_fastmode_regs_flat[RTW89_TSSI_FAST_MODE_NUM] = {
	{0xD934, 0xff0000},
	{0xD934, 0xff000000},
	{0xD938, 0xff},
	{0xD934, 0xff00},
};

static const struct rtw89_reg_def rtw89_tssi_fastmode_regs_level[RTW89_TSSI_FAST_MODE_NUM] = {
	{0xD930, 0xff0000},
	{0xD930, 0xff000000},
	{0xD934, 0xff},
	{0xD930, 0xff00},
};

static
void rtw89_phy_tssi_ctrl_set_fast_mode_cfg(struct rtw89_dev *rtwdev,
					   enum rtw89_mac_idx mac_idx,
					   enum rtw89_tssi_bandedge_cfg bandedge_cfg,
					   u32 val)
{
	const struct rtw89_reg_def *regs;
	u32 reg;
	int i;

	if (bandedge_cfg == RTW89_TSSI_BANDEDGE_FLAT)
		regs = rtw89_tssi_fastmode_regs_flat;
	else
		regs = rtw89_tssi_fastmode_regs_level;

	for (i = 0; i < RTW89_TSSI_FAST_MODE_NUM; i++) {
		reg = rtw89_mac_reg_by_idx(rtwdev, regs[i].addr, mac_idx);
		rtw89_write32_mask(rtwdev, reg, regs[i].mask, val);
	}
}

static const struct rtw89_reg_def rtw89_tssi_bandedge_regs_flat[RTW89_TSSI_SBW_NUM] = {
	{0xD91C, 0xff000000},
	{0xD920, 0xff},
	{0xD920, 0xff00},
	{0xD920, 0xff0000},
	{0xD920, 0xff000000},
	{0xD924, 0xff},
	{0xD924, 0xff00},
	{0xD914, 0xff000000},
	{0xD918, 0xff},
	{0xD918, 0xff00},
	{0xD918, 0xff0000},
	{0xD918, 0xff000000},
	{0xD91C, 0xff},
	{0xD91C, 0xff00},
	{0xD91C, 0xff0000},
};

static const struct rtw89_reg_def rtw89_tssi_bandedge_regs_level[RTW89_TSSI_SBW_NUM] = {
	{0xD910, 0xff},
	{0xD910, 0xff00},
	{0xD910, 0xff0000},
	{0xD910, 0xff000000},
	{0xD914, 0xff},
	{0xD914, 0xff00},
	{0xD914, 0xff0000},
	{0xD908, 0xff},
	{0xD908, 0xff00},
	{0xD908, 0xff0000},
	{0xD908, 0xff000000},
	{0xD90C, 0xff},
	{0xD90C, 0xff00},
	{0xD90C, 0xff0000},
	{0xD90C, 0xff000000},
};

void rtw89_phy_tssi_ctrl_set_bandedge_cfg(struct rtw89_dev *rtwdev,
					  enum rtw89_mac_idx mac_idx,
					  enum rtw89_tssi_bandedge_cfg bandedge_cfg)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_reg_def *regs;
	const u32 *data;
	u32 reg;
	int i;

	if (bandedge_cfg >= RTW89_TSSI_CFG_NUM)
		return;

	if (bandedge_cfg == RTW89_TSSI_BANDEDGE_FLAT)
		regs = rtw89_tssi_bandedge_regs_flat;
	else
		regs = rtw89_tssi_bandedge_regs_level;

	data = chip->tssi_dbw_table->data[bandedge_cfg];

	for (i = 0; i < RTW89_TSSI_SBW_NUM; i++) {
		reg = rtw89_mac_reg_by_idx(rtwdev, regs[i].addr, mac_idx);
		rtw89_write32_mask(rtwdev, reg, regs[i].mask, data[i]);
	}

	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_BANDEDGE_CFG, mac_idx);
	rtw89_write32_mask(rtwdev, reg, B_AX_BANDEDGE_CFG_IDX_MASK, bandedge_cfg);

	rtw89_phy_tssi_ctrl_set_fast_mode_cfg(rtwdev, mac_idx, bandedge_cfg,
					      data[RTW89_TSSI_SBW20]);
}
EXPORT_SYMBOL(rtw89_phy_tssi_ctrl_set_bandedge_cfg);

static
const u8 rtw89_ch_base_table[16] = {1, 0xff,
				    36, 100, 132, 149, 0xff,
				    1, 33, 65, 97, 129, 161, 193, 225, 0xff};
#define RTW89_CH_BASE_IDX_2G 0
#define RTW89_CH_BASE_IDX_5G_FIRST 2
#define RTW89_CH_BASE_IDX_5G_LAST 5
#define RTW89_CH_BASE_IDX_6G_FIRST 7
#define RTW89_CH_BASE_IDX_6G_LAST 14

#define RTW89_CH_BASE_IDX_MASK GENMASK(7, 4)
#define RTW89_CH_OFFSET_MASK GENMASK(3, 0)

u8 rtw89_encode_chan_idx(struct rtw89_dev *rtwdev, u8 central_ch, u8 band)
{
	u8 chan_idx;
	u8 last, first;
	u8 idx;

	switch (band) {
	case RTW89_BAND_2G:
		chan_idx = FIELD_PREP(RTW89_CH_BASE_IDX_MASK, RTW89_CH_BASE_IDX_2G) |
			   FIELD_PREP(RTW89_CH_OFFSET_MASK, central_ch);
		return chan_idx;
	case RTW89_BAND_5G:
		first = RTW89_CH_BASE_IDX_5G_FIRST;
		last = RTW89_CH_BASE_IDX_5G_LAST;
		break;
	case RTW89_BAND_6G:
		first = RTW89_CH_BASE_IDX_6G_FIRST;
		last = RTW89_CH_BASE_IDX_6G_LAST;
		break;
	default:
		rtw89_warn(rtwdev, "Unsupported band %d\n", band);
		return 0;
	}

	for (idx = last; idx >= first; idx--)
		if (central_ch >= rtw89_ch_base_table[idx])
			break;

	if (idx < first) {
		rtw89_warn(rtwdev, "Unknown band %d channel %d\n", band, central_ch);
		return 0;
	}

	chan_idx = FIELD_PREP(RTW89_CH_BASE_IDX_MASK, idx) |
		   FIELD_PREP(RTW89_CH_OFFSET_MASK,
			      (central_ch - rtw89_ch_base_table[idx]) >> 1);
	return chan_idx;
}
EXPORT_SYMBOL(rtw89_encode_chan_idx);
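/* Decoding reverses the base/offset split above. For example, 5 GHz
 * channel 149 encodes as base idx 5 (base channel 149) with offset
 * (149 - 149) >> 1 = 0, i.e. chan_idx 0x50, which decodes back to
 * NL80211_BAND_5GHZ channel 149 (offset doubled and added to the base).
 */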
void rtw89_decode_chan_idx(struct rtw89_dev *rtwdev, u8 chan_idx,
			   u8 *ch, enum nl80211_band *band)
{
	u8 idx, offset;

	idx = FIELD_GET(RTW89_CH_BASE_IDX_MASK, chan_idx);
	offset = FIELD_GET(RTW89_CH_OFFSET_MASK, chan_idx);

	if (idx == RTW89_CH_BASE_IDX_2G) {
		*band = NL80211_BAND_2GHZ;
		*ch = offset;
		return;
	}

	*band = idx <= RTW89_CH_BASE_IDX_5G_LAST ?
		NL80211_BAND_5GHZ : NL80211_BAND_6GHZ;
	*ch = rtw89_ch_base_table[idx] + (offset << 1);
}
EXPORT_SYMBOL(rtw89_decode_chan_idx);

void rtw89_phy_config_edcca(struct rtw89_dev *rtwdev, bool scan)
{
	const struct rtw89_edcca_regs *edcca_regs = rtwdev->chip->edcca_regs;
	struct rtw89_edcca_bak *edcca_bak = &rtwdev->hal.edcca_bak;

	if (scan) {
		edcca_bak->a =
			rtw89_phy_read32_mask(rtwdev, edcca_regs->edcca_level,
					      edcca_regs->edcca_mask);
		edcca_bak->p =
			rtw89_phy_read32_mask(rtwdev, edcca_regs->edcca_level,
					      edcca_regs->edcca_p_mask);
		edcca_bak->ppdu =
			rtw89_phy_read32_mask(rtwdev, edcca_regs->ppdu_level,
					      edcca_regs->ppdu_mask);

		rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level,
				       edcca_regs->edcca_mask, EDCCA_MAX);
		rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level,
				       edcca_regs->edcca_p_mask, EDCCA_MAX);
		rtw89_phy_write32_mask(rtwdev, edcca_regs->ppdu_level,
				       edcca_regs->ppdu_mask, EDCCA_MAX);
	} else {
		rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level,
				       edcca_regs->edcca_mask,
				       edcca_bak->a);
		rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level,
				       edcca_regs->edcca_p_mask,
				       edcca_bak->p);
		rtw89_phy_write32_mask(rtwdev, edcca_regs->ppdu_level,
				       edcca_regs->ppdu_mask,
				       edcca_bak->ppdu);
	}
}
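/* Dump the EDCCA report for debugging: select report pages through the
 * rpt_sel mux (rpt_sel_be as well on RTL8922A) and unpack the per-20MHz
 * PWDB, per-segment flags, and FB/p20/s20/s40/s80 PWDB from the report
 * words.
 */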
static void rtw89_phy_edcca_log(struct rtw89_dev *rtwdev)
{
	const struct rtw89_edcca_regs *edcca_regs = rtwdev->chip->edcca_regs;
	bool flag_fb, flag_p20, flag_s20, flag_s40, flag_s80;
	s8 pwdb_fb, pwdb_p20, pwdb_s20, pwdb_s40, pwdb_s80;
	u8 path, per20_bitmap;
	u8 pwdb[8];
	u32 tmp;

	if (!rtw89_debug_is_enabled(rtwdev, RTW89_DBG_EDCCA))
		return;

	if (rtwdev->chip->chip_id == RTL8922A)
		rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel_be,
				       edcca_regs->rpt_sel_be_mask, 0);

	rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel,
			       edcca_regs->rpt_sel_mask, 0);
	tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_b);
	path = u32_get_bits(tmp, B_EDCCA_RPT_B_PATH_MASK);
	flag_s80 = u32_get_bits(tmp, B_EDCCA_RPT_B_S80);
	flag_s40 = u32_get_bits(tmp, B_EDCCA_RPT_B_S40);
	flag_s20 = u32_get_bits(tmp, B_EDCCA_RPT_B_S20);
	flag_p20 = u32_get_bits(tmp, B_EDCCA_RPT_B_P20);
	flag_fb = u32_get_bits(tmp, B_EDCCA_RPT_B_FB);
	pwdb_s20 = u32_get_bits(tmp, MASKBYTE1);
	pwdb_p20 = u32_get_bits(tmp, MASKBYTE2);
	pwdb_fb = u32_get_bits(tmp, MASKBYTE3);

	rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel,
			       edcca_regs->rpt_sel_mask, 4);
	tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_b);
	pwdb_s80 = u32_get_bits(tmp, MASKBYTE1);
	pwdb_s40 = u32_get_bits(tmp, MASKBYTE2);

	per20_bitmap = rtw89_phy_read32_mask(rtwdev, edcca_regs->rpt_a,
					     MASKBYTE0);

	if (rtwdev->chip->chip_id == RTL8922A) {
		rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel_be,
				       edcca_regs->rpt_sel_be_mask, 4);
		tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_b);
		pwdb[0] = u32_get_bits(tmp, MASKBYTE3);
		pwdb[1] = u32_get_bits(tmp, MASKBYTE2);
		pwdb[2] = u32_get_bits(tmp, MASKBYTE1);
		pwdb[3] = u32_get_bits(tmp, MASKBYTE0);

		rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel_be,
				       edcca_regs->rpt_sel_be_mask, 5);
		tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_b);
		pwdb[4] = u32_get_bits(tmp, MASKBYTE3);
		pwdb[5] = u32_get_bits(tmp, MASKBYTE2);
		pwdb[6] = u32_get_bits(tmp, MASKBYTE1);
		pwdb[7] = u32_get_bits(tmp, MASKBYTE0);
	} else {
		rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel,
				       edcca_regs->rpt_sel_mask, 0);
		tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_a);
		pwdb[0] = u32_get_bits(tmp, MASKBYTE3);
		pwdb[1] = u32_get_bits(tmp, MASKBYTE2);

		rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel,
				       edcca_regs->rpt_sel_mask, 1);
		tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_a);
		pwdb[2] = u32_get_bits(tmp, MASKBYTE3);
		pwdb[3] = u32_get_bits(tmp, MASKBYTE2);

		rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel,
				       edcca_regs->rpt_sel_mask, 2);
		tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_a);
		pwdb[4] = u32_get_bits(tmp, MASKBYTE3);
		pwdb[5] = u32_get_bits(tmp, MASKBYTE2);

		rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel,
				       edcca_regs->rpt_sel_mask, 3);
		tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_a);
		pwdb[6] = u32_get_bits(tmp, MASKBYTE3);
		pwdb[7] = u32_get_bits(tmp, MASKBYTE2);
	}

	rtw89_debug(rtwdev, RTW89_DBG_EDCCA,
		    "[EDCCA]: edcca_bitmap = %04x\n", per20_bitmap);

	rtw89_debug(rtwdev, RTW89_DBG_EDCCA,
		    "[EDCCA]: pwdb per20{0,1,2,3,4,5,6,7} = {%d,%d,%d,%d,%d,%d,%d,%d}(dBm)\n",
		    pwdb[0], pwdb[1], pwdb[2], pwdb[3], pwdb[4], pwdb[5],
		    pwdb[6], pwdb[7]);

	rtw89_debug(rtwdev, RTW89_DBG_EDCCA,
		    "[EDCCA]: path=%d, flag {FB,p20,s20,s40,s80} = {%d,%d,%d,%d,%d}\n",
		    path, flag_fb, flag_p20, flag_s20, flag_s40, flag_s80);

	rtw89_debug(rtwdev, RTW89_DBG_EDCCA,
		    "[EDCCA]: pwdb {FB,p20,s20,s40,s80} = {%d,%d,%d,%d,%d}(dBm)\n",
		    pwdb_fb, pwdb_p20, pwdb_s20, pwdb_s40, pwdb_s80);
}

static u8 rtw89_phy_edcca_get_thre_by_rssi(struct rtw89_dev *rtwdev)
{
	struct rtw89_phy_ch_info *ch_info = &rtwdev->ch_info;
	bool is_linked = rtwdev->total_sta_assoc > 0;
	u8 rssi_min = ch_info->rssi_min >> 1;
	u8 edcca_thre;

	if (!is_linked) {
		edcca_thre = EDCCA_MAX;
	} else {
		edcca_thre = rssi_min - RSSI_UNIT_CONVER + EDCCA_UNIT_CONVER -
			     EDCCA_TH_REF;
		edcca_thre = max_t(u8, edcca_thre, EDCCA_TH_L2H_LB);
	}

	return edcca_thre;
}

void rtw89_phy_edcca_thre_calc(struct rtw89_dev *rtwdev)
{
	const struct rtw89_edcca_regs *edcca_regs = rtwdev->chip->edcca_regs;
	struct rtw89_edcca_bak *edcca_bak = &rtwdev->hal.edcca_bak;
	u8 th;

	th = rtw89_phy_edcca_get_thre_by_rssi(rtwdev);
	if (th == edcca_bak->th_old)
		return;

	edcca_bak->th_old = th;

	rtw89_debug(rtwdev, RTW89_DBG_EDCCA,
		    "[EDCCA]: Normal Mode, EDCCA_th = %d\n", th);

	rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level,
			       edcca_regs->edcca_mask, th);
	rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level,
			       edcca_regs->edcca_p_mask, th);
	rtw89_phy_write32_mask(rtwdev, edcca_regs->ppdu_level,
			       edcca_regs->ppdu_mask, th);
}

void rtw89_phy_edcca_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_hal *hal = &rtwdev->hal;

	if (hal->disabled_dm_bitmap & BIT(RTW89_DM_DYNAMIC_EDCCA))
		return;

	rtw89_phy_edcca_thre_calc(rtwdev);
	rtw89_phy_edcca_log(rtwdev);
}
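/* Map a PHY index to the RF path(s) an RF calibration should touch, based
 * on the current MLO/DBCC mode: e.g. MLO_1_PLUS_1_2RF gives PHY1 RF_D,
 * while the dual-path 2RF modes give PHY0 RF_AB and PHY1 RF_CD.
 */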
enum rtw89_rf_path_bit rtw89_phy_get_kpath(struct rtw89_dev *rtwdev,
					   enum rtw89_phy_idx phy_idx)
{
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[RFK] kpath dbcc_en: 0x%x, mode=0x%x, PHY%d\n",
		    rtwdev->dbcc_en, rtwdev->mlo_dbcc_mode, phy_idx);

	switch (rtwdev->mlo_dbcc_mode) {
	case MLO_1_PLUS_1_1RF:
		if (phy_idx == RTW89_PHY_0)
			return RF_A;
		else
			return RF_B;
	case MLO_1_PLUS_1_2RF:
		if (phy_idx == RTW89_PHY_0)
			return RF_A;
		else
			return RF_D;
	case MLO_0_PLUS_2_1RF:
	case MLO_2_PLUS_0_1RF:
		/* for both PHY 0/1 */
		return RF_AB;
	case MLO_0_PLUS_2_2RF:
	case MLO_2_PLUS_0_2RF:
	case MLO_2_PLUS_2_2RF:
	default:
		if (phy_idx == RTW89_PHY_0)
			return RF_AB;
		else
			return RF_CD;
	}
}
EXPORT_SYMBOL(rtw89_phy_get_kpath);

enum rtw89_rf_path rtw89_phy_get_syn_sel(struct rtw89_dev *rtwdev,
					 enum rtw89_phy_idx phy_idx)
{
	rtw89_debug(rtwdev, RTW89_DBG_RFK,
		    "[RFK] kpath dbcc_en: 0x%x, mode=0x%x, PHY%d\n",
		    rtwdev->dbcc_en, rtwdev->mlo_dbcc_mode, phy_idx);

	switch (rtwdev->mlo_dbcc_mode) {
	case MLO_1_PLUS_1_1RF:
		if (phy_idx == RTW89_PHY_0)
			return RF_PATH_A;
		else
			return RF_PATH_B;
	case MLO_1_PLUS_1_2RF:
		if (phy_idx == RTW89_PHY_0)
			return RF_PATH_A;
		else
			return RF_PATH_D;
	case MLO_0_PLUS_2_1RF:
	case MLO_2_PLUS_0_1RF:
		if (phy_idx == RTW89_PHY_0)
			return RF_PATH_A;
		else
			return RF_PATH_B;
	case MLO_0_PLUS_2_2RF:
	case MLO_2_PLUS_0_2RF:
	case MLO_2_PLUS_2_2RF:
	default:
		if (phy_idx == RTW89_PHY_0)
			return RF_PATH_A;
		else
			return RF_PATH_C;
	}
}
EXPORT_SYMBOL(rtw89_phy_get_syn_sel);
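/* AX-generation register layout tables: these bind the generation-agnostic
 * CCX/physts/CFO logic above to concrete addresses and masks through
 * rtw89_phy_gen_def, so other chip generations can supply their own tables.
 */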
static const struct rtw89_ccx_regs rtw89_ccx_regs_ax = {
	.setting_addr = R_CCX,
	.edcca_opt_mask = B_CCX_EDCCA_OPT_MSK,
	.measurement_trig_mask = B_MEASUREMENT_TRIG_MSK,
	.trig_opt_mask = B_CCX_TRIG_OPT_MSK,
	.en_mask = B_CCX_EN_MSK,
	.ifs_cnt_addr = R_IFS_COUNTER,
	.ifs_clm_period_mask = B_IFS_CLM_PERIOD_MSK,
	.ifs_clm_cnt_unit_mask = B_IFS_CLM_COUNTER_UNIT_MSK,
	.ifs_clm_cnt_clear_mask = B_IFS_COUNTER_CLR_MSK,
	.ifs_collect_en_mask = B_IFS_COLLECT_EN,
	.ifs_t1_addr = R_IFS_T1,
	.ifs_t1_th_h_mask = B_IFS_T1_TH_HIGH_MSK,
	.ifs_t1_en_mask = B_IFS_T1_EN_MSK,
	.ifs_t1_th_l_mask = B_IFS_T1_TH_LOW_MSK,
	.ifs_t2_addr = R_IFS_T2,
	.ifs_t2_th_h_mask = B_IFS_T2_TH_HIGH_MSK,
	.ifs_t2_en_mask = B_IFS_T2_EN_MSK,
	.ifs_t2_th_l_mask = B_IFS_T2_TH_LOW_MSK,
	.ifs_t3_addr = R_IFS_T3,
	.ifs_t3_th_h_mask = B_IFS_T3_TH_HIGH_MSK,
	.ifs_t3_en_mask = B_IFS_T3_EN_MSK,
	.ifs_t3_th_l_mask = B_IFS_T3_TH_LOW_MSK,
	.ifs_t4_addr = R_IFS_T4,
	.ifs_t4_th_h_mask = B_IFS_T4_TH_HIGH_MSK,
	.ifs_t4_en_mask = B_IFS_T4_EN_MSK,
	.ifs_t4_th_l_mask = B_IFS_T4_TH_LOW_MSK,
	.ifs_clm_tx_cnt_addr = R_IFS_CLM_TX_CNT,
	.ifs_clm_edcca_excl_cca_fa_mask = B_IFS_CLM_EDCCA_EXCLUDE_CCA_FA_MSK,
	.ifs_clm_tx_cnt_msk = B_IFS_CLM_TX_CNT_MSK,
	.ifs_clm_cca_addr = R_IFS_CLM_CCA,
	.ifs_clm_ofdmcca_excl_fa_mask = B_IFS_CLM_OFDMCCA_EXCLUDE_FA_MSK,
	.ifs_clm_cckcca_excl_fa_mask = B_IFS_CLM_CCKCCA_EXCLUDE_FA_MSK,
	.ifs_clm_fa_addr = R_IFS_CLM_FA,
	.ifs_clm_ofdm_fa_mask = B_IFS_CLM_OFDM_FA_MSK,
	.ifs_clm_cck_fa_mask = B_IFS_CLM_CCK_FA_MSK,
	.ifs_his_addr = R_IFS_HIS,
	.ifs_t4_his_mask = B_IFS_T4_HIS_MSK,
	.ifs_t3_his_mask = B_IFS_T3_HIS_MSK,
	.ifs_t2_his_mask = B_IFS_T2_HIS_MSK,
	.ifs_t1_his_mask = B_IFS_T1_HIS_MSK,
	.ifs_avg_l_addr = R_IFS_AVG_L,
	.ifs_t2_avg_mask = B_IFS_T2_AVG_MSK,
	.ifs_t1_avg_mask = B_IFS_T1_AVG_MSK,
	.ifs_avg_h_addr = R_IFS_AVG_H,
	.ifs_t4_avg_mask = B_IFS_T4_AVG_MSK,
	.ifs_t3_avg_mask = B_IFS_T3_AVG_MSK,
	.ifs_cca_l_addr = R_IFS_CCA_L,
	.ifs_t2_cca_mask = B_IFS_T2_CCA_MSK,
	.ifs_t1_cca_mask = B_IFS_T1_CCA_MSK,
	.ifs_cca_h_addr = R_IFS_CCA_H,
	.ifs_t4_cca_mask = B_IFS_T4_CCA_MSK,
	.ifs_t3_cca_mask = B_IFS_T3_CCA_MSK,
	.ifs_total_addr = R_IFSCNT,
	.ifs_cnt_done_mask = B_IFSCNT_DONE_MSK,
	.ifs_total_mask = B_IFSCNT_TOTAL_CNT_MSK,
};

static const struct rtw89_physts_regs rtw89_physts_regs_ax = {
	.setting_addr = R_PLCP_HISTOGRAM,
	.dis_trigger_fail_mask = B_STS_DIS_TRIG_BY_FAIL,
	.dis_trigger_brk_mask = B_STS_DIS_TRIG_BY_BRK,
};

static const struct rtw89_cfo_regs rtw89_cfo_regs_ax = {
	.comp = R_DCFO_WEIGHT,
	.weighting_mask = B_DCFO_WEIGHT_MSK,
	.comp_seg0 = R_DCFO_OPT,
	.valid_0_mask = B_DCFO_OPT_EN,
};

const struct rtw89_phy_gen_def rtw89_phy_gen_ax = {
	.cr_base = 0x10000,
	.ccx = &rtw89_ccx_regs_ax,
	.physts = &rtw89_physts_regs_ax,
	.cfo = &rtw89_cfo_regs_ax,
	.phy0_phy1_offset = rtw89_phy0_phy1_offset_ax,
	.config_bb_gain = rtw89_phy_config_bb_gain_ax,
	.preinit_rf_nctl = rtw89_phy_preinit_rf_nctl_ax,
	.bb_wrap_init = NULL,
	.ch_info_init = NULL,

	.set_txpwr_byrate = rtw89_phy_set_txpwr_byrate_ax,
	.set_txpwr_offset = rtw89_phy_set_txpwr_offset_ax,
	.set_txpwr_limit = rtw89_phy_set_txpwr_limit_ax,
	.set_txpwr_limit_ru = rtw89_phy_set_txpwr_limit_ru_ax,
};
EXPORT_SYMBOL(rtw89_phy_gen_ax);