// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2023 Realtek Corporation
 */

#include "chan.h"
#include "debug.h"
#include "mac.h"
#include "phy.h"
#include "reg.h"
#include "rtw8922a.h"
#include "rtw8922a_rfk.h"

static void rtw8922a_tssi_cont_en(struct rtw89_dev *rtwdev, bool en,
				  enum rtw89_rf_path path)
{
	static const u32 tssi_trk_man[2] = {R_TSSI_PWR_P0, R_TSSI_PWR_P1};

	if (en)
		rtw89_phy_write32_mask(rtwdev, tssi_trk_man[path], B_TSSI_CONT_EN, 0);
	else
		rtw89_phy_write32_mask(rtwdev, tssi_trk_man[path], B_TSSI_CONT_EN, 1);
}

void rtw8922a_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en, u8 phy_idx)
{
	if (rtwdev->mlo_dbcc_mode == MLO_1_PLUS_1_1RF) {
		if (phy_idx == RTW89_PHY_0)
			rtw8922a_tssi_cont_en(rtwdev, en, RF_PATH_A);
		else
			rtw8922a_tssi_cont_en(rtwdev, en, RF_PATH_B);
	} else {
		rtw8922a_tssi_cont_en(rtwdev, en, RF_PATH_A);
		rtw8922a_tssi_cont_en(rtwdev, en, RF_PATH_B);
	}
}

static
void rtw8922a_ctl_band_ch_bw(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
			     u8 central_ch, enum rtw89_band band,
			     enum rtw89_bandwidth bw)
{
	const u32 rf_addr[2] = {RR_CFGCH, RR_CFGCH_V1};
	struct rtw89_hal *hal = &rtwdev->hal;
	u32 rf_reg[RF_PATH_NUM_8922A][2];
	u8 synpath;
	u32 rf18;
	u8 kpath;
	u8 path;
	u8 i;

	rf_reg[RF_PATH_A][0] = rtw89_read_rf(rtwdev, RF_PATH_A, rf_addr[0], RFREG_MASK);
	rf_reg[RF_PATH_A][1] = rtw89_read_rf(rtwdev, RF_PATH_A, rf_addr[1], RFREG_MASK);
	rf_reg[RF_PATH_B][0] = rtw89_read_rf(rtwdev, RF_PATH_B, rf_addr[0], RFREG_MASK);
	rf_reg[RF_PATH_B][1] = rtw89_read_rf(rtwdev, RF_PATH_B, rf_addr[1], RFREG_MASK);

	kpath = rtw89_phy_get_kpath(rtwdev, phy);
	synpath = rtw89_phy_get_syn_sel(rtwdev, phy);

	rf18 = rtw89_read_rf(rtwdev, synpath, RR_CFGCH, RFREG_MASK);
	if (rf18 == INV_RF_DATA) {
		rtw89_warn(rtwdev, "[RFK] Invalid RF18 value\n");
		return;
	}

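	/* Rebuild RF 0x18 (RR_CFGCH and RR_CFGCH_V1) for every calibration
	 * path: clear the channel/band/bandwidth fields, re-encode the
	 * requested centre channel, band and bandwidth, then write the value
	 * back with a short delay between writes.
	 */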
	for (path = 0; path < RF_PATH_NUM_8922A; path++) {
		if (!(kpath & BIT(path)))
			continue;

		for (i = 0; i < 2; i++) {
			if (rf_reg[path][i] == INV_RF_DATA) {
				rtw89_warn(rtwdev,
					   "[RFK] Invalid RF_0x18 for Path-%d\n", path);
				return;
			}

			rf_reg[path][i] &= ~(RR_CFGCH_BAND1 | RR_CFGCH_BW |
					     RR_CFGCH_BAND0 | RR_CFGCH_CH);
			rf_reg[path][i] |= u32_encode_bits(central_ch, RR_CFGCH_CH);

			if (band == RTW89_BAND_2G)
				rtw89_write_rf(rtwdev, path, RR_SMD, RR_VCO2, 0x0);
			else
				rtw89_write_rf(rtwdev, path, RR_SMD, RR_VCO2, 0x1);

			switch (band) {
			case RTW89_BAND_2G:
			default:
				break;
			case RTW89_BAND_5G:
				rf_reg[path][i] |=
					u32_encode_bits(CFGCH_BAND1_5G, RR_CFGCH_BAND1) |
					u32_encode_bits(CFGCH_BAND0_5G, RR_CFGCH_BAND0);
				break;
			case RTW89_BAND_6G:
				rf_reg[path][i] |=
					u32_encode_bits(CFGCH_BAND1_6G, RR_CFGCH_BAND1) |
					u32_encode_bits(CFGCH_BAND0_6G, RR_CFGCH_BAND0);
				break;
			}

			switch (bw) {
			case RTW89_CHANNEL_WIDTH_5:
			case RTW89_CHANNEL_WIDTH_10:
			case RTW89_CHANNEL_WIDTH_20:
			default:
				break;
			case RTW89_CHANNEL_WIDTH_40:
				rf_reg[path][i] |=
					u32_encode_bits(CFGCH_BW_V2_40M, RR_CFGCH_BW_V2);
				break;
			case RTW89_CHANNEL_WIDTH_80:
				rf_reg[path][i] |=
					u32_encode_bits(CFGCH_BW_V2_80M, RR_CFGCH_BW_V2);
				break;
			case RTW89_CHANNEL_WIDTH_160:
				rf_reg[path][i] |=
					u32_encode_bits(CFGCH_BW_V2_160M, RR_CFGCH_BW_V2);
				break;
			case RTW89_CHANNEL_WIDTH_320:
				rf_reg[path][i] |=
					u32_encode_bits(CFGCH_BW_V2_320M, RR_CFGCH_BW_V2);
				break;
			}

			rtw89_write_rf(rtwdev, path, rf_addr[i],
				       RFREG_MASK, rf_reg[path][i]);
			fsleep(100);
		}
	}

	if (hal->cv != CHIP_CAV)
		return;

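	/* CHIP_CAV only: load band-dependent synthesizer LUT entries on
	 * path A. The constants below look like vendor calibration values;
	 * only RR_LUTWD1 differs between 2 GHz and 5/6 GHz.
	 */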
	if (band == RTW89_BAND_2G) {
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x80000);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x00003);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD1, RFREG_MASK, 0x0c990);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0xebe38);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x00000);
	} else {
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x80000);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x00003);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD1, RFREG_MASK, 0x0c190);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0xebe38);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x00000);
	}
}

void rtw8922a_set_channel_rf(struct rtw89_dev *rtwdev,
			     const struct rtw89_chan *chan,
			     enum rtw89_phy_idx phy_idx)
{
	rtw8922a_ctl_band_ch_bw(rtwdev, phy_idx, chan->channel, chan->band_type,
				chan->band_width);
}

enum _rf_syn_pow {
	RF_SYN_ON_OFF,
	RF_SYN_OFF_ON,
	RF_SYN_ALLON,
	RF_SYN_ALLOFF,
};

static void rtw8922a_set_syn01_cav(struct rtw89_dev *rtwdev, enum _rf_syn_pow syn)
{
	if (syn == RF_SYN_ALLON) {
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x3);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x2);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x3);

		rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN, 0x3);
		rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN, 0x2);
		rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN, 0x3);
	} else if (syn == RF_SYN_ON_OFF) {
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x3);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x2);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x3);

		rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN, 0x0);
	} else if (syn == RF_SYN_OFF_ON) {
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x0);

		rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN, 0x3);
		rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN, 0x2);
		rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN, 0x3);
	} else if (syn == RF_SYN_ALLOFF) {
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN, 0x0);
		rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN, 0x0);
	}
}

static void rtw8922a_set_syn01_cbv(struct rtw89_dev *rtwdev, enum _rf_syn_pow syn)
{
	if (syn == RF_SYN_ALLON) {
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN_V1, 0xf);
		rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN_V1, 0xf);
	} else if (syn == RF_SYN_ON_OFF) {
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN_V1, 0xf);
		rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN_V1, 0x0);
	} else if (syn == RF_SYN_OFF_ON) {
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN_V1, 0x0);
		rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN_V1, 0xf);
	} else if (syn == RF_SYN_ALLOFF) {
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_POW, RR_POW_SYN_V1, 0x0);
		rtw89_write_rf(rtwdev, RF_PATH_B, RR_POW, RR_POW_SYN_V1, 0x0);
	}
}

static void rtw8922a_set_syn01(struct rtw89_dev *rtwdev, enum _rf_syn_pow syn)
{
	struct rtw89_hal *hal = &rtwdev->hal;

	rtw89_debug(rtwdev, RTW89_DBG_RFK, "SYN config=%d\n", syn);

	if (hal->cv == CHIP_CAV)
		rtw8922a_set_syn01_cav(rtwdev, syn);
	else
		rtw8922a_set_syn01_cbv(rtwdev, syn);
}

static void rtw8922a_chlk_ktbl_sel(struct rtw89_dev *rtwdev, u8 kpath, u8 idx)
{
	u32 tmp;

	if (idx > 2) {
		rtw89_warn(rtwdev, "[DBCC][ERROR] index is out of limit!! index(%d)\n", idx);
		return;
	}

	if (kpath & RF_A) {
		rtw89_phy_write32_mask(rtwdev, R_COEF_SEL, B_COEF_SEL_EN, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_COEF_SEL, B_COEF_SEL_IQC_V1, idx);
		rtw89_phy_write32_mask(rtwdev, R_COEF_SEL, B_COEF_SEL_MDPD_V1, idx);
		rtw89_write_rf(rtwdev, RF_PATH_A, RR_MODOPT, RR_TXG_SEL, 0x4 | idx);

		tmp = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL, BIT(0));
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G3, tmp);
		tmp = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL, BIT(1));
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT, B_CFIR_LUT_G5, tmp);
	}

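	/* Path B mirrors the path A sequence above via the C1 copies of the
	 * coefficient-select and CFIR LUT registers.
	 */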
	if (kpath & RF_B) {
		rtw89_phy_write32_mask(rtwdev, R_COEF_SEL_C1, B_COEF_SEL_EN, 0x1);
		rtw89_phy_write32_mask(rtwdev, R_COEF_SEL_C1, B_COEF_SEL_IQC_V1, idx);
		rtw89_phy_write32_mask(rtwdev, R_COEF_SEL_C1, B_COEF_SEL_MDPD_V1, idx);
		rtw89_write_rf(rtwdev, RF_PATH_B, RR_MODOPT, RR_TXG_SEL, 0x4 | idx);

		tmp = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL_C1, BIT(0));
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT_C1, B_CFIR_LUT_G3, tmp);
		tmp = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL_C1, BIT(1));
		rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT_C1, B_CFIR_LUT_G5, tmp);
	}
}

static void rtw8922a_chlk_reload(struct rtw89_dev *rtwdev)
{
	struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
	struct rtw89_rfk_chan_desc desc[__RTW89_RFK_CHS_NR_V1] = {};
	enum rtw89_sub_entity_idx sub_entity_idx;
	const struct rtw89_chan *chan;
	enum rtw89_entity_mode mode;
	u8 s0_tbl, s1_tbl;
	u8 tbl_sel;

	mode = rtw89_get_entity_mode(rtwdev);
	switch (mode) {
	case RTW89_ENTITY_MODE_MCC_PREPARE:
		sub_entity_idx = RTW89_SUB_ENTITY_1;
		break;
	default:
		sub_entity_idx = RTW89_SUB_ENTITY_0;
		break;
	}

	chan = rtw89_chan_get(rtwdev, sub_entity_idx);

	for (tbl_sel = 0; tbl_sel < ARRAY_SIZE(desc); tbl_sel++) {
		struct rtw89_rfk_chan_desc *p = &desc[tbl_sel];

		p->ch = rfk_mcc->ch[tbl_sel];

		p->has_band = true;
		p->band = rfk_mcc->band[tbl_sel];

		p->has_bw = true;
		p->bw = rfk_mcc->bw[tbl_sel];
	}

	tbl_sel = rtw89_rfk_chan_lookup(rtwdev, desc, ARRAY_SIZE(desc), chan);

	rfk_mcc->ch[tbl_sel] = chan->channel;
	rfk_mcc->band[tbl_sel] = chan->band_type;
	rfk_mcc->bw[tbl_sel] = chan->band_width;
	rfk_mcc->table_idx = tbl_sel;

	s0_tbl = tbl_sel;
	s1_tbl = tbl_sel;

	rtw8922a_chlk_ktbl_sel(rtwdev, RF_A, s0_tbl);
	rtw8922a_chlk_ktbl_sel(rtwdev, RF_B, s1_tbl);
}

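/* Pick the synthesizer power configuration from the current MLO/DBCC mode
 * (both synthesizers stay on for MLO 1+1 single-RF and legacy DBCC, otherwise
 * only one is kept powered), then reselect the calibration table for the
 * current channel. With DBCC disabled, only the table reload is performed.
 */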
static void rtw8922a_rfk_mlo_ctrl(struct rtw89_dev *rtwdev)
{
	enum _rf_syn_pow syn_pow;

	if (!rtwdev->dbcc_en)
		goto set_rfk_reload;

	switch (rtwdev->mlo_dbcc_mode) {
	case MLO_0_PLUS_2_1RF:
		syn_pow = RF_SYN_OFF_ON;
		break;
	case MLO_0_PLUS_2_2RF:
	case MLO_1_PLUS_1_2RF:
	case MLO_2_PLUS_0_1RF:
	case MLO_2_PLUS_0_2RF:
	case MLO_2_PLUS_2_2RF:
	case MLO_DBCC_NOT_SUPPORT:
	default:
		syn_pow = RF_SYN_ON_OFF;
		break;
	case MLO_1_PLUS_1_1RF:
	case DBCC_LEGACY:
		syn_pow = RF_SYN_ALLON;
		break;
	}

	rtw8922a_set_syn01(rtwdev, syn_pow);

set_rfk_reload:
	rtw8922a_chlk_reload(rtwdev);
}

static void rtw8922a_rfk_pll_init(struct rtw89_dev *rtwdev)
{
	int ret;
	u8 tmp;

	ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_PLL_1, &tmp);
	if (ret)
		return;
	ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_PLL_1, tmp | 0xf8, 0xFF);
	if (ret)
		return;

	ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_APBT, &tmp);
	if (ret)
		return;
	ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_APBT, tmp & ~0x60, 0xFF);
	if (ret)
		return;

	ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_XTAL_PLL, &tmp);
	if (ret)
		return;
	ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_PLL, tmp | 0x38, 0xFF);
	if (ret)
		return;
}

void rtw8922a_rfk_hw_init(struct rtw89_dev *rtwdev)
{
	if (rtwdev->dbcc_en)
		rtw8922a_rfk_mlo_ctrl(rtwdev);

	rtw8922a_rfk_pll_init(rtwdev);
}

void rtw8922a_pre_set_channel_rf(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
	bool mlo_1_1;

	if (!rtwdev->dbcc_en)
		return;

	mlo_1_1 = rtw89_is_mlo_1_1(rtwdev);
	if (mlo_1_1)
		rtw8922a_set_syn01(rtwdev, RF_SYN_ALLON);
	else if (phy_idx == RTW89_PHY_0)
		rtw8922a_set_syn01(rtwdev, RF_SYN_ON_OFF);
	else
		rtw8922a_set_syn01(rtwdev, RF_SYN_OFF_ON);

	fsleep(1000);
}

void rtw8922a_post_set_channel_rf(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
	rtw8922a_rfk_mlo_ctrl(rtwdev);
}