// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2019-2020  Realtek Corporation
 */

#include "debug.h"
#include "efuse.h"
#include "mac.h"
#include "reg.h"

#define EF_FV_OFSET 0x5ea
#define EF_CV_MASK GENMASK(7, 4)
#define EF_CV_INV 15

enum rtw89_efuse_bank {
	RTW89_EFUSE_BANK_WIFI,
	RTW89_EFUSE_BANK_BT,
};

static int rtw89_switch_efuse_bank(struct rtw89_dev *rtwdev,
				   enum rtw89_efuse_bank bank)
{
	u8 val;

	if (rtwdev->chip->chip_id != RTL8852A)
		return 0;

	val = rtw89_read32_mask(rtwdev, R_AX_EFUSE_CTRL_1,
				B_AX_EF_CELL_SEL_MASK);
	if (bank == val)
		return 0;

	rtw89_write32_mask(rtwdev, R_AX_EFUSE_CTRL_1, B_AX_EF_CELL_SEL_MASK,
			   bank);

	val = rtw89_read32_mask(rtwdev, R_AX_EFUSE_CTRL_1,
				B_AX_EF_CELL_SEL_MASK);
	if (bank == val)
		return 0;

	return -EBUSY;
}

static void rtw89_enable_otp_burst_mode(struct rtw89_dev *rtwdev, bool en)
{
	if (en)
		rtw89_write32_set(rtwdev, R_AX_EFUSE_CTRL_1_V1, B_AX_EF_BURST);
	else
		rtw89_write32_clr(rtwdev, R_AX_EFUSE_CTRL_1_V1, B_AX_EF_BURST);
}

static void rtw89_enable_efuse_pwr_cut_ddv(struct rtw89_dev *rtwdev)
{
	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
	struct rtw89_hal *hal = &rtwdev->hal;

	if (chip_id == RTL8852A)
		return;

	rtw89_write8_set(rtwdev, R_AX_PMC_DBG_CTRL2, B_AX_SYSON_DIS_PMCR_AX_WRMSK);
	rtw89_write16_set(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_PWC_EV2EF_B14);

	fsleep(1000);

	rtw89_write16_set(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_PWC_EV2EF_B15);
	rtw89_write16_clr(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_ISO_EB2CORE);
	if (chip_id == RTL8852B && hal->cv == CHIP_CAV)
		rtw89_enable_otp_burst_mode(rtwdev, true);
}

static void rtw89_disable_efuse_pwr_cut_ddv(struct rtw89_dev *rtwdev)
{
	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
	struct rtw89_hal *hal = &rtwdev->hal;

	if (chip_id == RTL8852A)
		return;

	if (chip_id == RTL8852B && hal->cv == CHIP_CAV)
		rtw89_enable_otp_burst_mode(rtwdev, false);

	rtw89_write16_set(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_ISO_EB2CORE);
	rtw89_write16_clr(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_PWC_EV2EF_B15);

	fsleep(1000);

	rtw89_write16_clr(rtwdev, R_AX_SYS_ISO_CTRL, B_AX_PWC_EV2EF_B14);
	rtw89_write8_clr(rtwdev, R_AX_PMC_DBG_CTRL2, B_AX_SYSON_DIS_PMCR_AX_WRMSK);
}

static int rtw89_dump_physical_efuse_map_ddv(struct rtw89_dev *rtwdev, u8 *map,
					     u32 dump_addr, u32 dump_size)
{
	u32 efuse_ctl;
	u32 addr;
	int ret;

	rtw89_enable_efuse_pwr_cut_ddv(rtwdev);

	for (addr = dump_addr; addr < dump_addr + dump_size; addr++) {
		efuse_ctl = u32_encode_bits(addr, B_AX_EF_ADDR_MASK);
		rtw89_write32(rtwdev, R_AX_EFUSE_CTRL, efuse_ctl & ~B_AX_EF_RDY);

		ret = read_poll_timeout_atomic(rtw89_read32, efuse_ctl,
					       efuse_ctl & B_AX_EF_RDY, 1, 1000000,
					       true, rtwdev, R_AX_EFUSE_CTRL);
		if (ret)
			return -EBUSY;

		*map++ = (u8)(efuse_ctl & 0xff);
	}

	rtw89_disable_efuse_pwr_cut_ddv(rtwdev);

	return 0;
}

int rtw89_cnv_efuse_state_ax(struct rtw89_dev *rtwdev, bool idle)
{
	return 0;
}

static int rtw89_dump_physical_efuse_map_dav(struct rtw89_dev *rtwdev, u8 *map,
					     u32 dump_addr, u32 dump_size)
{
	u32 addr;
	u8 val8;
	int err;
	int ret;

	for (addr = dump_addr; addr < dump_addr + dump_size; addr++) {
		ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_CTRL, 0x40, FULL_BIT_MASK);
		if (ret)
			return ret;
		ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_LOW_ADDR,
					      addr & 0xff, XTAL_SI_LOW_ADDR_MASK);
		if (ret)
			return ret;
		ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_CTRL, addr >> 8,
					      XTAL_SI_HIGH_ADDR_MASK);
		if (ret)
			return ret;
		ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_CTRL, 0,
					      XTAL_SI_MODE_SEL_MASK);
		if (ret)
			return ret;

		ret = read_poll_timeout_atomic(rtw89_mac_read_xtal_si, err,
					       !err && (val8 & XTAL_SI_RDY),
					       1, 10000, false,
					       rtwdev, XTAL_SI_CTRL, &val8);
		if (ret) {
			rtw89_warn(rtwdev, "failed to read dav efuse\n");
			return ret;
		}

		ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_READ_VAL, &val8);
		if (ret)
			return ret;
		*map++ = val8;
	}

	return 0;
}

static int rtw89_dump_physical_efuse_map(struct rtw89_dev *rtwdev, u8 *map,
					 u32 dump_addr, u32 dump_size, bool dav)
{
	int ret;

	if (!map || dump_size == 0)
		return 0;

	rtw89_switch_efuse_bank(rtwdev, RTW89_EFUSE_BANK_WIFI);

	if (dav) {
		ret = rtw89_dump_physical_efuse_map_dav(rtwdev, map, dump_addr, dump_size);
		if (ret)
			return ret;
	} else {
		ret = rtw89_dump_physical_efuse_map_ddv(rtwdev, map, dump_addr, dump_size);
		if (ret)
			return ret;
	}

	return 0;
}

#define invalid_efuse_header(hdr1, hdr2) \
	((hdr1) == 0xff || (hdr2) == 0xff)
#define invalid_efuse_content(word_en, i) \
	(((word_en) & BIT(i)) != 0x0)
#define get_efuse_blk_idx(hdr1, hdr2) \
	((((hdr2) & 0xf0) >> 4) | (((hdr1) & 0x0f) << 4))
#define block_idx_to_logical_idx(blk_idx, i) \
	(((blk_idx) << 3) + ((i) << 1))
static int rtw89_dump_logical_efuse_map(struct rtw89_dev *rtwdev, u8 *phy_map,
					u8 *log_map)
{
	u32 physical_size = rtwdev->chip->physical_efuse_size;
	u32 logical_size = rtwdev->chip->logical_efuse_size;
	u8 sec_ctrl_size = rtwdev->chip->sec_ctrl_efuse_size;
	u32 phy_idx = sec_ctrl_size;
	u32 log_idx;
	u8 hdr1, hdr2;
	u8 blk_idx;
	u8 word_en;
	int i;

	if (!phy_map)
		return 0;

	while (phy_idx < physical_size - sec_ctrl_size) {
		hdr1 = phy_map[phy_idx];
		hdr2 = phy_map[phy_idx + 1];
		if (invalid_efuse_header(hdr1, hdr2))
			break;

		blk_idx = get_efuse_blk_idx(hdr1, hdr2);
		word_en = hdr2 & 0xf;
		phy_idx += 2;

		for (i = 0; i < 4; i++) {
			if (invalid_efuse_content(word_en, i))
				continue;

			log_idx = block_idx_to_logical_idx(blk_idx, i);
			if (phy_idx + 1 > physical_size - sec_ctrl_size - 1 ||
			    log_idx + 1 > logical_size)
				return -EINVAL;

			log_map[log_idx] = phy_map[phy_idx];
			log_map[log_idx + 1] = phy_map[phy_idx + 1];
			phy_idx += 2;
		}
	}
	return 0;
}

int rtw89_parse_efuse_map_ax(struct rtw89_dev *rtwdev)
{
	u32 phy_size = rtwdev->chip->physical_efuse_size;
	u32 log_size = rtwdev->chip->logical_efuse_size;
	u32 dav_phy_size = rtwdev->chip->dav_phy_efuse_size;
	u32 dav_log_size = rtwdev->chip->dav_log_efuse_size;
	u32 full_log_size = log_size + dav_log_size;
	u8 *phy_map = NULL;
	u8 *log_map = NULL;
	u8 *dav_phy_map = NULL;
	u8 *dav_log_map = NULL;
	int ret;

	if (rtw89_read16(rtwdev, R_AX_SYS_WL_EFUSE_CTRL) & B_AX_AUTOLOAD_SUS)
		rtwdev->efuse.valid = true;
	else
		rtw89_warn(rtwdev, "failed to check efuse autoload\n");

	phy_map = kmalloc(phy_size, GFP_KERNEL);
	log_map = kmalloc(full_log_size, GFP_KERNEL);
	if (dav_phy_size && dav_log_size) {
		dav_phy_map = kmalloc(dav_phy_size, GFP_KERNEL);
		dav_log_map = log_map + log_size;
	}

	if (!phy_map || !log_map || (dav_phy_size && !dav_phy_map)) {
		ret = -ENOMEM;
		goto out_free;
	}

	ret = rtw89_dump_physical_efuse_map(rtwdev, phy_map, 0, phy_size, false);
	if (ret) {
		rtw89_warn(rtwdev, "failed to dump efuse physical map\n");
		goto out_free;
	}
	ret = rtw89_dump_physical_efuse_map(rtwdev, dav_phy_map, 0, dav_phy_size, true);
	if (ret) {
		rtw89_warn(rtwdev, "failed to dump efuse dav physical map\n");
		goto out_free;
	}

	memset(log_map, 0xff, full_log_size);
	ret = rtw89_dump_logical_efuse_map(rtwdev, phy_map, log_map);
	if (ret) {
		rtw89_warn(rtwdev, "failed to dump efuse logical map\n");
		goto out_free;
	}
	ret = rtw89_dump_logical_efuse_map(rtwdev, dav_phy_map, dav_log_map);
	if (ret) {
		rtw89_warn(rtwdev, "failed to dump efuse dav logical map\n");
		goto out_free;
	}

	rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "log_map: ", log_map, full_log_size);

	ret = rtwdev->chip->ops->read_efuse(rtwdev, log_map, RTW89_EFUSE_BLOCK_IGNORE);
	if (ret) {
		rtw89_warn(rtwdev, "failed to read efuse map\n");
		goto out_free;
	}

out_free:
	kfree(dav_phy_map);
	kfree(log_map);
	kfree(phy_map);

	return ret;
}

int rtw89_parse_phycap_map_ax(struct rtw89_dev *rtwdev)
{
	u32 phycap_addr = rtwdev->chip->phycap_addr;
	u32 phycap_size = rtwdev->chip->phycap_size;
	u8 *phycap_map = NULL;
	int ret = 0;

	if (!phycap_size)
		return 0;

	phycap_map = kmalloc(phycap_size, GFP_KERNEL);
	if (!phycap_map)
		return -ENOMEM;

	ret = rtw89_dump_physical_efuse_map(rtwdev, phycap_map,
					    phycap_addr, phycap_size, false);
	if (ret) {
		rtw89_warn(rtwdev, "failed to dump phycap map\n");
		goto out_free;
	}

	ret = rtwdev->chip->ops->read_phycap(rtwdev, phycap_map);
	if (ret) {
		rtw89_warn(rtwdev, "failed to read phycap map\n");
		goto out_free;
	}

out_free:
	kfree(phycap_map);

	return ret;
}

int rtw89_read_efuse_ver(struct rtw89_dev *rtwdev, u8 *ecv)
{
	int ret;
	u8 val;

	ret = rtw89_dump_physical_efuse_map(rtwdev, &val, EF_FV_OFSET, 1, false);
	if (ret)
		return ret;

	*ecv = u8_get_bits(val, EF_CV_MASK);
	if (*ecv == EF_CV_INV)
		return -ENOENT;

	return 0;
}
EXPORT_SYMBOL(rtw89_read_efuse_ver);