// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019 Realtek Corporation
 */

#include "main.h"
#include "mac.h"
#include "reg.h"
#include "fw.h"
#include "debug.h"
#include "sdio.h"

void rtw_set_channel_mac(struct rtw_dev *rtwdev, u8 channel, u8 bw,
			 u8 primary_ch_idx)
{
	u8 txsc40 = 0, txsc20 = 0;
	u32 value32;
	u8 value8;

	txsc20 = primary_ch_idx;
	if (bw == RTW_CHANNEL_WIDTH_80) {
		if (txsc20 == RTW_SC_20_UPPER || txsc20 == RTW_SC_20_UPMOST)
			txsc40 = RTW_SC_40_UPPER;
		else
			txsc40 = RTW_SC_40_LOWER;
	}
	rtw_write8(rtwdev, REG_DATA_SC,
		   BIT_TXSC_20M(txsc20) | BIT_TXSC_40M(txsc40));

	value32 = rtw_read32(rtwdev, REG_WMAC_TRXPTCL_CTL);
	value32 &= ~BIT_RFMOD;
	switch (bw) {
	case RTW_CHANNEL_WIDTH_80:
		value32 |= BIT_RFMOD_80M;
		break;
	case RTW_CHANNEL_WIDTH_40:
		value32 |= BIT_RFMOD_40M;
		break;
	case RTW_CHANNEL_WIDTH_20:
	default:
		break;
	}
	rtw_write32(rtwdev, REG_WMAC_TRXPTCL_CTL, value32);

	if (rtw_chip_wcpu_11n(rtwdev))
		return;

	value32 = rtw_read32(rtwdev, REG_AFE_CTRL1) & ~(BIT_MAC_CLK_SEL);
	value32 |= (MAC_CLK_HW_DEF_80M << BIT_SHIFT_MAC_CLK_SEL);
	rtw_write32(rtwdev, REG_AFE_CTRL1, value32);

	rtw_write8(rtwdev, REG_USTIME_TSF, MAC_CLK_SPEED);
	rtw_write8(rtwdev, REG_USTIME_EDCA, MAC_CLK_SPEED);

	value8 = rtw_read8(rtwdev, REG_CCK_CHECK);
	value8 = value8 & ~BIT_CHECK_CCK_EN;
	if (IS_CH_5G_BAND(channel))
		value8 |= BIT_CHECK_CCK_EN;
	rtw_write8(rtwdev, REG_CCK_CHECK, value8);
}
EXPORT_SYMBOL(rtw_set_channel_mac);

static int rtw_mac_pre_system_cfg(struct rtw_dev *rtwdev)
{
	unsigned int retry;
	u32 value32;
	u8 value8;

	rtw_write8(rtwdev, REG_RSV_CTRL, 0);

	if (rtw_chip_wcpu_11n(rtwdev)) {
		if (rtw_read32(rtwdev, REG_SYS_CFG1) & BIT_LDO)
			rtw_write8(rtwdev, REG_LDO_SWR_CTRL, LDO_SEL);
		else
			rtw_write8(rtwdev, REG_LDO_SWR_CTRL, SPS_SEL);
		return 0;
	}

	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_PCIE:
		rtw_write32_set(rtwdev, REG_HCI_OPT_CTRL, BIT_USB_SUS_DIS);
		break;
	case RTW_HCI_TYPE_SDIO:
		rtw_write8_clr(rtwdev, REG_SDIO_HSUS_CTRL, BIT_HCI_SUS_REQ);

		for (retry = 0; retry < RTW_PWR_POLLING_CNT; retry++) {
			if (rtw_read8(rtwdev, REG_SDIO_HSUS_CTRL) & BIT_HCI_RESUME_RDY)
				break;

			usleep_range(10, 50);
		}

		if (retry == RTW_PWR_POLLING_CNT) {
			rtw_err(rtwdev, "failed to poll REG_SDIO_HSUS_CTRL[1]");
			return -ETIMEDOUT;
		}

		if (rtw_sdio_is_sdio30_supported(rtwdev))
			rtw_write8_set(rtwdev, REG_HCI_OPT_CTRL + 2,
				       BIT_SDIO_PAD_E5 >> 16);
		else
			rtw_write8_clr(rtwdev, REG_HCI_OPT_CTRL + 2,
				       BIT_SDIO_PAD_E5 >> 16);
		break;
	case RTW_HCI_TYPE_USB:
		break;
	default:
		return -EINVAL;
	}

	/* config PIN Mux */
	value32 = rtw_read32(rtwdev, REG_PAD_CTRL1);
	value32 |= BIT_PAPE_WLBT_SEL | BIT_LNAON_WLBT_SEL;
	rtw_write32(rtwdev, REG_PAD_CTRL1, value32);

	value32 = rtw_read32(rtwdev, REG_LED_CFG);
	value32 &= ~(BIT_PAPE_SEL_EN | BIT_LNAON_SEL_EN);
	rtw_write32(rtwdev, REG_LED_CFG, value32);

	value32 = rtw_read32(rtwdev, REG_GPIO_MUXCFG);
	value32 |= BIT_WLRFE_4_5_EN;
	rtw_write32(rtwdev, REG_GPIO_MUXCFG, value32);

	/* disable BB/RF */
	value8 = rtw_read8(rtwdev, REG_SYS_FUNC_EN);
	value8 &= ~(BIT_FEN_BB_RSTB | BIT_FEN_BB_GLB_RST);
	rtw_write8(rtwdev, REG_SYS_FUNC_EN, value8);

	value8 = rtw_read8(rtwdev, REG_RF_CTRL);
	value8 &= ~(BIT_RF_SDM_RSTB |
		    BIT_RF_RSTB | BIT_RF_EN);
	rtw_write8(rtwdev, REG_RF_CTRL, value8);

	value32 = rtw_read32(rtwdev, REG_WLRF1);
	value32 &= ~BIT_WLRF1_BBRF_EN;
	rtw_write32(rtwdev, REG_WLRF1, value32);

	return 0;
}

static bool do_pwr_poll_cmd(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 target)
{
	u32 val;

	target &= mask;

	return read_poll_timeout_atomic(rtw_read8, val, (val & mask) == target,
					50, 50 * RTW_PWR_POLLING_CNT, false,
					rtwdev, addr) == 0;
}

static int rtw_pwr_cmd_polling(struct rtw_dev *rtwdev,
			       const struct rtw_pwr_seq_cmd *cmd)
{
	u8 value;
	u32 offset;

	if (cmd->base == RTW_PWR_ADDR_SDIO)
		offset = cmd->offset | SDIO_LOCAL_OFFSET;
	else
		offset = cmd->offset;

	if (do_pwr_poll_cmd(rtwdev, offset, cmd->mask, cmd->value))
		return 0;

	if (rtw_hci_type(rtwdev) != RTW_HCI_TYPE_PCIE)
		goto err;

	/* if PCIE, toggle BIT_PFM_WOWL and try again */
	value = rtw_read8(rtwdev, REG_SYS_PW_CTRL);
	if (rtwdev->chip->id == RTW_CHIP_TYPE_8723D)
		rtw_write8(rtwdev, REG_SYS_PW_CTRL, value & ~BIT_PFM_WOWL);
	rtw_write8(rtwdev, REG_SYS_PW_CTRL, value | BIT_PFM_WOWL);
	rtw_write8(rtwdev, REG_SYS_PW_CTRL, value & ~BIT_PFM_WOWL);
	if (rtwdev->chip->id == RTW_CHIP_TYPE_8723D)
		rtw_write8(rtwdev, REG_SYS_PW_CTRL, value | BIT_PFM_WOWL);

	if (do_pwr_poll_cmd(rtwdev, offset, cmd->mask, cmd->value))
		return 0;

err:
	rtw_err(rtwdev, "failed to poll offset=0x%x mask=0x%x value=0x%x\n",
		offset, cmd->mask, cmd->value);
	return -EBUSY;
}

static int rtw_sub_pwr_seq_parser(struct rtw_dev *rtwdev, u8 intf_mask,
				  u8 cut_mask,
				  const struct rtw_pwr_seq_cmd *cmd)
{
	const struct rtw_pwr_seq_cmd *cur_cmd;
	u32 offset;
	u8 value;

	for (cur_cmd = cmd; cur_cmd->cmd != RTW_PWR_CMD_END; cur_cmd++) {
		if (!(cur_cmd->intf_mask & intf_mask) ||
		    !(cur_cmd->cut_mask & cut_mask))
			continue;

		switch (cur_cmd->cmd) {
		case RTW_PWR_CMD_WRITE:
			offset = cur_cmd->offset;

			if (cur_cmd->base == RTW_PWR_ADDR_SDIO)
				offset |= SDIO_LOCAL_OFFSET;

			value = rtw_read8(rtwdev, offset);
			value &= ~cur_cmd->mask;
			value |= (cur_cmd->value & cur_cmd->mask);
			rtw_write8(rtwdev, offset, value);
			break;
		case RTW_PWR_CMD_POLLING:
			if (rtw_pwr_cmd_polling(rtwdev, cur_cmd))
				return -EBUSY;
			break;
		case RTW_PWR_CMD_DELAY:
			if (cur_cmd->value == RTW_PWR_DELAY_US)
				udelay(cur_cmd->offset);
			else
				mdelay(cur_cmd->offset);
			break;
		case RTW_PWR_CMD_READ:
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

static int rtw_pwr_seq_parser(struct rtw_dev *rtwdev,
			      const struct rtw_pwr_seq_cmd **cmd_seq)
{
	u8 cut_mask;
	u8 intf_mask;
	u8 cut;
	u32 idx = 0;
	const struct rtw_pwr_seq_cmd *cmd;
	int ret;

	cut = rtwdev->hal.cut_version;
	cut_mask = cut_version_to_mask(cut);
	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_PCIE:
		intf_mask = RTW_PWR_INTF_PCI_MSK;
		break;
	case RTW_HCI_TYPE_USB:
		intf_mask = RTW_PWR_INTF_USB_MSK;
		break;
	case RTW_HCI_TYPE_SDIO:
		intf_mask = RTW_PWR_INTF_SDIO_MSK;
		break;
	default:
		return -EINVAL;
	}

	do {
		cmd = cmd_seq[idx];
		if (!cmd)
			break;

		ret = rtw_sub_pwr_seq_parser(rtwdev, intf_mask, cut_mask, cmd);
		if (ret)
			return ret;

		idx++;
	} while (1);

	return 0;
}
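
/* Run the chip's power-on or power-off sequence. Returns -EALREADY when
 * the MAC is already in the requested power state.
 */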
static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	const struct rtw_pwr_seq_cmd **pwr_seq;
	u32 imr = 0;
	u8 rpwm;
	bool cur_pwr;
	int ret;

	if (rtw_chip_wcpu_11ac(rtwdev)) {
		rpwm = rtw_read8(rtwdev, rtwdev->hci.rpwm_addr);

		/* Check FW still exist or not */
		if (rtw_read16(rtwdev, REG_MCUFW_CTRL) == 0xC078) {
			rpwm = (rpwm ^ BIT_RPWM_TOGGLE) & BIT_RPWM_TOGGLE;
			rtw_write8(rtwdev, rtwdev->hci.rpwm_addr, rpwm);
		}
	}

	if (rtw_read8(rtwdev, REG_CR) == 0xea)
		cur_pwr = false;
	else if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB &&
		 (rtw_read8(rtwdev, REG_SYS_STATUS1 + 1) & BIT(0)))
		cur_pwr = false;
	else
		cur_pwr = true;

	if (pwr_on == cur_pwr)
		return -EALREADY;

	if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_SDIO) {
		imr = rtw_read32(rtwdev, REG_SDIO_HIMR);
		rtw_write32(rtwdev, REG_SDIO_HIMR, 0);
	}

	if (!pwr_on)
		clear_bit(RTW_FLAG_POWERON, rtwdev->flags);

	pwr_seq = pwr_on ? chip->pwr_on_seq : chip->pwr_off_seq;
	ret = rtw_pwr_seq_parser(rtwdev, pwr_seq);

	if (pwr_on && rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB) {
		if (chip->id == RTW_CHIP_TYPE_8822C ||
		    chip->id == RTW_CHIP_TYPE_8822B ||
		    chip->id == RTW_CHIP_TYPE_8821C)
			rtw_write8_clr(rtwdev, REG_SYS_STATUS1 + 1, BIT(0));
	}

	if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_SDIO)
		rtw_write32(rtwdev, REG_SDIO_HIMR, imr);

	if (!ret && pwr_on)
		set_bit(RTW_FLAG_POWERON, rtwdev->flags);

	return ret;
}

static int __rtw_mac_init_system_cfg(struct rtw_dev *rtwdev)
{
	u8 sys_func_en = rtwdev->chip->sys_func_en;
	u8 value8;
	u32 value, tmp;

	value = rtw_read32(rtwdev, REG_CPU_DMEM_CON);
	value |= BIT_WL_PLATFORM_RST | BIT_DDMA_EN;
	rtw_write32(rtwdev, REG_CPU_DMEM_CON, value);

	rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, sys_func_en);
	value8 = (rtw_read8(rtwdev, REG_CR_EXT + 3) & 0xF0) | 0x0C;
	rtw_write8(rtwdev, REG_CR_EXT + 3, value8);

	/* disable boot-from-flash for driver's DL FW */
	tmp = rtw_read32(rtwdev, REG_MCUFW_CTRL);
	if (tmp & BIT_BOOT_FSPI_EN) {
		rtw_write32(rtwdev, REG_MCUFW_CTRL, tmp & (~BIT_BOOT_FSPI_EN));
		value = rtw_read32(rtwdev, REG_GPIO_MUXCFG) & (~BIT_FSPI_EN);
		rtw_write32(rtwdev, REG_GPIO_MUXCFG, value);
	}

	return 0;
}

static int __rtw_mac_init_system_cfg_legacy(struct rtw_dev *rtwdev)
{
	rtw_write8(rtwdev, REG_CR, 0xff);
	mdelay(2);
	rtw_write8(rtwdev, REG_HWSEQ_CTRL, 0x7f);
	mdelay(2);

	rtw_write8_set(rtwdev, REG_SYS_CLKR, BIT_WAKEPAD_EN);
	rtw_write16_clr(rtwdev, REG_GPIO_MUXCFG, BIT_EN_SIC);

	rtw_write16(rtwdev, REG_CR, 0x2ff);

	return 0;
}

static int rtw_mac_init_system_cfg(struct rtw_dev *rtwdev)
{
	if (rtw_chip_wcpu_11n(rtwdev))
		return __rtw_mac_init_system_cfg_legacy(rtwdev);

	return __rtw_mac_init_system_cfg(rtwdev);
}

int rtw_mac_power_on(struct rtw_dev *rtwdev)
{
	int ret = 0;

	ret = rtw_mac_pre_system_cfg(rtwdev);
	if (ret)
		goto err;

	ret = rtw_mac_power_switch(rtwdev, true);
	if (ret == -EALREADY) {
		rtw_mac_power_switch(rtwdev, false);

		ret = rtw_mac_pre_system_cfg(rtwdev);
		if (ret)
			goto err;

		ret = rtw_mac_power_switch(rtwdev, true);
		if (ret)
			goto err;
	} else if (ret) {
		goto err;
	}

	ret = rtw_mac_init_system_cfg(rtwdev);
	if (ret)
		goto err;

	return 0;

err:
	rtw_err(rtwdev, "mac power on failed");
	return ret;
}

void rtw_mac_power_off(struct rtw_dev *rtwdev)
{
	rtw_mac_power_switch(rtwdev, false);
}

static bool check_firmware_size(const u8 *data, u32 size)
{
	const struct rtw_fw_hdr *fw_hdr = (const struct rtw_fw_hdr *)data;
	u32 dmem_size;
	u32 imem_size;
	u32 emem_size;
	u32 real_size;

	dmem_size = le32_to_cpu(fw_hdr->dmem_size);
	imem_size = le32_to_cpu(fw_hdr->imem_size);
	emem_size = (fw_hdr->mem_usage & BIT(4)) ?
		    le32_to_cpu(fw_hdr->emem_size) : 0;

	dmem_size += FW_HDR_CHKSUM_SIZE;
	imem_size += FW_HDR_CHKSUM_SIZE;
	emem_size += emem_size ? FW_HDR_CHKSUM_SIZE : 0;
	real_size = FW_HDR_SIZE + dmem_size + imem_size + emem_size;
	if (real_size != size)
		return false;

	return true;
}

static void wlan_cpu_enable(struct rtw_dev *rtwdev, bool enable)
{
	if (enable) {
		/* cpu io interface enable */
		rtw_write8_set(rtwdev, REG_RSV_CTRL + 1, BIT_WLMCU_IOIF);

		/* cpu enable */
		rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);
	} else {
		/* cpu disable */
		rtw_write8_clr(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);

		/* cpu io interface disable */
		rtw_write8_clr(rtwdev, REG_RSV_CTRL + 1, BIT_WLMCU_IOIF);
	}
}

#define DLFW_RESTORE_REG_NUM 6

static void download_firmware_reg_backup(struct rtw_dev *rtwdev,
					 struct rtw_backup_info *bckp)
{
	u8 tmp;
	u8 bckp_idx = 0;

	/* set HIQ to hi priority */
	bckp[bckp_idx].len = 1;
	bckp[bckp_idx].reg = REG_TXDMA_PQ_MAP + 1;
	bckp[bckp_idx].val = rtw_read8(rtwdev, REG_TXDMA_PQ_MAP + 1);
	bckp_idx++;
	tmp = RTW_DMA_MAPPING_HIGH << 6;
	rtw_write8(rtwdev, REG_TXDMA_PQ_MAP + 1, tmp);

	/* DLFW only use HIQ, map HIQ to hi priority */
	bckp[bckp_idx].len = 1;
	bckp[bckp_idx].reg = REG_CR;
	bckp[bckp_idx].val = rtw_read8(rtwdev, REG_CR);
	bckp_idx++;
	bckp[bckp_idx].len = 4;
	bckp[bckp_idx].reg = REG_H2CQ_CSR;
	bckp[bckp_idx].val = BIT_H2CQ_FULL;
	bckp_idx++;
	tmp = BIT_HCI_TXDMA_EN | BIT_TXDMA_EN;
	rtw_write8(rtwdev, REG_CR, tmp);
	rtw_write32(rtwdev, REG_H2CQ_CSR, BIT_H2CQ_FULL);

	/* Config hi priority queue and public priority queue page number */
	bckp[bckp_idx].len = 2;
	bckp[bckp_idx].reg = REG_FIFOPAGE_INFO_1;
	bckp[bckp_idx].val = rtw_read16(rtwdev, REG_FIFOPAGE_INFO_1);
	bckp_idx++;
	bckp[bckp_idx].len = 4;
	bckp[bckp_idx].reg = REG_RQPN_CTRL_2;
	bckp[bckp_idx].val = rtw_read32(rtwdev, REG_RQPN_CTRL_2) | BIT_LD_RQPN;
	bckp_idx++;
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_1, 0x200);
	rtw_write32(rtwdev, REG_RQPN_CTRL_2, bckp[bckp_idx - 1].val);

	if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_SDIO)
		rtw_read32(rtwdev, REG_SDIO_FREE_TXPG);

	/* Disable beacon related functions */
	tmp = rtw_read8(rtwdev, REG_BCN_CTRL);
	bckp[bckp_idx].len = 1;
	bckp[bckp_idx].reg = REG_BCN_CTRL;
	bckp[bckp_idx].val = tmp;
	bckp_idx++;
	tmp = (u8)((tmp & (~BIT_EN_BCN_FUNCTION)) | BIT_DIS_TSF_UDT);
	rtw_write8(rtwdev, REG_BCN_CTRL, tmp);

	WARN(bckp_idx != DLFW_RESTORE_REG_NUM, "wrong backup number\n");
}

static void download_firmware_reset_platform(struct rtw_dev *rtwdev)
{
	rtw_write8_clr(rtwdev, REG_CPU_DMEM_CON + 2, BIT_WL_PLATFORM_RST >> 16);
	rtw_write8_clr(rtwdev, REG_SYS_CLK_CTRL + 1, BIT_CPU_CLK_EN >> 8);
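	/* then set the bits back to complete the platform reset toggle */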
	rtw_write8_set(rtwdev, REG_CPU_DMEM_CON + 2, BIT_WL_PLATFORM_RST >> 16);
	rtw_write8_set(rtwdev, REG_SYS_CLK_CTRL + 1, BIT_CPU_CLK_EN >> 8);
}

static void download_firmware_reg_restore(struct rtw_dev *rtwdev,
					  struct rtw_backup_info *bckp,
					  u8 bckp_num)
{
	rtw_restore_reg(rtwdev, bckp, bckp_num);
}

#define TX_DESC_SIZE 48

static int send_firmware_pkt_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
				       const u8 *data, u32 size)
{
	u8 *buf;
	int ret;

	buf = kmemdup(data, size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = rtw_fw_write_data_rsvd_page(rtwdev, pg_addr, buf, size);
	kfree(buf);
	return ret;
}

static int
send_firmware_pkt(struct rtw_dev *rtwdev, u16 pg_addr, const u8 *data, u32 size)
{
	int ret;

	if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB &&
	    !((size + TX_DESC_SIZE) & (512 - 1)))
		size += 1;

	ret = send_firmware_pkt_rsvd_page(rtwdev, pg_addr, data, size);
	if (ret)
		rtw_err(rtwdev, "failed to download rsvd page\n");

	return ret;
}

static int
iddma_enable(struct rtw_dev *rtwdev, u32 src, u32 dst, u32 ctrl)
{
	rtw_write32(rtwdev, REG_DDMA_CH0SA, src);
	rtw_write32(rtwdev, REG_DDMA_CH0DA, dst);
	rtw_write32(rtwdev, REG_DDMA_CH0CTRL, ctrl);

	if (!check_hw_ready(rtwdev, REG_DDMA_CH0CTRL, BIT_DDMACH0_OWN, 0))
		return -EBUSY;

	return 0;
}

static int iddma_download_firmware(struct rtw_dev *rtwdev, u32 src, u32 dst,
				   u32 len, u8 first)
{
	u32 ch0_ctrl = BIT_DDMACH0_CHKSUM_EN | BIT_DDMACH0_OWN;

	if (!check_hw_ready(rtwdev, REG_DDMA_CH0CTRL, BIT_DDMACH0_OWN, 0))
		return -EBUSY;

	ch0_ctrl |= len & BIT_MASK_DDMACH0_DLEN;
	if (!first)
		ch0_ctrl |= BIT_DDMACH0_CHKSUM_CONT;

	if (iddma_enable(rtwdev, src, dst, ch0_ctrl))
		return -EBUSY;

	return 0;
}

int rtw_ddma_to_fw_fifo(struct rtw_dev *rtwdev, u32 ocp_src, u32 size)
{
	u32 ch0_ctrl = BIT_DDMACH0_OWN | BIT_DDMACH0_DDMA_MODE;

	if (!check_hw_ready(rtwdev, REG_DDMA_CH0CTRL, BIT_DDMACH0_OWN, 0)) {
		rtw_dbg(rtwdev, RTW_DBG_FW, "busy to start ddma\n");
		return -EBUSY;
	}

	ch0_ctrl |= size & BIT_MASK_DDMACH0_DLEN;

	if (iddma_enable(rtwdev, ocp_src, OCPBASE_RXBUF_FW_88XX, ch0_ctrl)) {
		rtw_dbg(rtwdev, RTW_DBG_FW, "busy to complete ddma\n");
		return -EBUSY;
	}

	return 0;
}

static bool
check_fw_checksum(struct rtw_dev *rtwdev, u32 addr)
{
	u8 fw_ctrl;

	fw_ctrl = rtw_read8(rtwdev, REG_MCUFW_CTRL);

	if (rtw_read32(rtwdev, REG_DDMA_CH0CTRL) & BIT_DDMACH0_CHKSUM_STS) {
		if (addr < OCPBASE_DMEM_88XX) {
			fw_ctrl |= BIT_IMEM_DW_OK;
			fw_ctrl &= ~BIT_IMEM_CHKSUM_OK;
			rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
		} else {
			fw_ctrl |= BIT_DMEM_DW_OK;
			fw_ctrl &= ~BIT_DMEM_CHKSUM_OK;
			rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
		}

		rtw_err(rtwdev, "invalid fw checksum\n");

		return false;
	}

	if (addr < OCPBASE_DMEM_88XX) {
		fw_ctrl |= (BIT_IMEM_DW_OK | BIT_IMEM_CHKSUM_OK);
		rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
	} else {
		fw_ctrl |= (BIT_DMEM_DW_OK | BIT_DMEM_CHKSUM_OK);
		rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
	}

	return true;
}
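
/* Copy the firmware image to on-chip memory in 4K chunks: each chunk is
 * written into the reserved page area first and then moved to its OCP
 * destination address through DDMA channel 0.
 */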
static int
download_firmware_to_mem(struct rtw_dev *rtwdev, const u8 *data,
			 u32 src, u32 dst, u32 size)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	u32 desc_size = chip->tx_pkt_desc_sz;
	u8 first_part;
	u32 mem_offset;
	u32 residue_size;
	u32 pkt_size;
	u32 max_size = 0x1000;
	u32 val;
	int ret;

	mem_offset = 0;
	first_part = 1;
	residue_size = size;

	val = rtw_read32(rtwdev, REG_DDMA_CH0CTRL);
	val |= BIT_DDMACH0_RESET_CHKSUM_STS;
	rtw_write32(rtwdev, REG_DDMA_CH0CTRL, val);

	while (residue_size) {
		if (residue_size >= max_size)
			pkt_size = max_size;
		else
			pkt_size = residue_size;

		ret = send_firmware_pkt(rtwdev, (u16)(src >> 7),
					data + mem_offset, pkt_size);
		if (ret)
			return ret;

		ret = iddma_download_firmware(rtwdev, OCPBASE_TXBUF_88XX +
					      src + desc_size,
					      dst + mem_offset, pkt_size,
					      first_part);
		if (ret)
			return ret;

		first_part = 0;
		mem_offset += pkt_size;
		residue_size -= pkt_size;
	}

	if (!check_fw_checksum(rtwdev, dst))
		return -EINVAL;

	return 0;
}

static int
start_download_firmware(struct rtw_dev *rtwdev, const u8 *data, u32 size)
{
	const struct rtw_fw_hdr *fw_hdr = (const struct rtw_fw_hdr *)data;
	const u8 *cur_fw;
	u16 val;
	u32 imem_size;
	u32 dmem_size;
	u32 emem_size;
	u32 addr;
	int ret;

	dmem_size = le32_to_cpu(fw_hdr->dmem_size);
	imem_size = le32_to_cpu(fw_hdr->imem_size);
	emem_size = (fw_hdr->mem_usage & BIT(4)) ?
		    le32_to_cpu(fw_hdr->emem_size) : 0;
	dmem_size += FW_HDR_CHKSUM_SIZE;
	imem_size += FW_HDR_CHKSUM_SIZE;
	emem_size += emem_size ? FW_HDR_CHKSUM_SIZE : 0;

	val = (u16)(rtw_read16(rtwdev, REG_MCUFW_CTRL) & 0x3800);
	val |= BIT_MCUFWDL_EN;
	rtw_write16(rtwdev, REG_MCUFW_CTRL, val);

	cur_fw = data + FW_HDR_SIZE;
	addr = le32_to_cpu(fw_hdr->dmem_addr);
	addr &= ~BIT(31);
	ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr, dmem_size);
	if (ret)
		return ret;

	cur_fw = data + FW_HDR_SIZE + dmem_size;
	addr = le32_to_cpu(fw_hdr->imem_addr);
	addr &= ~BIT(31);
	ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr, imem_size);
	if (ret)
		return ret;

	if (emem_size) {
		cur_fw = data + FW_HDR_SIZE + dmem_size + imem_size;
		addr = le32_to_cpu(fw_hdr->emem_addr);
		addr &= ~BIT(31);
		ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr,
					       emem_size);
		if (ret)
			return ret;
	}

	return 0;
}

static int download_firmware_validate(struct rtw_dev *rtwdev)
{
	u32 fw_key;

	if (!check_hw_ready(rtwdev, REG_MCUFW_CTRL, FW_READY_MASK, FW_READY)) {
		fw_key = rtw_read32(rtwdev, REG_FW_DBG7) & FW_KEY_MASK;
		if (fw_key == ILLEGAL_KEY_GROUP)
			rtw_err(rtwdev, "invalid fw key\n");
		return -EINVAL;
	}

	return 0;
}

static void download_firmware_end_flow(struct rtw_dev *rtwdev)
{
	u16 fw_ctrl;

	rtw_write32(rtwdev, REG_TXDMA_STATUS, BTI_PAGE_OVF);

	/* Check IMEM & DMEM checksum is OK or not */
	fw_ctrl = rtw_read16(rtwdev, REG_MCUFW_CTRL);
	if ((fw_ctrl & BIT_CHECK_SUM_OK) != BIT_CHECK_SUM_OK)
		return;

	fw_ctrl = (fw_ctrl | BIT_FW_DW_RDY) & ~BIT_MCUFWDL_EN;
	rtw_write16(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
}

static int __rtw_download_firmware(struct rtw_dev *rtwdev,
				   struct rtw_fw_state *fw)
{
	struct rtw_backup_info bckp[DLFW_RESTORE_REG_NUM];
	const u8 *data = fw->firmware->data;
	u32 size = fw->firmware->size;
	u32 ltecoex_bckp;
	int ret;

	if (!check_firmware_size(data, size))
		return -EINVAL;
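
	/* back up the LTE coex register; it is restored after the download */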
	if (!ltecoex_read_reg(rtwdev, 0x38, &ltecoex_bckp))
		return -EBUSY;

	wlan_cpu_enable(rtwdev, false);

	download_firmware_reg_backup(rtwdev, bckp);
	download_firmware_reset_platform(rtwdev);

	ret = start_download_firmware(rtwdev, data, size);
	if (ret)
		goto dlfw_fail;

	download_firmware_reg_restore(rtwdev, bckp, DLFW_RESTORE_REG_NUM);

	download_firmware_end_flow(rtwdev);

	wlan_cpu_enable(rtwdev, true);

	if (!ltecoex_reg_write(rtwdev, 0x38, ltecoex_bckp)) {
		ret = -EBUSY;
		goto dlfw_fail;
	}

	ret = download_firmware_validate(rtwdev);
	if (ret)
		goto dlfw_fail;

	/* reset desc and index */
	rtw_hci_setup(rtwdev);

	rtwdev->h2c.last_box_num = 0;
	rtwdev->h2c.seq = 0;

	set_bit(RTW_FLAG_FW_RUNNING, rtwdev->flags);

	return 0;

dlfw_fail:
	/* Disable FWDL_EN */
	rtw_write8_clr(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);
	rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);

	return ret;
}

static void en_download_firmware_legacy(struct rtw_dev *rtwdev, bool en)
{
	int try;

	if (en) {
		wlan_cpu_enable(rtwdev, false);
		wlan_cpu_enable(rtwdev, true);

		rtw_write8_set(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);

		for (try = 0; try < 10; try++) {
			if (rtw_read8(rtwdev, REG_MCUFW_CTRL) & BIT_MCUFWDL_EN)
				goto fwdl_ready;
			rtw_write8_set(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);
			msleep(20);
		}
		rtw_err(rtwdev, "failed to check fw download ready\n");
fwdl_ready:
		rtw_write32_clr(rtwdev, REG_MCUFW_CTRL, BIT_ROM_DLEN);
	} else {
		rtw_write8_clr(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);
	}
}

static void
write_firmware_page(struct rtw_dev *rtwdev, u32 page, const u8 *data, u32 size)
{
	u32 val32;
	u32 block_nr;
	u32 remain_size;
	u32 write_addr = FW_START_ADDR_LEGACY;
	const __le32 *ptr = (const __le32 *)data;
	u32 block;
	__le32 remain_data = 0;

	block_nr = size >> DLFW_BLK_SIZE_SHIFT_LEGACY;
	remain_size = size & (DLFW_BLK_SIZE_LEGACY - 1);

	val32 = rtw_read32(rtwdev, REG_MCUFW_CTRL);
	val32 &= ~BIT_ROM_PGE;
	val32 |= (page << BIT_SHIFT_ROM_PGE) & BIT_ROM_PGE;
	rtw_write32(rtwdev, REG_MCUFW_CTRL, val32);

	for (block = 0; block < block_nr; block++) {
		rtw_write32(rtwdev, write_addr, le32_to_cpu(*ptr));

		write_addr += DLFW_BLK_SIZE_LEGACY;
		ptr++;
	}

	if (remain_size) {
		memcpy(&remain_data, ptr, remain_size);
		rtw_write32(rtwdev, write_addr, le32_to_cpu(remain_data));
	}
}

static int
download_firmware_legacy(struct rtw_dev *rtwdev, const u8 *data, u32 size)
{
	u32 page;
	u32 total_page;
	u32 last_page_size;

	data += sizeof(struct rtw_fw_hdr_legacy);
	size -= sizeof(struct rtw_fw_hdr_legacy);

	total_page = size >> DLFW_PAGE_SIZE_SHIFT_LEGACY;
	last_page_size = size & (DLFW_PAGE_SIZE_LEGACY - 1);

	rtw_write8_set(rtwdev, REG_MCUFW_CTRL, BIT_FWDL_CHK_RPT);

	for (page = 0; page < total_page; page++) {
		write_firmware_page(rtwdev, page, data, DLFW_PAGE_SIZE_LEGACY);
		data += DLFW_PAGE_SIZE_LEGACY;
	}
	if (last_page_size)
		write_firmware_page(rtwdev, page, data, last_page_size);

	if (!check_hw_ready(rtwdev, REG_MCUFW_CTRL, BIT_FWDL_CHK_RPT, 1)) {
		rtw_err(rtwdev, "failed to check download firmware report\n");
		return -EINVAL;
	}

	return 0;
}
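
/* Restart the WLAN MCU and poll REG_MCUFW_CTRL until the firmware reports ready */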
static int download_firmware_validate_legacy(struct rtw_dev *rtwdev)
{
	u32 val32;
	int try;

	val32 = rtw_read32(rtwdev, REG_MCUFW_CTRL);
	val32 |= BIT_MCUFWDL_RDY;
	val32 &= ~BIT_WINTINI_RDY;
	rtw_write32(rtwdev, REG_MCUFW_CTRL, val32);

	wlan_cpu_enable(rtwdev, false);
	wlan_cpu_enable(rtwdev, true);

	for (try = 0; try < 10; try++) {
		val32 = rtw_read32(rtwdev, REG_MCUFW_CTRL);
		if ((val32 & FW_READY_LEGACY) == FW_READY_LEGACY)
			return 0;
		msleep(20);
	}

	rtw_err(rtwdev, "failed to validate firmware\n");
	return -EINVAL;
}

static int __rtw_download_firmware_legacy(struct rtw_dev *rtwdev,
					  struct rtw_fw_state *fw)
{
	int ret = 0;

	/* reset firmware if still present */
	if (rtwdev->chip->id == RTW_CHIP_TYPE_8703B &&
	    rtw_read8_mask(rtwdev, REG_MCUFW_CTRL, BIT_RAM_DL_SEL)) {
		rtw_write8(rtwdev, REG_MCUFW_CTRL, 0x00);
	}

	en_download_firmware_legacy(rtwdev, true);
	ret = download_firmware_legacy(rtwdev, fw->firmware->data, fw->firmware->size);
	en_download_firmware_legacy(rtwdev, false);
	if (ret)
		goto out;

	ret = download_firmware_validate_legacy(rtwdev);
	if (ret)
		goto out;

	/* reset desc and index */
	rtw_hci_setup(rtwdev);

	rtwdev->h2c.last_box_num = 0;
	rtwdev->h2c.seq = 0;

	set_bit(RTW_FLAG_FW_RUNNING, rtwdev->flags);

out:
	return ret;
}

static
int _rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw)
{
	if (rtw_chip_wcpu_11n(rtwdev))
		return __rtw_download_firmware_legacy(rtwdev, fw);

	return __rtw_download_firmware(rtwdev, fw);
}

int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw)
{
	int ret;

	ret = _rtw_download_firmware(rtwdev, fw);
	if (ret)
		return ret;

	if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE &&
	    rtwdev->chip->id == RTW_CHIP_TYPE_8821C)
		rtw_fw_set_recover_bt_device(rtwdev);

	return 0;
}

static u32 get_priority_queues(struct rtw_dev *rtwdev, u32 queues)
{
	const struct rtw_rqpn *rqpn = rtwdev->fifo.rqpn;
	u32 prio_queues = 0;

	if (queues & BIT(IEEE80211_AC_VO))
		prio_queues |= BIT(rqpn->dma_map_vo);
	if (queues & BIT(IEEE80211_AC_VI))
		prio_queues |= BIT(rqpn->dma_map_vi);
	if (queues & BIT(IEEE80211_AC_BE))
		prio_queues |= BIT(rqpn->dma_map_be);
	if (queues & BIT(IEEE80211_AC_BK))
		prio_queues |= BIT(rqpn->dma_map_bk);

	return prio_queues;
}

static void __rtw_mac_flush_prio_queue(struct rtw_dev *rtwdev,
				       u32 prio_queue, bool drop)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	const struct rtw_prioq_addr *addr;
	bool wsize;
	u16 avail_page, rsvd_page;
	int i;

	if (prio_queue >= RTW_DMA_MAPPING_MAX)
		return;

	addr = &chip->prioq_addrs->prio[prio_queue];
	wsize = chip->prioq_addrs->wsize;

	/* check if all of the reserved pages are available for 100 msecs */
	for (i = 0; i < 5; i++) {
		rsvd_page = wsize ? rtw_read16(rtwdev, addr->rsvd) :
				    rtw_read8(rtwdev, addr->rsvd);
		avail_page = wsize ? rtw_read16(rtwdev, addr->avail) :
				     rtw_read8(rtwdev, addr->avail);
		if (rsvd_page == avail_page)
			return;

		msleep(20);
	}

	/* priority queue is still not empty, throw a debug message
	 *
	 * Note that if we want to flush the tx queue when having a lot of
	 * traffic (ex, 100Mbps up), some of the packets could be dropped.
	 * And it requires like ~2secs to flush the full priority queue.
	 */
	if (!drop)
		rtw_dbg(rtwdev, RTW_DBG_UNEXP,
			"timed out to flush queue %d\n", prio_queue);
}

static void rtw_mac_flush_prio_queues(struct rtw_dev *rtwdev,
				      u32 prio_queues, bool drop)
{
	u32 q;

	for (q = 0; q < RTW_DMA_MAPPING_MAX; q++)
		if (prio_queues & BIT(q))
			__rtw_mac_flush_prio_queue(rtwdev, q, drop);
}

void rtw_mac_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop)
{
	u32 prio_queues = 0;

	/* If all of the hardware queues are requested to flush,
	 * or the priority queues are not mapped yet,
	 * flush all of the priority queues
	 */
	if (queues == BIT(rtwdev->hw->queues) - 1 || !rtwdev->fifo.rqpn)
		prio_queues = BIT(RTW_DMA_MAPPING_MAX) - 1;
	else
		prio_queues = get_priority_queues(rtwdev, queues);

	rtw_mac_flush_prio_queues(rtwdev, prio_queues, drop);
}

static int txdma_queue_mapping(struct rtw_dev *rtwdev)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	const struct rtw_rqpn *rqpn = NULL;
	u16 txdma_pq_map = 0;

	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_PCIE:
		rqpn = &chip->rqpn_table[1];
		break;
	case RTW_HCI_TYPE_USB:
		if (rtwdev->hci.bulkout_num == 2)
			rqpn = &chip->rqpn_table[2];
		else if (rtwdev->hci.bulkout_num == 3)
			rqpn = &chip->rqpn_table[3];
		else if (rtwdev->hci.bulkout_num == 4)
			rqpn = &chip->rqpn_table[4];
		else
			return -EINVAL;
		break;
	case RTW_HCI_TYPE_SDIO:
		rqpn = &chip->rqpn_table[0];
		break;
	default:
		return -EINVAL;
	}

	rtwdev->fifo.rqpn = rqpn;
	txdma_pq_map |= BIT_TXDMA_HIQ_MAP(rqpn->dma_map_hi);
	txdma_pq_map |= BIT_TXDMA_MGQ_MAP(rqpn->dma_map_mg);
	txdma_pq_map |= BIT_TXDMA_BKQ_MAP(rqpn->dma_map_bk);
	txdma_pq_map |= BIT_TXDMA_BEQ_MAP(rqpn->dma_map_be);
	txdma_pq_map |= BIT_TXDMA_VIQ_MAP(rqpn->dma_map_vi);
	txdma_pq_map |= BIT_TXDMA_VOQ_MAP(rqpn->dma_map_vo);
	rtw_write16(rtwdev, REG_TXDMA_PQ_MAP, txdma_pq_map);

	rtw_write8(rtwdev, REG_CR, 0);
	rtw_write8(rtwdev, REG_CR, MAC_TRX_ENABLE);
	if (rtw_chip_wcpu_11ac(rtwdev))
		rtw_write32(rtwdev, REG_H2CQ_CSR, BIT_H2CQ_FULL);

	if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_SDIO) {
		rtw_read32(rtwdev, REG_SDIO_FREE_TXPG);
		rtw_write32(rtwdev, REG_SDIO_TX_CTRL, 0);
	} else if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB) {
		rtw_write8_set(rtwdev, REG_TXDMA_PQ_MAP, BIT_RXDMA_ARBBW_EN);
	}

	return 0;
}

static int set_trx_fifo_info(struct rtw_dev *rtwdev)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	u16 cur_pg_addr;
	u8 csi_buf_pg_num = chip->csi_buf_pg_num;

	/* config rsvd page num */
	fifo->rsvd_drv_pg_num = chip->rsvd_drv_pg_num;
	fifo->txff_pg_num = chip->txff_size >> 7;
	if (rtw_chip_wcpu_11n(rtwdev))
		fifo->rsvd_pg_num = fifo->rsvd_drv_pg_num;
	else
		fifo->rsvd_pg_num = fifo->rsvd_drv_pg_num +
				    RSVD_PG_H2C_EXTRAINFO_NUM +
				    RSVD_PG_H2C_STATICINFO_NUM +
				    RSVD_PG_H2CQ_NUM +
				    RSVD_PG_CPU_INSTRUCTION_NUM +
				    RSVD_PG_FW_TXBUF_NUM +
				    csi_buf_pg_num;

	if (fifo->rsvd_pg_num > fifo->txff_pg_num)
		return -ENOMEM;

	fifo->acq_pg_num = fifo->txff_pg_num - fifo->rsvd_pg_num;
	fifo->rsvd_boundary = fifo->txff_pg_num - fifo->rsvd_pg_num;
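
	/* reserved pages are carved from the top of the TX FIFO downwards */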
	cur_pg_addr = fifo->txff_pg_num;
	if (rtw_chip_wcpu_11ac(rtwdev)) {
		cur_pg_addr -= csi_buf_pg_num;
		fifo->rsvd_csibuf_addr = cur_pg_addr;
		cur_pg_addr -= RSVD_PG_FW_TXBUF_NUM;
		fifo->rsvd_fw_txbuf_addr = cur_pg_addr;
		cur_pg_addr -= RSVD_PG_CPU_INSTRUCTION_NUM;
		fifo->rsvd_cpu_instr_addr = cur_pg_addr;
		cur_pg_addr -= RSVD_PG_H2CQ_NUM;
		fifo->rsvd_h2cq_addr = cur_pg_addr;
		cur_pg_addr -= RSVD_PG_H2C_STATICINFO_NUM;
		fifo->rsvd_h2c_sta_info_addr = cur_pg_addr;
		cur_pg_addr -= RSVD_PG_H2C_EXTRAINFO_NUM;
		fifo->rsvd_h2c_info_addr = cur_pg_addr;
	}
	cur_pg_addr -= fifo->rsvd_drv_pg_num;
	fifo->rsvd_drv_addr = cur_pg_addr;

	if (fifo->rsvd_boundary != fifo->rsvd_drv_addr) {
		rtw_err(rtwdev, "wrong rsvd driver address\n");
		return -EINVAL;
	}

	return 0;
}

static int __priority_queue_cfg(struct rtw_dev *rtwdev,
				const struct rtw_page_table *pg_tbl,
				u16 pubq_num)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;

	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_1, pg_tbl->hq_num);
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_2, pg_tbl->lq_num);
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_3, pg_tbl->nq_num);
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_4, pg_tbl->exq_num);
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_5, pubq_num);
	rtw_write32_set(rtwdev, REG_RQPN_CTRL_2, BIT_LD_RQPN);

	rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2, fifo->rsvd_boundary);
	rtw_write8_set(rtwdev, REG_FWHW_TXQ_CTRL + 2, BIT_EN_WR_FREE_TAIL >> 16);

	rtw_write16(rtwdev, REG_BCNQ_BDNY_V1, fifo->rsvd_boundary);
	rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2 + 2, fifo->rsvd_boundary);
	rtw_write16(rtwdev, REG_BCNQ1_BDNY_V1, fifo->rsvd_boundary);
	rtw_write32(rtwdev, REG_RXFF_BNDY, chip->rxff_size - C2H_PKT_BUF - 1);

	if (rtwdev->hci.type == RTW_HCI_TYPE_USB) {
		rtw_write8_mask(rtwdev, REG_AUTO_LLT_V1, BIT_MASK_BLK_DESC_NUM,
				chip->usb_tx_agg_desc_num);

		rtw_write8(rtwdev, REG_AUTO_LLT_V1 + 3, chip->usb_tx_agg_desc_num);
		rtw_write8_set(rtwdev, REG_TXDMA_OFFSET_CHK + 1, BIT(1));
	}

	rtw_write8_set(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1);

	if (!check_hw_ready(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1, 0))
		return -EBUSY;

	rtw_write8(rtwdev, REG_CR + 3, 0);

	return 0;
}

static int __priority_queue_cfg_legacy(struct rtw_dev *rtwdev,
				       const struct rtw_page_table *pg_tbl,
				       u16 pubq_num)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	u32 val32;

	val32 = BIT_RQPN_NE(pg_tbl->nq_num, pg_tbl->exq_num);
	rtw_write32(rtwdev, REG_RQPN_NPQ, val32);
	val32 = BIT_RQPN_HLP(pg_tbl->hq_num, pg_tbl->lq_num, pubq_num);
	rtw_write32(rtwdev, REG_RQPN, val32);

	rtw_write8(rtwdev, REG_TRXFF_BNDY, fifo->rsvd_boundary);
	rtw_write16(rtwdev, REG_TRXFF_BNDY + 2, chip->rxff_size - REPORT_BUF - 1);
	rtw_write8(rtwdev, REG_DWBCN0_CTRL + 1, fifo->rsvd_boundary);
	rtw_write8(rtwdev, REG_BCNQ_BDNY, fifo->rsvd_boundary);
	rtw_write8(rtwdev, REG_MGQ_BDNY, fifo->rsvd_boundary);
	rtw_write8(rtwdev, REG_WMAC_LBK_BF_HD, fifo->rsvd_boundary);

	rtw_write32_set(rtwdev, REG_AUTO_LLT, BIT_AUTO_INIT_LLT);

	if (!check_hw_ready(rtwdev, REG_AUTO_LLT, BIT_AUTO_INIT_LLT, 0))
		return -EBUSY;

	return 0;
}
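
/* Pick the page layout for this HCI type and program the priority queues */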
static int priority_queue_cfg(struct rtw_dev *rtwdev)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	const struct rtw_page_table *pg_tbl = NULL;
	u16 pubq_num;
	int ret;

	ret = set_trx_fifo_info(rtwdev);
	if (ret)
		return ret;

	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_PCIE:
		pg_tbl = &chip->page_table[1];
		break;
	case RTW_HCI_TYPE_USB:
		if (rtwdev->hci.bulkout_num == 2)
			pg_tbl = &chip->page_table[2];
		else if (rtwdev->hci.bulkout_num == 3)
			pg_tbl = &chip->page_table[3];
		else if (rtwdev->hci.bulkout_num == 4)
			pg_tbl = &chip->page_table[4];
		else
			return -EINVAL;
		break;
	case RTW_HCI_TYPE_SDIO:
		pg_tbl = &chip->page_table[0];
		break;
	default:
		return -EINVAL;
	}

	pubq_num = fifo->acq_pg_num - pg_tbl->hq_num - pg_tbl->lq_num -
		   pg_tbl->nq_num - pg_tbl->exq_num - pg_tbl->gapq_num;
	if (rtw_chip_wcpu_11n(rtwdev))
		return __priority_queue_cfg_legacy(rtwdev, pg_tbl, pubq_num);
	else
		return __priority_queue_cfg(rtwdev, pg_tbl, pubq_num);
}

static int init_h2c(struct rtw_dev *rtwdev)
{
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	u8 value8;
	u32 value32;
	u32 h2cq_addr;
	u32 h2cq_size;
	u32 h2cq_free;
	u32 wp, rp;

	if (rtw_chip_wcpu_11n(rtwdev))
		return 0;

	h2cq_addr = fifo->rsvd_h2cq_addr << TX_PAGE_SIZE_SHIFT;
	h2cq_size = RSVD_PG_H2CQ_NUM << TX_PAGE_SIZE_SHIFT;

	value32 = rtw_read32(rtwdev, REG_H2C_HEAD);
	value32 = (value32 & 0xFFFC0000) | h2cq_addr;
	rtw_write32(rtwdev, REG_H2C_HEAD, value32);

	value32 = rtw_read32(rtwdev, REG_H2C_READ_ADDR);
	value32 = (value32 & 0xFFFC0000) | h2cq_addr;
	rtw_write32(rtwdev, REG_H2C_READ_ADDR, value32);

	value32 = rtw_read32(rtwdev, REG_H2C_TAIL);
	value32 &= 0xFFFC0000;
	value32 |= (h2cq_addr + h2cq_size);
	rtw_write32(rtwdev, REG_H2C_TAIL, value32);

	value8 = rtw_read8(rtwdev, REG_H2C_INFO);
	value8 = (u8)((value8 & 0xFC) | 0x01);
	rtw_write8(rtwdev, REG_H2C_INFO, value8);

	value8 = rtw_read8(rtwdev, REG_H2C_INFO);
	value8 = (u8)((value8 & 0xFB) | 0x04);
	rtw_write8(rtwdev, REG_H2C_INFO, value8);

	value8 = rtw_read8(rtwdev, REG_TXDMA_OFFSET_CHK + 1);
	value8 = (u8)((value8 & 0x7f) | 0x80);
	rtw_write8(rtwdev, REG_TXDMA_OFFSET_CHK + 1, value8);

	wp = rtw_read32(rtwdev, REG_H2C_PKT_WRITEADDR) & 0x3FFFF;
	rp = rtw_read32(rtwdev, REG_H2C_PKT_READADDR) & 0x3FFFF;
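	/* the H2C ring should be empty right after init: free space equals the queue size */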
	h2cq_free = wp >= rp ? h2cq_size - (wp - rp) : rp - wp;

	if (h2cq_size != h2cq_free) {
		rtw_err(rtwdev, "H2C queue mismatch\n");
		return -EINVAL;
	}

	return 0;
}

static int rtw_init_trx_cfg(struct rtw_dev *rtwdev)
{
	int ret;

	ret = txdma_queue_mapping(rtwdev);
	if (ret)
		return ret;

	ret = priority_queue_cfg(rtwdev);
	if (ret)
		return ret;

	ret = init_h2c(rtwdev);
	if (ret)
		return ret;

	return 0;
}

static int rtw_drv_info_cfg(struct rtw_dev *rtwdev)
{
	u8 value8;

	rtw_write8(rtwdev, REG_RX_DRVINFO_SZ, PHY_STATUS_SIZE);
	if (rtw_chip_wcpu_11ac(rtwdev)) {
		value8 = rtw_read8(rtwdev, REG_TRXFF_BNDY + 1);
		value8 &= 0xF0;
		/* For rxdesc len = 0 issue */
		value8 |= 0xF;
		rtw_write8(rtwdev, REG_TRXFF_BNDY + 1, value8);
	}
	rtw_write32_set(rtwdev, REG_RCR, BIT_APP_PHYSTS);
	rtw_write32_clr(rtwdev, REG_WMAC_OPTION_FUNCTION + 4, BIT(8) | BIT(9));

	return 0;
}

int rtw_mac_init(struct rtw_dev *rtwdev)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	int ret;

	ret = rtw_init_trx_cfg(rtwdev);
	if (ret)
		return ret;

	ret = chip->ops->mac_init(rtwdev);
	if (ret)
		return ret;

	ret = rtw_drv_info_cfg(rtwdev);
	if (ret)
		return ret;

	rtw_hci_interface_cfg(rtwdev);

	return 0;
}