1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* Copyright(c) 2019-2020 Realtek Corporation 3 */ 4 5 #include "cam.h" 6 #include "chan.h" 7 #include "debug.h" 8 #include "efuse.h" 9 #include "fw.h" 10 #include "mac.h" 11 #include "pci.h" 12 #include "ps.h" 13 #include "reg.h" 14 #include "util.h" 15 16 static const u32 rtw89_mac_mem_base_addrs_ax[RTW89_MAC_MEM_NUM] = { 17 [RTW89_MAC_MEM_AXIDMA] = AXIDMA_BASE_ADDR, 18 [RTW89_MAC_MEM_SHARED_BUF] = SHARED_BUF_BASE_ADDR, 19 [RTW89_MAC_MEM_DMAC_TBL] = DMAC_TBL_BASE_ADDR, 20 [RTW89_MAC_MEM_SHCUT_MACHDR] = SHCUT_MACHDR_BASE_ADDR, 21 [RTW89_MAC_MEM_STA_SCHED] = STA_SCHED_BASE_ADDR, 22 [RTW89_MAC_MEM_RXPLD_FLTR_CAM] = RXPLD_FLTR_CAM_BASE_ADDR, 23 [RTW89_MAC_MEM_SECURITY_CAM] = SECURITY_CAM_BASE_ADDR, 24 [RTW89_MAC_MEM_WOW_CAM] = WOW_CAM_BASE_ADDR, 25 [RTW89_MAC_MEM_CMAC_TBL] = CMAC_TBL_BASE_ADDR, 26 [RTW89_MAC_MEM_ADDR_CAM] = ADDR_CAM_BASE_ADDR, 27 [RTW89_MAC_MEM_BA_CAM] = BA_CAM_BASE_ADDR, 28 [RTW89_MAC_MEM_BCN_IE_CAM0] = BCN_IE_CAM0_BASE_ADDR, 29 [RTW89_MAC_MEM_BCN_IE_CAM1] = BCN_IE_CAM1_BASE_ADDR, 30 [RTW89_MAC_MEM_TXD_FIFO_0] = TXD_FIFO_0_BASE_ADDR, 31 [RTW89_MAC_MEM_TXD_FIFO_1] = TXD_FIFO_1_BASE_ADDR, 32 [RTW89_MAC_MEM_TXDATA_FIFO_0] = TXDATA_FIFO_0_BASE_ADDR, 33 [RTW89_MAC_MEM_TXDATA_FIFO_1] = TXDATA_FIFO_1_BASE_ADDR, 34 [RTW89_MAC_MEM_CPU_LOCAL] = CPU_LOCAL_BASE_ADDR, 35 [RTW89_MAC_MEM_BSSID_CAM] = BSSID_CAM_BASE_ADDR, 36 [RTW89_MAC_MEM_TXD_FIFO_0_V1] = TXD_FIFO_0_BASE_ADDR_V1, 37 [RTW89_MAC_MEM_TXD_FIFO_1_V1] = TXD_FIFO_1_BASE_ADDR_V1, 38 }; 39 40 static void rtw89_mac_mem_write(struct rtw89_dev *rtwdev, u32 offset, 41 u32 val, enum rtw89_mac_mem_sel sel) 42 { 43 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 44 u32 addr = mac->mem_base_addrs[sel] + offset; 45 46 rtw89_write32(rtwdev, mac->filter_model_addr, addr); 47 rtw89_write32(rtwdev, mac->indir_access_addr, val); 48 } 49 50 static u32 rtw89_mac_mem_read(struct rtw89_dev *rtwdev, u32 offset, 51 enum rtw89_mac_mem_sel sel) 52 { 53 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 54 u32 addr = mac->mem_base_addrs[sel] + offset; 55 56 rtw89_write32(rtwdev, mac->filter_model_addr, addr); 57 return rtw89_read32(rtwdev, mac->indir_access_addr); 58 } 59 60 int rtw89_mac_check_mac_en(struct rtw89_dev *rtwdev, u8 mac_idx, 61 enum rtw89_mac_hwmod_sel sel) 62 { 63 u32 val, r_val; 64 65 if (sel == RTW89_DMAC_SEL) { 66 r_val = rtw89_read32(rtwdev, R_AX_DMAC_FUNC_EN); 67 val = (B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN); 68 } else if (sel == RTW89_CMAC_SEL && mac_idx == 0) { 69 r_val = rtw89_read32(rtwdev, R_AX_CMAC_FUNC_EN); 70 val = B_AX_CMAC_EN; 71 } else if (sel == RTW89_CMAC_SEL && mac_idx == 1) { 72 r_val = rtw89_read32(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND); 73 val = B_AX_CMAC1_FEN; 74 } else { 75 return -EINVAL; 76 } 77 if (r_val == RTW89_R32_EA || r_val == RTW89_R32_DEAD || 78 (val & r_val) != val) 79 return -EFAULT; 80 81 return 0; 82 } 83 84 int rtw89_mac_write_lte(struct rtw89_dev *rtwdev, const u32 offset, u32 val) 85 { 86 u8 lte_ctrl; 87 int ret; 88 89 ret = read_poll_timeout(rtw89_read8, lte_ctrl, (lte_ctrl & BIT(5)) != 0, 90 50, 50000, false, rtwdev, R_AX_LTE_CTRL + 3); 91 if (ret) 92 rtw89_err(rtwdev, "[ERR]lte not ready(W)\n"); 93 94 rtw89_write32(rtwdev, R_AX_LTE_WDATA, val); 95 rtw89_write32(rtwdev, R_AX_LTE_CTRL, 0xC00F0000 | offset); 96 97 return ret; 98 } 99 100 int rtw89_mac_read_lte(struct rtw89_dev *rtwdev, const u32 offset, u32 *val) 101 { 102 u8 lte_ctrl; 103 int ret; 104 105 ret = read_poll_timeout(rtw89_read8, lte_ctrl, (lte_ctrl & BIT(5)) != 
0, 106 50, 50000, false, rtwdev, R_AX_LTE_CTRL + 3); 107 if (ret) 108 rtw89_err(rtwdev, "[ERR]lte not ready(W)\n"); 109 110 rtw89_write32(rtwdev, R_AX_LTE_CTRL, 0x800F0000 | offset); 111 *val = rtw89_read32(rtwdev, R_AX_LTE_RDATA); 112 113 return ret; 114 } 115 116 static 117 int dle_dfi_ctrl(struct rtw89_dev *rtwdev, struct rtw89_mac_dle_dfi_ctrl *ctrl) 118 { 119 u32 ctrl_reg, data_reg, ctrl_data; 120 u32 val; 121 int ret; 122 123 switch (ctrl->type) { 124 case DLE_CTRL_TYPE_WDE: 125 ctrl_reg = R_AX_WDE_DBG_FUN_INTF_CTL; 126 data_reg = R_AX_WDE_DBG_FUN_INTF_DATA; 127 ctrl_data = FIELD_PREP(B_AX_WDE_DFI_TRGSEL_MASK, ctrl->target) | 128 FIELD_PREP(B_AX_WDE_DFI_ADDR_MASK, ctrl->addr) | 129 B_AX_WDE_DFI_ACTIVE; 130 break; 131 case DLE_CTRL_TYPE_PLE: 132 ctrl_reg = R_AX_PLE_DBG_FUN_INTF_CTL; 133 data_reg = R_AX_PLE_DBG_FUN_INTF_DATA; 134 ctrl_data = FIELD_PREP(B_AX_PLE_DFI_TRGSEL_MASK, ctrl->target) | 135 FIELD_PREP(B_AX_PLE_DFI_ADDR_MASK, ctrl->addr) | 136 B_AX_PLE_DFI_ACTIVE; 137 break; 138 default: 139 rtw89_warn(rtwdev, "[ERR] dfi ctrl type %d\n", ctrl->type); 140 return -EINVAL; 141 } 142 143 rtw89_write32(rtwdev, ctrl_reg, ctrl_data); 144 145 ret = read_poll_timeout_atomic(rtw89_read32, val, !(val & B_AX_WDE_DFI_ACTIVE), 146 1, 1000, false, rtwdev, ctrl_reg); 147 if (ret) { 148 rtw89_warn(rtwdev, "[ERR] dle dfi ctrl 0x%X set 0x%X timeout\n", 149 ctrl_reg, ctrl_data); 150 return ret; 151 } 152 153 ctrl->out_data = rtw89_read32(rtwdev, data_reg); 154 return 0; 155 } 156 157 static int dle_dfi_quota(struct rtw89_dev *rtwdev, 158 struct rtw89_mac_dle_dfi_quota *quota) 159 { 160 struct rtw89_mac_dle_dfi_ctrl ctrl; 161 int ret; 162 163 ctrl.type = quota->dle_type; 164 ctrl.target = DLE_DFI_TYPE_QUOTA; 165 ctrl.addr = quota->qtaid; 166 ret = dle_dfi_ctrl(rtwdev, &ctrl); 167 if (ret) { 168 rtw89_warn(rtwdev, "[ERR]dle_dfi_ctrl %d\n", ret); 169 return ret; 170 } 171 172 quota->rsv_pgnum = FIELD_GET(B_AX_DLE_RSV_PGNUM, ctrl.out_data); 173 quota->use_pgnum = FIELD_GET(B_AX_DLE_USE_PGNUM, ctrl.out_data); 174 return 0; 175 } 176 177 static int dle_dfi_qempty(struct rtw89_dev *rtwdev, 178 struct rtw89_mac_dle_dfi_qempty *qempty) 179 { 180 struct rtw89_mac_dle_dfi_ctrl ctrl; 181 u32 ret; 182 183 ctrl.type = qempty->dle_type; 184 ctrl.target = DLE_DFI_TYPE_QEMPTY; 185 ctrl.addr = qempty->grpsel; 186 ret = dle_dfi_ctrl(rtwdev, &ctrl); 187 if (ret) { 188 rtw89_warn(rtwdev, "[ERR]dle_dfi_ctrl %d\n", ret); 189 return ret; 190 } 191 192 qempty->qempty = FIELD_GET(B_AX_DLE_QEMPTY_GRP, ctrl.out_data); 193 return 0; 194 } 195 196 static void dump_err_status_dispatcher(struct rtw89_dev *rtwdev) 197 { 198 rtw89_info(rtwdev, "R_AX_HOST_DISPATCHER_ALWAYS_IMR=0x%08x ", 199 rtw89_read32(rtwdev, R_AX_HOST_DISPATCHER_ERR_IMR)); 200 rtw89_info(rtwdev, "R_AX_HOST_DISPATCHER_ALWAYS_ISR=0x%08x\n", 201 rtw89_read32(rtwdev, R_AX_HOST_DISPATCHER_ERR_ISR)); 202 rtw89_info(rtwdev, "R_AX_CPU_DISPATCHER_ALWAYS_IMR=0x%08x ", 203 rtw89_read32(rtwdev, R_AX_CPU_DISPATCHER_ERR_IMR)); 204 rtw89_info(rtwdev, "R_AX_CPU_DISPATCHER_ALWAYS_ISR=0x%08x\n", 205 rtw89_read32(rtwdev, R_AX_CPU_DISPATCHER_ERR_ISR)); 206 rtw89_info(rtwdev, "R_AX_OTHER_DISPATCHER_ALWAYS_IMR=0x%08x ", 207 rtw89_read32(rtwdev, R_AX_OTHER_DISPATCHER_ERR_IMR)); 208 rtw89_info(rtwdev, "R_AX_OTHER_DISPATCHER_ALWAYS_ISR=0x%08x\n", 209 rtw89_read32(rtwdev, R_AX_OTHER_DISPATCHER_ERR_ISR)); 210 } 211 212 static void rtw89_mac_dump_qta_lost(struct rtw89_dev *rtwdev) 213 { 214 struct rtw89_mac_dle_dfi_qempty qempty; 215 struct rtw89_mac_dle_dfi_quota quota; 216 struct 
rtw89_mac_dle_dfi_ctrl ctrl;
	u32 val, not_empty, i;
	int ret;

	qempty.dle_type = DLE_CTRL_TYPE_PLE;
	qempty.grpsel = 0;
	qempty.qempty = ~(u32)0;
	ret = dle_dfi_qempty(rtwdev, &qempty);
	if (ret)
		rtw89_warn(rtwdev, "%s: query DLE fail\n", __func__);
	else
		rtw89_info(rtwdev, "DLE group0 empty: 0x%x\n", qempty.qempty);

	for (not_empty = ~qempty.qempty, i = 0; not_empty != 0; not_empty >>= 1, i++) {
		if (!(not_empty & BIT(0)))
			continue;
		ctrl.type = DLE_CTRL_TYPE_PLE;
		ctrl.target = DLE_DFI_TYPE_QLNKTBL;
		ctrl.addr = (QLNKTBL_ADDR_INFO_SEL_0 ? QLNKTBL_ADDR_INFO_SEL : 0) |
			    FIELD_PREP(QLNKTBL_ADDR_TBL_IDX_MASK, i);
		ret = dle_dfi_ctrl(rtwdev, &ctrl);
		if (ret)
			rtw89_warn(rtwdev, "%s: query DLE fail\n", __func__);
		else
			rtw89_info(rtwdev, "qidx%d pktcnt = %ld\n", i,
				   FIELD_GET(QLNKTBL_DATA_SEL1_PKT_CNT_MASK,
					     ctrl.out_data));
	}

	/* quota ID 6 is the CMAC0 RX PLE quota, matching the
	 * B_AX_PLE_Q6_* fields dumped below.
	 */
	quota.dle_type = DLE_CTRL_TYPE_PLE;
	quota.qtaid = 6;
	ret = dle_dfi_quota(rtwdev, &quota);
	if (ret)
		rtw89_warn(rtwdev, "%s: query DLE fail\n", __func__);
	else
		rtw89_info(rtwdev, "quota6 rsv/use: 0x%x/0x%x\n",
			   quota.rsv_pgnum, quota.use_pgnum);

	val = rtw89_read32(rtwdev, R_AX_PLE_QTA6_CFG);
	rtw89_info(rtwdev, "[PLE][CMAC0_RX]min_pgnum=0x%lx\n",
		   FIELD_GET(B_AX_PLE_Q6_MIN_SIZE_MASK, val));
	rtw89_info(rtwdev, "[PLE][CMAC0_RX]max_pgnum=0x%lx\n",
		   FIELD_GET(B_AX_PLE_Q6_MAX_SIZE_MASK, val));

	dump_err_status_dispatcher(rtwdev);
}

static void rtw89_mac_dump_l0_to_l1(struct rtw89_dev *rtwdev,
				    enum mac_ax_err_info err)
{
	u32 dbg, event;

	dbg = rtw89_read32(rtwdev, R_AX_SER_DBG_INFO);
	event = FIELD_GET(B_AX_L0_TO_L1_EVENT_MASK, dbg);

	switch (event) {
	case MAC_AX_L0_TO_L1_RX_QTA_LOST:
		rtw89_info(rtwdev, "quota lost!\n");
		rtw89_mac_dump_qta_lost(rtwdev);
		break;
	default:
		break;
	}
}

static void rtw89_mac_dump_dmac_err_status(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 dmac_err;
	int i, ret;

	ret = rtw89_mac_check_mac_en(rtwdev, 0, RTW89_DMAC_SEL);
	if (ret) {
		rtw89_warn(rtwdev, "[DMAC] : DMAC not enabled\n");
		return;
	}

	dmac_err = rtw89_read32(rtwdev, R_AX_DMAC_ERR_ISR);
	rtw89_info(rtwdev, "R_AX_DMAC_ERR_ISR=0x%08x\n", dmac_err);
	rtw89_info(rtwdev, "R_AX_DMAC_ERR_IMR=0x%08x\n",
		   rtw89_read32(rtwdev, R_AX_DMAC_ERR_IMR));

	if (dmac_err) {
		rtw89_info(rtwdev, "R_AX_WDE_ERR_FLAG_CFG=0x%08x\n",
			   rtw89_read32(rtwdev, R_AX_WDE_ERR_FLAG_CFG_NUM1));
		rtw89_info(rtwdev, "R_AX_PLE_ERR_FLAG_CFG=0x%08x\n",
			   rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_CFG_NUM1));
		if (chip->chip_id == RTL8852C) {
			rtw89_info(rtwdev, "R_AX_PLE_ERRFLAG_MSG=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_PLE_ERRFLAG_MSG));
			rtw89_info(rtwdev, "R_AX_WDE_ERRFLAG_MSG=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_WDE_ERRFLAG_MSG));
			rtw89_info(rtwdev, "R_AX_PLE_DBGERR_LOCKEN=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_PLE_DBGERR_LOCKEN));
			rtw89_info(rtwdev, "R_AX_PLE_DBGERR_STS=0x%08x\n",
				   rtw89_read32(rtwdev, R_AX_PLE_DBGERR_STS));
		}
	}

	if (dmac_err & B_AX_WDRLS_ERR_FLAG) {
		rtw89_info(rtwdev, "R_AX_WDRLS_ERR_IMR=0x%08x\n",
			   rtw89_read32(rtwdev, R_AX_WDRLS_ERR_IMR));
		rtw89_info(rtwdev, "R_AX_WDRLS_ERR_ISR=0x%08x\n",
			   rtw89_read32(rtwdev, R_AX_WDRLS_ERR_ISR));
		if (chip->chip_id == RTL8852C)
			rtw89_info(rtwdev, "R_AX_RPQ_RXBD_IDX=0x%08x\n",
rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX_V1)); 323 else 324 rtw89_info(rtwdev, "R_AX_RPQ_RXBD_IDX=0x%08x\n", 325 rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX)); 326 } 327 328 if (dmac_err & B_AX_WSEC_ERR_FLAG) { 329 if (chip->chip_id == RTL8852C) { 330 rtw89_info(rtwdev, "R_AX_SEC_ERR_IMR=0x%08x\n", 331 rtw89_read32(rtwdev, R_AX_SEC_ERROR_FLAG_IMR)); 332 rtw89_info(rtwdev, "R_AX_SEC_ERR_ISR=0x%08x\n", 333 rtw89_read32(rtwdev, R_AX_SEC_ERROR_FLAG)); 334 rtw89_info(rtwdev, "R_AX_SEC_ENG_CTRL=0x%08x\n", 335 rtw89_read32(rtwdev, R_AX_SEC_ENG_CTRL)); 336 rtw89_info(rtwdev, "R_AX_SEC_MPDU_PROC=0x%08x\n", 337 rtw89_read32(rtwdev, R_AX_SEC_MPDU_PROC)); 338 rtw89_info(rtwdev, "R_AX_SEC_CAM_ACCESS=0x%08x\n", 339 rtw89_read32(rtwdev, R_AX_SEC_CAM_ACCESS)); 340 rtw89_info(rtwdev, "R_AX_SEC_CAM_RDATA=0x%08x\n", 341 rtw89_read32(rtwdev, R_AX_SEC_CAM_RDATA)); 342 rtw89_info(rtwdev, "R_AX_SEC_DEBUG1=0x%08x\n", 343 rtw89_read32(rtwdev, R_AX_SEC_DEBUG1)); 344 rtw89_info(rtwdev, "R_AX_SEC_TX_DEBUG=0x%08x\n", 345 rtw89_read32(rtwdev, R_AX_SEC_TX_DEBUG)); 346 rtw89_info(rtwdev, "R_AX_SEC_RX_DEBUG=0x%08x\n", 347 rtw89_read32(rtwdev, R_AX_SEC_RX_DEBUG)); 348 349 rtw89_write32_mask(rtwdev, R_AX_DBG_CTRL, 350 B_AX_DBG_SEL0, 0x8B); 351 rtw89_write32_mask(rtwdev, R_AX_DBG_CTRL, 352 B_AX_DBG_SEL1, 0x8B); 353 rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1, 354 B_AX_SEL_0XC0_MASK, 1); 355 for (i = 0; i < 0x10; i++) { 356 rtw89_write32_mask(rtwdev, R_AX_SEC_ENG_CTRL, 357 B_AX_SEC_DBG_PORT_FIELD_MASK, i); 358 rtw89_info(rtwdev, "sel=%x,R_AX_SEC_DEBUG2=0x%08x\n", 359 i, rtw89_read32(rtwdev, R_AX_SEC_DEBUG2)); 360 } 361 } else { 362 rtw89_info(rtwdev, "R_AX_SEC_ERR_IMR_ISR=0x%08x\n", 363 rtw89_read32(rtwdev, R_AX_SEC_DEBUG)); 364 rtw89_info(rtwdev, "R_AX_SEC_ENG_CTRL=0x%08x\n", 365 rtw89_read32(rtwdev, R_AX_SEC_ENG_CTRL)); 366 rtw89_info(rtwdev, "R_AX_SEC_MPDU_PROC=0x%08x\n", 367 rtw89_read32(rtwdev, R_AX_SEC_MPDU_PROC)); 368 rtw89_info(rtwdev, "R_AX_SEC_CAM_ACCESS=0x%08x\n", 369 rtw89_read32(rtwdev, R_AX_SEC_CAM_ACCESS)); 370 rtw89_info(rtwdev, "R_AX_SEC_CAM_RDATA=0x%08x\n", 371 rtw89_read32(rtwdev, R_AX_SEC_CAM_RDATA)); 372 rtw89_info(rtwdev, "R_AX_SEC_CAM_WDATA=0x%08x\n", 373 rtw89_read32(rtwdev, R_AX_SEC_CAM_WDATA)); 374 rtw89_info(rtwdev, "R_AX_SEC_TX_DEBUG=0x%08x\n", 375 rtw89_read32(rtwdev, R_AX_SEC_TX_DEBUG)); 376 rtw89_info(rtwdev, "R_AX_SEC_RX_DEBUG=0x%08x\n", 377 rtw89_read32(rtwdev, R_AX_SEC_RX_DEBUG)); 378 rtw89_info(rtwdev, "R_AX_SEC_TRX_PKT_CNT=0x%08x\n", 379 rtw89_read32(rtwdev, R_AX_SEC_TRX_PKT_CNT)); 380 rtw89_info(rtwdev, "R_AX_SEC_TRX_BLK_CNT=0x%08x\n", 381 rtw89_read32(rtwdev, R_AX_SEC_TRX_BLK_CNT)); 382 } 383 } 384 385 if (dmac_err & B_AX_MPDU_ERR_FLAG) { 386 rtw89_info(rtwdev, "R_AX_MPDU_TX_ERR_IMR=0x%08x\n", 387 rtw89_read32(rtwdev, R_AX_MPDU_TX_ERR_IMR)); 388 rtw89_info(rtwdev, "R_AX_MPDU_TX_ERR_ISR=0x%08x\n", 389 rtw89_read32(rtwdev, R_AX_MPDU_TX_ERR_ISR)); 390 rtw89_info(rtwdev, "R_AX_MPDU_RX_ERR_IMR=0x%08x\n", 391 rtw89_read32(rtwdev, R_AX_MPDU_RX_ERR_IMR)); 392 rtw89_info(rtwdev, "R_AX_MPDU_RX_ERR_ISR=0x%08x\n", 393 rtw89_read32(rtwdev, R_AX_MPDU_RX_ERR_ISR)); 394 } 395 396 if (dmac_err & B_AX_STA_SCHEDULER_ERR_FLAG) { 397 rtw89_info(rtwdev, "R_AX_STA_SCHEDULER_ERR_IMR=0x%08x\n", 398 rtw89_read32(rtwdev, R_AX_STA_SCHEDULER_ERR_IMR)); 399 rtw89_info(rtwdev, "R_AX_STA_SCHEDULER_ERR_ISR=0x%08x\n", 400 rtw89_read32(rtwdev, R_AX_STA_SCHEDULER_ERR_ISR)); 401 } 402 403 if (dmac_err & B_AX_WDE_DLE_ERR_FLAG) { 404 rtw89_info(rtwdev, "R_AX_WDE_ERR_IMR=0x%08x\n", 405 rtw89_read32(rtwdev, R_AX_WDE_ERR_IMR)); 406 
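			/* Note: the B_AX_PLE_DLE_ERR_FLAG branch further down
			 * dumps these same WDE/PLE IMR/ISR registers again,
			 * together with the WD/PL CPUIO operation registers.
			 */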
rtw89_info(rtwdev, "R_AX_WDE_ERR_ISR=0x%08x\n", 407 rtw89_read32(rtwdev, R_AX_WDE_ERR_ISR)); 408 rtw89_info(rtwdev, "R_AX_PLE_ERR_IMR=0x%08x\n", 409 rtw89_read32(rtwdev, R_AX_PLE_ERR_IMR)); 410 rtw89_info(rtwdev, "R_AX_PLE_ERR_FLAG_ISR=0x%08x\n", 411 rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_ISR)); 412 } 413 414 if (dmac_err & B_AX_TXPKTCTRL_ERR_FLAG) { 415 if (chip->chip_id == RTL8852C) { 416 rtw89_info(rtwdev, "R_AX_TXPKTCTL_B0_ERRFLAG_IMR=0x%08x\n", 417 rtw89_read32(rtwdev, R_AX_TXPKTCTL_B0_ERRFLAG_IMR)); 418 rtw89_info(rtwdev, "R_AX_TXPKTCTL_B0_ERRFLAG_ISR=0x%08x\n", 419 rtw89_read32(rtwdev, R_AX_TXPKTCTL_B0_ERRFLAG_ISR)); 420 rtw89_info(rtwdev, "R_AX_TXPKTCTL_B1_ERRFLAG_IMR=0x%08x\n", 421 rtw89_read32(rtwdev, R_AX_TXPKTCTL_B1_ERRFLAG_IMR)); 422 rtw89_info(rtwdev, "R_AX_TXPKTCTL_B1_ERRFLAG_ISR=0x%08x\n", 423 rtw89_read32(rtwdev, R_AX_TXPKTCTL_B1_ERRFLAG_ISR)); 424 } else { 425 rtw89_info(rtwdev, "R_AX_TXPKTCTL_ERR_IMR_ISR=0x%08x\n", 426 rtw89_read32(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR)); 427 rtw89_info(rtwdev, "R_AX_TXPKTCTL_ERR_IMR_ISR_B1=0x%08x\n", 428 rtw89_read32(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR_B1)); 429 } 430 } 431 432 if (dmac_err & B_AX_PLE_DLE_ERR_FLAG) { 433 rtw89_info(rtwdev, "R_AX_WDE_ERR_IMR=0x%08x\n", 434 rtw89_read32(rtwdev, R_AX_WDE_ERR_IMR)); 435 rtw89_info(rtwdev, "R_AX_WDE_ERR_ISR=0x%08x\n", 436 rtw89_read32(rtwdev, R_AX_WDE_ERR_ISR)); 437 rtw89_info(rtwdev, "R_AX_PLE_ERR_IMR=0x%08x\n", 438 rtw89_read32(rtwdev, R_AX_PLE_ERR_IMR)); 439 rtw89_info(rtwdev, "R_AX_PLE_ERR_FLAG_ISR=0x%08x\n", 440 rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_ISR)); 441 rtw89_info(rtwdev, "R_AX_WD_CPUQ_OP_0=0x%08x\n", 442 rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_0)); 443 rtw89_info(rtwdev, "R_AX_WD_CPUQ_OP_1=0x%08x\n", 444 rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_1)); 445 rtw89_info(rtwdev, "R_AX_WD_CPUQ_OP_2=0x%08x\n", 446 rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_2)); 447 rtw89_info(rtwdev, "R_AX_WD_CPUQ_OP_STATUS=0x%08x\n", 448 rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_STATUS)); 449 rtw89_info(rtwdev, "R_AX_PL_CPUQ_OP_0=0x%08x\n", 450 rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_0)); 451 rtw89_info(rtwdev, "R_AX_PL_CPUQ_OP_1=0x%08x\n", 452 rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_1)); 453 rtw89_info(rtwdev, "R_AX_PL_CPUQ_OP_2=0x%08x\n", 454 rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_2)); 455 rtw89_info(rtwdev, "R_AX_PL_CPUQ_OP_STATUS=0x%08x\n", 456 rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_STATUS)); 457 if (chip->chip_id == RTL8852C) { 458 rtw89_info(rtwdev, "R_AX_RX_CTRL0=0x%08x\n", 459 rtw89_read32(rtwdev, R_AX_RX_CTRL0)); 460 rtw89_info(rtwdev, "R_AX_RX_CTRL1=0x%08x\n", 461 rtw89_read32(rtwdev, R_AX_RX_CTRL1)); 462 rtw89_info(rtwdev, "R_AX_RX_CTRL2=0x%08x\n", 463 rtw89_read32(rtwdev, R_AX_RX_CTRL2)); 464 } else { 465 rtw89_info(rtwdev, "R_AX_RXDMA_PKT_INFO_0=0x%08x\n", 466 rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_0)); 467 rtw89_info(rtwdev, "R_AX_RXDMA_PKT_INFO_1=0x%08x\n", 468 rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_1)); 469 rtw89_info(rtwdev, "R_AX_RXDMA_PKT_INFO_2=0x%08x\n", 470 rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_2)); 471 } 472 } 473 474 if (dmac_err & B_AX_PKTIN_ERR_FLAG) { 475 rtw89_info(rtwdev, "R_AX_PKTIN_ERR_IMR=0x%08x\n", 476 rtw89_read32(rtwdev, R_AX_PKTIN_ERR_IMR)); 477 rtw89_info(rtwdev, "R_AX_PKTIN_ERR_ISR=0x%08x\n", 478 rtw89_read32(rtwdev, R_AX_PKTIN_ERR_ISR)); 479 } 480 481 if (dmac_err & B_AX_DISPATCH_ERR_FLAG) { 482 rtw89_info(rtwdev, "R_AX_HOST_DISPATCHER_ERR_IMR=0x%08x\n", 483 rtw89_read32(rtwdev, R_AX_HOST_DISPATCHER_ERR_IMR)); 484 rtw89_info(rtwdev, "R_AX_HOST_DISPATCHER_ERR_ISR=0x%08x\n", 485 
rtw89_read32(rtwdev, R_AX_HOST_DISPATCHER_ERR_ISR)); 486 rtw89_info(rtwdev, "R_AX_CPU_DISPATCHER_ERR_IMR=0x%08x\n", 487 rtw89_read32(rtwdev, R_AX_CPU_DISPATCHER_ERR_IMR)); 488 rtw89_info(rtwdev, "R_AX_CPU_DISPATCHER_ERR_ISR=0x%08x\n", 489 rtw89_read32(rtwdev, R_AX_CPU_DISPATCHER_ERR_ISR)); 490 rtw89_info(rtwdev, "R_AX_OTHER_DISPATCHER_ERR_IMR=0x%08x\n", 491 rtw89_read32(rtwdev, R_AX_OTHER_DISPATCHER_ERR_IMR)); 492 rtw89_info(rtwdev, "R_AX_OTHER_DISPATCHER_ERR_ISR=0x%08x\n", 493 rtw89_read32(rtwdev, R_AX_OTHER_DISPATCHER_ERR_ISR)); 494 } 495 496 if (dmac_err & B_AX_BBRPT_ERR_FLAG) { 497 if (chip->chip_id == RTL8852C) { 498 rtw89_info(rtwdev, "R_AX_BBRPT_COM_ERR_IMR=0x%08x\n", 499 rtw89_read32(rtwdev, R_AX_BBRPT_COM_ERR_IMR)); 500 rtw89_info(rtwdev, "R_AX_BBRPT_COM_ERR_ISR=0x%08x\n", 501 rtw89_read32(rtwdev, R_AX_BBRPT_COM_ERR_ISR)); 502 rtw89_info(rtwdev, "R_AX_BBRPT_CHINFO_ERR_ISR=0x%08x\n", 503 rtw89_read32(rtwdev, R_AX_BBRPT_CHINFO_ERR_ISR)); 504 rtw89_info(rtwdev, "R_AX_BBRPT_CHINFO_ERR_IMR=0x%08x\n", 505 rtw89_read32(rtwdev, R_AX_BBRPT_CHINFO_ERR_IMR)); 506 rtw89_info(rtwdev, "R_AX_BBRPT_DFS_ERR_IMR=0x%08x\n", 507 rtw89_read32(rtwdev, R_AX_BBRPT_DFS_ERR_IMR)); 508 rtw89_info(rtwdev, "R_AX_BBRPT_DFS_ERR_ISR=0x%08x\n", 509 rtw89_read32(rtwdev, R_AX_BBRPT_DFS_ERR_ISR)); 510 } else { 511 rtw89_info(rtwdev, "R_AX_BBRPT_COM_ERR_IMR_ISR=0x%08x\n", 512 rtw89_read32(rtwdev, R_AX_BBRPT_COM_ERR_IMR_ISR)); 513 rtw89_info(rtwdev, "R_AX_BBRPT_CHINFO_ERR_ISR=0x%08x\n", 514 rtw89_read32(rtwdev, R_AX_BBRPT_CHINFO_ERR_ISR)); 515 rtw89_info(rtwdev, "R_AX_BBRPT_CHINFO_ERR_IMR=0x%08x\n", 516 rtw89_read32(rtwdev, R_AX_BBRPT_CHINFO_ERR_IMR)); 517 rtw89_info(rtwdev, "R_AX_BBRPT_DFS_ERR_IMR=0x%08x\n", 518 rtw89_read32(rtwdev, R_AX_BBRPT_DFS_ERR_IMR)); 519 rtw89_info(rtwdev, "R_AX_BBRPT_DFS_ERR_ISR=0x%08x\n", 520 rtw89_read32(rtwdev, R_AX_BBRPT_DFS_ERR_ISR)); 521 } 522 } 523 524 if (dmac_err & B_AX_HAXIDMA_ERR_FLAG && chip->chip_id == RTL8852C) { 525 rtw89_info(rtwdev, "R_AX_HAXIDMA_ERR_IMR=0x%08x\n", 526 rtw89_read32(rtwdev, R_AX_HAXI_IDCT_MSK)); 527 rtw89_info(rtwdev, "R_AX_HAXIDMA_ERR_ISR=0x%08x\n", 528 rtw89_read32(rtwdev, R_AX_HAXI_IDCT)); 529 } 530 } 531 532 static void rtw89_mac_dump_cmac_err_status(struct rtw89_dev *rtwdev, 533 u8 band) 534 { 535 const struct rtw89_chip_info *chip = rtwdev->chip; 536 u32 offset = 0; 537 u32 cmac_err; 538 int ret; 539 540 ret = rtw89_mac_check_mac_en(rtwdev, band, RTW89_CMAC_SEL); 541 if (ret) { 542 if (band) 543 rtw89_warn(rtwdev, "[CMAC] : CMAC1 not enabled\n"); 544 else 545 rtw89_warn(rtwdev, "[CMAC] : CMAC0 not enabled\n"); 546 return; 547 } 548 549 if (band) 550 offset = RTW89_MAC_AX_BAND_REG_OFFSET; 551 552 cmac_err = rtw89_read32(rtwdev, R_AX_CMAC_ERR_ISR + offset); 553 rtw89_info(rtwdev, "R_AX_CMAC_ERR_ISR [%d]=0x%08x\n", band, 554 rtw89_read32(rtwdev, R_AX_CMAC_ERR_ISR + offset)); 555 rtw89_info(rtwdev, "R_AX_CMAC_FUNC_EN [%d]=0x%08x\n", band, 556 rtw89_read32(rtwdev, R_AX_CMAC_FUNC_EN + offset)); 557 rtw89_info(rtwdev, "R_AX_CK_EN [%d]=0x%08x\n", band, 558 rtw89_read32(rtwdev, R_AX_CK_EN + offset)); 559 560 if (cmac_err & B_AX_SCHEDULE_TOP_ERR_IND) { 561 rtw89_info(rtwdev, "R_AX_SCHEDULE_ERR_IMR [%d]=0x%08x\n", band, 562 rtw89_read32(rtwdev, R_AX_SCHEDULE_ERR_IMR + offset)); 563 rtw89_info(rtwdev, "R_AX_SCHEDULE_ERR_ISR [%d]=0x%08x\n", band, 564 rtw89_read32(rtwdev, R_AX_SCHEDULE_ERR_ISR + offset)); 565 } 566 567 if (cmac_err & B_AX_PTCL_TOP_ERR_IND) { 568 rtw89_info(rtwdev, "R_AX_PTCL_IMR0 [%d]=0x%08x\n", band, 569 rtw89_read32(rtwdev, R_AX_PTCL_IMR0 + 
offset)); 570 rtw89_info(rtwdev, "R_AX_PTCL_ISR0 [%d]=0x%08x\n", band, 571 rtw89_read32(rtwdev, R_AX_PTCL_ISR0 + offset)); 572 } 573 574 if (cmac_err & B_AX_DMA_TOP_ERR_IND) { 575 if (chip->chip_id == RTL8852C) { 576 rtw89_info(rtwdev, "R_AX_RX_ERR_FLAG [%d]=0x%08x\n", band, 577 rtw89_read32(rtwdev, R_AX_RX_ERR_FLAG + offset)); 578 rtw89_info(rtwdev, "R_AX_RX_ERR_FLAG_IMR [%d]=0x%08x\n", band, 579 rtw89_read32(rtwdev, R_AX_RX_ERR_FLAG_IMR + offset)); 580 } else { 581 rtw89_info(rtwdev, "R_AX_DLE_CTRL [%d]=0x%08x\n", band, 582 rtw89_read32(rtwdev, R_AX_DLE_CTRL + offset)); 583 } 584 } 585 586 if (cmac_err & B_AX_DMA_TOP_ERR_IND || cmac_err & B_AX_WMAC_RX_ERR_IND) { 587 if (chip->chip_id == RTL8852C) { 588 rtw89_info(rtwdev, "R_AX_PHYINFO_ERR_ISR [%d]=0x%08x\n", band, 589 rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_ISR + offset)); 590 rtw89_info(rtwdev, "R_AX_PHYINFO_ERR_IMR [%d]=0x%08x\n", band, 591 rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_IMR + offset)); 592 } else { 593 rtw89_info(rtwdev, "R_AX_PHYINFO_ERR_IMR [%d]=0x%08x\n", band, 594 rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_IMR + offset)); 595 } 596 } 597 598 if (cmac_err & B_AX_TXPWR_CTRL_ERR_IND) { 599 rtw89_info(rtwdev, "R_AX_TXPWR_IMR [%d]=0x%08x\n", band, 600 rtw89_read32(rtwdev, R_AX_TXPWR_IMR + offset)); 601 rtw89_info(rtwdev, "R_AX_TXPWR_ISR [%d]=0x%08x\n", band, 602 rtw89_read32(rtwdev, R_AX_TXPWR_ISR + offset)); 603 } 604 605 if (cmac_err & B_AX_WMAC_TX_ERR_IND) { 606 if (chip->chip_id == RTL8852C) { 607 rtw89_info(rtwdev, "R_AX_TRXPTCL_ERROR_INDICA [%d]=0x%08x\n", band, 608 rtw89_read32(rtwdev, R_AX_TRXPTCL_ERROR_INDICA + offset)); 609 rtw89_info(rtwdev, "R_AX_TRXPTCL_ERROR_INDICA_MASK [%d]=0x%08x\n", band, 610 rtw89_read32(rtwdev, R_AX_TRXPTCL_ERROR_INDICA_MASK + offset)); 611 } else { 612 rtw89_info(rtwdev, "R_AX_TMAC_ERR_IMR_ISR [%d]=0x%08x\n", band, 613 rtw89_read32(rtwdev, R_AX_TMAC_ERR_IMR_ISR + offset)); 614 } 615 rtw89_info(rtwdev, "R_AX_DBGSEL_TRXPTCL [%d]=0x%08x\n", band, 616 rtw89_read32(rtwdev, R_AX_DBGSEL_TRXPTCL + offset)); 617 } 618 619 rtw89_info(rtwdev, "R_AX_CMAC_ERR_IMR [%d]=0x%08x\n", band, 620 rtw89_read32(rtwdev, R_AX_CMAC_ERR_IMR + offset)); 621 } 622 623 static void rtw89_mac_dump_err_status(struct rtw89_dev *rtwdev, 624 enum mac_ax_err_info err) 625 { 626 if (err != MAC_AX_ERR_L1_ERR_DMAC && 627 err != MAC_AX_ERR_L0_PROMOTE_TO_L1 && 628 err != MAC_AX_ERR_L0_ERR_CMAC0 && 629 err != MAC_AX_ERR_L0_ERR_CMAC1 && 630 err != MAC_AX_ERR_RXI300) 631 return; 632 633 rtw89_info(rtwdev, "--->\nerr=0x%x\n", err); 634 rtw89_info(rtwdev, "R_AX_SER_DBG_INFO =0x%08x\n", 635 rtw89_read32(rtwdev, R_AX_SER_DBG_INFO)); 636 637 rtw89_mac_dump_dmac_err_status(rtwdev); 638 rtw89_mac_dump_cmac_err_status(rtwdev, RTW89_MAC_0); 639 if (rtwdev->dbcc_en) 640 rtw89_mac_dump_cmac_err_status(rtwdev, RTW89_MAC_1); 641 642 rtwdev->hci.ops->dump_err_status(rtwdev); 643 644 if (err == MAC_AX_ERR_L0_PROMOTE_TO_L1) 645 rtw89_mac_dump_l0_to_l1(rtwdev, err); 646 647 rtw89_info(rtwdev, "<---\n"); 648 } 649 650 static bool rtw89_mac_suppress_log(struct rtw89_dev *rtwdev, u32 err) 651 { 652 struct rtw89_ser *ser = &rtwdev->ser; 653 u32 dmac_err, imr, isr; 654 int ret; 655 656 if (rtwdev->chip->chip_id == RTL8852C) { 657 ret = rtw89_mac_check_mac_en(rtwdev, 0, RTW89_DMAC_SEL); 658 if (ret) 659 return true; 660 661 if (err == MAC_AX_ERR_L1_ERR_DMAC) { 662 dmac_err = rtw89_read32(rtwdev, R_AX_DMAC_ERR_ISR); 663 imr = rtw89_read32(rtwdev, R_AX_TXPKTCTL_B0_ERRFLAG_IMR); 664 isr = rtw89_read32(rtwdev, R_AX_TXPKTCTL_B0_ERRFLAG_ISR); 665 666 if ((dmac_err & 
			    B_AX_TXPKTCTRL_ERR_FLAG) &&
			    ((isr & imr) & B_AX_B0_ISR_ERR_CMDPSR_FRZTO)) {
				set_bit(RTW89_SER_SUPPRESS_LOG, ser->flags);
				return true;
			}
		} else if (err == MAC_AX_ERR_L1_RESET_DISABLE_DMAC_DONE) {
			if (test_bit(RTW89_SER_SUPPRESS_LOG, ser->flags))
				return true;
		} else if (err == MAC_AX_ERR_L1_RESET_RECOVERY_DONE) {
			if (test_and_clear_bit(RTW89_SER_SUPPRESS_LOG, ser->flags))
				return true;
		}
	}

	return false;
}

u32 rtw89_mac_get_err_status(struct rtw89_dev *rtwdev)
{
	u32 err, err_scnr;
	int ret;

	ret = read_poll_timeout(rtw89_read32, err, (err != 0), 1000, 100000,
				false, rtwdev, R_AX_HALT_C2H_CTRL);
	if (ret) {
		rtw89_warn(rtwdev, "Polling FW err status fail\n");
		return ret;
	}

	/* fetch the error code posted by FW through the halt C2H mailbox
	 * and clear the pending indication
	 */
	err = rtw89_read32(rtwdev, R_AX_HALT_C2H);
	rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0);

	err_scnr = RTW89_ERROR_SCENARIO(err);
	if (err_scnr == RTW89_WCPU_CPU_EXCEPTION)
		err = MAC_AX_ERR_CPU_EXCEPTION;
	else if (err_scnr == RTW89_WCPU_ASSERTION)
		err = MAC_AX_ERR_ASSERTION;
	else if (err_scnr == RTW89_RXI300_ERROR)
		err = MAC_AX_ERR_RXI300;

	if (rtw89_mac_suppress_log(rtwdev, err))
		return err;

	rtw89_fw_st_dbg_dump(rtwdev);
	rtw89_mac_dump_err_status(rtwdev, err);

	return err;
}
EXPORT_SYMBOL(rtw89_mac_get_err_status);

int rtw89_mac_set_err_status(struct rtw89_dev *rtwdev, u32 err)
{
	struct rtw89_ser *ser = &rtwdev->ser;
	u32 halt;
	int ret = 0;

	if (err > MAC_AX_SET_ERR_MAX) {
		rtw89_err(rtwdev, "Bad set-err-status value 0x%08x\n", err);
		return -EINVAL;
	}

	ret = read_poll_timeout(rtw89_read32, halt, (halt == 0x0), 1000,
				100000, false, rtwdev, R_AX_HALT_H2C_CTRL);
	if (ret) {
		rtw89_err(rtwdev, "FW doesn't receive previous msg\n");
		return -EFAULT;
	}

	rtw89_write32(rtwdev, R_AX_HALT_H2C, err);

	if (ser->prehandle_l1 &&
	    (err == MAC_AX_ERR_L1_DISABLE_EN || err == MAC_AX_ERR_L1_RCVY_EN))
		return 0;

	rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, B_AX_HALT_H2C_TRIGGER);

	return 0;
}
EXPORT_SYMBOL(rtw89_mac_set_err_status);

static int hfc_reset_param(struct rtw89_dev *rtwdev)
{
	struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
	struct rtw89_hfc_param_ini param_ini = {NULL};
	u8 qta_mode = rtwdev->mac.dle_info.qta_mode;

	switch (rtwdev->hci.type) {
	case RTW89_HCI_TYPE_PCIE:
		param_ini = rtwdev->chip->hfc_param_ini[qta_mode];
		param->en = 0;
		break;
	default:
		return -EINVAL;
	}

	if (param_ini.pub_cfg)
		param->pub_cfg = *param_ini.pub_cfg;

	if (param_ini.prec_cfg)
		param->prec_cfg = *param_ini.prec_cfg;

	if (param_ini.ch_cfg)
		param->ch_cfg = param_ini.ch_cfg;

	memset(&param->ch_info, 0, sizeof(param->ch_info));
	memset(&param->pub_info, 0, sizeof(param->pub_info));
	param->mode = param_ini.mode;

	return 0;
}

static int hfc_ch_cfg_chk(struct rtw89_dev *rtwdev, u8 ch)
{
	struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
	const struct rtw89_hfc_ch_cfg *ch_cfg = param->ch_cfg;
	const struct rtw89_hfc_pub_cfg *pub_cfg = &param->pub_cfg;
	const struct rtw89_hfc_prec_cfg *prec_cfg = &param->prec_cfg;

	if (ch >= RTW89_DMA_CH_NUM)
		return -EINVAL;

	if ((ch_cfg[ch].min && ch_cfg[ch].min < prec_cfg->ch011_prec) ||
	    ch_cfg[ch].max > pub_cfg->pub_max)
		return -EINVAL;
	if (ch_cfg[ch].grp >= grp_num)
		return -EINVAL;

	return 0;
}

static int hfc_pub_info_chk(struct rtw89_dev *rtwdev)
{
	struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
	const struct rtw89_hfc_pub_cfg *cfg = &param->pub_cfg;
	struct rtw89_hfc_pub_info *info = &param->pub_info;

	if (info->g0_used + info->g1_used + info->pub_aval != cfg->pub_max) {
		if (rtwdev->chip->chip_id == RTL8852A)
			return 0;
		else
			return -EFAULT;
	}

	return 0;
}

static int hfc_pub_cfg_chk(struct rtw89_dev *rtwdev)
{
	struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
	const struct rtw89_hfc_pub_cfg *pub_cfg = &param->pub_cfg;

	if (pub_cfg->grp0 + pub_cfg->grp1 != pub_cfg->pub_max)
		return -EFAULT;

	return 0;
}

static int hfc_ch_ctrl(struct rtw89_dev *rtwdev, u8 ch)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_page_regs *regs = chip->page_regs;
	struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
	const struct rtw89_hfc_ch_cfg *cfg = param->ch_cfg;
	int ret = 0;
	u32 val = 0;

	ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
	if (ret)
		return ret;

	ret = hfc_ch_cfg_chk(rtwdev, ch);
	if (ret)
		return ret;

	if (ch > RTW89_DMA_B1HI)
		return -EINVAL;

	val = u32_encode_bits(cfg[ch].min, B_AX_MIN_PG_MASK) |
	      u32_encode_bits(cfg[ch].max, B_AX_MAX_PG_MASK) |
	      (cfg[ch].grp ? B_AX_GRP : 0);
	rtw89_write32(rtwdev, regs->ach_page_ctrl + ch * 4, val);

	return 0;
}

static int hfc_upd_ch_info(struct rtw89_dev *rtwdev, u8 ch)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_page_regs *regs = chip->page_regs;
	struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
	struct rtw89_hfc_ch_info *info = param->ch_info;
	const struct rtw89_hfc_ch_cfg *cfg = param->ch_cfg;
	u32 val;
	u32 ret;

	ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
	if (ret)
		return ret;

	if (ch > RTW89_DMA_H2C)
		return -EINVAL;

	val = rtw89_read32(rtwdev, regs->ach_page_info + ch * 4);
	info[ch].aval = u32_get_bits(val, B_AX_AVAL_PG_MASK);
	if (ch < RTW89_DMA_H2C)
		info[ch].used = u32_get_bits(val, B_AX_USE_PG_MASK);
	else
		/* H2C channel: derive the used count from the configured
		 * min quota instead of B_AX_USE_PG_MASK
		 */
		info[ch].used = cfg[ch].min - info[ch].aval;

	return 0;
}

static int hfc_pub_ctrl(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_page_regs *regs = chip->page_regs;
	const struct rtw89_hfc_pub_cfg *cfg = &rtwdev->mac.hfc_param.pub_cfg;
	u32 val;
	int ret;

	ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
	if (ret)
		return ret;

	ret = hfc_pub_cfg_chk(rtwdev);
	if (ret)
		return ret;

	val = u32_encode_bits(cfg->grp0, B_AX_PUBPG_G0_MASK) |
	      u32_encode_bits(cfg->grp1, B_AX_PUBPG_G1_MASK);
	rtw89_write32(rtwdev, regs->pub_page_ctrl1, val);

	val = u32_encode_bits(cfg->wp_thrd, B_AX_WP_THRD_MASK);
	rtw89_write32(rtwdev, regs->wp_page_ctrl2, val);

	return 0;
}

static int hfc_upd_mix_info(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_page_regs *regs = chip->page_regs;
	struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
	struct rtw89_hfc_pub_cfg *pub_cfg = &param->pub_cfg;
	struct rtw89_hfc_prec_cfg *prec_cfg = &param->prec_cfg;
	struct rtw89_hfc_pub_info *info = &param->pub_info;
	u32 val;
	int ret;

	ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
	if (ret)
		return ret;

	val = rtw89_read32(rtwdev, regs->pub_page_info1);
	info->g0_used = u32_get_bits(val, B_AX_G0_USE_PG_MASK);
	info->g1_used = u32_get_bits(val, B_AX_G1_USE_PG_MASK);
	val = rtw89_read32(rtwdev, regs->pub_page_info3);
	info->g0_aval = u32_get_bits(val, B_AX_G0_AVAL_PG_MASK);
	info->g1_aval = u32_get_bits(val, B_AX_G1_AVAL_PG_MASK);
	info->pub_aval =
		u32_get_bits(rtw89_read32(rtwdev, regs->pub_page_info2),
			     B_AX_PUB_AVAL_PG_MASK);
	info->wp_aval =
		u32_get_bits(rtw89_read32(rtwdev, regs->wp_page_info1),
			     B_AX_WP_AVAL_PG_MASK);

	val = rtw89_read32(rtwdev, regs->hci_fc_ctrl);
	param->en = val & B_AX_HCI_FC_EN ? 1 : 0;
	param->h2c_en = val & B_AX_HCI_FC_CH12_EN ? 1 : 0;
	param->mode = u32_get_bits(val, B_AX_HCI_FC_MODE_MASK);
	prec_cfg->ch011_full_cond =
		u32_get_bits(val, B_AX_HCI_FC_WD_FULL_COND_MASK);
	prec_cfg->h2c_full_cond =
		u32_get_bits(val, B_AX_HCI_FC_CH12_FULL_COND_MASK);
	prec_cfg->wp_ch07_full_cond =
		u32_get_bits(val, B_AX_HCI_FC_WP_CH07_FULL_COND_MASK);
	prec_cfg->wp_ch811_full_cond =
		u32_get_bits(val, B_AX_HCI_FC_WP_CH811_FULL_COND_MASK);

	val = rtw89_read32(rtwdev, regs->ch_page_ctrl);
	prec_cfg->ch011_prec = u32_get_bits(val, B_AX_PREC_PAGE_CH011_MASK);
	prec_cfg->h2c_prec = u32_get_bits(val, B_AX_PREC_PAGE_CH12_MASK);

	val = rtw89_read32(rtwdev, regs->pub_page_ctrl2);
	pub_cfg->pub_max = u32_get_bits(val, B_AX_PUBPG_ALL_MASK);

	val = rtw89_read32(rtwdev, regs->wp_page_ctrl1);
	prec_cfg->wp_ch07_prec = u32_get_bits(val, B_AX_PREC_PAGE_WP_CH07_MASK);
	prec_cfg->wp_ch811_prec = u32_get_bits(val, B_AX_PREC_PAGE_WP_CH811_MASK);

	val = rtw89_read32(rtwdev, regs->wp_page_ctrl2);
	pub_cfg->wp_thrd = u32_get_bits(val, B_AX_WP_THRD_MASK);

	val = rtw89_read32(rtwdev, regs->pub_page_ctrl1);
	pub_cfg->grp0 = u32_get_bits(val, B_AX_PUBPG_G0_MASK);
	pub_cfg->grp1 = u32_get_bits(val, B_AX_PUBPG_G1_MASK);

	ret = hfc_pub_info_chk(rtwdev);
	if (param->en && ret)
		return ret;

	return 0;
}

static void hfc_h2c_cfg(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_page_regs *regs = chip->page_regs;
	struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
	const struct rtw89_hfc_prec_cfg *prec_cfg = &param->prec_cfg;
	u32 val;

	val = u32_encode_bits(prec_cfg->h2c_prec, B_AX_PREC_PAGE_CH12_MASK);
	rtw89_write32(rtwdev, regs->ch_page_ctrl, val);

	rtw89_write32_mask(rtwdev, regs->hci_fc_ctrl,
			   B_AX_HCI_FC_CH12_FULL_COND_MASK,
			   prec_cfg->h2c_full_cond);
}

static void hfc_mix_cfg(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_page_regs *regs = chip->page_regs;
	struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
	const struct rtw89_hfc_pub_cfg *pub_cfg = &param->pub_cfg;
	const struct rtw89_hfc_prec_cfg *prec_cfg = &param->prec_cfg;
	u32 val;

	val = u32_encode_bits(prec_cfg->ch011_prec, B_AX_PREC_PAGE_CH011_MASK) |
	      u32_encode_bits(prec_cfg->h2c_prec, B_AX_PREC_PAGE_CH12_MASK);
	rtw89_write32(rtwdev, regs->ch_page_ctrl, val);

	val = u32_encode_bits(pub_cfg->pub_max, B_AX_PUBPG_ALL_MASK);
	rtw89_write32(rtwdev, regs->pub_page_ctrl2, val);

	val = u32_encode_bits(prec_cfg->wp_ch07_prec,
			      B_AX_PREC_PAGE_WP_CH07_MASK) |
u32_encode_bits(prec_cfg->wp_ch811_prec, 1005 B_AX_PREC_PAGE_WP_CH811_MASK); 1006 rtw89_write32(rtwdev, regs->wp_page_ctrl1, val); 1007 1008 val = u32_replace_bits(rtw89_read32(rtwdev, regs->hci_fc_ctrl), 1009 param->mode, B_AX_HCI_FC_MODE_MASK); 1010 val = u32_replace_bits(val, prec_cfg->ch011_full_cond, 1011 B_AX_HCI_FC_WD_FULL_COND_MASK); 1012 val = u32_replace_bits(val, prec_cfg->h2c_full_cond, 1013 B_AX_HCI_FC_CH12_FULL_COND_MASK); 1014 val = u32_replace_bits(val, prec_cfg->wp_ch07_full_cond, 1015 B_AX_HCI_FC_WP_CH07_FULL_COND_MASK); 1016 val = u32_replace_bits(val, prec_cfg->wp_ch811_full_cond, 1017 B_AX_HCI_FC_WP_CH811_FULL_COND_MASK); 1018 rtw89_write32(rtwdev, regs->hci_fc_ctrl, val); 1019 } 1020 1021 static void hfc_func_en(struct rtw89_dev *rtwdev, bool en, bool h2c_en) 1022 { 1023 const struct rtw89_chip_info *chip = rtwdev->chip; 1024 const struct rtw89_page_regs *regs = chip->page_regs; 1025 struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param; 1026 u32 val; 1027 1028 val = rtw89_read32(rtwdev, regs->hci_fc_ctrl); 1029 param->en = en; 1030 param->h2c_en = h2c_en; 1031 val = en ? (val | B_AX_HCI_FC_EN) : (val & ~B_AX_HCI_FC_EN); 1032 val = h2c_en ? (val | B_AX_HCI_FC_CH12_EN) : 1033 (val & ~B_AX_HCI_FC_CH12_EN); 1034 rtw89_write32(rtwdev, regs->hci_fc_ctrl, val); 1035 } 1036 1037 static int hfc_init(struct rtw89_dev *rtwdev, bool reset, bool en, bool h2c_en) 1038 { 1039 const struct rtw89_chip_info *chip = rtwdev->chip; 1040 u32 dma_ch_mask = chip->dma_ch_mask; 1041 u8 ch; 1042 u32 ret = 0; 1043 1044 if (reset) 1045 ret = hfc_reset_param(rtwdev); 1046 if (ret) 1047 return ret; 1048 1049 ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL); 1050 if (ret) 1051 return ret; 1052 1053 hfc_func_en(rtwdev, false, false); 1054 1055 if (!en && h2c_en) { 1056 hfc_h2c_cfg(rtwdev); 1057 hfc_func_en(rtwdev, en, h2c_en); 1058 return ret; 1059 } 1060 1061 for (ch = RTW89_DMA_ACH0; ch < RTW89_DMA_H2C; ch++) { 1062 if (dma_ch_mask & BIT(ch)) 1063 continue; 1064 ret = hfc_ch_ctrl(rtwdev, ch); 1065 if (ret) 1066 return ret; 1067 } 1068 1069 ret = hfc_pub_ctrl(rtwdev); 1070 if (ret) 1071 return ret; 1072 1073 hfc_mix_cfg(rtwdev); 1074 if (en || h2c_en) { 1075 hfc_func_en(rtwdev, en, h2c_en); 1076 udelay(10); 1077 } 1078 for (ch = RTW89_DMA_ACH0; ch < RTW89_DMA_H2C; ch++) { 1079 if (dma_ch_mask & BIT(ch)) 1080 continue; 1081 ret = hfc_upd_ch_info(rtwdev, ch); 1082 if (ret) 1083 return ret; 1084 } 1085 ret = hfc_upd_mix_info(rtwdev); 1086 1087 return ret; 1088 } 1089 1090 #define PWR_POLL_CNT 2000 1091 static int pwr_cmd_poll(struct rtw89_dev *rtwdev, 1092 const struct rtw89_pwr_cfg *cfg) 1093 { 1094 u8 val = 0; 1095 int ret; 1096 u32 addr = cfg->base == PWR_INTF_MSK_SDIO ? 
1097 cfg->addr | SDIO_LOCAL_BASE_ADDR : cfg->addr; 1098 1099 ret = read_poll_timeout(rtw89_read8, val, !((val ^ cfg->val) & cfg->msk), 1100 1000, 1000 * PWR_POLL_CNT, false, rtwdev, addr); 1101 1102 if (!ret) 1103 return 0; 1104 1105 rtw89_warn(rtwdev, "[ERR] Polling timeout\n"); 1106 rtw89_warn(rtwdev, "[ERR] addr: %X, %X\n", addr, cfg->addr); 1107 rtw89_warn(rtwdev, "[ERR] val: %X, %X\n", val, cfg->val); 1108 1109 return -EBUSY; 1110 } 1111 1112 static int rtw89_mac_sub_pwr_seq(struct rtw89_dev *rtwdev, u8 cv_msk, 1113 u8 intf_msk, const struct rtw89_pwr_cfg *cfg) 1114 { 1115 const struct rtw89_pwr_cfg *cur_cfg; 1116 u32 addr; 1117 u8 val; 1118 1119 for (cur_cfg = cfg; cur_cfg->cmd != PWR_CMD_END; cur_cfg++) { 1120 if (!(cur_cfg->intf_msk & intf_msk) || 1121 !(cur_cfg->cv_msk & cv_msk)) 1122 continue; 1123 1124 switch (cur_cfg->cmd) { 1125 case PWR_CMD_WRITE: 1126 addr = cur_cfg->addr; 1127 1128 if (cur_cfg->base == PWR_BASE_SDIO) 1129 addr |= SDIO_LOCAL_BASE_ADDR; 1130 1131 val = rtw89_read8(rtwdev, addr); 1132 val &= ~(cur_cfg->msk); 1133 val |= (cur_cfg->val & cur_cfg->msk); 1134 1135 rtw89_write8(rtwdev, addr, val); 1136 break; 1137 case PWR_CMD_POLL: 1138 if (pwr_cmd_poll(rtwdev, cur_cfg)) 1139 return -EBUSY; 1140 break; 1141 case PWR_CMD_DELAY: 1142 if (cur_cfg->val == PWR_DELAY_US) 1143 udelay(cur_cfg->addr); 1144 else 1145 fsleep(cur_cfg->addr * 1000); 1146 break; 1147 default: 1148 return -EINVAL; 1149 } 1150 } 1151 1152 return 0; 1153 } 1154 1155 static int rtw89_mac_pwr_seq(struct rtw89_dev *rtwdev, 1156 const struct rtw89_pwr_cfg * const *cfg_seq) 1157 { 1158 int ret; 1159 1160 for (; *cfg_seq; cfg_seq++) { 1161 ret = rtw89_mac_sub_pwr_seq(rtwdev, BIT(rtwdev->hal.cv), 1162 PWR_INTF_MSK_PCIE, *cfg_seq); 1163 if (ret) 1164 return -EBUSY; 1165 } 1166 1167 return 0; 1168 } 1169 1170 static enum rtw89_rpwm_req_pwr_state 1171 rtw89_mac_get_req_pwr_state(struct rtw89_dev *rtwdev) 1172 { 1173 enum rtw89_rpwm_req_pwr_state state; 1174 1175 switch (rtwdev->ps_mode) { 1176 case RTW89_PS_MODE_RFOFF: 1177 state = RTW89_MAC_RPWM_REQ_PWR_STATE_BAND0_RFOFF; 1178 break; 1179 case RTW89_PS_MODE_CLK_GATED: 1180 state = RTW89_MAC_RPWM_REQ_PWR_STATE_CLK_GATED; 1181 break; 1182 case RTW89_PS_MODE_PWR_GATED: 1183 state = RTW89_MAC_RPWM_REQ_PWR_STATE_PWR_GATED; 1184 break; 1185 default: 1186 state = RTW89_MAC_RPWM_REQ_PWR_STATE_ACTIVE; 1187 break; 1188 } 1189 return state; 1190 } 1191 1192 static void rtw89_mac_send_rpwm(struct rtw89_dev *rtwdev, 1193 enum rtw89_rpwm_req_pwr_state req_pwr_state, 1194 bool notify_wake) 1195 { 1196 u16 request; 1197 1198 spin_lock_bh(&rtwdev->rpwm_lock); 1199 1200 request = rtw89_read16(rtwdev, R_AX_RPWM); 1201 request ^= request | PS_RPWM_TOGGLE; 1202 request |= req_pwr_state; 1203 1204 if (notify_wake) { 1205 request |= PS_RPWM_NOTIFY_WAKE; 1206 } else { 1207 rtwdev->mac.rpwm_seq_num = (rtwdev->mac.rpwm_seq_num + 1) & 1208 RPWM_SEQ_NUM_MAX; 1209 request |= FIELD_PREP(PS_RPWM_SEQ_NUM, 1210 rtwdev->mac.rpwm_seq_num); 1211 1212 if (req_pwr_state < RTW89_MAC_RPWM_REQ_PWR_STATE_CLK_GATED) 1213 request |= PS_RPWM_ACK; 1214 } 1215 rtw89_write16(rtwdev, rtwdev->hci.rpwm_addr, request); 1216 1217 spin_unlock_bh(&rtwdev->rpwm_lock); 1218 } 1219 1220 static int rtw89_mac_check_cpwm_state(struct rtw89_dev *rtwdev, 1221 enum rtw89_rpwm_req_pwr_state req_pwr_state) 1222 { 1223 bool request_deep_mode; 1224 bool in_deep_mode; 1225 u8 rpwm_req_num; 1226 u8 cpwm_rsp_seq; 1227 u8 cpwm_seq; 1228 u8 cpwm_status; 1229 1230 if (req_pwr_state >= RTW89_MAC_RPWM_REQ_PWR_STATE_CLK_GATED) 1231 
request_deep_mode = true; 1232 else 1233 request_deep_mode = false; 1234 1235 if (rtw89_read32_mask(rtwdev, R_AX_LDM, B_AX_EN_32K)) 1236 in_deep_mode = true; 1237 else 1238 in_deep_mode = false; 1239 1240 if (request_deep_mode != in_deep_mode) 1241 return -EPERM; 1242 1243 if (request_deep_mode) 1244 return 0; 1245 1246 rpwm_req_num = rtwdev->mac.rpwm_seq_num; 1247 cpwm_rsp_seq = rtw89_read16_mask(rtwdev, rtwdev->hci.cpwm_addr, 1248 PS_CPWM_RSP_SEQ_NUM); 1249 1250 if (rpwm_req_num != cpwm_rsp_seq) 1251 return -EPERM; 1252 1253 rtwdev->mac.cpwm_seq_num = (rtwdev->mac.cpwm_seq_num + 1) & 1254 CPWM_SEQ_NUM_MAX; 1255 1256 cpwm_seq = rtw89_read16_mask(rtwdev, rtwdev->hci.cpwm_addr, PS_CPWM_SEQ_NUM); 1257 if (cpwm_seq != rtwdev->mac.cpwm_seq_num) 1258 return -EPERM; 1259 1260 cpwm_status = rtw89_read16_mask(rtwdev, rtwdev->hci.cpwm_addr, PS_CPWM_STATE); 1261 if (cpwm_status != req_pwr_state) 1262 return -EPERM; 1263 1264 return 0; 1265 } 1266 1267 void rtw89_mac_power_mode_change(struct rtw89_dev *rtwdev, bool enter) 1268 { 1269 enum rtw89_rpwm_req_pwr_state state; 1270 unsigned long delay = enter ? 10 : 150; 1271 int ret; 1272 int i; 1273 1274 if (enter) 1275 state = rtw89_mac_get_req_pwr_state(rtwdev); 1276 else 1277 state = RTW89_MAC_RPWM_REQ_PWR_STATE_ACTIVE; 1278 1279 for (i = 0; i < RPWM_TRY_CNT; i++) { 1280 rtw89_mac_send_rpwm(rtwdev, state, false); 1281 ret = read_poll_timeout_atomic(rtw89_mac_check_cpwm_state, ret, 1282 !ret, delay, 15000, false, 1283 rtwdev, state); 1284 if (!ret) 1285 break; 1286 1287 if (i == RPWM_TRY_CNT - 1) 1288 rtw89_err(rtwdev, "firmware failed to ack for %s ps mode\n", 1289 enter ? "entering" : "leaving"); 1290 else 1291 rtw89_debug(rtwdev, RTW89_DBG_UNEXP, 1292 "%d time firmware failed to ack for %s ps mode\n", 1293 i + 1, enter ? "entering" : "leaving"); 1294 } 1295 } 1296 1297 void rtw89_mac_notify_wake(struct rtw89_dev *rtwdev) 1298 { 1299 enum rtw89_rpwm_req_pwr_state state; 1300 1301 state = rtw89_mac_get_req_pwr_state(rtwdev); 1302 rtw89_mac_send_rpwm(rtwdev, state, true); 1303 } 1304 1305 static int rtw89_mac_power_switch(struct rtw89_dev *rtwdev, bool on) 1306 { 1307 #define PWR_ACT 1 1308 const struct rtw89_chip_info *chip = rtwdev->chip; 1309 const struct rtw89_pwr_cfg * const *cfg_seq; 1310 int (*cfg_func)(struct rtw89_dev *rtwdev); 1311 int ret; 1312 u8 val; 1313 1314 if (on) { 1315 cfg_seq = chip->pwr_on_seq; 1316 cfg_func = chip->ops->pwr_on_func; 1317 } else { 1318 cfg_seq = chip->pwr_off_seq; 1319 cfg_func = chip->ops->pwr_off_func; 1320 } 1321 1322 if (test_bit(RTW89_FLAG_FW_RDY, rtwdev->flags)) 1323 __rtw89_leave_ps_mode(rtwdev); 1324 1325 val = rtw89_read32_mask(rtwdev, R_AX_IC_PWR_STATE, B_AX_WLMAC_PWR_STE_MASK); 1326 if (on && val == PWR_ACT) { 1327 rtw89_err(rtwdev, "MAC has already powered on\n"); 1328 return -EBUSY; 1329 } 1330 1331 ret = cfg_func ? 
cfg_func(rtwdev) : rtw89_mac_pwr_seq(rtwdev, cfg_seq); 1332 if (ret) 1333 return ret; 1334 1335 if (on) { 1336 set_bit(RTW89_FLAG_POWERON, rtwdev->flags); 1337 rtw89_write8(rtwdev, R_AX_SCOREBOARD + 3, MAC_AX_NOTIFY_TP_MAJOR); 1338 } else { 1339 clear_bit(RTW89_FLAG_POWERON, rtwdev->flags); 1340 clear_bit(RTW89_FLAG_FW_RDY, rtwdev->flags); 1341 rtw89_write8(rtwdev, R_AX_SCOREBOARD + 3, MAC_AX_NOTIFY_PWR_MAJOR); 1342 rtw89_set_entity_state(rtwdev, false); 1343 } 1344 1345 return 0; 1346 #undef PWR_ACT 1347 } 1348 1349 void rtw89_mac_pwr_off(struct rtw89_dev *rtwdev) 1350 { 1351 rtw89_mac_power_switch(rtwdev, false); 1352 } 1353 1354 static int cmac_func_en(struct rtw89_dev *rtwdev, u8 mac_idx, bool en) 1355 { 1356 u32 func_en = 0; 1357 u32 ck_en = 0; 1358 u32 c1pc_en = 0; 1359 u32 addrl_func_en[] = {R_AX_CMAC_FUNC_EN, R_AX_CMAC_FUNC_EN_C1}; 1360 u32 addrl_ck_en[] = {R_AX_CK_EN, R_AX_CK_EN_C1}; 1361 1362 func_en = B_AX_CMAC_EN | B_AX_CMAC_TXEN | B_AX_CMAC_RXEN | 1363 B_AX_PHYINTF_EN | B_AX_CMAC_DMA_EN | B_AX_PTCLTOP_EN | 1364 B_AX_SCHEDULER_EN | B_AX_TMAC_EN | B_AX_RMAC_EN | 1365 B_AX_CMAC_CRPRT; 1366 ck_en = B_AX_CMAC_CKEN | B_AX_PHYINTF_CKEN | B_AX_CMAC_DMA_CKEN | 1367 B_AX_PTCLTOP_CKEN | B_AX_SCHEDULER_CKEN | B_AX_TMAC_CKEN | 1368 B_AX_RMAC_CKEN; 1369 c1pc_en = B_AX_R_SYM_WLCMAC1_PC_EN | 1370 B_AX_R_SYM_WLCMAC1_P1_PC_EN | 1371 B_AX_R_SYM_WLCMAC1_P2_PC_EN | 1372 B_AX_R_SYM_WLCMAC1_P3_PC_EN | 1373 B_AX_R_SYM_WLCMAC1_P4_PC_EN; 1374 1375 if (en) { 1376 if (mac_idx == RTW89_MAC_1) { 1377 rtw89_write32_set(rtwdev, R_AX_AFE_CTRL1, c1pc_en); 1378 rtw89_write32_clr(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND, 1379 B_AX_R_SYM_ISO_CMAC12PP); 1380 rtw89_write32_set(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND, 1381 B_AX_CMAC1_FEN); 1382 } 1383 rtw89_write32_set(rtwdev, addrl_ck_en[mac_idx], ck_en); 1384 rtw89_write32_set(rtwdev, addrl_func_en[mac_idx], func_en); 1385 } else { 1386 rtw89_write32_clr(rtwdev, addrl_func_en[mac_idx], func_en); 1387 rtw89_write32_clr(rtwdev, addrl_ck_en[mac_idx], ck_en); 1388 if (mac_idx == RTW89_MAC_1) { 1389 rtw89_write32_clr(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND, 1390 B_AX_CMAC1_FEN); 1391 rtw89_write32_set(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND, 1392 B_AX_R_SYM_ISO_CMAC12PP); 1393 rtw89_write32_clr(rtwdev, R_AX_AFE_CTRL1, c1pc_en); 1394 } 1395 } 1396 1397 return 0; 1398 } 1399 1400 static int dmac_func_en(struct rtw89_dev *rtwdev) 1401 { 1402 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 1403 u32 val32; 1404 1405 if (chip_id == RTL8852C) 1406 val32 = (B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN | 1407 B_AX_MAC_SEC_EN | B_AX_DISPATCHER_EN | 1408 B_AX_DLE_CPUIO_EN | B_AX_PKT_IN_EN | 1409 B_AX_DMAC_TBL_EN | B_AX_PKT_BUF_EN | 1410 B_AX_STA_SCH_EN | B_AX_TXPKT_CTRL_EN | 1411 B_AX_WD_RLS_EN | B_AX_MPDU_PROC_EN | 1412 B_AX_DMAC_CRPRT | B_AX_H_AXIDMA_EN); 1413 else 1414 val32 = (B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN | 1415 B_AX_MAC_SEC_EN | B_AX_DISPATCHER_EN | 1416 B_AX_DLE_CPUIO_EN | B_AX_PKT_IN_EN | 1417 B_AX_DMAC_TBL_EN | B_AX_PKT_BUF_EN | 1418 B_AX_STA_SCH_EN | B_AX_TXPKT_CTRL_EN | 1419 B_AX_WD_RLS_EN | B_AX_MPDU_PROC_EN | 1420 B_AX_DMAC_CRPRT); 1421 rtw89_write32(rtwdev, R_AX_DMAC_FUNC_EN, val32); 1422 1423 val32 = (B_AX_MAC_SEC_CLK_EN | B_AX_DISPATCHER_CLK_EN | 1424 B_AX_DLE_CPUIO_CLK_EN | B_AX_PKT_IN_CLK_EN | 1425 B_AX_STA_SCH_CLK_EN | B_AX_TXPKT_CTRL_CLK_EN | 1426 B_AX_WD_RLS_CLK_EN | B_AX_BBRPT_CLK_EN); 1427 rtw89_write32(rtwdev, R_AX_DMAC_CLK_EN, val32); 1428 1429 return 0; 1430 } 1431 1432 static int chip_func_en(struct rtw89_dev *rtwdev) 1433 { 1434 enum rtw89_core_chip_id chip_id = 
rtwdev->chip->chip_id; 1435 1436 if (chip_id == RTL8852A || chip_id == RTL8852B) 1437 rtw89_write32_set(rtwdev, R_AX_SPS_DIG_ON_CTRL0, 1438 B_AX_OCP_L1_MASK); 1439 1440 return 0; 1441 } 1442 1443 static int rtw89_mac_sys_init(struct rtw89_dev *rtwdev) 1444 { 1445 int ret; 1446 1447 ret = dmac_func_en(rtwdev); 1448 if (ret) 1449 return ret; 1450 1451 ret = cmac_func_en(rtwdev, 0, true); 1452 if (ret) 1453 return ret; 1454 1455 ret = chip_func_en(rtwdev); 1456 if (ret) 1457 return ret; 1458 1459 return ret; 1460 } 1461 1462 const struct rtw89_mac_size_set rtw89_mac_size = { 1463 .hfc_preccfg_pcie = {2, 40, 0, 0, 1, 0, 0, 0}, 1464 .hfc_prec_cfg_c0 = {2, 32, 0, 0, 0, 0, 0, 0}, 1465 .hfc_prec_cfg_c2 = {0, 256, 0, 0, 0, 0, 0, 0}, 1466 /* PCIE 64 */ 1467 .wde_size0 = {RTW89_WDE_PG_64, 4095, 1,}, 1468 .wde_size0_v1 = {RTW89_WDE_PG_64, 3328, 0, 0,}, 1469 /* DLFW */ 1470 .wde_size4 = {RTW89_WDE_PG_64, 0, 4096,}, 1471 .wde_size4_v1 = {RTW89_WDE_PG_64, 0, 3328, 0,}, 1472 /* PCIE 64 */ 1473 .wde_size6 = {RTW89_WDE_PG_64, 512, 0,}, 1474 /* 8852B PCIE SCC */ 1475 .wde_size7 = {RTW89_WDE_PG_64, 510, 2,}, 1476 /* DLFW */ 1477 .wde_size9 = {RTW89_WDE_PG_64, 0, 1024,}, 1478 /* 8852C DLFW */ 1479 .wde_size18 = {RTW89_WDE_PG_64, 0, 2048,}, 1480 /* 8852C PCIE SCC */ 1481 .wde_size19 = {RTW89_WDE_PG_64, 3328, 0,}, 1482 /* PCIE */ 1483 .ple_size0 = {RTW89_PLE_PG_128, 1520, 16,}, 1484 .ple_size0_v1 = {RTW89_PLE_PG_128, 2672, 256, 212992,}, 1485 .ple_size3_v1 = {RTW89_PLE_PG_128, 2928, 0, 212992,}, 1486 /* DLFW */ 1487 .ple_size4 = {RTW89_PLE_PG_128, 64, 1472,}, 1488 /* PCIE 64 */ 1489 .ple_size6 = {RTW89_PLE_PG_128, 496, 16,}, 1490 /* DLFW */ 1491 .ple_size8 = {RTW89_PLE_PG_128, 64, 960,}, 1492 /* 8852C DLFW */ 1493 .ple_size18 = {RTW89_PLE_PG_128, 2544, 16,}, 1494 /* 8852C PCIE SCC */ 1495 .ple_size19 = {RTW89_PLE_PG_128, 1904, 16,}, 1496 /* PCIE 64 */ 1497 .wde_qt0 = {3792, 196, 0, 107,}, 1498 .wde_qt0_v1 = {3302, 6, 0, 20,}, 1499 /* DLFW */ 1500 .wde_qt4 = {0, 0, 0, 0,}, 1501 /* PCIE 64 */ 1502 .wde_qt6 = {448, 48, 0, 16,}, 1503 /* 8852B PCIE SCC */ 1504 .wde_qt7 = {446, 48, 0, 16,}, 1505 /* 8852C DLFW */ 1506 .wde_qt17 = {0, 0, 0, 0,}, 1507 /* 8852C PCIE SCC */ 1508 .wde_qt18 = {3228, 60, 0, 40,}, 1509 .ple_qt0 = {320, 0, 32, 16, 13, 13, 292, 0, 32, 18, 1, 4, 0,}, 1510 .ple_qt1 = {320, 0, 32, 16, 1944, 1944, 2223, 0, 1963, 1949, 1, 1935, 0,}, 1511 /* PCIE SCC */ 1512 .ple_qt4 = {264, 0, 16, 20, 26, 13, 356, 0, 32, 40, 8,}, 1513 /* PCIE SCC */ 1514 .ple_qt5 = {264, 0, 32, 20, 64, 13, 1101, 0, 64, 128, 120,}, 1515 .ple_qt9 = {0, 0, 32, 256, 0, 0, 0, 0, 0, 0, 1, 0, 0,}, 1516 /* DLFW */ 1517 .ple_qt13 = {0, 0, 16, 48, 0, 0, 0, 0, 0, 0, 0,}, 1518 /* PCIE 64 */ 1519 .ple_qt18 = {147, 0, 16, 20, 17, 13, 89, 0, 32, 14, 8, 0,}, 1520 /* DLFW 52C */ 1521 .ple_qt44 = {0, 0, 16, 256, 0, 0, 0, 0, 0, 0, 0, 0,}, 1522 /* DLFW 52C */ 1523 .ple_qt45 = {0, 0, 32, 256, 0, 0, 0, 0, 0, 0, 0, 0,}, 1524 /* 8852C PCIE SCC */ 1525 .ple_qt46 = {525, 0, 16, 20, 13, 13, 178, 0, 32, 62, 8, 16,}, 1526 /* 8852C PCIE SCC */ 1527 .ple_qt47 = {525, 0, 32, 20, 1034, 13, 1199, 0, 1053, 62, 160, 1037,}, 1528 /* PCIE 64 */ 1529 .ple_qt58 = {147, 0, 16, 20, 157, 13, 229, 0, 172, 14, 24, 0,}, 1530 /* 8852A PCIE WOW */ 1531 .ple_qt_52a_wow = {264, 0, 32, 20, 64, 13, 1005, 0, 64, 128, 120,}, 1532 /* 8852B PCIE WOW */ 1533 .ple_qt_52b_wow = {147, 0, 16, 20, 157, 13, 133, 0, 172, 14, 24, 0,}, 1534 /* 8851B PCIE WOW */ 1535 .ple_qt_51b_wow = {147, 0, 16, 20, 157, 13, 133, 0, 172, 14, 24, 0,}, 1536 .ple_rsvd_qt0 = {2, 112, 56, 6, 6, 6, 6, 0, 0, 62,}, 1537 
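	/* all-zero reserved-quota set; presumably for configurations that
	 * carve out no reserved PLE pages
	 */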
.ple_rsvd_qt1 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0,}, 1538 .rsvd0_size0 = {212992, 0,}, 1539 .rsvd1_size0 = {587776, 2048,}, 1540 }; 1541 EXPORT_SYMBOL(rtw89_mac_size); 1542 1543 static const struct rtw89_dle_mem *get_dle_mem_cfg(struct rtw89_dev *rtwdev, 1544 enum rtw89_qta_mode mode) 1545 { 1546 struct rtw89_mac_info *mac = &rtwdev->mac; 1547 const struct rtw89_dle_mem *cfg; 1548 1549 cfg = &rtwdev->chip->dle_mem[mode]; 1550 if (!cfg) 1551 return NULL; 1552 1553 if (cfg->mode != mode) { 1554 rtw89_warn(rtwdev, "qta mode unmatch!\n"); 1555 return NULL; 1556 } 1557 1558 mac->dle_info.rsvd_qt = cfg->rsvd_qt; 1559 mac->dle_info.ple_pg_size = cfg->ple_size->pge_size; 1560 mac->dle_info.ple_free_pg = cfg->ple_size->lnk_pge_num; 1561 mac->dle_info.qta_mode = mode; 1562 mac->dle_info.c0_rx_qta = cfg->ple_min_qt->cma0_dma; 1563 mac->dle_info.c1_rx_qta = cfg->ple_min_qt->cma1_dma; 1564 1565 return cfg; 1566 } 1567 1568 int rtw89_mac_get_dle_rsvd_qt_cfg(struct rtw89_dev *rtwdev, 1569 enum rtw89_mac_dle_rsvd_qt_type type, 1570 struct rtw89_mac_dle_rsvd_qt_cfg *cfg) 1571 { 1572 struct rtw89_dle_info *dle_info = &rtwdev->mac.dle_info; 1573 const struct rtw89_rsvd_quota *rsvd_qt = dle_info->rsvd_qt; 1574 1575 switch (type) { 1576 case DLE_RSVD_QT_MPDU_INFO: 1577 cfg->pktid = dle_info->ple_free_pg; 1578 cfg->pg_num = rsvd_qt->mpdu_info_tbl; 1579 break; 1580 case DLE_RSVD_QT_B0_CSI: 1581 cfg->pktid = dle_info->ple_free_pg + rsvd_qt->mpdu_info_tbl; 1582 cfg->pg_num = rsvd_qt->b0_csi; 1583 break; 1584 case DLE_RSVD_QT_B1_CSI: 1585 cfg->pktid = dle_info->ple_free_pg + 1586 rsvd_qt->mpdu_info_tbl + rsvd_qt->b0_csi; 1587 cfg->pg_num = rsvd_qt->b1_csi; 1588 break; 1589 case DLE_RSVD_QT_B0_LMR: 1590 cfg->pktid = dle_info->ple_free_pg + 1591 rsvd_qt->mpdu_info_tbl + rsvd_qt->b0_csi + rsvd_qt->b1_csi; 1592 cfg->pg_num = rsvd_qt->b0_lmr; 1593 break; 1594 case DLE_RSVD_QT_B1_LMR: 1595 cfg->pktid = dle_info->ple_free_pg + 1596 rsvd_qt->mpdu_info_tbl + rsvd_qt->b0_csi + rsvd_qt->b1_csi + 1597 rsvd_qt->b0_lmr; 1598 cfg->pg_num = rsvd_qt->b1_lmr; 1599 break; 1600 case DLE_RSVD_QT_B0_FTM: 1601 cfg->pktid = dle_info->ple_free_pg + 1602 rsvd_qt->mpdu_info_tbl + rsvd_qt->b0_csi + rsvd_qt->b1_csi + 1603 rsvd_qt->b0_lmr + rsvd_qt->b1_lmr; 1604 cfg->pg_num = rsvd_qt->b0_ftm; 1605 break; 1606 case DLE_RSVD_QT_B1_FTM: 1607 cfg->pktid = dle_info->ple_free_pg + 1608 rsvd_qt->mpdu_info_tbl + rsvd_qt->b0_csi + rsvd_qt->b1_csi + 1609 rsvd_qt->b0_lmr + rsvd_qt->b1_lmr + rsvd_qt->b0_ftm; 1610 cfg->pg_num = rsvd_qt->b1_ftm; 1611 break; 1612 default: 1613 return -EINVAL; 1614 } 1615 1616 cfg->size = (u32)cfg->pg_num * dle_info->ple_pg_size; 1617 1618 return 0; 1619 } 1620 1621 static bool mac_is_txq_empty(struct rtw89_dev *rtwdev) 1622 { 1623 struct rtw89_mac_dle_dfi_qempty qempty; 1624 u32 qnum, qtmp, val32, msk32; 1625 int i, j, ret; 1626 1627 qnum = rtwdev->chip->wde_qempty_acq_num; 1628 qempty.dle_type = DLE_CTRL_TYPE_WDE; 1629 1630 for (i = 0; i < qnum; i++) { 1631 qempty.grpsel = i; 1632 ret = dle_dfi_qempty(rtwdev, &qempty); 1633 if (ret) { 1634 rtw89_warn(rtwdev, "dle dfi acq empty %d\n", ret); 1635 return false; 1636 } 1637 qtmp = qempty.qempty; 1638 for (j = 0 ; j < QEMP_ACQ_GRP_MACID_NUM; j++) { 1639 val32 = FIELD_GET(QEMP_ACQ_GRP_QSEL_MASK, qtmp); 1640 if (val32 != QEMP_ACQ_GRP_QSEL_MASK) 1641 return false; 1642 qtmp >>= QEMP_ACQ_GRP_QSEL_SH; 1643 } 1644 } 1645 1646 qempty.grpsel = rtwdev->chip->wde_qempty_mgq_sel; 1647 ret = dle_dfi_qempty(rtwdev, &qempty); 1648 if (ret) { 1649 rtw89_warn(rtwdev, "dle dfi mgq empty %d\n", ret); 
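		/* a failed DFI query of the management queues is treated the
		 * same as the ACQ case above, i.e. report "not empty"
		 */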
1650 return false; 1651 } 1652 msk32 = B_CMAC0_MGQ_NORMAL | B_CMAC0_MGQ_NO_PWRSAV | B_CMAC0_CPUMGQ; 1653 if ((qempty.qempty & msk32) != msk32) 1654 return false; 1655 1656 if (rtwdev->dbcc_en) { 1657 msk32 |= B_CMAC1_MGQ_NORMAL | B_CMAC1_MGQ_NO_PWRSAV | B_CMAC1_CPUMGQ; 1658 if ((qempty.qempty & msk32) != msk32) 1659 return false; 1660 } 1661 1662 msk32 = B_AX_WDE_EMPTY_QTA_DMAC_WLAN_CPU | B_AX_WDE_EMPTY_QTA_DMAC_DATA_CPU | 1663 B_AX_PLE_EMPTY_QTA_DMAC_WLAN_CPU | B_AX_PLE_EMPTY_QTA_DMAC_H2C | 1664 B_AX_WDE_EMPTY_QUE_OTHERS | B_AX_PLE_EMPTY_QUE_DMAC_MPDU_TX | 1665 B_AX_WDE_EMPTY_QTA_DMAC_CPUIO | B_AX_PLE_EMPTY_QTA_DMAC_CPUIO | 1666 B_AX_WDE_EMPTY_QUE_DMAC_PKTIN | B_AX_WDE_EMPTY_QTA_DMAC_HIF | 1667 B_AX_PLE_EMPTY_QUE_DMAC_SEC_TX | B_AX_WDE_EMPTY_QTA_DMAC_PKTIN | 1668 B_AX_PLE_EMPTY_QTA_DMAC_B0_TXPL | B_AX_PLE_EMPTY_QTA_DMAC_B1_TXPL | 1669 B_AX_PLE_EMPTY_QTA_DMAC_MPDU_TX; 1670 val32 = rtw89_read32(rtwdev, R_AX_DLE_EMPTY0); 1671 1672 return (val32 & msk32) == msk32; 1673 } 1674 1675 static inline u32 dle_used_size(const struct rtw89_dle_size *wde, 1676 const struct rtw89_dle_size *ple) 1677 { 1678 return wde->pge_size * (wde->lnk_pge_num + wde->unlnk_pge_num) + 1679 ple->pge_size * (ple->lnk_pge_num + ple->unlnk_pge_num); 1680 } 1681 1682 static u32 dle_expected_used_size(struct rtw89_dev *rtwdev, 1683 enum rtw89_qta_mode mode) 1684 { 1685 u32 size = rtwdev->chip->fifo_size; 1686 1687 if (mode == RTW89_QTA_SCC) 1688 size -= rtwdev->chip->dle_scc_rsvd_size; 1689 1690 return size; 1691 } 1692 1693 static void dle_func_en(struct rtw89_dev *rtwdev, bool enable) 1694 { 1695 if (enable) 1696 rtw89_write32_set(rtwdev, R_AX_DMAC_FUNC_EN, 1697 B_AX_DLE_WDE_EN | B_AX_DLE_PLE_EN); 1698 else 1699 rtw89_write32_clr(rtwdev, R_AX_DMAC_FUNC_EN, 1700 B_AX_DLE_WDE_EN | B_AX_DLE_PLE_EN); 1701 } 1702 1703 static void dle_clk_en(struct rtw89_dev *rtwdev, bool enable) 1704 { 1705 u32 val = B_AX_DLE_WDE_CLK_EN | B_AX_DLE_PLE_CLK_EN; 1706 1707 if (enable) { 1708 if (rtwdev->chip->chip_id == RTL8851B) 1709 val |= B_AX_AXIDMA_CLK_EN; 1710 rtw89_write32_set(rtwdev, R_AX_DMAC_CLK_EN, val); 1711 } else { 1712 rtw89_write32_clr(rtwdev, R_AX_DMAC_CLK_EN, val); 1713 } 1714 } 1715 1716 static int dle_mix_cfg(struct rtw89_dev *rtwdev, const struct rtw89_dle_mem *cfg) 1717 { 1718 const struct rtw89_dle_size *size_cfg; 1719 u32 val; 1720 u8 bound = 0; 1721 1722 val = rtw89_read32(rtwdev, R_AX_WDE_PKTBUF_CFG); 1723 size_cfg = cfg->wde_size; 1724 1725 switch (size_cfg->pge_size) { 1726 default: 1727 case RTW89_WDE_PG_64: 1728 val = u32_replace_bits(val, S_AX_WDE_PAGE_SEL_64, 1729 B_AX_WDE_PAGE_SEL_MASK); 1730 break; 1731 case RTW89_WDE_PG_128: 1732 val = u32_replace_bits(val, S_AX_WDE_PAGE_SEL_128, 1733 B_AX_WDE_PAGE_SEL_MASK); 1734 break; 1735 case RTW89_WDE_PG_256: 1736 rtw89_err(rtwdev, "[ERR]WDE DLE doesn't support 256 byte!\n"); 1737 return -EINVAL; 1738 } 1739 1740 val = u32_replace_bits(val, bound, B_AX_WDE_START_BOUND_MASK); 1741 val = u32_replace_bits(val, size_cfg->lnk_pge_num, 1742 B_AX_WDE_FREE_PAGE_NUM_MASK); 1743 rtw89_write32(rtwdev, R_AX_WDE_PKTBUF_CFG, val); 1744 1745 val = rtw89_read32(rtwdev, R_AX_PLE_PKTBUF_CFG); 1746 bound = (size_cfg->lnk_pge_num + size_cfg->unlnk_pge_num) 1747 * size_cfg->pge_size / DLE_BOUND_UNIT; 1748 size_cfg = cfg->ple_size; 1749 1750 switch (size_cfg->pge_size) { 1751 default: 1752 case RTW89_PLE_PG_64: 1753 rtw89_err(rtwdev, "[ERR]PLE DLE doesn't support 64 byte!\n"); 1754 return -EINVAL; 1755 case RTW89_PLE_PG_128: 1756 val = u32_replace_bits(val, S_AX_PLE_PAGE_SEL_128, 1757 
B_AX_PLE_PAGE_SEL_MASK); 1758 break; 1759 case RTW89_PLE_PG_256: 1760 val = u32_replace_bits(val, S_AX_PLE_PAGE_SEL_256, 1761 B_AX_PLE_PAGE_SEL_MASK); 1762 break; 1763 } 1764 1765 val = u32_replace_bits(val, bound, B_AX_PLE_START_BOUND_MASK); 1766 val = u32_replace_bits(val, size_cfg->lnk_pge_num, 1767 B_AX_PLE_FREE_PAGE_NUM_MASK); 1768 rtw89_write32(rtwdev, R_AX_PLE_PKTBUF_CFG, val); 1769 1770 return 0; 1771 } 1772 1773 #define INVALID_QT_WCPU U16_MAX 1774 #define SET_QUOTA_VAL(_min_x, _max_x, _module, _idx) \ 1775 do { \ 1776 val = u32_encode_bits(_min_x, B_AX_ ## _module ## _MIN_SIZE_MASK) | \ 1777 u32_encode_bits(_max_x, B_AX_ ## _module ## _MAX_SIZE_MASK); \ 1778 rtw89_write32(rtwdev, \ 1779 R_AX_ ## _module ## _QTA ## _idx ## _CFG, \ 1780 val); \ 1781 } while (0) 1782 #define SET_QUOTA(_x, _module, _idx) \ 1783 SET_QUOTA_VAL(min_cfg->_x, max_cfg->_x, _module, _idx) 1784 1785 static void wde_quota_cfg(struct rtw89_dev *rtwdev, 1786 const struct rtw89_wde_quota *min_cfg, 1787 const struct rtw89_wde_quota *max_cfg, 1788 u16 ext_wde_min_qt_wcpu) 1789 { 1790 u16 min_qt_wcpu = ext_wde_min_qt_wcpu != INVALID_QT_WCPU ? 1791 ext_wde_min_qt_wcpu : min_cfg->wcpu; 1792 u32 val; 1793 1794 SET_QUOTA(hif, WDE, 0); 1795 SET_QUOTA_VAL(min_qt_wcpu, max_cfg->wcpu, WDE, 1); 1796 SET_QUOTA(pkt_in, WDE, 3); 1797 SET_QUOTA(cpu_io, WDE, 4); 1798 } 1799 1800 static void ple_quota_cfg(struct rtw89_dev *rtwdev, 1801 const struct rtw89_ple_quota *min_cfg, 1802 const struct rtw89_ple_quota *max_cfg) 1803 { 1804 u32 val; 1805 1806 SET_QUOTA(cma0_tx, PLE, 0); 1807 SET_QUOTA(cma1_tx, PLE, 1); 1808 SET_QUOTA(c2h, PLE, 2); 1809 SET_QUOTA(h2c, PLE, 3); 1810 SET_QUOTA(wcpu, PLE, 4); 1811 SET_QUOTA(mpdu_proc, PLE, 5); 1812 SET_QUOTA(cma0_dma, PLE, 6); 1813 SET_QUOTA(cma1_dma, PLE, 7); 1814 SET_QUOTA(bb_rpt, PLE, 8); 1815 SET_QUOTA(wd_rel, PLE, 9); 1816 SET_QUOTA(cpu_io, PLE, 10); 1817 if (rtwdev->chip->chip_id == RTL8852C) 1818 SET_QUOTA(tx_rpt, PLE, 11); 1819 } 1820 1821 int rtw89_mac_resize_ple_rx_quota(struct rtw89_dev *rtwdev, bool wow) 1822 { 1823 const struct rtw89_ple_quota *min_cfg, *max_cfg; 1824 const struct rtw89_dle_mem *cfg; 1825 u32 val; 1826 1827 if (rtwdev->chip->chip_id == RTL8852C) 1828 return 0; 1829 1830 if (rtwdev->mac.qta_mode != RTW89_QTA_SCC) { 1831 rtw89_err(rtwdev, "[ERR]support SCC mode only\n"); 1832 return -EINVAL; 1833 } 1834 1835 if (wow) 1836 cfg = get_dle_mem_cfg(rtwdev, RTW89_QTA_WOW); 1837 else 1838 cfg = get_dle_mem_cfg(rtwdev, RTW89_QTA_SCC); 1839 if (!cfg) { 1840 rtw89_err(rtwdev, "[ERR]get_dle_mem_cfg\n"); 1841 return -EINVAL; 1842 } 1843 1844 min_cfg = cfg->ple_min_qt; 1845 max_cfg = cfg->ple_max_qt; 1846 SET_QUOTA(cma0_dma, PLE, 6); 1847 SET_QUOTA(cma1_dma, PLE, 7); 1848 1849 return 0; 1850 } 1851 #undef SET_QUOTA 1852 1853 void rtw89_mac_hw_mgnt_sec(struct rtw89_dev *rtwdev, bool enable) 1854 { 1855 u32 msk32 = B_AX_UC_MGNT_DEC | B_AX_BMC_MGNT_DEC; 1856 1857 if (enable) 1858 rtw89_write32_set(rtwdev, R_AX_SEC_ENG_CTRL, msk32); 1859 else 1860 rtw89_write32_clr(rtwdev, R_AX_SEC_ENG_CTRL, msk32); 1861 } 1862 1863 static void dle_quota_cfg(struct rtw89_dev *rtwdev, 1864 const struct rtw89_dle_mem *cfg, 1865 u16 ext_wde_min_qt_wcpu) 1866 { 1867 wde_quota_cfg(rtwdev, cfg->wde_min_qt, cfg->wde_max_qt, ext_wde_min_qt_wcpu); 1868 ple_quota_cfg(rtwdev, cfg->ple_min_qt, cfg->ple_max_qt); 1869 } 1870 1871 static int dle_init(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode, 1872 enum rtw89_qta_mode ext_mode) 1873 { 1874 const struct rtw89_dle_mem *cfg, *ext_cfg; 1875 u16 
ext_wde_min_qt_wcpu = INVALID_QT_WCPU; 1876 int ret = 0; 1877 u32 ini; 1878 1879 ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL); 1880 if (ret) 1881 return ret; 1882 1883 cfg = get_dle_mem_cfg(rtwdev, mode); 1884 if (!cfg) { 1885 rtw89_err(rtwdev, "[ERR]get_dle_mem_cfg\n"); 1886 ret = -EINVAL; 1887 goto error; 1888 } 1889 1890 if (mode == RTW89_QTA_DLFW) { 1891 ext_cfg = get_dle_mem_cfg(rtwdev, ext_mode); 1892 if (!ext_cfg) { 1893 rtw89_err(rtwdev, "[ERR]get_dle_ext_mem_cfg %d\n", 1894 ext_mode); 1895 ret = -EINVAL; 1896 goto error; 1897 } 1898 ext_wde_min_qt_wcpu = ext_cfg->wde_min_qt->wcpu; 1899 } 1900 1901 if (dle_used_size(cfg->wde_size, cfg->ple_size) != 1902 dle_expected_used_size(rtwdev, mode)) { 1903 rtw89_err(rtwdev, "[ERR]wd/dle mem cfg\n"); 1904 ret = -EINVAL; 1905 goto error; 1906 } 1907 1908 dle_func_en(rtwdev, false); 1909 dle_clk_en(rtwdev, true); 1910 1911 ret = dle_mix_cfg(rtwdev, cfg); 1912 if (ret) { 1913 rtw89_err(rtwdev, "[ERR] dle mix cfg\n"); 1914 goto error; 1915 } 1916 dle_quota_cfg(rtwdev, cfg, ext_wde_min_qt_wcpu); 1917 1918 dle_func_en(rtwdev, true); 1919 1920 ret = read_poll_timeout(rtw89_read32, ini, 1921 (ini & WDE_MGN_INI_RDY) == WDE_MGN_INI_RDY, 1, 1922 2000, false, rtwdev, R_AX_WDE_INI_STATUS); 1923 if (ret) { 1924 rtw89_err(rtwdev, "[ERR]WDE cfg ready\n"); 1925 return ret; 1926 } 1927 1928 ret = read_poll_timeout(rtw89_read32, ini, 1929 (ini & WDE_MGN_INI_RDY) == WDE_MGN_INI_RDY, 1, 1930 2000, false, rtwdev, R_AX_PLE_INI_STATUS); 1931 if (ret) { 1932 rtw89_err(rtwdev, "[ERR]PLE cfg ready\n"); 1933 return ret; 1934 } 1935 1936 return 0; 1937 error: 1938 dle_func_en(rtwdev, false); 1939 rtw89_err(rtwdev, "[ERR]trxcfg wde 0x8900 = %x\n", 1940 rtw89_read32(rtwdev, R_AX_WDE_INI_STATUS)); 1941 rtw89_err(rtwdev, "[ERR]trxcfg ple 0x8D00 = %x\n", 1942 rtw89_read32(rtwdev, R_AX_PLE_INI_STATUS)); 1943 1944 return ret; 1945 } 1946 1947 static int preload_init_set(struct rtw89_dev *rtwdev, enum rtw89_mac_idx mac_idx, 1948 enum rtw89_qta_mode mode) 1949 { 1950 u32 reg, max_preld_size, min_rsvd_size; 1951 1952 max_preld_size = (mac_idx == RTW89_MAC_0 ? 1953 PRELD_B0_ENT_NUM : PRELD_B1_ENT_NUM) * PRELD_AMSDU_SIZE; 1954 reg = mac_idx == RTW89_MAC_0 ? 1955 R_AX_TXPKTCTL_B0_PRELD_CFG0 : R_AX_TXPKTCTL_B1_PRELD_CFG0; 1956 rtw89_write32_mask(rtwdev, reg, B_AX_B0_PRELD_USEMAXSZ_MASK, max_preld_size); 1957 rtw89_write32_set(rtwdev, reg, B_AX_B0_PRELD_FEN); 1958 1959 min_rsvd_size = PRELD_AMSDU_SIZE; 1960 reg = mac_idx == RTW89_MAC_0 ? 
1961 R_AX_TXPKTCTL_B0_PRELD_CFG1 : R_AX_TXPKTCTL_B1_PRELD_CFG1; 1962 rtw89_write32_mask(rtwdev, reg, B_AX_B0_PRELD_NXT_TXENDWIN_MASK, PRELD_NEXT_WND); 1963 rtw89_write32_mask(rtwdev, reg, B_AX_B0_PRELD_NXT_RSVMINSZ_MASK, min_rsvd_size); 1964 1965 return 0; 1966 } 1967 1968 static bool is_qta_poh(struct rtw89_dev *rtwdev) 1969 { 1970 return rtwdev->hci.type == RTW89_HCI_TYPE_PCIE; 1971 } 1972 1973 static int preload_init(struct rtw89_dev *rtwdev, enum rtw89_mac_idx mac_idx, 1974 enum rtw89_qta_mode mode) 1975 { 1976 const struct rtw89_chip_info *chip = rtwdev->chip; 1977 1978 if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B || 1979 chip->chip_id == RTL8851B || !is_qta_poh(rtwdev)) 1980 return 0; 1981 1982 return preload_init_set(rtwdev, mac_idx, mode); 1983 } 1984 1985 static bool dle_is_txq_empty(struct rtw89_dev *rtwdev) 1986 { 1987 u32 msk32; 1988 u32 val32; 1989 1990 msk32 = B_AX_WDE_EMPTY_QUE_CMAC0_ALL_AC | B_AX_WDE_EMPTY_QUE_CMAC0_MBH | 1991 B_AX_WDE_EMPTY_QUE_CMAC1_MBH | B_AX_WDE_EMPTY_QUE_CMAC0_WMM0 | 1992 B_AX_WDE_EMPTY_QUE_CMAC0_WMM1 | B_AX_WDE_EMPTY_QUE_OTHERS | 1993 B_AX_PLE_EMPTY_QUE_DMAC_MPDU_TX | B_AX_PLE_EMPTY_QTA_DMAC_H2C | 1994 B_AX_PLE_EMPTY_QUE_DMAC_SEC_TX | B_AX_WDE_EMPTY_QUE_DMAC_PKTIN | 1995 B_AX_WDE_EMPTY_QTA_DMAC_HIF | B_AX_WDE_EMPTY_QTA_DMAC_WLAN_CPU | 1996 B_AX_WDE_EMPTY_QTA_DMAC_PKTIN | B_AX_WDE_EMPTY_QTA_DMAC_CPUIO | 1997 B_AX_PLE_EMPTY_QTA_DMAC_B0_TXPL | 1998 B_AX_PLE_EMPTY_QTA_DMAC_B1_TXPL | 1999 B_AX_PLE_EMPTY_QTA_DMAC_MPDU_TX | 2000 B_AX_PLE_EMPTY_QTA_DMAC_CPUIO | 2001 B_AX_WDE_EMPTY_QTA_DMAC_DATA_CPU | 2002 B_AX_PLE_EMPTY_QTA_DMAC_WLAN_CPU; 2003 val32 = rtw89_read32(rtwdev, R_AX_DLE_EMPTY0); 2004 2005 if ((val32 & msk32) == msk32) 2006 return true; 2007 2008 return false; 2009 } 2010 2011 static void _patch_ss2f_path(struct rtw89_dev *rtwdev) 2012 { 2013 const struct rtw89_chip_info *chip = rtwdev->chip; 2014 2015 if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B || 2016 chip->chip_id == RTL8851B) 2017 return; 2018 2019 rtw89_write32_mask(rtwdev, R_AX_SS2FINFO_PATH, B_AX_SS_DEST_QUEUE_MASK, 2020 SS2F_PATH_WLCPU); 2021 } 2022 2023 static int sta_sch_init(struct rtw89_dev *rtwdev) 2024 { 2025 u32 p_val; 2026 u8 val; 2027 int ret; 2028 2029 ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL); 2030 if (ret) 2031 return ret; 2032 2033 val = rtw89_read8(rtwdev, R_AX_SS_CTRL); 2034 val |= B_AX_SS_EN; 2035 rtw89_write8(rtwdev, R_AX_SS_CTRL, val); 2036 2037 ret = read_poll_timeout(rtw89_read32, p_val, p_val & B_AX_SS_INIT_DONE_1, 2038 1, TRXCFG_WAIT_CNT, false, rtwdev, R_AX_SS_CTRL); 2039 if (ret) { 2040 rtw89_err(rtwdev, "[ERR]STA scheduler init\n"); 2041 return ret; 2042 } 2043 2044 rtw89_write32_set(rtwdev, R_AX_SS_CTRL, B_AX_SS_WARM_INIT_FLG); 2045 rtw89_write32_clr(rtwdev, R_AX_SS_CTRL, B_AX_SS_NONEMPTY_SS2FINFO_EN); 2046 2047 _patch_ss2f_path(rtwdev); 2048 2049 return 0; 2050 } 2051 2052 static int mpdu_proc_init(struct rtw89_dev *rtwdev) 2053 { 2054 int ret; 2055 2056 ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL); 2057 if (ret) 2058 return ret; 2059 2060 rtw89_write32(rtwdev, R_AX_ACTION_FWD0, TRXCFG_MPDU_PROC_ACT_FRWD); 2061 rtw89_write32(rtwdev, R_AX_TF_FWD, TRXCFG_MPDU_PROC_TF_FRWD); 2062 rtw89_write32_set(rtwdev, R_AX_MPDU_PROC, 2063 B_AX_APPEND_FCS | B_AX_A_ICV_ERR); 2064 rtw89_write32(rtwdev, R_AX_CUT_AMSDU_CTRL, TRXCFG_MPDU_PROC_CUT_CTRL); 2065 2066 return 0; 2067 } 2068 2069 static int sec_eng_init(struct rtw89_dev *rtwdev) 2070 { 2071 const struct rtw89_chip_info *chip = rtwdev->chip; 2072 u32 val 
= 0; 2073 int ret; 2074 2075 ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL); 2076 if (ret) 2077 return ret; 2078 2079 val = rtw89_read32(rtwdev, R_AX_SEC_ENG_CTRL); 2080 /* init clock */ 2081 val |= (B_AX_CLK_EN_CGCMP | B_AX_CLK_EN_WAPI | B_AX_CLK_EN_WEP_TKIP); 2082 /* init TX encryption */ 2083 val |= (B_AX_SEC_TX_ENC | B_AX_SEC_RX_DEC); 2084 val |= (B_AX_MC_DEC | B_AX_BC_DEC); 2085 if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B || 2086 chip->chip_id == RTL8851B) 2087 val &= ~B_AX_TX_PARTIAL_MODE; 2088 rtw89_write32(rtwdev, R_AX_SEC_ENG_CTRL, val); 2089 2090 /* init MIC ICV append */ 2091 val = rtw89_read32(rtwdev, R_AX_SEC_MPDU_PROC); 2092 val |= (B_AX_APPEND_ICV | B_AX_APPEND_MIC); 2093 2094 /* option init */ 2095 rtw89_write32(rtwdev, R_AX_SEC_MPDU_PROC, val); 2096 2097 if (chip->chip_id == RTL8852C) 2098 rtw89_write32_mask(rtwdev, R_AX_SEC_DEBUG1, 2099 B_AX_TX_TIMEOUT_SEL_MASK, AX_TX_TO_VAL); 2100 2101 return 0; 2102 } 2103 2104 static int dmac_init(struct rtw89_dev *rtwdev, u8 mac_idx) 2105 { 2106 int ret; 2107 2108 ret = dle_init(rtwdev, rtwdev->mac.qta_mode, RTW89_QTA_INVALID); 2109 if (ret) { 2110 rtw89_err(rtwdev, "[ERR]DLE init %d\n", ret); 2111 return ret; 2112 } 2113 2114 ret = preload_init(rtwdev, RTW89_MAC_0, rtwdev->mac.qta_mode); 2115 if (ret) { 2116 rtw89_err(rtwdev, "[ERR]preload init %d\n", ret); 2117 return ret; 2118 } 2119 2120 ret = hfc_init(rtwdev, true, true, true); 2121 if (ret) { 2122 rtw89_err(rtwdev, "[ERR]HCI FC init %d\n", ret); 2123 return ret; 2124 } 2125 2126 ret = sta_sch_init(rtwdev); 2127 if (ret) { 2128 rtw89_err(rtwdev, "[ERR]STA SCH init %d\n", ret); 2129 return ret; 2130 } 2131 2132 ret = mpdu_proc_init(rtwdev); 2133 if (ret) { 2134 rtw89_err(rtwdev, "[ERR]MPDU Proc init %d\n", ret); 2135 return ret; 2136 } 2137 2138 ret = sec_eng_init(rtwdev); 2139 if (ret) { 2140 rtw89_err(rtwdev, "[ERR]Security Engine init %d\n", ret); 2141 return ret; 2142 } 2143 2144 return ret; 2145 } 2146 2147 static int addr_cam_init(struct rtw89_dev *rtwdev, u8 mac_idx) 2148 { 2149 u32 val, reg; 2150 u16 p_val; 2151 int ret; 2152 2153 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 2154 if (ret) 2155 return ret; 2156 2157 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_ADDR_CAM_CTRL, mac_idx); 2158 2159 val = rtw89_read32(rtwdev, reg); 2160 val |= u32_encode_bits(0x7f, B_AX_ADDR_CAM_RANGE_MASK) | 2161 B_AX_ADDR_CAM_CLR | B_AX_ADDR_CAM_EN; 2162 rtw89_write32(rtwdev, reg, val); 2163 2164 ret = read_poll_timeout(rtw89_read16, p_val, !(p_val & B_AX_ADDR_CAM_CLR), 2165 1, TRXCFG_WAIT_CNT, false, rtwdev, reg); 2166 if (ret) { 2167 rtw89_err(rtwdev, "[ERR]ADDR_CAM reset\n"); 2168 return ret; 2169 } 2170 2171 return 0; 2172 } 2173 2174 static int scheduler_init(struct rtw89_dev *rtwdev, u8 mac_idx) 2175 { 2176 u32 ret; 2177 u32 reg; 2178 u32 val; 2179 2180 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 2181 if (ret) 2182 return ret; 2183 2184 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PREBKF_CFG_1, mac_idx); 2185 if (rtwdev->chip->chip_id == RTL8852C) 2186 rtw89_write32_mask(rtwdev, reg, B_AX_SIFS_MACTXEN_T1_MASK, 2187 SIFS_MACTXEN_T1_V1); 2188 else 2189 rtw89_write32_mask(rtwdev, reg, B_AX_SIFS_MACTXEN_T1_MASK, 2190 SIFS_MACTXEN_T1); 2191 2192 if (rtwdev->chip->chip_id == RTL8852B || rtwdev->chip->chip_id == RTL8851B) { 2193 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_SCH_EXT_CTRL, mac_idx); 2194 rtw89_write32_set(rtwdev, reg, B_AX_PORT_RST_TSF_ADV); 2195 } 2196 2197 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_CCA_CFG_0, mac_idx); 2198 
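	/* disable BT CCA consideration in this band's CCA config */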
rtw89_write32_clr(rtwdev, reg, B_AX_BTCCA_EN); 2199 2200 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PREBKF_CFG_0, mac_idx); 2201 if (rtwdev->chip->chip_id == RTL8852C) { 2202 val = rtw89_read32_mask(rtwdev, R_AX_SEC_ENG_CTRL, 2203 B_AX_TX_PARTIAL_MODE); 2204 if (!val) 2205 rtw89_write32_mask(rtwdev, reg, B_AX_PREBKF_TIME_MASK, 2206 SCH_PREBKF_24US); 2207 } else { 2208 rtw89_write32_mask(rtwdev, reg, B_AX_PREBKF_TIME_MASK, 2209 SCH_PREBKF_24US); 2210 } 2211 2212 return 0; 2213 } 2214 2215 int rtw89_mac_typ_fltr_opt(struct rtw89_dev *rtwdev, 2216 enum rtw89_machdr_frame_type type, 2217 enum rtw89_mac_fwd_target fwd_target, 2218 u8 mac_idx) 2219 { 2220 u32 reg; 2221 u32 val; 2222 2223 switch (fwd_target) { 2224 case RTW89_FWD_DONT_CARE: 2225 val = RX_FLTR_FRAME_DROP; 2226 break; 2227 case RTW89_FWD_TO_HOST: 2228 val = RX_FLTR_FRAME_TO_HOST; 2229 break; 2230 case RTW89_FWD_TO_WLAN_CPU: 2231 val = RX_FLTR_FRAME_TO_WLCPU; 2232 break; 2233 default: 2234 rtw89_err(rtwdev, "[ERR]set rx filter fwd target err\n"); 2235 return -EINVAL; 2236 } 2237 2238 switch (type) { 2239 case RTW89_MGNT: 2240 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_MGNT_FLTR, mac_idx); 2241 break; 2242 case RTW89_CTRL: 2243 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_CTRL_FLTR, mac_idx); 2244 break; 2245 case RTW89_DATA: 2246 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_DATA_FLTR, mac_idx); 2247 break; 2248 default: 2249 rtw89_err(rtwdev, "[ERR]set rx filter type err\n"); 2250 return -EINVAL; 2251 } 2252 rtw89_write32(rtwdev, reg, val); 2253 2254 return 0; 2255 } 2256 2257 static int rx_fltr_init(struct rtw89_dev *rtwdev, u8 mac_idx) 2258 { 2259 int ret, i; 2260 u32 mac_ftlr, plcp_ftlr; 2261 2262 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 2263 if (ret) 2264 return ret; 2265 2266 for (i = RTW89_MGNT; i <= RTW89_DATA; i++) { 2267 ret = rtw89_mac_typ_fltr_opt(rtwdev, i, RTW89_FWD_TO_HOST, 2268 mac_idx); 2269 if (ret) 2270 return ret; 2271 } 2272 mac_ftlr = rtwdev->hal.rx_fltr; 2273 plcp_ftlr = B_AX_CCK_CRC_CHK | B_AX_CCK_SIG_CHK | 2274 B_AX_LSIG_PARITY_CHK_EN | B_AX_SIGA_CRC_CHK | 2275 B_AX_VHT_SU_SIGB_CRC_CHK | B_AX_VHT_MU_SIGB_CRC_CHK | 2276 B_AX_HE_SIGB_CRC_CHK; 2277 rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(rtwdev, R_AX_RX_FLTR_OPT, mac_idx), 2278 mac_ftlr); 2279 rtw89_write16(rtwdev, rtw89_mac_reg_by_idx(rtwdev, R_AX_PLCP_HDR_FLTR, mac_idx), 2280 plcp_ftlr); 2281 2282 return 0; 2283 } 2284 2285 static void _patch_dis_resp_chk(struct rtw89_dev *rtwdev, u8 mac_idx) 2286 { 2287 u32 reg, val32; 2288 u32 b_rsp_chk_nav, b_rsp_chk_cca; 2289 2290 b_rsp_chk_nav = B_AX_RSP_CHK_TXNAV | B_AX_RSP_CHK_INTRA_NAV | 2291 B_AX_RSP_CHK_BASIC_NAV; 2292 b_rsp_chk_cca = B_AX_RSP_CHK_SEC_CCA_80 | B_AX_RSP_CHK_SEC_CCA_40 | 2293 B_AX_RSP_CHK_SEC_CCA_20 | B_AX_RSP_CHK_BTCCA | 2294 B_AX_RSP_CHK_EDCCA | B_AX_RSP_CHK_CCA; 2295 2296 switch (rtwdev->chip->chip_id) { 2297 case RTL8852A: 2298 case RTL8852B: 2299 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_RSP_CHK_SIG, mac_idx); 2300 val32 = rtw89_read32(rtwdev, reg) & ~b_rsp_chk_nav; 2301 rtw89_write32(rtwdev, reg, val32); 2302 2303 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TRXPTCL_RESP_0, mac_idx); 2304 val32 = rtw89_read32(rtwdev, reg) & ~b_rsp_chk_cca; 2305 rtw89_write32(rtwdev, reg, val32); 2306 break; 2307 default: 2308 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_RSP_CHK_SIG, mac_idx); 2309 val32 = rtw89_read32(rtwdev, reg) | b_rsp_chk_nav; 2310 rtw89_write32(rtwdev, reg, val32); 2311 2312 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TRXPTCL_RESP_0, mac_idx); 2313 val32 = rtw89_read32(rtwdev, reg) | b_rsp_chk_cca; 
2314 rtw89_write32(rtwdev, reg, val32); 2315 break; 2316 } 2317 } 2318 2319 static int cca_ctrl_init(struct rtw89_dev *rtwdev, u8 mac_idx) 2320 { 2321 u32 val, reg; 2322 int ret; 2323 2324 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 2325 if (ret) 2326 return ret; 2327 2328 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_CCA_CONTROL, mac_idx); 2329 val = rtw89_read32(rtwdev, reg); 2330 val |= (B_AX_TB_CHK_BASIC_NAV | B_AX_TB_CHK_BTCCA | 2331 B_AX_TB_CHK_EDCCA | B_AX_TB_CHK_CCA_P20 | 2332 B_AX_SIFS_CHK_BTCCA | B_AX_SIFS_CHK_CCA_P20 | 2333 B_AX_CTN_CHK_INTRA_NAV | 2334 B_AX_CTN_CHK_BASIC_NAV | B_AX_CTN_CHK_BTCCA | 2335 B_AX_CTN_CHK_EDCCA | B_AX_CTN_CHK_CCA_S80 | 2336 B_AX_CTN_CHK_CCA_S40 | B_AX_CTN_CHK_CCA_S20 | 2337 B_AX_CTN_CHK_CCA_P20); 2338 val &= ~(B_AX_TB_CHK_TX_NAV | B_AX_TB_CHK_CCA_S80 | 2339 B_AX_TB_CHK_CCA_S40 | B_AX_TB_CHK_CCA_S20 | 2340 B_AX_SIFS_CHK_CCA_S80 | B_AX_SIFS_CHK_CCA_S40 | 2341 B_AX_SIFS_CHK_CCA_S20 | B_AX_CTN_CHK_TXNAV | 2342 B_AX_SIFS_CHK_EDCCA); 2343 2344 rtw89_write32(rtwdev, reg, val); 2345 2346 _patch_dis_resp_chk(rtwdev, mac_idx); 2347 2348 return 0; 2349 } 2350 2351 static int nav_ctrl_init(struct rtw89_dev *rtwdev) 2352 { 2353 rtw89_write32_set(rtwdev, R_AX_WMAC_NAV_CTL, B_AX_WMAC_PLCP_UP_NAV_EN | 2354 B_AX_WMAC_TF_UP_NAV_EN | 2355 B_AX_WMAC_NAV_UPPER_EN); 2356 rtw89_write32_mask(rtwdev, R_AX_WMAC_NAV_CTL, B_AX_WMAC_NAV_UPPER_MASK, NAV_25MS); 2357 2358 return 0; 2359 } 2360 2361 static int spatial_reuse_init(struct rtw89_dev *rtwdev, u8 mac_idx) 2362 { 2363 u32 reg; 2364 int ret; 2365 2366 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 2367 if (ret) 2368 return ret; 2369 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_RX_SR_CTRL, mac_idx); 2370 rtw89_write8_clr(rtwdev, reg, B_AX_SR_EN); 2371 2372 return 0; 2373 } 2374 2375 static int tmac_init(struct rtw89_dev *rtwdev, u8 mac_idx) 2376 { 2377 u32 reg; 2378 int ret; 2379 2380 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 2381 if (ret) 2382 return ret; 2383 2384 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_MAC_LOOPBACK, mac_idx); 2385 rtw89_write32_clr(rtwdev, reg, B_AX_MACLBK_EN); 2386 2387 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TCR0, mac_idx); 2388 rtw89_write32_mask(rtwdev, reg, B_AX_TCR_UDF_THSD_MASK, TCR_UDF_THSD); 2389 2390 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TXD_FIFO_CTRL, mac_idx); 2391 rtw89_write32_mask(rtwdev, reg, B_AX_TXDFIFO_HIGH_MCS_THRE_MASK, TXDFIFO_HIGH_MCS_THRE); 2392 rtw89_write32_mask(rtwdev, reg, B_AX_TXDFIFO_LOW_MCS_THRE_MASK, TXDFIFO_LOW_MCS_THRE); 2393 2394 return 0; 2395 } 2396 2397 static int trxptcl_init(struct rtw89_dev *rtwdev, u8 mac_idx) 2398 { 2399 const struct rtw89_chip_info *chip = rtwdev->chip; 2400 const struct rtw89_rrsr_cfgs *rrsr = chip->rrsr_cfgs; 2401 u32 reg, val, sifs; 2402 int ret; 2403 2404 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 2405 if (ret) 2406 return ret; 2407 2408 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TRXPTCL_RESP_0, mac_idx); 2409 val = rtw89_read32(rtwdev, reg); 2410 val &= ~B_AX_WMAC_SPEC_SIFS_CCK_MASK; 2411 val |= FIELD_PREP(B_AX_WMAC_SPEC_SIFS_CCK_MASK, WMAC_SPEC_SIFS_CCK); 2412 2413 switch (rtwdev->chip->chip_id) { 2414 case RTL8852A: 2415 sifs = WMAC_SPEC_SIFS_OFDM_52A; 2416 break; 2417 case RTL8852B: 2418 sifs = WMAC_SPEC_SIFS_OFDM_52B; 2419 break; 2420 default: 2421 sifs = WMAC_SPEC_SIFS_OFDM_52C; 2422 break; 2423 } 2424 val &= ~B_AX_WMAC_SPEC_SIFS_OFDM_MASK; 2425 val |= FIELD_PREP(B_AX_WMAC_SPEC_SIFS_OFDM_MASK, sifs); 2426 rtw89_write32(rtwdev, reg, val); 2427 2428 reg = 
rtw89_mac_reg_by_idx(rtwdev, R_AX_RXTRIG_TEST_USER_2, mac_idx); 2429 rtw89_write32_set(rtwdev, reg, B_AX_RXTRIG_FCSCHK_EN); 2430 2431 reg = rtw89_mac_reg_by_idx(rtwdev, rrsr->ref_rate.addr, mac_idx); 2432 rtw89_write32_mask(rtwdev, reg, rrsr->ref_rate.mask, rrsr->ref_rate.data); 2433 reg = rtw89_mac_reg_by_idx(rtwdev, rrsr->rsc.addr, mac_idx); 2434 rtw89_write32_mask(rtwdev, reg, rrsr->rsc.mask, rrsr->rsc.data); 2435 2436 return 0; 2437 } 2438 2439 static void rst_bacam(struct rtw89_dev *rtwdev) 2440 { 2441 u32 val32; 2442 int ret; 2443 2444 rtw89_write32_mask(rtwdev, R_AX_RESPBA_CAM_CTRL, B_AX_BACAM_RST_MASK, 2445 S_AX_BACAM_RST_ALL); 2446 2447 ret = read_poll_timeout_atomic(rtw89_read32_mask, val32, val32 == 0, 2448 1, 1000, false, 2449 rtwdev, R_AX_RESPBA_CAM_CTRL, B_AX_BACAM_RST_MASK); 2450 if (ret) 2451 rtw89_warn(rtwdev, "failed to reset BA CAM\n"); 2452 } 2453 2454 static int rmac_init(struct rtw89_dev *rtwdev, u8 mac_idx) 2455 { 2456 #define TRXCFG_RMAC_CCA_TO 32 2457 #define TRXCFG_RMAC_DATA_TO 15 2458 #define RX_MAX_LEN_UNIT 512 2459 #define PLD_RLS_MAX_PG 127 2460 #define RX_SPEC_MAX_LEN (11454 + RX_MAX_LEN_UNIT) 2461 int ret; 2462 u32 reg, rx_max_len, rx_qta; 2463 u16 val; 2464 2465 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 2466 if (ret) 2467 return ret; 2468 2469 if (mac_idx == RTW89_MAC_0) 2470 rst_bacam(rtwdev); 2471 2472 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_RESPBA_CAM_CTRL, mac_idx); 2473 rtw89_write8_set(rtwdev, reg, B_AX_SSN_SEL); 2474 2475 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_DLK_PROTECT_CTL, mac_idx); 2476 val = rtw89_read16(rtwdev, reg); 2477 val = u16_replace_bits(val, TRXCFG_RMAC_DATA_TO, 2478 B_AX_RX_DLK_DATA_TIME_MASK); 2479 val = u16_replace_bits(val, TRXCFG_RMAC_CCA_TO, 2480 B_AX_RX_DLK_CCA_TIME_MASK); 2481 rtw89_write16(rtwdev, reg, val); 2482 2483 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_RCR, mac_idx); 2484 rtw89_write8_mask(rtwdev, reg, B_AX_CH_EN_MASK, 0x1); 2485 2486 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_RX_FLTR_OPT, mac_idx); 2487 if (mac_idx == RTW89_MAC_0) 2488 rx_qta = rtwdev->mac.dle_info.c0_rx_qta; 2489 else 2490 rx_qta = rtwdev->mac.dle_info.c1_rx_qta; 2491 rx_qta = min_t(u32, rx_qta, PLD_RLS_MAX_PG); 2492 rx_max_len = rx_qta * rtwdev->mac.dle_info.ple_pg_size; 2493 rx_max_len = min_t(u32, rx_max_len, RX_SPEC_MAX_LEN); 2494 rx_max_len /= RX_MAX_LEN_UNIT; 2495 rtw89_write32_mask(rtwdev, reg, B_AX_RX_MPDU_MAX_LEN_MASK, rx_max_len); 2496 2497 if (rtwdev->chip->chip_id == RTL8852A && 2498 rtwdev->hal.cv == CHIP_CBV) { 2499 rtw89_write16_mask(rtwdev, 2500 rtw89_mac_reg_by_idx(rtwdev, R_AX_DLK_PROTECT_CTL, mac_idx), 2501 B_AX_RX_DLK_CCA_TIME_MASK, 0); 2502 rtw89_write16_set(rtwdev, rtw89_mac_reg_by_idx(rtwdev, R_AX_RCR, mac_idx), 2503 BIT(12)); 2504 } 2505 2506 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PLCP_HDR_FLTR, mac_idx); 2507 rtw89_write8_clr(rtwdev, reg, B_AX_VHT_SU_SIGB_CRC_CHK); 2508 2509 return ret; 2510 } 2511 2512 static int cmac_com_init(struct rtw89_dev *rtwdev, u8 mac_idx) 2513 { 2514 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2515 u32 val, reg; 2516 int ret; 2517 2518 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 2519 if (ret) 2520 return ret; 2521 2522 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TX_SUB_CARRIER_VALUE, mac_idx); 2523 val = rtw89_read32(rtwdev, reg); 2524 val = u32_replace_bits(val, 0, B_AX_TXSC_20M_MASK); 2525 val = u32_replace_bits(val, 0, B_AX_TXSC_40M_MASK); 2526 val = u32_replace_bits(val, 0, B_AX_TXSC_80M_MASK); 2527 rtw89_write32(rtwdev, reg, val); 2528 2529 if 
(chip_id == RTL8852A || chip_id == RTL8852B) { 2530 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PTCL_RRSR1, mac_idx); 2531 rtw89_write32_mask(rtwdev, reg, B_AX_RRSR_RATE_EN_MASK, RRSR_OFDM_CCK_EN); 2532 } 2533 2534 return 0; 2535 } 2536 2537 static bool is_qta_dbcc(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode) 2538 { 2539 const struct rtw89_dle_mem *cfg; 2540 2541 cfg = get_dle_mem_cfg(rtwdev, mode); 2542 if (!cfg) { 2543 rtw89_err(rtwdev, "[ERR]get_dle_mem_cfg\n"); 2544 return false; 2545 } 2546 2547 return (cfg->ple_min_qt->cma1_dma && cfg->ple_max_qt->cma1_dma); 2548 } 2549 2550 static int ptcl_init(struct rtw89_dev *rtwdev, u8 mac_idx) 2551 { 2552 u32 val, reg; 2553 int ret; 2554 2555 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 2556 if (ret) 2557 return ret; 2558 2559 if (rtwdev->hci.type == RTW89_HCI_TYPE_PCIE) { 2560 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_SIFS_SETTING, mac_idx); 2561 val = rtw89_read32(rtwdev, reg); 2562 val = u32_replace_bits(val, S_AX_CTS2S_TH_1K, 2563 B_AX_HW_CTS2SELF_PKT_LEN_TH_MASK); 2564 val = u32_replace_bits(val, S_AX_CTS2S_TH_SEC_256B, 2565 B_AX_HW_CTS2SELF_PKT_LEN_TH_TWW_MASK); 2566 val |= B_AX_HW_CTS2SELF_EN; 2567 rtw89_write32(rtwdev, reg, val); 2568 2569 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PTCL_FSM_MON, mac_idx); 2570 val = rtw89_read32(rtwdev, reg); 2571 val = u32_replace_bits(val, S_AX_PTCL_TO_2MS, B_AX_PTCL_TX_ARB_TO_THR_MASK); 2572 val &= ~B_AX_PTCL_TX_ARB_TO_MODE; 2573 rtw89_write32(rtwdev, reg, val); 2574 } 2575 2576 if (mac_idx == RTW89_MAC_0) { 2577 rtw89_write8_set(rtwdev, R_AX_PTCL_COMMON_SETTING_0, 2578 B_AX_CMAC_TX_MODE_0 | B_AX_CMAC_TX_MODE_1); 2579 rtw89_write8_clr(rtwdev, R_AX_PTCL_COMMON_SETTING_0, 2580 B_AX_PTCL_TRIGGER_SS_EN_0 | 2581 B_AX_PTCL_TRIGGER_SS_EN_1 | 2582 B_AX_PTCL_TRIGGER_SS_EN_UL); 2583 rtw89_write8_mask(rtwdev, R_AX_PTCLRPT_FULL_HDL, 2584 B_AX_SPE_RPT_PATH_MASK, FWD_TO_WLCPU); 2585 } else if (mac_idx == RTW89_MAC_1) { 2586 rtw89_write8_mask(rtwdev, R_AX_PTCLRPT_FULL_HDL_C1, 2587 B_AX_SPE_RPT_PATH_MASK, FWD_TO_WLCPU); 2588 } 2589 2590 return 0; 2591 } 2592 2593 static int cmac_dma_init(struct rtw89_dev *rtwdev, u8 mac_idx) 2594 { 2595 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2596 u32 reg; 2597 int ret; 2598 2599 if (chip_id != RTL8852B) 2600 return 0; 2601 2602 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 2603 if (ret) 2604 return ret; 2605 2606 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_RXDMA_CTRL_0, mac_idx); 2607 rtw89_write8_clr(rtwdev, reg, RX_FULL_MODE); 2608 2609 return 0; 2610 } 2611 2612 static int cmac_init(struct rtw89_dev *rtwdev, u8 mac_idx) 2613 { 2614 int ret; 2615 2616 ret = scheduler_init(rtwdev, mac_idx); 2617 if (ret) { 2618 rtw89_err(rtwdev, "[ERR]CMAC%d SCH init %d\n", mac_idx, ret); 2619 return ret; 2620 } 2621 2622 ret = addr_cam_init(rtwdev, mac_idx); 2623 if (ret) { 2624 rtw89_err(rtwdev, "[ERR]CMAC%d ADDR_CAM reset %d\n", mac_idx, 2625 ret); 2626 return ret; 2627 } 2628 2629 ret = rx_fltr_init(rtwdev, mac_idx); 2630 if (ret) { 2631 rtw89_err(rtwdev, "[ERR]CMAC%d RX filter init %d\n", mac_idx, 2632 ret); 2633 return ret; 2634 } 2635 2636 ret = cca_ctrl_init(rtwdev, mac_idx); 2637 if (ret) { 2638 rtw89_err(rtwdev, "[ERR]CMAC%d CCA CTRL init %d\n", mac_idx, 2639 ret); 2640 return ret; 2641 } 2642 2643 ret = nav_ctrl_init(rtwdev); 2644 if (ret) { 2645 rtw89_err(rtwdev, "[ERR]CMAC%d NAV CTRL init %d\n", mac_idx, 2646 ret); 2647 return ret; 2648 } 2649 2650 ret = spatial_reuse_init(rtwdev, mac_idx); 2651 if (ret) { 2652 rtw89_err(rtwdev, 
"[ERR]CMAC%d Spatial Reuse init %d\n", 2653 mac_idx, ret); 2654 return ret; 2655 } 2656 2657 ret = tmac_init(rtwdev, mac_idx); 2658 if (ret) { 2659 rtw89_err(rtwdev, "[ERR]CMAC%d TMAC init %d\n", mac_idx, ret); 2660 return ret; 2661 } 2662 2663 ret = trxptcl_init(rtwdev, mac_idx); 2664 if (ret) { 2665 rtw89_err(rtwdev, "[ERR]CMAC%d TRXPTCL init %d\n", mac_idx, ret); 2666 return ret; 2667 } 2668 2669 ret = rmac_init(rtwdev, mac_idx); 2670 if (ret) { 2671 rtw89_err(rtwdev, "[ERR]CMAC%d RMAC init %d\n", mac_idx, ret); 2672 return ret; 2673 } 2674 2675 ret = cmac_com_init(rtwdev, mac_idx); 2676 if (ret) { 2677 rtw89_err(rtwdev, "[ERR]CMAC%d Com init %d\n", mac_idx, ret); 2678 return ret; 2679 } 2680 2681 ret = ptcl_init(rtwdev, mac_idx); 2682 if (ret) { 2683 rtw89_err(rtwdev, "[ERR]CMAC%d PTCL init %d\n", mac_idx, ret); 2684 return ret; 2685 } 2686 2687 ret = cmac_dma_init(rtwdev, mac_idx); 2688 if (ret) { 2689 rtw89_err(rtwdev, "[ERR]CMAC%d DMA init %d\n", mac_idx, ret); 2690 return ret; 2691 } 2692 2693 return ret; 2694 } 2695 2696 static int rtw89_mac_read_phycap(struct rtw89_dev *rtwdev, 2697 struct rtw89_mac_c2h_info *c2h_info) 2698 { 2699 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 2700 struct rtw89_mac_h2c_info h2c_info = {0}; 2701 u32 ret; 2702 2703 mac->cnv_efuse_state(rtwdev, false); 2704 2705 h2c_info.id = RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE; 2706 h2c_info.content_len = 0; 2707 2708 ret = rtw89_fw_msg_reg(rtwdev, &h2c_info, c2h_info); 2709 if (ret) 2710 goto out; 2711 2712 if (c2h_info->id != RTW89_FWCMD_C2HREG_FUNC_PHY_CAP) 2713 ret = -EINVAL; 2714 2715 out: 2716 mac->cnv_efuse_state(rtwdev, true); 2717 2718 return ret; 2719 } 2720 2721 int rtw89_mac_setup_phycap(struct rtw89_dev *rtwdev) 2722 { 2723 struct rtw89_efuse *efuse = &rtwdev->efuse; 2724 struct rtw89_hal *hal = &rtwdev->hal; 2725 const struct rtw89_chip_info *chip = rtwdev->chip; 2726 struct rtw89_mac_c2h_info c2h_info = {0}; 2727 const struct rtw89_c2hreg_phycap *phycap; 2728 u8 tx_nss; 2729 u8 rx_nss; 2730 u8 tx_ant; 2731 u8 rx_ant; 2732 u32 ret; 2733 2734 ret = rtw89_mac_read_phycap(rtwdev, &c2h_info); 2735 if (ret) 2736 return ret; 2737 2738 phycap = &c2h_info.u.phycap; 2739 2740 tx_nss = u32_get_bits(phycap->w1, RTW89_C2HREG_PHYCAP_W1_TX_NSS); 2741 rx_nss = u32_get_bits(phycap->w0, RTW89_C2HREG_PHYCAP_W0_RX_NSS); 2742 tx_ant = u32_get_bits(phycap->w3, RTW89_C2HREG_PHYCAP_W3_ANT_TX_NUM); 2743 rx_ant = u32_get_bits(phycap->w3, RTW89_C2HREG_PHYCAP_W3_ANT_RX_NUM); 2744 2745 hal->tx_nss = tx_nss ? min_t(u8, tx_nss, chip->tx_nss) : chip->tx_nss; 2746 hal->rx_nss = rx_nss ? 
min_t(u8, rx_nss, chip->rx_nss) : chip->rx_nss; 2747 2748 if (tx_ant == 1) 2749 hal->antenna_tx = RF_B; 2750 if (rx_ant == 1) 2751 hal->antenna_rx = RF_B; 2752 2753 if (tx_nss == 1 && tx_ant == 2 && rx_ant == 2) { 2754 hal->antenna_tx = RF_B; 2755 hal->tx_path_diversity = true; 2756 } 2757 2758 if (chip->rf_path_num == 1) { 2759 hal->antenna_tx = RF_A; 2760 hal->antenna_rx = RF_A; 2761 if ((efuse->rfe_type % 3) == 2) 2762 hal->ant_diversity = true; 2763 } 2764 2765 rtw89_debug(rtwdev, RTW89_DBG_FW, 2766 "phycap hal/phy/chip: tx_nss=0x%x/0x%x/0x%x rx_nss=0x%x/0x%x/0x%x\n", 2767 hal->tx_nss, tx_nss, chip->tx_nss, 2768 hal->rx_nss, rx_nss, chip->rx_nss); 2769 rtw89_debug(rtwdev, RTW89_DBG_FW, 2770 "ant num/bitmap: tx=%d/0x%x rx=%d/0x%x\n", 2771 tx_ant, hal->antenna_tx, rx_ant, hal->antenna_rx); 2772 rtw89_debug(rtwdev, RTW89_DBG_FW, "TX path diversity=%d\n", hal->tx_path_diversity); 2773 rtw89_debug(rtwdev, RTW89_DBG_FW, "Antenna diversity=%d\n", hal->ant_diversity); 2774 2775 return 0; 2776 } 2777 2778 static int rtw89_hw_sch_tx_en_h2c(struct rtw89_dev *rtwdev, u8 band, 2779 u16 tx_en_u16, u16 mask_u16) 2780 { 2781 u32 ret; 2782 struct rtw89_mac_c2h_info c2h_info = {0}; 2783 struct rtw89_mac_h2c_info h2c_info = {0}; 2784 struct rtw89_h2creg_sch_tx_en *sch_tx_en = &h2c_info.u.sch_tx_en; 2785 2786 h2c_info.id = RTW89_FWCMD_H2CREG_FUNC_SCH_TX_EN; 2787 h2c_info.content_len = sizeof(*sch_tx_en) - RTW89_H2CREG_HDR_LEN; 2788 2789 u32p_replace_bits(&sch_tx_en->w0, tx_en_u16, RTW89_H2CREG_SCH_TX_EN_W0_EN); 2790 u32p_replace_bits(&sch_tx_en->w1, mask_u16, RTW89_H2CREG_SCH_TX_EN_W1_MASK); 2791 u32p_replace_bits(&sch_tx_en->w1, band, RTW89_H2CREG_SCH_TX_EN_W1_BAND); 2792 2793 ret = rtw89_fw_msg_reg(rtwdev, &h2c_info, &c2h_info); 2794 if (ret) 2795 return ret; 2796 2797 if (c2h_info.id != RTW89_FWCMD_C2HREG_FUNC_TX_PAUSE_RPT) 2798 return -EINVAL; 2799 2800 return 0; 2801 } 2802 2803 static int rtw89_set_hw_sch_tx_en(struct rtw89_dev *rtwdev, u8 mac_idx, 2804 u16 tx_en, u16 tx_en_mask) 2805 { 2806 u32 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_CTN_TXEN, mac_idx); 2807 u16 val; 2808 int ret; 2809 2810 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 2811 if (ret) 2812 return ret; 2813 2814 if (test_bit(RTW89_FLAG_FW_RDY, rtwdev->flags)) 2815 return rtw89_hw_sch_tx_en_h2c(rtwdev, mac_idx, 2816 tx_en, tx_en_mask); 2817 2818 val = rtw89_read16(rtwdev, reg); 2819 val = (val & ~tx_en_mask) | (tx_en & tx_en_mask); 2820 rtw89_write16(rtwdev, reg, val); 2821 2822 return 0; 2823 } 2824 2825 static int rtw89_set_hw_sch_tx_en_v1(struct rtw89_dev *rtwdev, u8 mac_idx, 2826 u32 tx_en, u32 tx_en_mask) 2827 { 2828 u32 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_CTN_DRV_TXEN, mac_idx); 2829 u32 val; 2830 int ret; 2831 2832 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 2833 if (ret) 2834 return ret; 2835 2836 val = rtw89_read32(rtwdev, reg); 2837 val = (val & ~tx_en_mask) | (tx_en & tx_en_mask); 2838 rtw89_write32(rtwdev, reg, val); 2839 2840 return 0; 2841 } 2842 2843 int rtw89_mac_stop_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, 2844 u32 *tx_en, enum rtw89_sch_tx_sel sel) 2845 { 2846 int ret; 2847 2848 *tx_en = rtw89_read16(rtwdev, 2849 rtw89_mac_reg_by_idx(rtwdev, R_AX_CTN_TXEN, mac_idx)); 2850 2851 switch (sel) { 2852 case RTW89_SCH_TX_SEL_ALL: 2853 ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, 0, 2854 B_AX_CTN_TXEN_ALL_MASK); 2855 if (ret) 2856 return ret; 2857 break; 2858 case RTW89_SCH_TX_SEL_HIQ: 2859 ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, 2860 0, B_AX_CTN_TXEN_HGQ); 2861 if (ret) 
2862 return ret; 2863 break; 2864 case RTW89_SCH_TX_SEL_MG0: 2865 ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, 2866 0, B_AX_CTN_TXEN_MGQ); 2867 if (ret) 2868 return ret; 2869 break; 2870 case RTW89_SCH_TX_SEL_MACID: 2871 ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, 0, 2872 B_AX_CTN_TXEN_ALL_MASK); 2873 if (ret) 2874 return ret; 2875 break; 2876 default: 2877 return 0; 2878 } 2879 2880 return 0; 2881 } 2882 EXPORT_SYMBOL(rtw89_mac_stop_sch_tx); 2883 2884 int rtw89_mac_stop_sch_tx_v1(struct rtw89_dev *rtwdev, u8 mac_idx, 2885 u32 *tx_en, enum rtw89_sch_tx_sel sel) 2886 { 2887 int ret; 2888 2889 *tx_en = rtw89_read32(rtwdev, 2890 rtw89_mac_reg_by_idx(rtwdev, R_AX_CTN_DRV_TXEN, mac_idx)); 2891 2892 switch (sel) { 2893 case RTW89_SCH_TX_SEL_ALL: 2894 ret = rtw89_set_hw_sch_tx_en_v1(rtwdev, mac_idx, 0, 2895 B_AX_CTN_TXEN_ALL_MASK_V1); 2896 if (ret) 2897 return ret; 2898 break; 2899 case RTW89_SCH_TX_SEL_HIQ: 2900 ret = rtw89_set_hw_sch_tx_en_v1(rtwdev, mac_idx, 2901 0, B_AX_CTN_TXEN_HGQ); 2902 if (ret) 2903 return ret; 2904 break; 2905 case RTW89_SCH_TX_SEL_MG0: 2906 ret = rtw89_set_hw_sch_tx_en_v1(rtwdev, mac_idx, 2907 0, B_AX_CTN_TXEN_MGQ); 2908 if (ret) 2909 return ret; 2910 break; 2911 case RTW89_SCH_TX_SEL_MACID: 2912 ret = rtw89_set_hw_sch_tx_en_v1(rtwdev, mac_idx, 0, 2913 B_AX_CTN_TXEN_ALL_MASK_V1); 2914 if (ret) 2915 return ret; 2916 break; 2917 default: 2918 return 0; 2919 } 2920 2921 return 0; 2922 } 2923 EXPORT_SYMBOL(rtw89_mac_stop_sch_tx_v1); 2924 2925 int rtw89_mac_resume_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en) 2926 { 2927 int ret; 2928 2929 ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, tx_en, B_AX_CTN_TXEN_ALL_MASK); 2930 if (ret) 2931 return ret; 2932 2933 return 0; 2934 } 2935 EXPORT_SYMBOL(rtw89_mac_resume_sch_tx); 2936 2937 int rtw89_mac_resume_sch_tx_v1(struct rtw89_dev *rtwdev, u8 mac_idx, u32 tx_en) 2938 { 2939 int ret; 2940 2941 ret = rtw89_set_hw_sch_tx_en_v1(rtwdev, mac_idx, tx_en, 2942 B_AX_CTN_TXEN_ALL_MASK_V1); 2943 if (ret) 2944 return ret; 2945 2946 return 0; 2947 } 2948 EXPORT_SYMBOL(rtw89_mac_resume_sch_tx_v1); 2949 2950 int rtw89_mac_dle_buf_req(struct rtw89_dev *rtwdev, u16 buf_len, bool wd, u16 *pkt_id) 2951 { 2952 u32 val, reg; 2953 int ret; 2954 2955 reg = wd ? R_AX_WD_BUF_REQ : R_AX_PL_BUF_REQ; 2956 val = buf_len; 2957 val |= B_AX_WD_BUF_REQ_EXEC; 2958 rtw89_write32(rtwdev, reg, val); 2959 2960 reg = wd ? R_AX_WD_BUF_STATUS : R_AX_PL_BUF_STATUS; 2961 2962 ret = read_poll_timeout(rtw89_read32, val, val & B_AX_WD_BUF_STAT_DONE, 2963 1, 2000, false, rtwdev, reg); 2964 if (ret) 2965 return ret; 2966 2967 *pkt_id = FIELD_GET(B_AX_WD_BUF_STAT_PKTID_MASK, val); 2968 if (*pkt_id == S_WD_BUF_STAT_PKTID_INVALID) 2969 return -ENOENT; 2970 2971 return 0; 2972 } 2973 2974 int rtw89_mac_set_cpuio(struct rtw89_dev *rtwdev, 2975 struct rtw89_cpuio_ctrl *ctrl_para, bool wd) 2976 { 2977 u32 val, cmd_type, reg; 2978 int ret; 2979 2980 cmd_type = ctrl_para->cmd_type; 2981 2982 reg = wd ? R_AX_WD_CPUQ_OP_2 : R_AX_PL_CPUQ_OP_2; 2983 val = 0; 2984 val = u32_replace_bits(val, ctrl_para->start_pktid, 2985 B_AX_WD_CPUQ_OP_STRT_PKTID_MASK); 2986 val = u32_replace_bits(val, ctrl_para->end_pktid, 2987 B_AX_WD_CPUQ_OP_END_PKTID_MASK); 2988 rtw89_write32(rtwdev, reg, val); 2989 2990 reg = wd ? 
R_AX_WD_CPUQ_OP_1 : R_AX_PL_CPUQ_OP_1; 2991 val = 0; 2992 val = u32_replace_bits(val, ctrl_para->src_pid, 2993 B_AX_CPUQ_OP_SRC_PID_MASK); 2994 val = u32_replace_bits(val, ctrl_para->src_qid, 2995 B_AX_CPUQ_OP_SRC_QID_MASK); 2996 val = u32_replace_bits(val, ctrl_para->dst_pid, 2997 B_AX_CPUQ_OP_DST_PID_MASK); 2998 val = u32_replace_bits(val, ctrl_para->dst_qid, 2999 B_AX_CPUQ_OP_DST_QID_MASK); 3000 rtw89_write32(rtwdev, reg, val); 3001 3002 reg = wd ? R_AX_WD_CPUQ_OP_0 : R_AX_PL_CPUQ_OP_0; 3003 val = 0; 3004 val = u32_replace_bits(val, cmd_type, 3005 B_AX_CPUQ_OP_CMD_TYPE_MASK); 3006 val = u32_replace_bits(val, ctrl_para->macid, 3007 B_AX_CPUQ_OP_MACID_MASK); 3008 val = u32_replace_bits(val, ctrl_para->pkt_num, 3009 B_AX_CPUQ_OP_PKTNUM_MASK); 3010 val |= B_AX_WD_CPUQ_OP_EXEC; 3011 rtw89_write32(rtwdev, reg, val); 3012 3013 reg = wd ? R_AX_WD_CPUQ_OP_STATUS : R_AX_PL_CPUQ_OP_STATUS; 3014 3015 ret = read_poll_timeout(rtw89_read32, val, val & B_AX_WD_CPUQ_OP_STAT_DONE, 3016 1, 2000, false, rtwdev, reg); 3017 if (ret) 3018 return ret; 3019 3020 if (cmd_type == CPUIO_OP_CMD_GET_1ST_PID || 3021 cmd_type == CPUIO_OP_CMD_GET_NEXT_PID) 3022 ctrl_para->pktid = FIELD_GET(B_AX_WD_CPUQ_OP_PKTID_MASK, val); 3023 3024 return 0; 3025 } 3026 3027 static int dle_quota_change(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode) 3028 { 3029 const struct rtw89_dle_mem *cfg; 3030 struct rtw89_cpuio_ctrl ctrl_para = {0}; 3031 u16 pkt_id; 3032 int ret; 3033 3034 cfg = get_dle_mem_cfg(rtwdev, mode); 3035 if (!cfg) { 3036 rtw89_err(rtwdev, "[ERR]wd/dle mem cfg\n"); 3037 return -EINVAL; 3038 } 3039 3040 if (dle_used_size(cfg->wde_size, cfg->ple_size) != 3041 dle_expected_used_size(rtwdev, mode)) { 3042 rtw89_err(rtwdev, "[ERR]wd/dle mem cfg\n"); 3043 return -EINVAL; 3044 } 3045 3046 dle_quota_cfg(rtwdev, cfg, INVALID_QT_WCPU); 3047 3048 ret = rtw89_mac_dle_buf_req(rtwdev, 0x20, true, &pkt_id); 3049 if (ret) { 3050 rtw89_err(rtwdev, "[ERR]WDE DLE buf req\n"); 3051 return ret; 3052 } 3053 3054 ctrl_para.cmd_type = CPUIO_OP_CMD_ENQ_TO_HEAD; 3055 ctrl_para.start_pktid = pkt_id; 3056 ctrl_para.end_pktid = pkt_id; 3057 ctrl_para.pkt_num = 0; 3058 ctrl_para.dst_pid = WDE_DLE_PORT_ID_WDRLS; 3059 ctrl_para.dst_qid = WDE_DLE_QUEID_NO_REPORT; 3060 ret = rtw89_mac_set_cpuio(rtwdev, &ctrl_para, true); 3061 if (ret) { 3062 rtw89_err(rtwdev, "[ERR]WDE DLE enqueue to head\n"); 3063 return -EFAULT; 3064 } 3065 3066 ret = rtw89_mac_dle_buf_req(rtwdev, 0x20, false, &pkt_id); 3067 if (ret) { 3068 rtw89_err(rtwdev, "[ERR]PLE DLE buf req\n"); 3069 return ret; 3070 } 3071 3072 ctrl_para.cmd_type = CPUIO_OP_CMD_ENQ_TO_HEAD; 3073 ctrl_para.start_pktid = pkt_id; 3074 ctrl_para.end_pktid = pkt_id; 3075 ctrl_para.pkt_num = 0; 3076 ctrl_para.dst_pid = PLE_DLE_PORT_ID_PLRLS; 3077 ctrl_para.dst_qid = PLE_DLE_QUEID_NO_REPORT; 3078 ret = rtw89_mac_set_cpuio(rtwdev, &ctrl_para, false); 3079 if (ret) { 3080 rtw89_err(rtwdev, "[ERR]PLE DLE enqueue to head\n"); 3081 return -EFAULT; 3082 } 3083 3084 return 0; 3085 } 3086 3087 static int band_idle_ck_b(struct rtw89_dev *rtwdev, u8 mac_idx) 3088 { 3089 int ret; 3090 u32 reg; 3091 u8 val; 3092 3093 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 3094 if (ret) 3095 return ret; 3096 3097 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PTCL_TX_CTN_SEL, mac_idx); 3098 3099 ret = read_poll_timeout(rtw89_read8, val, 3100 (val & B_AX_PTCL_TX_ON_STAT) == 0, 3101 SW_CVR_DUR_US, 3102 SW_CVR_DUR_US * PTCL_IDLE_POLL_CNT, 3103 false, rtwdev, reg); 3104 if (ret) 3105 return ret; 3106 3107 return 0; 3108 } 3109 
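/* Bring up band 1 (CMAC1) for DBCC: stop band 0 scheduled TX, pause all
 * MACIDs and wait for band 0 PTCL TX to go idle, re-apply the DLE quota
 * for the current mode, then restore band 0 state and enable/initialize
 * CMAC1 and its BB functions.
 */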
3110 static int band1_enable(struct rtw89_dev *rtwdev) 3111 { 3112 int ret, i; 3113 u32 sleep_bak[4] = {0}; 3114 u32 pause_bak[4] = {0}; 3115 u32 tx_en; 3116 3117 ret = rtw89_chip_stop_sch_tx(rtwdev, 0, &tx_en, RTW89_SCH_TX_SEL_ALL); 3118 if (ret) { 3119 rtw89_err(rtwdev, "[ERR]stop sch tx %d\n", ret); 3120 return ret; 3121 } 3122 3123 for (i = 0; i < 4; i++) { 3124 sleep_bak[i] = rtw89_read32(rtwdev, R_AX_MACID_SLEEP_0 + i * 4); 3125 pause_bak[i] = rtw89_read32(rtwdev, R_AX_SS_MACID_PAUSE_0 + i * 4); 3126 rtw89_write32(rtwdev, R_AX_MACID_SLEEP_0 + i * 4, U32_MAX); 3127 rtw89_write32(rtwdev, R_AX_SS_MACID_PAUSE_0 + i * 4, U32_MAX); 3128 } 3129 3130 ret = band_idle_ck_b(rtwdev, 0); 3131 if (ret) { 3132 rtw89_err(rtwdev, "[ERR]tx idle poll %d\n", ret); 3133 return ret; 3134 } 3135 3136 ret = dle_quota_change(rtwdev, rtwdev->mac.qta_mode); 3137 if (ret) { 3138 rtw89_err(rtwdev, "[ERR]DLE quota change %d\n", ret); 3139 return ret; 3140 } 3141 3142 for (i = 0; i < 4; i++) { 3143 rtw89_write32(rtwdev, R_AX_MACID_SLEEP_0 + i * 4, sleep_bak[i]); 3144 rtw89_write32(rtwdev, R_AX_SS_MACID_PAUSE_0 + i * 4, pause_bak[i]); 3145 } 3146 3147 ret = rtw89_chip_resume_sch_tx(rtwdev, 0, tx_en); 3148 if (ret) { 3149 rtw89_err(rtwdev, "[ERR]CMAC1 resume sch tx %d\n", ret); 3150 return ret; 3151 } 3152 3153 ret = cmac_func_en(rtwdev, 1, true); 3154 if (ret) { 3155 rtw89_err(rtwdev, "[ERR]CMAC1 func en %d\n", ret); 3156 return ret; 3157 } 3158 3159 ret = cmac_init(rtwdev, 1); 3160 if (ret) { 3161 rtw89_err(rtwdev, "[ERR]CMAC1 init %d\n", ret); 3162 return ret; 3163 } 3164 3165 rtw89_write32_set(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND, 3166 B_AX_R_SYM_FEN_WLBBFUN_1 | B_AX_R_SYM_FEN_WLBBGLB_1); 3167 3168 return 0; 3169 } 3170 3171 static void rtw89_wdrls_imr_enable(struct rtw89_dev *rtwdev) 3172 { 3173 const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; 3174 3175 rtw89_write32_clr(rtwdev, R_AX_WDRLS_ERR_IMR, B_AX_WDRLS_IMR_EN_CLR); 3176 rtw89_write32_set(rtwdev, R_AX_WDRLS_ERR_IMR, imr->wdrls_imr_set); 3177 } 3178 3179 static void rtw89_wsec_imr_enable(struct rtw89_dev *rtwdev) 3180 { 3181 const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; 3182 3183 rtw89_write32_set(rtwdev, imr->wsec_imr_reg, imr->wsec_imr_set); 3184 } 3185 3186 static void rtw89_mpdu_trx_imr_enable(struct rtw89_dev *rtwdev) 3187 { 3188 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 3189 const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; 3190 3191 rtw89_write32_clr(rtwdev, R_AX_MPDU_TX_ERR_IMR, 3192 B_AX_TX_GET_ERRPKTID_INT_EN | 3193 B_AX_TX_NXT_ERRPKTID_INT_EN | 3194 B_AX_TX_MPDU_SIZE_ZERO_INT_EN | 3195 B_AX_TX_OFFSET_ERR_INT_EN | 3196 B_AX_TX_HDR3_SIZE_ERR_INT_EN); 3197 if (chip_id == RTL8852C) 3198 rtw89_write32_clr(rtwdev, R_AX_MPDU_TX_ERR_IMR, 3199 B_AX_TX_ETH_TYPE_ERR_EN | 3200 B_AX_TX_LLC_PRE_ERR_EN | 3201 B_AX_TX_NW_TYPE_ERR_EN | 3202 B_AX_TX_KSRCH_ERR_EN); 3203 rtw89_write32_set(rtwdev, R_AX_MPDU_TX_ERR_IMR, 3204 imr->mpdu_tx_imr_set); 3205 3206 rtw89_write32_clr(rtwdev, R_AX_MPDU_RX_ERR_IMR, 3207 B_AX_GETPKTID_ERR_INT_EN | 3208 B_AX_MHDRLEN_ERR_INT_EN | 3209 B_AX_RPT_ERR_INT_EN); 3210 rtw89_write32_set(rtwdev, R_AX_MPDU_RX_ERR_IMR, 3211 imr->mpdu_rx_imr_set); 3212 } 3213 3214 static void rtw89_sta_sch_imr_enable(struct rtw89_dev *rtwdev) 3215 { 3216 const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; 3217 3218 rtw89_write32_clr(rtwdev, R_AX_STA_SCHEDULER_ERR_IMR, 3219 B_AX_SEARCH_HANG_TIMEOUT_INT_EN | 3220 B_AX_RPT_HANG_TIMEOUT_INT_EN | 3221 B_AX_PLE_B_PKTID_ERR_INT_EN); 3222 rtw89_write32_set(rtwdev, 
R_AX_STA_SCHEDULER_ERR_IMR, 3223 imr->sta_sch_imr_set); 3224 } 3225 3226 static void rtw89_txpktctl_imr_enable(struct rtw89_dev *rtwdev) 3227 { 3228 const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; 3229 3230 rtw89_write32_clr(rtwdev, imr->txpktctl_imr_b0_reg, 3231 imr->txpktctl_imr_b0_clr); 3232 rtw89_write32_set(rtwdev, imr->txpktctl_imr_b0_reg, 3233 imr->txpktctl_imr_b0_set); 3234 rtw89_write32_clr(rtwdev, imr->txpktctl_imr_b1_reg, 3235 imr->txpktctl_imr_b1_clr); 3236 rtw89_write32_set(rtwdev, imr->txpktctl_imr_b1_reg, 3237 imr->txpktctl_imr_b1_set); 3238 } 3239 3240 static void rtw89_wde_imr_enable(struct rtw89_dev *rtwdev) 3241 { 3242 const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; 3243 3244 rtw89_write32_clr(rtwdev, R_AX_WDE_ERR_IMR, imr->wde_imr_clr); 3245 rtw89_write32_set(rtwdev, R_AX_WDE_ERR_IMR, imr->wde_imr_set); 3246 } 3247 3248 static void rtw89_ple_imr_enable(struct rtw89_dev *rtwdev) 3249 { 3250 const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; 3251 3252 rtw89_write32_clr(rtwdev, R_AX_PLE_ERR_IMR, imr->ple_imr_clr); 3253 rtw89_write32_set(rtwdev, R_AX_PLE_ERR_IMR, imr->ple_imr_set); 3254 } 3255 3256 static void rtw89_pktin_imr_enable(struct rtw89_dev *rtwdev) 3257 { 3258 rtw89_write32_set(rtwdev, R_AX_PKTIN_ERR_IMR, 3259 B_AX_PKTIN_GETPKTID_ERR_INT_EN); 3260 } 3261 3262 static void rtw89_dispatcher_imr_enable(struct rtw89_dev *rtwdev) 3263 { 3264 const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; 3265 3266 rtw89_write32_clr(rtwdev, R_AX_HOST_DISPATCHER_ERR_IMR, 3267 imr->host_disp_imr_clr); 3268 rtw89_write32_set(rtwdev, R_AX_HOST_DISPATCHER_ERR_IMR, 3269 imr->host_disp_imr_set); 3270 rtw89_write32_clr(rtwdev, R_AX_CPU_DISPATCHER_ERR_IMR, 3271 imr->cpu_disp_imr_clr); 3272 rtw89_write32_set(rtwdev, R_AX_CPU_DISPATCHER_ERR_IMR, 3273 imr->cpu_disp_imr_set); 3274 rtw89_write32_clr(rtwdev, R_AX_OTHER_DISPATCHER_ERR_IMR, 3275 imr->other_disp_imr_clr); 3276 rtw89_write32_set(rtwdev, R_AX_OTHER_DISPATCHER_ERR_IMR, 3277 imr->other_disp_imr_set); 3278 } 3279 3280 static void rtw89_cpuio_imr_enable(struct rtw89_dev *rtwdev) 3281 { 3282 rtw89_write32_clr(rtwdev, R_AX_CPUIO_ERR_IMR, B_AX_CPUIO_IMR_CLR); 3283 rtw89_write32_set(rtwdev, R_AX_CPUIO_ERR_IMR, B_AX_CPUIO_IMR_SET); 3284 } 3285 3286 static void rtw89_bbrpt_imr_enable(struct rtw89_dev *rtwdev) 3287 { 3288 const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; 3289 3290 rtw89_write32_set(rtwdev, imr->bbrpt_com_err_imr_reg, 3291 B_AX_BBRPT_COM_NULL_PLPKTID_ERR_INT_EN); 3292 rtw89_write32_clr(rtwdev, imr->bbrpt_chinfo_err_imr_reg, 3293 B_AX_BBRPT_CHINFO_IMR_CLR); 3294 rtw89_write32_set(rtwdev, imr->bbrpt_chinfo_err_imr_reg, 3295 imr->bbrpt_err_imr_set); 3296 rtw89_write32_set(rtwdev, imr->bbrpt_dfs_err_imr_reg, 3297 B_AX_BBRPT_DFS_TO_ERR_INT_EN); 3298 rtw89_write32_set(rtwdev, R_AX_LA_ERRFLAG, B_AX_LA_IMR_DATA_LOSS_ERR); 3299 } 3300 3301 static void rtw89_scheduler_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx) 3302 { 3303 u32 reg; 3304 3305 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_SCHEDULE_ERR_IMR, mac_idx); 3306 rtw89_write32_clr(rtwdev, reg, B_AX_SORT_NON_IDLE_ERR_INT_EN | 3307 B_AX_FSM_TIMEOUT_ERR_INT_EN); 3308 rtw89_write32_set(rtwdev, reg, B_AX_FSM_TIMEOUT_ERR_INT_EN); 3309 } 3310 3311 static void rtw89_ptcl_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx) 3312 { 3313 const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; 3314 u32 reg; 3315 3316 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PTCL_IMR0, mac_idx); 3317 rtw89_write32_clr(rtwdev, reg, imr->ptcl_imr_clr); 3318 
rtw89_write32_set(rtwdev, reg, imr->ptcl_imr_set); 3319 } 3320 3321 static void rtw89_cdma_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx) 3322 { 3323 const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; 3324 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 3325 u32 reg; 3326 3327 reg = rtw89_mac_reg_by_idx(rtwdev, imr->cdma_imr_0_reg, mac_idx); 3328 rtw89_write32_clr(rtwdev, reg, imr->cdma_imr_0_clr); 3329 rtw89_write32_set(rtwdev, reg, imr->cdma_imr_0_set); 3330 3331 if (chip_id == RTL8852C) { 3332 reg = rtw89_mac_reg_by_idx(rtwdev, imr->cdma_imr_1_reg, mac_idx); 3333 rtw89_write32_clr(rtwdev, reg, imr->cdma_imr_1_clr); 3334 rtw89_write32_set(rtwdev, reg, imr->cdma_imr_1_set); 3335 } 3336 } 3337 3338 static void rtw89_phy_intf_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx) 3339 { 3340 const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; 3341 u32 reg; 3342 3343 reg = rtw89_mac_reg_by_idx(rtwdev, imr->phy_intf_imr_reg, mac_idx); 3344 rtw89_write32_clr(rtwdev, reg, imr->phy_intf_imr_clr); 3345 rtw89_write32_set(rtwdev, reg, imr->phy_intf_imr_set); 3346 } 3347 3348 static void rtw89_rmac_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx) 3349 { 3350 const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; 3351 u32 reg; 3352 3353 reg = rtw89_mac_reg_by_idx(rtwdev, imr->rmac_imr_reg, mac_idx); 3354 rtw89_write32_clr(rtwdev, reg, imr->rmac_imr_clr); 3355 rtw89_write32_set(rtwdev, reg, imr->rmac_imr_set); 3356 } 3357 3358 static void rtw89_tmac_imr_enable(struct rtw89_dev *rtwdev, u8 mac_idx) 3359 { 3360 const struct rtw89_imr_info *imr = rtwdev->chip->imr_info; 3361 u32 reg; 3362 3363 reg = rtw89_mac_reg_by_idx(rtwdev, imr->tmac_imr_reg, mac_idx); 3364 rtw89_write32_clr(rtwdev, reg, imr->tmac_imr_clr); 3365 rtw89_write32_set(rtwdev, reg, imr->tmac_imr_set); 3366 } 3367 3368 static int rtw89_mac_enable_imr(struct rtw89_dev *rtwdev, u8 mac_idx, 3369 enum rtw89_mac_hwmod_sel sel) 3370 { 3371 int ret; 3372 3373 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, sel); 3374 if (ret) { 3375 rtw89_err(rtwdev, "MAC%d mac_idx%d is not ready\n", 3376 sel, mac_idx); 3377 return ret; 3378 } 3379 3380 if (sel == RTW89_DMAC_SEL) { 3381 rtw89_wdrls_imr_enable(rtwdev); 3382 rtw89_wsec_imr_enable(rtwdev); 3383 rtw89_mpdu_trx_imr_enable(rtwdev); 3384 rtw89_sta_sch_imr_enable(rtwdev); 3385 rtw89_txpktctl_imr_enable(rtwdev); 3386 rtw89_wde_imr_enable(rtwdev); 3387 rtw89_ple_imr_enable(rtwdev); 3388 rtw89_pktin_imr_enable(rtwdev); 3389 rtw89_dispatcher_imr_enable(rtwdev); 3390 rtw89_cpuio_imr_enable(rtwdev); 3391 rtw89_bbrpt_imr_enable(rtwdev); 3392 } else if (sel == RTW89_CMAC_SEL) { 3393 rtw89_scheduler_imr_enable(rtwdev, mac_idx); 3394 rtw89_ptcl_imr_enable(rtwdev, mac_idx); 3395 rtw89_cdma_imr_enable(rtwdev, mac_idx); 3396 rtw89_phy_intf_imr_enable(rtwdev, mac_idx); 3397 rtw89_rmac_imr_enable(rtwdev, mac_idx); 3398 rtw89_tmac_imr_enable(rtwdev, mac_idx); 3399 } else { 3400 return -EINVAL; 3401 } 3402 3403 return 0; 3404 } 3405 3406 static void rtw89_mac_err_imr_ctrl(struct rtw89_dev *rtwdev, bool en) 3407 { 3408 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 3409 3410 rtw89_write32(rtwdev, R_AX_DMAC_ERR_IMR, 3411 en ? DMAC_ERR_IMR_EN : DMAC_ERR_IMR_DIS); 3412 rtw89_write32(rtwdev, R_AX_CMAC_ERR_IMR, 3413 en ? CMAC0_ERR_IMR_EN : CMAC0_ERR_IMR_DIS); 3414 if (chip_id != RTL8852B && rtwdev->mac.dle_info.c1_rx_qta) 3415 rtw89_write32(rtwdev, R_AX_CMAC_ERR_IMR_C1, 3416 en ? 
CMAC1_ERR_IMR_EN : CMAC1_ERR_IMR_DIS);
}

static int rtw89_mac_dbcc_enable(struct rtw89_dev *rtwdev, bool enable)
{
	int ret = 0;

	if (enable) {
		ret = band1_enable(rtwdev);
		if (ret) {
			rtw89_err(rtwdev, "[ERR] band1_enable %d\n", ret);
			return ret;
		}

		ret = rtw89_mac_enable_imr(rtwdev, RTW89_MAC_1, RTW89_CMAC_SEL);
		if (ret) {
			rtw89_err(rtwdev, "[ERR] enable CMAC1 IMR %d\n", ret);
			return ret;
		}
	} else {
		rtw89_err(rtwdev, "[ERR] disable dbcc is not implemented yet\n");
		return -EINVAL;
	}

	return 0;
}

static int set_host_rpr(struct rtw89_dev *rtwdev)
{
	if (rtwdev->hci.type == RTW89_HCI_TYPE_PCIE) {
		rtw89_write32_mask(rtwdev, R_AX_WDRLS_CFG,
				   B_AX_WDRLS_MODE_MASK, RTW89_RPR_MODE_POH);
		rtw89_write32_set(rtwdev, R_AX_RLSRPT0_CFG0,
				  B_AX_RLSRPT0_FLTR_MAP_MASK);
	} else {
		rtw89_write32_mask(rtwdev, R_AX_WDRLS_CFG,
				   B_AX_WDRLS_MODE_MASK, RTW89_RPR_MODE_STF);
		rtw89_write32_clr(rtwdev, R_AX_RLSRPT0_CFG0,
				  B_AX_RLSRPT0_FLTR_MAP_MASK);
	}

	rtw89_write32_mask(rtwdev, R_AX_RLSRPT0_CFG1, B_AX_RLSRPT0_AGGNUM_MASK, 30);
	rtw89_write32_mask(rtwdev, R_AX_RLSRPT0_CFG1, B_AX_RLSRPT0_TO_MASK, 255);

	return 0;
}

static int rtw89_mac_trx_init(struct rtw89_dev *rtwdev)
{
	enum rtw89_qta_mode qta_mode = rtwdev->mac.qta_mode;
	int ret;

	ret = dmac_init(rtwdev, 0);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]DMAC init %d\n", ret);
		return ret;
	}

	ret = cmac_init(rtwdev, 0);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]CMAC%d init %d\n", 0, ret);
		return ret;
	}

	if (is_qta_dbcc(rtwdev, qta_mode)) {
		ret = rtw89_mac_dbcc_enable(rtwdev, true);
		if (ret) {
			rtw89_err(rtwdev, "[ERR]dbcc_enable init %d\n", ret);
			return ret;
		}
	}

	ret = rtw89_mac_enable_imr(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
	if (ret) {
		rtw89_err(rtwdev, "[ERR] enable DMAC IMR %d\n", ret);
		return ret;
	}

	ret = rtw89_mac_enable_imr(rtwdev, RTW89_MAC_0, RTW89_CMAC_SEL);
	if (ret) {
		rtw89_err(rtwdev, "[ERR] enable CMAC0 IMR %d\n", ret);
		return ret;
	}

	rtw89_mac_err_imr_ctrl(rtwdev, true);

	ret = set_host_rpr(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "[ERR] set host rpr %d\n", ret);
		return ret;
	}

	return 0;
}

static void rtw89_disable_fw_watchdog(struct rtw89_dev *rtwdev)
{
	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
	u32 val32;

	if (chip_id == RTL8852B || chip_id == RTL8851B) {
		rtw89_write32_clr(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_APB_WRAP_EN);
		rtw89_write32_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_APB_WRAP_EN);
		return;
	}

	rtw89_mac_mem_write(rtwdev, R_AX_WDT_CTRL,
			    WDT_CTRL_ALL_DIS, RTW89_MAC_MEM_CPU_LOCAL);

	val32 = rtw89_mac_mem_read(rtwdev, R_AX_WDT_STATUS, RTW89_MAC_MEM_CPU_LOCAL);
	val32 |= B_AX_FS_WDT_INT;
	val32 &= ~B_AX_FS_WDT_INT_MSK;
	rtw89_mac_mem_write(rtwdev, R_AX_WDT_STATUS, val32, RTW89_MAC_MEM_CPU_LOCAL);
}

static void rtw89_mac_disable_cpu_ax(struct rtw89_dev *rtwdev)
{
	clear_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);

	rtw89_write32_clr(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_WCPU_EN);
	rtw89_write32_clr(rtwdev, R_AX_WCPU_FW_CTRL, B_AX_WCPU_FWDL_EN |
			  B_AX_H2C_PATH_RDY | B_AX_FWDL_PATH_RDY);
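	/* stop the WLAN CPU clock before the watchdog shutdown and platform reset */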
rtw89_write32_clr(rtwdev, R_AX_SYS_CLK_CTRL, B_AX_CPU_CLK_EN); 3539 3540 rtw89_disable_fw_watchdog(rtwdev); 3541 3542 rtw89_write32_clr(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN); 3543 rtw89_write32_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_PLATFORM_EN); 3544 } 3545 3546 static int rtw89_mac_enable_cpu_ax(struct rtw89_dev *rtwdev, u8 boot_reason, 3547 bool dlfw, bool include_bb) 3548 { 3549 u32 val; 3550 int ret; 3551 3552 if (rtw89_read32(rtwdev, R_AX_PLATFORM_ENABLE) & B_AX_WCPU_EN) 3553 return -EFAULT; 3554 3555 rtw89_write32(rtwdev, R_AX_UDM1, 0); 3556 rtw89_write32(rtwdev, R_AX_UDM2, 0); 3557 rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0); 3558 rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0); 3559 rtw89_write32(rtwdev, R_AX_HALT_H2C, 0); 3560 rtw89_write32(rtwdev, R_AX_HALT_C2H, 0); 3561 3562 rtw89_write32_set(rtwdev, R_AX_SYS_CLK_CTRL, B_AX_CPU_CLK_EN); 3563 3564 val = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL); 3565 val &= ~(B_AX_WCPU_FWDL_EN | B_AX_H2C_PATH_RDY | B_AX_FWDL_PATH_RDY); 3566 val = u32_replace_bits(val, RTW89_FWDL_INITIAL_STATE, 3567 B_AX_WCPU_FWDL_STS_MASK); 3568 3569 if (dlfw) 3570 val |= B_AX_WCPU_FWDL_EN; 3571 3572 rtw89_write32(rtwdev, R_AX_WCPU_FW_CTRL, val); 3573 3574 if (rtwdev->chip->chip_id == RTL8852B) 3575 rtw89_write32_mask(rtwdev, R_AX_SEC_CTRL, 3576 B_AX_SEC_IDMEM_SIZE_CONFIG_MASK, 0x2); 3577 3578 rtw89_write16_mask(rtwdev, R_AX_BOOT_REASON, B_AX_BOOT_REASON_MASK, 3579 boot_reason); 3580 rtw89_write32_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_WCPU_EN); 3581 3582 if (!dlfw) { 3583 mdelay(5); 3584 3585 ret = rtw89_fw_check_rdy(rtwdev, RTW89_FWDL_CHECK_FREERTOS_DONE); 3586 if (ret) 3587 return ret; 3588 } 3589 3590 return 0; 3591 } 3592 3593 static int rtw89_mac_dmac_pre_init(struct rtw89_dev *rtwdev) 3594 { 3595 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 3596 u32 val; 3597 int ret; 3598 3599 if (chip_id == RTL8852C) 3600 val = B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN | B_AX_DISPATCHER_EN | 3601 B_AX_PKT_BUF_EN | B_AX_H_AXIDMA_EN; 3602 else 3603 val = B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN | B_AX_DISPATCHER_EN | 3604 B_AX_PKT_BUF_EN; 3605 rtw89_write32(rtwdev, R_AX_DMAC_FUNC_EN, val); 3606 3607 if (chip_id == RTL8851B) 3608 val = B_AX_DISPATCHER_CLK_EN | B_AX_AXIDMA_CLK_EN; 3609 else 3610 val = B_AX_DISPATCHER_CLK_EN; 3611 rtw89_write32(rtwdev, R_AX_DMAC_CLK_EN, val); 3612 3613 if (chip_id != RTL8852C) 3614 goto dle; 3615 3616 val = rtw89_read32(rtwdev, R_AX_HAXI_INIT_CFG1); 3617 val &= ~(B_AX_DMA_MODE_MASK | B_AX_STOP_AXI_MST); 3618 val |= FIELD_PREP(B_AX_DMA_MODE_MASK, DMA_MOD_PCIE_1B) | 3619 B_AX_TXHCI_EN_V1 | B_AX_RXHCI_EN_V1; 3620 rtw89_write32(rtwdev, R_AX_HAXI_INIT_CFG1, val); 3621 3622 rtw89_write32_clr(rtwdev, R_AX_HAXI_DMA_STOP1, 3623 B_AX_STOP_ACH0 | B_AX_STOP_ACH1 | B_AX_STOP_ACH3 | 3624 B_AX_STOP_ACH4 | B_AX_STOP_ACH5 | B_AX_STOP_ACH6 | 3625 B_AX_STOP_ACH7 | B_AX_STOP_CH8 | B_AX_STOP_CH9 | 3626 B_AX_STOP_CH12 | B_AX_STOP_ACH2); 3627 rtw89_write32_clr(rtwdev, R_AX_HAXI_DMA_STOP2, B_AX_STOP_CH10 | B_AX_STOP_CH11); 3628 rtw89_write32_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_AXIDMA_EN); 3629 3630 dle: 3631 ret = dle_init(rtwdev, RTW89_QTA_DLFW, rtwdev->mac.qta_mode); 3632 if (ret) { 3633 rtw89_err(rtwdev, "[ERR]DLE pre init %d\n", ret); 3634 return ret; 3635 } 3636 3637 ret = hfc_init(rtwdev, true, false, true); 3638 if (ret) { 3639 rtw89_err(rtwdev, "[ERR]HCI FC pre init %d\n", ret); 3640 return ret; 3641 } 3642 3643 return ret; 3644 } 3645 3646 int rtw89_mac_enable_bb_rf(struct rtw89_dev *rtwdev) 3647 { 3648 rtw89_write8_set(rtwdev, 
R_AX_SYS_FUNC_EN, 3649 B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN); 3650 rtw89_write32_set(rtwdev, R_AX_WLRF_CTRL, 3651 B_AX_WLRF1_CTRL_7 | B_AX_WLRF1_CTRL_1 | 3652 B_AX_WLRF_CTRL_7 | B_AX_WLRF_CTRL_1); 3653 rtw89_write8_set(rtwdev, R_AX_PHYREG_SET, PHYREG_SET_ALL_CYCLE); 3654 3655 return 0; 3656 } 3657 EXPORT_SYMBOL(rtw89_mac_enable_bb_rf); 3658 3659 int rtw89_mac_disable_bb_rf(struct rtw89_dev *rtwdev) 3660 { 3661 rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN, 3662 B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN); 3663 rtw89_write32_clr(rtwdev, R_AX_WLRF_CTRL, 3664 B_AX_WLRF1_CTRL_7 | B_AX_WLRF1_CTRL_1 | 3665 B_AX_WLRF_CTRL_7 | B_AX_WLRF_CTRL_1); 3666 rtw89_write8_clr(rtwdev, R_AX_PHYREG_SET, PHYREG_SET_ALL_CYCLE); 3667 3668 return 0; 3669 } 3670 EXPORT_SYMBOL(rtw89_mac_disable_bb_rf); 3671 3672 int rtw89_mac_partial_init(struct rtw89_dev *rtwdev, bool include_bb) 3673 { 3674 int ret; 3675 3676 ret = rtw89_mac_power_switch(rtwdev, true); 3677 if (ret) { 3678 rtw89_mac_power_switch(rtwdev, false); 3679 ret = rtw89_mac_power_switch(rtwdev, true); 3680 if (ret) 3681 return ret; 3682 } 3683 3684 rtw89_mac_ctrl_hci_dma_trx(rtwdev, true); 3685 3686 if (include_bb) { 3687 rtw89_chip_bb_preinit(rtwdev, RTW89_PHY_0); 3688 if (rtwdev->dbcc_en) 3689 rtw89_chip_bb_preinit(rtwdev, RTW89_PHY_1); 3690 } 3691 3692 ret = rtw89_mac_dmac_pre_init(rtwdev); 3693 if (ret) 3694 return ret; 3695 3696 if (rtwdev->hci.ops->mac_pre_init) { 3697 ret = rtwdev->hci.ops->mac_pre_init(rtwdev); 3698 if (ret) 3699 return ret; 3700 } 3701 3702 ret = rtw89_fw_download(rtwdev, RTW89_FW_NORMAL, include_bb); 3703 if (ret) 3704 return ret; 3705 3706 return 0; 3707 } 3708 3709 int rtw89_mac_init(struct rtw89_dev *rtwdev) 3710 { 3711 const struct rtw89_chip_info *chip = rtwdev->chip; 3712 bool include_bb = !!chip->bbmcu_nr; 3713 int ret; 3714 3715 ret = rtw89_mac_partial_init(rtwdev, include_bb); 3716 if (ret) 3717 goto fail; 3718 3719 ret = rtw89_chip_enable_bb_rf(rtwdev); 3720 if (ret) 3721 goto fail; 3722 3723 ret = rtw89_mac_sys_init(rtwdev); 3724 if (ret) 3725 goto fail; 3726 3727 ret = rtw89_mac_trx_init(rtwdev); 3728 if (ret) 3729 goto fail; 3730 3731 if (rtwdev->hci.ops->mac_post_init) { 3732 ret = rtwdev->hci.ops->mac_post_init(rtwdev); 3733 if (ret) 3734 goto fail; 3735 } 3736 3737 rtw89_fw_send_all_early_h2c(rtwdev); 3738 rtw89_fw_h2c_set_ofld_cfg(rtwdev); 3739 3740 return ret; 3741 fail: 3742 rtw89_mac_power_switch(rtwdev, false); 3743 3744 return ret; 3745 } 3746 3747 static void rtw89_mac_dmac_tbl_init(struct rtw89_dev *rtwdev, u8 macid) 3748 { 3749 u8 i; 3750 3751 if (rtwdev->chip->chip_gen != RTW89_CHIP_AX) 3752 return; 3753 3754 for (i = 0; i < 4; i++) { 3755 rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, 3756 DMAC_TBL_BASE_ADDR + (macid << 4) + (i << 2)); 3757 rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY, 0); 3758 } 3759 } 3760 3761 static void rtw89_mac_cmac_tbl_init(struct rtw89_dev *rtwdev, u8 macid) 3762 { 3763 if (rtwdev->chip->chip_gen != RTW89_CHIP_AX) 3764 return; 3765 3766 rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, 3767 CMAC_TBL_BASE_ADDR + macid * CCTL_INFO_SIZE); 3768 rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY, 0x4); 3769 rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 4, 0x400A0004); 3770 rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 8, 0); 3771 rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 12, 0); 3772 rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 16, 0); 3773 rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 20, 0xE43000B); 3774 rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 24, 0); 3775 
rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 28, 0xB8109); 3776 } 3777 3778 int rtw89_mac_set_macid_pause(struct rtw89_dev *rtwdev, u8 macid, bool pause) 3779 { 3780 u8 sh = FIELD_GET(GENMASK(4, 0), macid); 3781 u8 grp = macid >> 5; 3782 int ret; 3783 3784 /* If this is called by change_interface() in the case of P2P, it could 3785 * be power-off, so ignore this operation. 3786 */ 3787 if (test_bit(RTW89_FLAG_CHANGING_INTERFACE, rtwdev->flags) && 3788 !test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) 3789 return 0; 3790 3791 ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_CMAC_SEL); 3792 if (ret) 3793 return ret; 3794 3795 rtw89_fw_h2c_macid_pause(rtwdev, sh, grp, pause); 3796 3797 return 0; 3798 } 3799 3800 static const struct rtw89_port_reg rtw89_port_base_ax = { 3801 .port_cfg = R_AX_PORT_CFG_P0, 3802 .tbtt_prohib = R_AX_TBTT_PROHIB_P0, 3803 .bcn_area = R_AX_BCN_AREA_P0, 3804 .bcn_early = R_AX_BCNERLYINT_CFG_P0, 3805 .tbtt_early = R_AX_TBTTERLYINT_CFG_P0, 3806 .tbtt_agg = R_AX_TBTT_AGG_P0, 3807 .bcn_space = R_AX_BCN_SPACE_CFG_P0, 3808 .bcn_forcetx = R_AX_BCN_FORCETX_P0, 3809 .bcn_err_cnt = R_AX_BCN_ERR_CNT_P0, 3810 .bcn_err_flag = R_AX_BCN_ERR_FLAG_P0, 3811 .dtim_ctrl = R_AX_DTIM_CTRL_P0, 3812 .tbtt_shift = R_AX_TBTT_SHIFT_P0, 3813 .bcn_cnt_tmr = R_AX_BCN_CNT_TMR_P0, 3814 .tsftr_l = R_AX_TSFTR_LOW_P0, 3815 .tsftr_h = R_AX_TSFTR_HIGH_P0, 3816 .md_tsft = R_AX_MD_TSFT_STMP_CTL, 3817 .bss_color = R_AX_PTCL_BSS_COLOR_0, 3818 .mbssid = R_AX_MBSSID_CTRL, 3819 .mbssid_drop = R_AX_MBSSID_DROP_0, 3820 .tsf_sync = R_AX_PORT0_TSF_SYNC, 3821 .hiq_win = {R_AX_P0MB_HGQ_WINDOW_CFG_0, R_AX_PORT_HGQ_WINDOW_CFG, 3822 R_AX_PORT_HGQ_WINDOW_CFG + 1, R_AX_PORT_HGQ_WINDOW_CFG + 2, 3823 R_AX_PORT_HGQ_WINDOW_CFG + 3}, 3824 }; 3825 3826 #define BCN_INTERVAL 100 3827 #define BCN_ERLY_DEF 160 3828 #define BCN_SETUP_DEF 2 3829 #define BCN_HOLD_DEF 200 3830 #define BCN_MASK_DEF 0 3831 #define TBTT_ERLY_DEF 5 3832 #define BCN_SET_UNIT 32 3833 #define BCN_ERLY_SET_DLY (10 * 2) 3834 3835 static void rtw89_mac_port_cfg_func_sw(struct rtw89_dev *rtwdev, 3836 struct rtw89_vif *rtwvif) 3837 { 3838 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 3839 const struct rtw89_port_reg *p = mac->port_base; 3840 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 3841 3842 if (!rtw89_read32_port_mask(rtwdev, rtwvif, p->port_cfg, B_AX_PORT_FUNC_EN)) 3843 return; 3844 3845 rtw89_write32_port_clr(rtwdev, rtwvif, p->tbtt_prohib, B_AX_TBTT_SETUP_MASK); 3846 rtw89_write32_port_mask(rtwdev, rtwvif, p->tbtt_prohib, B_AX_TBTT_HOLD_MASK, 1); 3847 rtw89_write16_port_clr(rtwdev, rtwvif, p->tbtt_early, B_AX_TBTTERLY_MASK); 3848 rtw89_write16_port_clr(rtwdev, rtwvif, p->bcn_early, B_AX_BCNERLY_MASK); 3849 3850 msleep(vif->bss_conf.beacon_int + 1); 3851 3852 rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_PORT_FUNC_EN | 3853 B_AX_BRK_SETUP); 3854 rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_TSFTR_RST); 3855 rtw89_write32_port(rtwdev, rtwvif, p->bcn_cnt_tmr, 0); 3856 } 3857 3858 static void rtw89_mac_port_cfg_tx_rpt(struct rtw89_dev *rtwdev, 3859 struct rtw89_vif *rtwvif, bool en) 3860 { 3861 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 3862 const struct rtw89_port_reg *p = mac->port_base; 3863 3864 if (en) 3865 rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_TXBCN_RPT_EN); 3866 else 3867 rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_TXBCN_RPT_EN); 3868 } 3869 3870 static void rtw89_mac_port_cfg_rx_rpt(struct rtw89_dev *rtwdev, 3871 struct rtw89_vif *rtwvif, bool en) 3872 { 
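/* Counterpart of rtw89_mac_port_cfg_tx_rpt() above: toggle whether received beacons are reported for this port. */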
3873 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 3874 const struct rtw89_port_reg *p = mac->port_base; 3875 3876 if (en) 3877 rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_RXBCN_RPT_EN); 3878 else 3879 rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_RXBCN_RPT_EN); 3880 } 3881 3882 static void rtw89_mac_port_cfg_net_type(struct rtw89_dev *rtwdev, 3883 struct rtw89_vif *rtwvif) 3884 { 3885 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 3886 const struct rtw89_port_reg *p = mac->port_base; 3887 3888 rtw89_write32_port_mask(rtwdev, rtwvif, p->port_cfg, B_AX_NET_TYPE_MASK, 3889 rtwvif->net_type); 3890 } 3891 3892 static void rtw89_mac_port_cfg_bcn_prct(struct rtw89_dev *rtwdev, 3893 struct rtw89_vif *rtwvif) 3894 { 3895 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 3896 const struct rtw89_port_reg *p = mac->port_base; 3897 bool en = rtwvif->net_type != RTW89_NET_TYPE_NO_LINK; 3898 u32 bits = B_AX_TBTT_PROHIB_EN | B_AX_BRK_SETUP; 3899 3900 if (en) 3901 rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, bits); 3902 else 3903 rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, bits); 3904 } 3905 3906 static void rtw89_mac_port_cfg_rx_sw(struct rtw89_dev *rtwdev, 3907 struct rtw89_vif *rtwvif) 3908 { 3909 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 3910 const struct rtw89_port_reg *p = mac->port_base; 3911 bool en = rtwvif->net_type == RTW89_NET_TYPE_INFRA || 3912 rtwvif->net_type == RTW89_NET_TYPE_AD_HOC; 3913 u32 bit = B_AX_RX_BSSID_FIT_EN; 3914 3915 if (en) 3916 rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, bit); 3917 else 3918 rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, bit); 3919 } 3920 3921 static void rtw89_mac_port_cfg_rx_sync(struct rtw89_dev *rtwdev, 3922 struct rtw89_vif *rtwvif) 3923 { 3924 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 3925 const struct rtw89_port_reg *p = mac->port_base; 3926 bool en = rtwvif->net_type == RTW89_NET_TYPE_INFRA || 3927 rtwvif->net_type == RTW89_NET_TYPE_AD_HOC; 3928 3929 if (en) 3930 rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_TSF_UDT_EN); 3931 else 3932 rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_TSF_UDT_EN); 3933 } 3934 3935 static void rtw89_mac_port_cfg_tx_sw(struct rtw89_dev *rtwdev, 3936 struct rtw89_vif *rtwvif) 3937 { 3938 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 3939 const struct rtw89_port_reg *p = mac->port_base; 3940 bool en = rtwvif->net_type == RTW89_NET_TYPE_AP_MODE || 3941 rtwvif->net_type == RTW89_NET_TYPE_AD_HOC; 3942 3943 if (en) 3944 rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_BCNTX_EN); 3945 else 3946 rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_BCNTX_EN); 3947 } 3948 3949 static void rtw89_mac_port_cfg_bcn_intv(struct rtw89_dev *rtwdev, 3950 struct rtw89_vif *rtwvif) 3951 { 3952 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 3953 const struct rtw89_port_reg *p = mac->port_base; 3954 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 3955 u16 bcn_int = vif->bss_conf.beacon_int ? vif->bss_conf.beacon_int : BCN_INTERVAL; 3956 3957 rtw89_write32_port_mask(rtwdev, rtwvif, p->bcn_space, B_AX_BCN_SPACE_MASK, 3958 bcn_int); 3959 } 3960 3961 static void rtw89_mac_port_cfg_hiq_win(struct rtw89_dev *rtwdev, 3962 struct rtw89_vif *rtwvif) 3963 { 3964 u8 win = rtwvif->net_type == RTW89_NET_TYPE_AP_MODE ? 
16 : 0; 3965 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 3966 const struct rtw89_port_reg *p = mac->port_base; 3967 u8 port = rtwvif->port; 3968 u32 reg; 3969 3970 reg = rtw89_mac_reg_by_idx(rtwdev, p->hiq_win[port], rtwvif->mac_idx); 3971 rtw89_write8(rtwdev, reg, win); 3972 } 3973 3974 static void rtw89_mac_port_cfg_hiq_dtim(struct rtw89_dev *rtwdev, 3975 struct rtw89_vif *rtwvif) 3976 { 3977 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 3978 const struct rtw89_port_reg *p = mac->port_base; 3979 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 3980 u32 addr; 3981 3982 addr = rtw89_mac_reg_by_idx(rtwdev, p->md_tsft, rtwvif->mac_idx); 3983 rtw89_write8_set(rtwdev, addr, B_AX_UPD_HGQMD | B_AX_UPD_TIMIE); 3984 3985 rtw89_write16_port_mask(rtwdev, rtwvif, p->dtim_ctrl, B_AX_DTIM_NUM_MASK, 3986 vif->bss_conf.dtim_period); 3987 } 3988 3989 static void rtw89_mac_port_cfg_bcn_setup_time(struct rtw89_dev *rtwdev, 3990 struct rtw89_vif *rtwvif) 3991 { 3992 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 3993 const struct rtw89_port_reg *p = mac->port_base; 3994 3995 rtw89_write32_port_mask(rtwdev, rtwvif, p->tbtt_prohib, 3996 B_AX_TBTT_SETUP_MASK, BCN_SETUP_DEF); 3997 } 3998 3999 static void rtw89_mac_port_cfg_bcn_hold_time(struct rtw89_dev *rtwdev, 4000 struct rtw89_vif *rtwvif) 4001 { 4002 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 4003 const struct rtw89_port_reg *p = mac->port_base; 4004 4005 rtw89_write32_port_mask(rtwdev, rtwvif, p->tbtt_prohib, 4006 B_AX_TBTT_HOLD_MASK, BCN_HOLD_DEF); 4007 } 4008 4009 static void rtw89_mac_port_cfg_bcn_mask_area(struct rtw89_dev *rtwdev, 4010 struct rtw89_vif *rtwvif) 4011 { 4012 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 4013 const struct rtw89_port_reg *p = mac->port_base; 4014 4015 rtw89_write32_port_mask(rtwdev, rtwvif, p->bcn_area, 4016 B_AX_BCN_MSK_AREA_MASK, BCN_MASK_DEF); 4017 } 4018 4019 static void rtw89_mac_port_cfg_tbtt_early(struct rtw89_dev *rtwdev, 4020 struct rtw89_vif *rtwvif) 4021 { 4022 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 4023 const struct rtw89_port_reg *p = mac->port_base; 4024 4025 rtw89_write16_port_mask(rtwdev, rtwvif, p->tbtt_early, 4026 B_AX_TBTTERLY_MASK, TBTT_ERLY_DEF); 4027 } 4028 4029 static void rtw89_mac_port_cfg_bss_color(struct rtw89_dev *rtwdev, 4030 struct rtw89_vif *rtwvif) 4031 { 4032 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 4033 const struct rtw89_port_reg *p = mac->port_base; 4034 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 4035 static const u32 masks[RTW89_PORT_NUM] = { 4036 B_AX_BSS_COLOB_AX_PORT_0_MASK, B_AX_BSS_COLOB_AX_PORT_1_MASK, 4037 B_AX_BSS_COLOB_AX_PORT_2_MASK, B_AX_BSS_COLOB_AX_PORT_3_MASK, 4038 B_AX_BSS_COLOB_AX_PORT_4_MASK, 4039 }; 4040 u8 port = rtwvif->port; 4041 u32 reg_base; 4042 u32 reg; 4043 u8 bss_color; 4044 4045 bss_color = vif->bss_conf.he_bss_color.color; 4046 reg_base = port >= 4 ? 
p->bss_color + 4 : p->bss_color; 4047 reg = rtw89_mac_reg_by_idx(rtwdev, reg_base, rtwvif->mac_idx); 4048 rtw89_write32_mask(rtwdev, reg, masks[port], bss_color); 4049 } 4050 4051 static void rtw89_mac_port_cfg_mbssid(struct rtw89_dev *rtwdev, 4052 struct rtw89_vif *rtwvif) 4053 { 4054 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 4055 const struct rtw89_port_reg *p = mac->port_base; 4056 u8 port = rtwvif->port; 4057 u32 reg; 4058 4059 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) 4060 return; 4061 4062 if (port == 0) { 4063 reg = rtw89_mac_reg_by_idx(rtwdev, p->mbssid, rtwvif->mac_idx); 4064 rtw89_write32_clr(rtwdev, reg, B_AX_P0MB_ALL_MASK); 4065 } 4066 } 4067 4068 static void rtw89_mac_port_cfg_hiq_drop(struct rtw89_dev *rtwdev, 4069 struct rtw89_vif *rtwvif) 4070 { 4071 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 4072 const struct rtw89_port_reg *p = mac->port_base; 4073 u8 port = rtwvif->port; 4074 u32 reg; 4075 u32 val; 4076 4077 reg = rtw89_mac_reg_by_idx(rtwdev, p->mbssid_drop, rtwvif->mac_idx); 4078 val = rtw89_read32(rtwdev, reg); 4079 val &= ~FIELD_PREP(B_AX_PORT_DROP_4_0_MASK, BIT(port)); 4080 if (port == 0) 4081 val &= ~BIT(0); 4082 rtw89_write32(rtwdev, reg, val); 4083 } 4084 4085 static void rtw89_mac_port_cfg_func_en(struct rtw89_dev *rtwdev, 4086 struct rtw89_vif *rtwvif, bool enable) 4087 { 4088 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 4089 const struct rtw89_port_reg *p = mac->port_base; 4090 4091 if (enable) 4092 rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, 4093 B_AX_PORT_FUNC_EN); 4094 else 4095 rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, 4096 B_AX_PORT_FUNC_EN); 4097 } 4098 4099 static void rtw89_mac_port_cfg_bcn_early(struct rtw89_dev *rtwdev, 4100 struct rtw89_vif *rtwvif) 4101 { 4102 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 4103 const struct rtw89_port_reg *p = mac->port_base; 4104 4105 rtw89_write32_port_mask(rtwdev, rtwvif, p->bcn_early, B_AX_BCNERLY_MASK, 4106 BCN_ERLY_DEF); 4107 } 4108 4109 static void rtw89_mac_port_cfg_tbtt_shift(struct rtw89_dev *rtwdev, 4110 struct rtw89_vif *rtwvif) 4111 { 4112 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 4113 const struct rtw89_port_reg *p = mac->port_base; 4114 u16 val; 4115 4116 if (rtwdev->chip->chip_id != RTL8852C) 4117 return; 4118 4119 if (rtwvif->wifi_role != RTW89_WIFI_ROLE_P2P_CLIENT && 4120 rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION) 4121 return; 4122 4123 val = FIELD_PREP(B_AX_TBTT_SHIFT_OFST_MAG, 1) | 4124 B_AX_TBTT_SHIFT_OFST_SIGN; 4125 4126 rtw89_write16_port_mask(rtwdev, rtwvif, p->tbtt_shift, 4127 B_AX_TBTT_SHIFT_OFST_MASK, val); 4128 } 4129 4130 void rtw89_mac_port_tsf_sync(struct rtw89_dev *rtwdev, 4131 struct rtw89_vif *rtwvif, 4132 struct rtw89_vif *rtwvif_src, 4133 u16 offset_tu) 4134 { 4135 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 4136 const struct rtw89_port_reg *p = mac->port_base; 4137 u32 val, reg; 4138 4139 val = RTW89_PORT_OFFSET_TU_TO_32US(offset_tu); 4140 reg = rtw89_mac_reg_by_idx(rtwdev, p->tsf_sync + rtwvif->port * 4, 4141 rtwvif->mac_idx); 4142 4143 rtw89_write32_mask(rtwdev, reg, B_AX_SYNC_PORT_SRC, rtwvif_src->port); 4144 rtw89_write32_mask(rtwdev, reg, B_AX_SYNC_PORT_OFFSET_VAL, val); 4145 rtw89_write32_set(rtwdev, reg, B_AX_SYNC_NOW); 4146 } 4147 4148 static void rtw89_mac_port_tsf_sync_rand(struct rtw89_dev *rtwdev, 4149 struct rtw89_vif *rtwvif, 4150 struct rtw89_vif *rtwvif_src, 4151 u8 offset, int *n_offset) 4152 { 4153 if (rtwvif->net_type != 
RTW89_NET_TYPE_AP_MODE || rtwvif == rtwvif_src) 4154 return; 4155 4156 /* adjust offset randomly to avoid beacon conflict */ 4157 offset = offset - offset / 4 + get_random_u32() % (offset / 2); 4158 rtw89_mac_port_tsf_sync(rtwdev, rtwvif, rtwvif_src, 4159 (*n_offset) * offset); 4160 4161 (*n_offset)++; 4162 } 4163 4164 static void rtw89_mac_port_tsf_resync_all(struct rtw89_dev *rtwdev) 4165 { 4166 struct rtw89_vif *src = NULL, *tmp; 4167 u8 offset = 100, vif_aps = 0; 4168 int n_offset = 1; 4169 4170 rtw89_for_each_rtwvif(rtwdev, tmp) { 4171 if (!src || tmp->net_type == RTW89_NET_TYPE_INFRA) 4172 src = tmp; 4173 if (tmp->net_type == RTW89_NET_TYPE_AP_MODE) 4174 vif_aps++; 4175 } 4176 4177 if (vif_aps == 0) 4178 return; 4179 4180 offset /= (vif_aps + 1); 4181 4182 rtw89_for_each_rtwvif(rtwdev, tmp) 4183 rtw89_mac_port_tsf_sync_rand(rtwdev, tmp, src, offset, &n_offset); 4184 } 4185 4186 int rtw89_mac_vif_init(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 4187 { 4188 int ret; 4189 4190 ret = rtw89_mac_port_update(rtwdev, rtwvif); 4191 if (ret) 4192 return ret; 4193 4194 rtw89_mac_dmac_tbl_init(rtwdev, rtwvif->mac_id); 4195 rtw89_mac_cmac_tbl_init(rtwdev, rtwvif->mac_id); 4196 4197 ret = rtw89_mac_set_macid_pause(rtwdev, rtwvif->mac_id, false); 4198 if (ret) 4199 return ret; 4200 4201 ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, NULL, RTW89_ROLE_CREATE); 4202 if (ret) 4203 return ret; 4204 4205 ret = rtw89_fw_h2c_join_info(rtwdev, rtwvif, NULL, true); 4206 if (ret) 4207 return ret; 4208 4209 ret = rtw89_cam_init(rtwdev, rtwvif); 4210 if (ret) 4211 return ret; 4212 4213 ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL); 4214 if (ret) 4215 return ret; 4216 4217 ret = rtw89_fw_h2c_default_cmac_tbl(rtwdev, rtwvif); 4218 if (ret) 4219 return ret; 4220 4221 return 0; 4222 } 4223 4224 int rtw89_mac_vif_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 4225 { 4226 int ret; 4227 4228 ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, NULL, RTW89_ROLE_REMOVE); 4229 if (ret) 4230 return ret; 4231 4232 rtw89_cam_deinit(rtwdev, rtwvif); 4233 4234 ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL); 4235 if (ret) 4236 return ret; 4237 4238 return 0; 4239 } 4240 4241 int rtw89_mac_port_update(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 4242 { 4243 u8 port = rtwvif->port; 4244 4245 if (port >= RTW89_PORT_NUM) 4246 return -EINVAL; 4247 4248 rtw89_mac_port_cfg_func_sw(rtwdev, rtwvif); 4249 rtw89_mac_port_cfg_tx_rpt(rtwdev, rtwvif, false); 4250 rtw89_mac_port_cfg_rx_rpt(rtwdev, rtwvif, false); 4251 rtw89_mac_port_cfg_net_type(rtwdev, rtwvif); 4252 rtw89_mac_port_cfg_bcn_prct(rtwdev, rtwvif); 4253 rtw89_mac_port_cfg_rx_sw(rtwdev, rtwvif); 4254 rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif); 4255 rtw89_mac_port_cfg_tx_sw(rtwdev, rtwvif); 4256 rtw89_mac_port_cfg_bcn_intv(rtwdev, rtwvif); 4257 rtw89_mac_port_cfg_hiq_win(rtwdev, rtwvif); 4258 rtw89_mac_port_cfg_hiq_dtim(rtwdev, rtwvif); 4259 rtw89_mac_port_cfg_hiq_drop(rtwdev, rtwvif); 4260 rtw89_mac_port_cfg_bcn_setup_time(rtwdev, rtwvif); 4261 rtw89_mac_port_cfg_bcn_hold_time(rtwdev, rtwvif); 4262 rtw89_mac_port_cfg_bcn_mask_area(rtwdev, rtwvif); 4263 rtw89_mac_port_cfg_tbtt_early(rtwdev, rtwvif); 4264 rtw89_mac_port_cfg_tbtt_shift(rtwdev, rtwvif); 4265 rtw89_mac_port_cfg_bss_color(rtwdev, rtwvif); 4266 rtw89_mac_port_cfg_mbssid(rtwdev, rtwvif); 4267 rtw89_mac_port_cfg_func_en(rtwdev, rtwvif, true); 4268 rtw89_mac_port_tsf_resync_all(rtwdev); 4269 fsleep(BCN_ERLY_SET_DLY); 4270 rtw89_mac_port_cfg_bcn_early(rtwdev, rtwvif); 4271 4272 return 0; 
4273 } 4274 4275 int rtw89_mac_port_get_tsf(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 4276 u64 *tsf) 4277 { 4278 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 4279 const struct rtw89_port_reg *p = mac->port_base; 4280 u32 tsf_low, tsf_high; 4281 int ret; 4282 4283 ret = rtw89_mac_check_mac_en(rtwdev, rtwvif->mac_idx, RTW89_CMAC_SEL); 4284 if (ret) 4285 return ret; 4286 4287 tsf_low = rtw89_read32_port(rtwdev, rtwvif, p->tsftr_l); 4288 tsf_high = rtw89_read32_port(rtwdev, rtwvif, p->tsftr_h); 4289 *tsf = (u64)tsf_high << 32 | tsf_low; 4290 4291 return 0; 4292 } 4293 4294 static void rtw89_mac_check_he_obss_narrow_bw_ru_iter(struct wiphy *wiphy, 4295 struct cfg80211_bss *bss, 4296 void *data) 4297 { 4298 const struct cfg80211_bss_ies *ies; 4299 const struct element *elem; 4300 bool *tolerated = data; 4301 4302 rcu_read_lock(); 4303 ies = rcu_dereference(bss->ies); 4304 elem = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY, ies->data, 4305 ies->len); 4306 4307 if (!elem || elem->datalen < 10 || 4308 !(elem->data[10] & WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT)) 4309 *tolerated = false; 4310 rcu_read_unlock(); 4311 } 4312 4313 void rtw89_mac_set_he_obss_narrow_bw_ru(struct rtw89_dev *rtwdev, 4314 struct ieee80211_vif *vif) 4315 { 4316 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 4317 struct ieee80211_hw *hw = rtwdev->hw; 4318 bool tolerated = true; 4319 u32 reg; 4320 4321 if (!vif->bss_conf.he_support || vif->type != NL80211_IFTYPE_STATION) 4322 return; 4323 4324 if (!(vif->bss_conf.chandef.chan->flags & IEEE80211_CHAN_RADAR)) 4325 return; 4326 4327 cfg80211_bss_iter(hw->wiphy, &vif->bss_conf.chandef, 4328 rtw89_mac_check_he_obss_narrow_bw_ru_iter, 4329 &tolerated); 4330 4331 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_RXTRIG_TEST_USER_2, rtwvif->mac_idx); 4332 if (tolerated) 4333 rtw89_write32_clr(rtwdev, reg, B_AX_RXTRIG_RU26_DIS); 4334 else 4335 rtw89_write32_set(rtwdev, reg, B_AX_RXTRIG_RU26_DIS); 4336 } 4337 4338 void rtw89_mac_stop_ap(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 4339 { 4340 rtw89_mac_port_cfg_func_en(rtwdev, rtwvif, false); 4341 } 4342 4343 int rtw89_mac_add_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 4344 { 4345 int ret; 4346 4347 rtwvif->mac_id = rtw89_core_acquire_bit_map(rtwdev->mac_id_map, 4348 RTW89_MAX_MAC_ID_NUM); 4349 if (rtwvif->mac_id == RTW89_MAX_MAC_ID_NUM) 4350 return -ENOSPC; 4351 4352 ret = rtw89_mac_vif_init(rtwdev, rtwvif); 4353 if (ret) 4354 goto release_mac_id; 4355 4356 return 0; 4357 4358 release_mac_id: 4359 rtw89_core_release_bit_map(rtwdev->mac_id_map, rtwvif->mac_id); 4360 4361 return ret; 4362 } 4363 4364 int rtw89_mac_remove_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 4365 { 4366 int ret; 4367 4368 ret = rtw89_mac_vif_deinit(rtwdev, rtwvif); 4369 rtw89_core_release_bit_map(rtwdev->mac_id_map, rtwvif->mac_id); 4370 4371 return ret; 4372 } 4373 4374 static void 4375 rtw89_mac_c2h_macid_pause(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 4376 { 4377 } 4378 4379 static bool rtw89_is_op_chan(struct rtw89_dev *rtwdev, u8 band, u8 channel) 4380 { 4381 const struct rtw89_chan *op = &rtwdev->scan_info.op_chan; 4382 4383 return band == op->band_type && channel == op->primary_channel; 4384 } 4385 4386 static void 4387 rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *c2h, 4388 u32 len) 4389 { 4390 struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif; 4391 struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif); 4392 struct rtw89_chan new; 4393 u8 
reason, status, tx_fail, band, actual_period; 4394 u32 last_chan = rtwdev->scan_info.last_chan_idx; 4395 u16 chan; 4396 int ret; 4397 4398 if (!rtwvif) 4399 return; 4400 4401 tx_fail = RTW89_GET_MAC_C2H_SCANOFLD_TX_FAIL(c2h->data); 4402 status = RTW89_GET_MAC_C2H_SCANOFLD_STATUS(c2h->data); 4403 chan = RTW89_GET_MAC_C2H_SCANOFLD_PRI_CH(c2h->data); 4404 reason = RTW89_GET_MAC_C2H_SCANOFLD_RSP(c2h->data); 4405 band = RTW89_GET_MAC_C2H_SCANOFLD_BAND(c2h->data); 4406 actual_period = RTW89_GET_MAC_C2H_ACTUAL_PERIOD(c2h->data); 4407 4408 if (!(rtwdev->chip->support_bands & BIT(NL80211_BAND_6GHZ))) 4409 band = chan > 14 ? RTW89_BAND_5G : RTW89_BAND_2G; 4410 4411 rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN, 4412 "band: %d, chan: %d, reason: %d, status: %d, tx_fail: %d, actual: %d\n", 4413 band, chan, reason, status, tx_fail, actual_period); 4414 4415 switch (reason) { 4416 case RTW89_SCAN_LEAVE_CH_NOTIFY: 4417 if (rtw89_is_op_chan(rtwdev, band, chan)) 4418 ieee80211_stop_queues(rtwdev->hw); 4419 return; 4420 case RTW89_SCAN_END_SCAN_NOTIFY: 4421 if (rtwvif && rtwvif->scan_req && 4422 last_chan < rtwvif->scan_req->n_channels) { 4423 ret = rtw89_hw_scan_offload(rtwdev, vif, true); 4424 if (ret) { 4425 rtw89_hw_scan_abort(rtwdev, vif); 4426 rtw89_warn(rtwdev, "HW scan failed: %d\n", ret); 4427 } 4428 } else { 4429 rtw89_hw_scan_complete(rtwdev, vif, false); 4430 } 4431 break; 4432 case RTW89_SCAN_ENTER_CH_NOTIFY: 4433 if (rtw89_is_op_chan(rtwdev, band, chan)) { 4434 rtw89_assign_entity_chan(rtwdev, rtwvif->sub_entity_idx, 4435 &rtwdev->scan_info.op_chan); 4436 ieee80211_wake_queues(rtwdev->hw); 4437 } else { 4438 rtw89_chan_create(&new, chan, chan, band, 4439 RTW89_CHANNEL_WIDTH_20); 4440 rtw89_assign_entity_chan(rtwdev, rtwvif->sub_entity_idx, 4441 &new); 4442 } 4443 break; 4444 default: 4445 return; 4446 } 4447 } 4448 4449 static void 4450 rtw89_mac_bcn_fltr_rpt(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 4451 struct sk_buff *skb) 4452 { 4453 struct ieee80211_vif *vif = rtwvif_to_vif_safe(rtwvif); 4454 enum nl80211_cqm_rssi_threshold_event nl_event; 4455 const struct rtw89_c2h_mac_bcnfltr_rpt *c2h = 4456 (const struct rtw89_c2h_mac_bcnfltr_rpt *)skb->data; 4457 u8 type, event, mac_id; 4458 s8 sig; 4459 4460 type = le32_get_bits(c2h->w2, RTW89_C2H_MAC_BCNFLTR_RPT_W2_TYPE); 4461 sig = le32_get_bits(c2h->w2, RTW89_C2H_MAC_BCNFLTR_RPT_W2_MA) - MAX_RSSI; 4462 event = le32_get_bits(c2h->w2, RTW89_C2H_MAC_BCNFLTR_RPT_W2_EVENT); 4463 mac_id = le32_get_bits(c2h->w2, RTW89_C2H_MAC_BCNFLTR_RPT_W2_MACID); 4464 4465 if (mac_id != rtwvif->mac_id) 4466 return; 4467 4468 rtw89_debug(rtwdev, RTW89_DBG_FW, 4469 "C2H bcnfltr rpt macid: %d, type: %d, ma: %d, event: %d\n", 4470 mac_id, type, sig, event); 4471 4472 switch (type) { 4473 case RTW89_BCN_FLTR_BEACON_LOSS: 4474 if (!rtwdev->scanning && !rtwvif->offchan) 4475 ieee80211_connection_loss(vif); 4476 else 4477 rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, vif, true); 4478 return; 4479 case RTW89_BCN_FLTR_NOTIFY: 4480 nl_event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH; 4481 break; 4482 case RTW89_BCN_FLTR_RSSI: 4483 if (event == RTW89_BCN_FLTR_RSSI_LOW) 4484 nl_event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW; 4485 else if (event == RTW89_BCN_FLTR_RSSI_HIGH) 4486 nl_event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH; 4487 else 4488 return; 4489 break; 4490 default: 4491 return; 4492 } 4493 4494 ieee80211_cqm_rssi_notify(vif, nl_event, sig, GFP_KERNEL); 4495 } 4496 4497 static void 4498 rtw89_mac_c2h_bcn_fltr_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, 4499 u32 len) 4500 { 
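/* The report carries a mac_id; walk every vif and let rtw89_mac_bcn_fltr_rpt() act only on the matching one. */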
4501 struct rtw89_vif *rtwvif; 4502 4503 rtw89_for_each_rtwvif(rtwdev, rtwvif) 4504 rtw89_mac_bcn_fltr_rpt(rtwdev, rtwvif, c2h); 4505 } 4506 4507 static void 4508 rtw89_mac_c2h_rec_ack(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 4509 { 4510 /* N.B. This will run in interrupt context. */ 4511 4512 rtw89_debug(rtwdev, RTW89_DBG_FW, 4513 "C2H rev ack recv, cat: %d, class: %d, func: %d, seq : %d\n", 4514 RTW89_GET_MAC_C2H_REV_ACK_CAT(c2h->data), 4515 RTW89_GET_MAC_C2H_REV_ACK_CLASS(c2h->data), 4516 RTW89_GET_MAC_C2H_REV_ACK_FUNC(c2h->data), 4517 RTW89_GET_MAC_C2H_REV_ACK_H2C_SEQ(c2h->data)); 4518 } 4519 4520 static void 4521 rtw89_mac_c2h_done_ack(struct rtw89_dev *rtwdev, struct sk_buff *skb_c2h, u32 len) 4522 { 4523 /* N.B. This will run in interrupt context. */ 4524 struct rtw89_wait_info *fw_ofld_wait = &rtwdev->mac.fw_ofld_wait; 4525 const struct rtw89_c2h_done_ack *c2h = 4526 (const struct rtw89_c2h_done_ack *)skb_c2h->data; 4527 u8 h2c_cat = le32_get_bits(c2h->w2, RTW89_C2H_DONE_ACK_W2_CAT); 4528 u8 h2c_class = le32_get_bits(c2h->w2, RTW89_C2H_DONE_ACK_W2_CLASS); 4529 u8 h2c_func = le32_get_bits(c2h->w2, RTW89_C2H_DONE_ACK_W2_FUNC); 4530 u8 h2c_return = le32_get_bits(c2h->w2, RTW89_C2H_DONE_ACK_W2_H2C_RETURN); 4531 u8 h2c_seq = le32_get_bits(c2h->w2, RTW89_C2H_DONE_ACK_W2_H2C_SEQ); 4532 struct rtw89_completion_data data = {}; 4533 unsigned int cond; 4534 4535 rtw89_debug(rtwdev, RTW89_DBG_FW, 4536 "C2H done ack recv, cat: %d, class: %d, func: %d, ret: %d, seq : %d\n", 4537 h2c_cat, h2c_class, h2c_func, h2c_return, h2c_seq); 4538 4539 if (h2c_cat != H2C_CAT_MAC) 4540 return; 4541 4542 switch (h2c_class) { 4543 default: 4544 return; 4545 case H2C_CL_MAC_FW_OFLD: 4546 switch (h2c_func) { 4547 default: 4548 return; 4549 case H2C_FUNC_ADD_SCANOFLD_CH: 4550 case H2C_FUNC_SCANOFLD: 4551 cond = RTW89_FW_OFLD_WAIT_COND(0, h2c_func); 4552 break; 4553 } 4554 4555 data.err = !!h2c_return; 4556 rtw89_complete_cond(fw_ofld_wait, cond, &data); 4557 return; 4558 } 4559 } 4560 4561 static void 4562 rtw89_mac_c2h_log(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 4563 { 4564 rtw89_fw_log_dump(rtwdev, c2h->data, len); 4565 } 4566 4567 static void 4568 rtw89_mac_c2h_bcn_cnt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 4569 { 4570 } 4571 4572 static void 4573 rtw89_mac_c2h_pkt_ofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb_c2h, 4574 u32 len) 4575 { 4576 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 4577 const struct rtw89_c2h_pkt_ofld_rsp *c2h = 4578 (const struct rtw89_c2h_pkt_ofld_rsp *)skb_c2h->data; 4579 u16 pkt_len = le32_get_bits(c2h->w2, RTW89_C2H_PKT_OFLD_RSP_W2_PTK_LEN); 4580 u8 pkt_id = le32_get_bits(c2h->w2, RTW89_C2H_PKT_OFLD_RSP_W2_PTK_ID); 4581 u8 pkt_op = le32_get_bits(c2h->w2, RTW89_C2H_PKT_OFLD_RSP_W2_PTK_OP); 4582 struct rtw89_completion_data data = {}; 4583 unsigned int cond; 4584 4585 rtw89_debug(rtwdev, RTW89_DBG_FW, "pkt ofld rsp: id %d op %d len %d\n", 4586 pkt_id, pkt_op, pkt_len); 4587 4588 data.err = !pkt_len; 4589 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(pkt_id, pkt_op); 4590 4591 rtw89_complete_cond(wait, cond, &data); 4592 } 4593 4594 static void 4595 rtw89_mac_c2h_tsf32_toggle_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, 4596 u32 len) 4597 { 4598 rtw89_queue_chanctx_change(rtwdev, RTW89_CHANCTX_TSF32_TOGGLE_CHANGE); 4599 } 4600 4601 static void 4602 rtw89_mac_c2h_mcc_rcv_ack(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 4603 { 4604 u8 group = RTW89_GET_MAC_C2H_MCC_RCV_ACK_GROUP(c2h->data); 4605 u8 func = 
RTW89_GET_MAC_C2H_MCC_RCV_ACK_H2C_FUNC(c2h->data); 4606 4607 switch (func) { 4608 case H2C_FUNC_ADD_MCC: 4609 case H2C_FUNC_START_MCC: 4610 case H2C_FUNC_STOP_MCC: 4611 case H2C_FUNC_DEL_MCC_GROUP: 4612 case H2C_FUNC_RESET_MCC_GROUP: 4613 case H2C_FUNC_MCC_REQ_TSF: 4614 case H2C_FUNC_MCC_MACID_BITMAP: 4615 case H2C_FUNC_MCC_SYNC: 4616 case H2C_FUNC_MCC_SET_DURATION: 4617 break; 4618 default: 4619 rtw89_debug(rtwdev, RTW89_DBG_CHAN, 4620 "invalid MCC C2H RCV ACK: func %d\n", func); 4621 return; 4622 } 4623 4624 rtw89_debug(rtwdev, RTW89_DBG_CHAN, 4625 "MCC C2H RCV ACK: group %d, func %d\n", group, func); 4626 } 4627 4628 static void 4629 rtw89_mac_c2h_mcc_req_ack(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 4630 { 4631 u8 group = RTW89_GET_MAC_C2H_MCC_REQ_ACK_GROUP(c2h->data); 4632 u8 func = RTW89_GET_MAC_C2H_MCC_REQ_ACK_H2C_FUNC(c2h->data); 4633 u8 retcode = RTW89_GET_MAC_C2H_MCC_REQ_ACK_H2C_RETURN(c2h->data); 4634 struct rtw89_completion_data data = {}; 4635 unsigned int cond; 4636 bool next = false; 4637 4638 switch (func) { 4639 case H2C_FUNC_MCC_REQ_TSF: 4640 next = true; 4641 break; 4642 case H2C_FUNC_MCC_MACID_BITMAP: 4643 case H2C_FUNC_MCC_SYNC: 4644 case H2C_FUNC_MCC_SET_DURATION: 4645 break; 4646 case H2C_FUNC_ADD_MCC: 4647 case H2C_FUNC_START_MCC: 4648 case H2C_FUNC_STOP_MCC: 4649 case H2C_FUNC_DEL_MCC_GROUP: 4650 case H2C_FUNC_RESET_MCC_GROUP: 4651 default: 4652 rtw89_debug(rtwdev, RTW89_DBG_CHAN, 4653 "invalid MCC C2H REQ ACK: func %d\n", func); 4654 return; 4655 } 4656 4657 rtw89_debug(rtwdev, RTW89_DBG_CHAN, 4658 "MCC C2H REQ ACK: group %d, func %d, return code %d\n", 4659 group, func, retcode); 4660 4661 if (!retcode && next) 4662 return; 4663 4664 data.err = !!retcode; 4665 cond = RTW89_MCC_WAIT_COND(group, func); 4666 rtw89_complete_cond(&rtwdev->mcc.wait, cond, &data); 4667 } 4668 4669 static void 4670 rtw89_mac_c2h_mcc_tsf_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 4671 { 4672 u8 group = RTW89_GET_MAC_C2H_MCC_TSF_RPT_GROUP(c2h->data); 4673 struct rtw89_completion_data data = {}; 4674 struct rtw89_mac_mcc_tsf_rpt *rpt; 4675 unsigned int cond; 4676 4677 rpt = (struct rtw89_mac_mcc_tsf_rpt *)data.buf; 4678 rpt->macid_x = RTW89_GET_MAC_C2H_MCC_TSF_RPT_MACID_X(c2h->data); 4679 rpt->macid_y = RTW89_GET_MAC_C2H_MCC_TSF_RPT_MACID_Y(c2h->data); 4680 rpt->tsf_x_low = RTW89_GET_MAC_C2H_MCC_TSF_RPT_TSF_LOW_X(c2h->data); 4681 rpt->tsf_x_high = RTW89_GET_MAC_C2H_MCC_TSF_RPT_TSF_HIGH_X(c2h->data); 4682 rpt->tsf_y_low = RTW89_GET_MAC_C2H_MCC_TSF_RPT_TSF_LOW_Y(c2h->data); 4683 rpt->tsf_y_high = RTW89_GET_MAC_C2H_MCC_TSF_RPT_TSF_HIGH_Y(c2h->data); 4684 4685 rtw89_debug(rtwdev, RTW89_DBG_CHAN, 4686 "MCC C2H TSF RPT: macid %d> %llu, macid %d> %llu\n", 4687 rpt->macid_x, (u64)rpt->tsf_x_high << 32 | rpt->tsf_x_low, 4688 rpt->macid_y, (u64)rpt->tsf_y_high << 32 | rpt->tsf_y_low); 4689 4690 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_REQ_TSF); 4691 rtw89_complete_cond(&rtwdev->mcc.wait, cond, &data); 4692 } 4693 4694 static void 4695 rtw89_mac_c2h_mcc_status_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 4696 { 4697 u8 group = RTW89_GET_MAC_C2H_MCC_STATUS_RPT_GROUP(c2h->data); 4698 u8 macid = RTW89_GET_MAC_C2H_MCC_STATUS_RPT_MACID(c2h->data); 4699 u8 status = RTW89_GET_MAC_C2H_MCC_STATUS_RPT_STATUS(c2h->data); 4700 u32 tsf_low = RTW89_GET_MAC_C2H_MCC_STATUS_RPT_TSF_LOW(c2h->data); 4701 u32 tsf_high = RTW89_GET_MAC_C2H_MCC_STATUS_RPT_TSF_HIGH(c2h->data); 4702 struct rtw89_completion_data data = {}; 4703 unsigned int cond; 4704 bool rsp = true; 4705 
bool err; 4706 u8 func; 4707 4708 switch (status) { 4709 case RTW89_MAC_MCC_ADD_ROLE_OK: 4710 case RTW89_MAC_MCC_ADD_ROLE_FAIL: 4711 func = H2C_FUNC_ADD_MCC; 4712 err = status == RTW89_MAC_MCC_ADD_ROLE_FAIL; 4713 break; 4714 case RTW89_MAC_MCC_START_GROUP_OK: 4715 case RTW89_MAC_MCC_START_GROUP_FAIL: 4716 func = H2C_FUNC_START_MCC; 4717 err = status == RTW89_MAC_MCC_START_GROUP_FAIL; 4718 break; 4719 case RTW89_MAC_MCC_STOP_GROUP_OK: 4720 case RTW89_MAC_MCC_STOP_GROUP_FAIL: 4721 func = H2C_FUNC_STOP_MCC; 4722 err = status == RTW89_MAC_MCC_STOP_GROUP_FAIL; 4723 break; 4724 case RTW89_MAC_MCC_DEL_GROUP_OK: 4725 case RTW89_MAC_MCC_DEL_GROUP_FAIL: 4726 func = H2C_FUNC_DEL_MCC_GROUP; 4727 err = status == RTW89_MAC_MCC_DEL_GROUP_FAIL; 4728 break; 4729 case RTW89_MAC_MCC_RESET_GROUP_OK: 4730 case RTW89_MAC_MCC_RESET_GROUP_FAIL: 4731 func = H2C_FUNC_RESET_MCC_GROUP; 4732 err = status == RTW89_MAC_MCC_RESET_GROUP_FAIL; 4733 break; 4734 case RTW89_MAC_MCC_SWITCH_CH_OK: 4735 case RTW89_MAC_MCC_SWITCH_CH_FAIL: 4736 case RTW89_MAC_MCC_TXNULL0_OK: 4737 case RTW89_MAC_MCC_TXNULL0_FAIL: 4738 case RTW89_MAC_MCC_TXNULL1_OK: 4739 case RTW89_MAC_MCC_TXNULL1_FAIL: 4740 case RTW89_MAC_MCC_SWITCH_EARLY: 4741 case RTW89_MAC_MCC_TBTT: 4742 case RTW89_MAC_MCC_DURATION_START: 4743 case RTW89_MAC_MCC_DURATION_END: 4744 rsp = false; 4745 break; 4746 default: 4747 rtw89_debug(rtwdev, RTW89_DBG_CHAN, 4748 "invalid MCC C2H STS RPT: status %d\n", status); 4749 return; 4750 } 4751 4752 rtw89_debug(rtwdev, RTW89_DBG_CHAN, 4753 "MCC C2H STS RPT: group %d, macid %d, status %d, tsf %llu\n", 4754 group, macid, status, (u64)tsf_high << 32 | tsf_low); 4755 4756 if (!rsp) 4757 return; 4758 4759 data.err = err; 4760 cond = RTW89_MCC_WAIT_COND(group, func); 4761 rtw89_complete_cond(&rtwdev->mcc.wait, cond, &data); 4762 } 4763 4764 static 4765 void (* const rtw89_mac_c2h_ofld_handler[])(struct rtw89_dev *rtwdev, 4766 struct sk_buff *c2h, u32 len) = { 4767 [RTW89_MAC_C2H_FUNC_EFUSE_DUMP] = NULL, 4768 [RTW89_MAC_C2H_FUNC_READ_RSP] = NULL, 4769 [RTW89_MAC_C2H_FUNC_PKT_OFLD_RSP] = rtw89_mac_c2h_pkt_ofld_rsp, 4770 [RTW89_MAC_C2H_FUNC_BCN_RESEND] = NULL, 4771 [RTW89_MAC_C2H_FUNC_MACID_PAUSE] = rtw89_mac_c2h_macid_pause, 4772 [RTW89_MAC_C2H_FUNC_SCANOFLD_RSP] = rtw89_mac_c2h_scanofld_rsp, 4773 [RTW89_MAC_C2H_FUNC_TSF32_TOGL_RPT] = rtw89_mac_c2h_tsf32_toggle_rpt, 4774 [RTW89_MAC_C2H_FUNC_BCNFLTR_RPT] = rtw89_mac_c2h_bcn_fltr_rpt, 4775 }; 4776 4777 static 4778 void (* const rtw89_mac_c2h_info_handler[])(struct rtw89_dev *rtwdev, 4779 struct sk_buff *c2h, u32 len) = { 4780 [RTW89_MAC_C2H_FUNC_REC_ACK] = rtw89_mac_c2h_rec_ack, 4781 [RTW89_MAC_C2H_FUNC_DONE_ACK] = rtw89_mac_c2h_done_ack, 4782 [RTW89_MAC_C2H_FUNC_C2H_LOG] = rtw89_mac_c2h_log, 4783 [RTW89_MAC_C2H_FUNC_BCN_CNT] = rtw89_mac_c2h_bcn_cnt, 4784 }; 4785 4786 static 4787 void (* const rtw89_mac_c2h_mcc_handler[])(struct rtw89_dev *rtwdev, 4788 struct sk_buff *c2h, u32 len) = { 4789 [RTW89_MAC_C2H_FUNC_MCC_RCV_ACK] = rtw89_mac_c2h_mcc_rcv_ack, 4790 [RTW89_MAC_C2H_FUNC_MCC_REQ_ACK] = rtw89_mac_c2h_mcc_req_ack, 4791 [RTW89_MAC_C2H_FUNC_MCC_TSF_RPT] = rtw89_mac_c2h_mcc_tsf_rpt, 4792 [RTW89_MAC_C2H_FUNC_MCC_STATUS_RPT] = rtw89_mac_c2h_mcc_status_rpt, 4793 }; 4794 4795 bool rtw89_mac_c2h_chk_atomic(struct rtw89_dev *rtwdev, u8 class, u8 func) 4796 { 4797 switch (class) { 4798 default: 4799 return false; 4800 case RTW89_MAC_C2H_CLASS_INFO: 4801 switch (func) { 4802 default: 4803 return false; 4804 case RTW89_MAC_C2H_FUNC_REC_ACK: 4805 case RTW89_MAC_C2H_FUNC_DONE_ACK: 4806 return true; 4807 } 
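/* Offload and MCC acknowledgements mainly complete pending waiters, so they can also be handled directly in the RX (atomic) path. */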
4808 case RTW89_MAC_C2H_CLASS_OFLD: 4809 switch (func) { 4810 default: 4811 return false; 4812 case RTW89_MAC_C2H_FUNC_PKT_OFLD_RSP: 4813 return true; 4814 } 4815 case RTW89_MAC_C2H_CLASS_MCC: 4816 return true; 4817 } 4818 } 4819 4820 void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb, 4821 u32 len, u8 class, u8 func) 4822 { 4823 void (*handler)(struct rtw89_dev *rtwdev, 4824 struct sk_buff *c2h, u32 len) = NULL; 4825 4826 switch (class) { 4827 case RTW89_MAC_C2H_CLASS_INFO: 4828 if (func < RTW89_MAC_C2H_FUNC_INFO_MAX) 4829 handler = rtw89_mac_c2h_info_handler[func]; 4830 break; 4831 case RTW89_MAC_C2H_CLASS_OFLD: 4832 if (func < RTW89_MAC_C2H_FUNC_OFLD_MAX) 4833 handler = rtw89_mac_c2h_ofld_handler[func]; 4834 break; 4835 case RTW89_MAC_C2H_CLASS_MCC: 4836 if (func < NUM_OF_RTW89_MAC_C2H_FUNC_MCC) 4837 handler = rtw89_mac_c2h_mcc_handler[func]; 4838 break; 4839 case RTW89_MAC_C2H_CLASS_FWDBG: 4840 return; 4841 default: 4842 rtw89_info(rtwdev, "c2h class %d not supported\n", class); 4843 return; 4844 } 4845 if (!handler) { 4846 rtw89_info(rtwdev, "c2h class %d func %d not supported\n", class, 4847 func); 4848 return; 4849 } 4850 handler(rtwdev, skb, len); 4851 } 4852 4853 static 4854 bool rtw89_mac_get_txpwr_cr_ax(struct rtw89_dev *rtwdev, 4855 enum rtw89_phy_idx phy_idx, 4856 u32 reg_base, u32 *cr) 4857 { 4858 const struct rtw89_dle_mem *dle_mem = rtwdev->chip->dle_mem; 4859 enum rtw89_qta_mode mode = dle_mem->mode; 4860 u32 addr = rtw89_mac_reg_by_idx(rtwdev, reg_base, phy_idx); 4861 4862 if (addr < R_AX_PWR_RATE_CTRL || addr > CMAC1_END_ADDR_AX) { 4863 rtw89_err(rtwdev, "[TXPWR] addr=0x%x exceeds txpwr cr\n", 4864 addr); 4865 goto error; 4866 } 4867 4868 if (addr >= CMAC1_START_ADDR_AX && addr <= CMAC1_END_ADDR_AX) 4869 if (mode == RTW89_QTA_SCC) { 4870 rtw89_err(rtwdev, 4871 "[TXPWR] addr=0x%x but hw not enabled\n", 4872 addr); 4873 goto error; 4874 } 4875 4876 *cr = addr; 4877 return true; 4878 4879 error: 4880 rtw89_err(rtwdev, "[TXPWR] check txpwr cr 0x%x(phy%d) fail\n", 4881 addr, phy_idx); 4882 4883 return false; 4884 } 4885 4886 int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable) 4887 { 4888 u32 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PPDU_STAT, mac_idx); 4889 int ret; 4890 4891 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 4892 if (ret) 4893 return ret; 4894 4895 if (!enable) { 4896 rtw89_write32_clr(rtwdev, reg, B_AX_PPDU_STAT_RPT_EN); 4897 return 0; 4898 } 4899 4900 rtw89_write32(rtwdev, reg, B_AX_PPDU_STAT_RPT_EN | 4901 B_AX_APP_MAC_INFO_RPT | 4902 B_AX_APP_RX_CNT_RPT | B_AX_APP_PLCP_HDR_RPT | 4903 B_AX_PPDU_STAT_RPT_CRC32); 4904 rtw89_write32_mask(rtwdev, R_AX_HW_RPT_FWD, B_AX_FWD_PPDU_STAT_MASK, 4905 RTW89_PRPT_DEST_HOST); 4906 4907 return 0; 4908 } 4909 EXPORT_SYMBOL(rtw89_mac_cfg_ppdu_status); 4910 4911 void rtw89_mac_update_rts_threshold(struct rtw89_dev *rtwdev, u8 mac_idx) 4912 { 4913 #define MAC_AX_TIME_TH_SH 5 4914 #define MAC_AX_LEN_TH_SH 4 4915 #define MAC_AX_TIME_TH_MAX 255 4916 #define MAC_AX_LEN_TH_MAX 255 4917 #define MAC_AX_TIME_TH_DEF 88 4918 #define MAC_AX_LEN_TH_DEF 4080 4919 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 4920 struct ieee80211_hw *hw = rtwdev->hw; 4921 u32 rts_threshold = hw->wiphy->rts_threshold; 4922 u32 time_th, len_th; 4923 u32 reg; 4924 4925 if (rts_threshold == (u32)-1) { 4926 time_th = MAC_AX_TIME_TH_DEF; 4927 len_th = MAC_AX_LEN_TH_DEF; 4928 } else { 4929 time_th = MAC_AX_TIME_TH_MAX << MAC_AX_TIME_TH_SH; 4930 len_th = rts_threshold; 4931 } 4932 4933 time_th
= min_t(u32, time_th >> MAC_AX_TIME_TH_SH, MAC_AX_TIME_TH_MAX); 4934 len_th = min_t(u32, len_th >> MAC_AX_LEN_TH_SH, MAC_AX_LEN_TH_MAX); 4935 4936 reg = rtw89_mac_reg_by_idx(rtwdev, mac->agg_len_ht, mac_idx); 4937 rtw89_write16_mask(rtwdev, reg, B_AX_RTS_TXTIME_TH_MASK, time_th); 4938 rtw89_write16_mask(rtwdev, reg, B_AX_RTS_LEN_TH_MASK, len_th); 4939 } 4940 4941 void rtw89_mac_flush_txq(struct rtw89_dev *rtwdev, u32 queues, bool drop) 4942 { 4943 bool empty; 4944 int ret; 4945 4946 if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) 4947 return; 4948 4949 ret = read_poll_timeout(dle_is_txq_empty, empty, empty, 4950 10000, 200000, false, rtwdev); 4951 if (ret && !drop && (rtwdev->total_sta_assoc || rtwdev->scanning)) 4952 rtw89_info(rtwdev, "timed out flushing TX queues\n"); 4953 } 4954 4955 int rtw89_mac_coex_init(struct rtw89_dev *rtwdev, const struct rtw89_mac_ax_coex *coex) 4956 { 4957 u8 val; 4958 u16 val16; 4959 u32 val32; 4960 int ret; 4961 4962 rtw89_write8_set(rtwdev, R_AX_GPIO_MUXCFG, B_AX_ENBT); 4963 if (rtwdev->chip->chip_id != RTL8851B) 4964 rtw89_write8_set(rtwdev, R_AX_BTC_FUNC_EN, B_AX_PTA_WL_TX_EN); 4965 rtw89_write8_set(rtwdev, R_AX_BT_COEX_CFG_2 + 1, B_AX_GNT_BT_POLARITY >> 8); 4966 rtw89_write8_set(rtwdev, R_AX_CSR_MODE, B_AX_STATIS_BT_EN | B_AX_WL_ACT_MSK); 4967 rtw89_write8_set(rtwdev, R_AX_CSR_MODE + 2, B_AX_BT_CNT_RST >> 16); 4968 if (rtwdev->chip->chip_id != RTL8851B) 4969 rtw89_write8_clr(rtwdev, R_AX_TRXPTCL_RESP_0 + 3, B_AX_RSP_CHK_BTCCA >> 24); 4970 4971 val16 = rtw89_read16(rtwdev, R_AX_CCA_CFG_0); 4972 val16 = (val16 | B_AX_BTCCA_EN) & ~B_AX_BTCCA_BRK_TXOP_EN; 4973 rtw89_write16(rtwdev, R_AX_CCA_CFG_0, val16); 4974 4975 ret = rtw89_mac_read_lte(rtwdev, R_AX_LTE_SW_CFG_2, &val32); 4976 if (ret) { 4977 rtw89_err(rtwdev, "Failed to read R_AX_LTE_SW_CFG_2!\n"); 4978 return ret; 4979 } 4980 val32 = val32 & B_AX_WL_RX_CTRL; 4981 ret = rtw89_mac_write_lte(rtwdev, R_AX_LTE_SW_CFG_2, val32); 4982 if (ret) { 4983 rtw89_err(rtwdev, "Failed to write R_AX_LTE_SW_CFG_2!\n"); 4984 return ret; 4985 } 4986 4987 switch (coex->pta_mode) { 4988 case RTW89_MAC_AX_COEX_RTK_MODE: 4989 val = rtw89_read8(rtwdev, R_AX_GPIO_MUXCFG); 4990 val &= ~B_AX_BTMODE_MASK; 4991 val |= FIELD_PREP(B_AX_BTMODE_MASK, MAC_AX_BT_MODE_0_3); 4992 rtw89_write8(rtwdev, R_AX_GPIO_MUXCFG, val); 4993 4994 val = rtw89_read8(rtwdev, R_AX_TDMA_MODE); 4995 rtw89_write8(rtwdev, R_AX_TDMA_MODE, val | B_AX_RTK_BT_ENABLE); 4996 4997 val = rtw89_read8(rtwdev, R_AX_BT_COEX_CFG_5); 4998 val &= ~B_AX_BT_RPT_SAMPLE_RATE_MASK; 4999 val |= FIELD_PREP(B_AX_BT_RPT_SAMPLE_RATE_MASK, MAC_AX_RTK_RATE); 5000 rtw89_write8(rtwdev, R_AX_BT_COEX_CFG_5, val); 5001 break; 5002 case RTW89_MAC_AX_COEX_CSR_MODE: 5003 val = rtw89_read8(rtwdev, R_AX_GPIO_MUXCFG); 5004 val &= ~B_AX_BTMODE_MASK; 5005 val |= FIELD_PREP(B_AX_BTMODE_MASK, MAC_AX_BT_MODE_2); 5006 rtw89_write8(rtwdev, R_AX_GPIO_MUXCFG, val); 5007 5008 val16 = rtw89_read16(rtwdev, R_AX_CSR_MODE); 5009 val16 &= ~B_AX_BT_PRI_DETECT_TO_MASK; 5010 val16 |= FIELD_PREP(B_AX_BT_PRI_DETECT_TO_MASK, MAC_AX_CSR_PRI_TO); 5011 val16 &= ~B_AX_BT_TRX_INIT_DETECT_MASK; 5012 val16 |= FIELD_PREP(B_AX_BT_TRX_INIT_DETECT_MASK, MAC_AX_CSR_TRX_TO); 5013 val16 &= ~B_AX_BT_STAT_DELAY_MASK; 5014 val16 |= FIELD_PREP(B_AX_BT_STAT_DELAY_MASK, MAC_AX_CSR_DELAY); 5015 val16 |= B_AX_ENHANCED_BT; 5016 rtw89_write16(rtwdev, R_AX_CSR_MODE, val16); 5017 5018 rtw89_write8(rtwdev, R_AX_BT_COEX_CFG_2, MAC_AX_CSR_RATE); 5019 break; 5020 default: 5021 return -EINVAL; 5022 } 5023 5024 switch (coex->direction) { 5025 case
RTW89_MAC_AX_COEX_INNER: 5026 val = rtw89_read8(rtwdev, R_AX_GPIO_MUXCFG + 1); 5027 val = (val & ~BIT(2)) | BIT(1); 5028 rtw89_write8(rtwdev, R_AX_GPIO_MUXCFG + 1, val); 5029 break; 5030 case RTW89_MAC_AX_COEX_OUTPUT: 5031 val = rtw89_read8(rtwdev, R_AX_GPIO_MUXCFG + 1); 5032 val = val | BIT(1) | BIT(0); 5033 rtw89_write8(rtwdev, R_AX_GPIO_MUXCFG + 1, val); 5034 break; 5035 case RTW89_MAC_AX_COEX_INPUT: 5036 val = rtw89_read8(rtwdev, R_AX_GPIO_MUXCFG + 1); 5037 val = val & ~(BIT(2) | BIT(1)); 5038 rtw89_write8(rtwdev, R_AX_GPIO_MUXCFG + 1, val); 5039 break; 5040 default: 5041 return -EINVAL; 5042 } 5043 5044 return 0; 5045 } 5046 EXPORT_SYMBOL(rtw89_mac_coex_init); 5047 5048 int rtw89_mac_coex_init_v1(struct rtw89_dev *rtwdev, 5049 const struct rtw89_mac_ax_coex *coex) 5050 { 5051 rtw89_write32_set(rtwdev, R_AX_BTC_CFG, 5052 B_AX_BTC_EN | B_AX_BTG_LNA1_GAIN_SEL); 5053 rtw89_write32_set(rtwdev, R_AX_BT_CNT_CFG, B_AX_BT_CNT_EN); 5054 rtw89_write16_set(rtwdev, R_AX_CCA_CFG_0, B_AX_BTCCA_EN); 5055 rtw89_write16_clr(rtwdev, R_AX_CCA_CFG_0, B_AX_BTCCA_BRK_TXOP_EN); 5056 5057 switch (coex->pta_mode) { 5058 case RTW89_MAC_AX_COEX_RTK_MODE: 5059 rtw89_write32_mask(rtwdev, R_AX_BTC_CFG, B_AX_BTC_MODE_MASK, 5060 MAC_AX_RTK_MODE); 5061 rtw89_write32_mask(rtwdev, R_AX_RTK_MODE_CFG_V1, 5062 B_AX_SAMPLE_CLK_MASK, MAC_AX_RTK_RATE); 5063 break; 5064 case RTW89_MAC_AX_COEX_CSR_MODE: 5065 rtw89_write32_mask(rtwdev, R_AX_BTC_CFG, B_AX_BTC_MODE_MASK, 5066 MAC_AX_CSR_MODE); 5067 break; 5068 default: 5069 return -EINVAL; 5070 } 5071 5072 return 0; 5073 } 5074 EXPORT_SYMBOL(rtw89_mac_coex_init_v1); 5075 5076 int rtw89_mac_cfg_gnt(struct rtw89_dev *rtwdev, 5077 const struct rtw89_mac_ax_coex_gnt *gnt_cfg) 5078 { 5079 u32 val = 0, ret; 5080 5081 if (gnt_cfg->band[0].gnt_bt) 5082 val |= B_AX_GNT_BT_RFC_S0_SW_VAL | B_AX_GNT_BT_BB_S0_SW_VAL; 5083 5084 if (gnt_cfg->band[0].gnt_bt_sw_en) 5085 val |= B_AX_GNT_BT_RFC_S0_SW_CTRL | B_AX_GNT_BT_BB_S0_SW_CTRL; 5086 5087 if (gnt_cfg->band[0].gnt_wl) 5088 val |= B_AX_GNT_WL_RFC_S0_SW_VAL | B_AX_GNT_WL_BB_S0_SW_VAL; 5089 5090 if (gnt_cfg->band[0].gnt_wl_sw_en) 5091 val |= B_AX_GNT_WL_RFC_S0_SW_CTRL | B_AX_GNT_WL_BB_S0_SW_CTRL; 5092 5093 if (gnt_cfg->band[1].gnt_bt) 5094 val |= B_AX_GNT_BT_RFC_S1_SW_VAL | B_AX_GNT_BT_BB_S1_SW_VAL; 5095 5096 if (gnt_cfg->band[1].gnt_bt_sw_en) 5097 val |= B_AX_GNT_BT_RFC_S1_SW_CTRL | B_AX_GNT_BT_BB_S1_SW_CTRL; 5098 5099 if (gnt_cfg->band[1].gnt_wl) 5100 val |= B_AX_GNT_WL_RFC_S1_SW_VAL | B_AX_GNT_WL_BB_S1_SW_VAL; 5101 5102 if (gnt_cfg->band[1].gnt_wl_sw_en) 5103 val |= B_AX_GNT_WL_RFC_S1_SW_CTRL | B_AX_GNT_WL_BB_S1_SW_CTRL; 5104 5105 ret = rtw89_mac_write_lte(rtwdev, R_AX_LTE_SW_CFG_1, val); 5106 if (ret) { 5107 rtw89_err(rtwdev, "Write LTE fail!\n"); 5108 return ret; 5109 } 5110 5111 return 0; 5112 } 5113 EXPORT_SYMBOL(rtw89_mac_cfg_gnt); 5114 5115 int rtw89_mac_cfg_gnt_v1(struct rtw89_dev *rtwdev, 5116 const struct rtw89_mac_ax_coex_gnt *gnt_cfg) 5117 { 5118 u32 val = 0; 5119 5120 if (gnt_cfg->band[0].gnt_bt) 5121 val |= B_AX_GNT_BT_RFC_S0_VAL | B_AX_GNT_BT_RX_VAL | 5122 B_AX_GNT_BT_TX_VAL; 5123 else 5124 val |= B_AX_WL_ACT_VAL; 5125 5126 if (gnt_cfg->band[0].gnt_bt_sw_en) 5127 val |= B_AX_GNT_BT_RFC_S0_SWCTRL | B_AX_GNT_BT_RX_SWCTRL | 5128 B_AX_GNT_BT_TX_SWCTRL | B_AX_WL_ACT_SWCTRL; 5129 5130 if (gnt_cfg->band[0].gnt_wl) 5131 val |= B_AX_GNT_WL_RFC_S0_VAL | B_AX_GNT_WL_RX_VAL | 5132 B_AX_GNT_WL_TX_VAL | B_AX_GNT_WL_BB_VAL; 5133 5134 if (gnt_cfg->band[0].gnt_wl_sw_en) 5135 val |= B_AX_GNT_WL_RFC_S0_SWCTRL | B_AX_GNT_WL_RX_SWCTRL | 5136 
B_AX_GNT_WL_TX_SWCTRL | B_AX_GNT_WL_BB_SWCTRL; 5137 5138 if (gnt_cfg->band[1].gnt_bt) 5139 val |= B_AX_GNT_BT_RFC_S1_VAL | B_AX_GNT_BT_RX_VAL | 5140 B_AX_GNT_BT_TX_VAL; 5141 else 5142 val |= B_AX_WL_ACT_VAL; 5143 5144 if (gnt_cfg->band[1].gnt_bt_sw_en) 5145 val |= B_AX_GNT_BT_RFC_S1_SWCTRL | B_AX_GNT_BT_RX_SWCTRL | 5146 B_AX_GNT_BT_TX_SWCTRL | B_AX_WL_ACT_SWCTRL; 5147 5148 if (gnt_cfg->band[1].gnt_wl) 5149 val |= B_AX_GNT_WL_RFC_S1_VAL | B_AX_GNT_WL_RX_VAL | 5150 B_AX_GNT_WL_TX_VAL | B_AX_GNT_WL_BB_VAL; 5151 5152 if (gnt_cfg->band[1].gnt_wl_sw_en) 5153 val |= B_AX_GNT_WL_RFC_S1_SWCTRL | B_AX_GNT_WL_RX_SWCTRL | 5154 B_AX_GNT_WL_TX_SWCTRL | B_AX_GNT_WL_BB_SWCTRL; 5155 5156 rtw89_write32(rtwdev, R_AX_GNT_SW_CTRL, val); 5157 5158 return 0; 5159 } 5160 EXPORT_SYMBOL(rtw89_mac_cfg_gnt_v1); 5161 5162 int rtw89_mac_cfg_plt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt) 5163 { 5164 u32 reg; 5165 u16 val; 5166 int ret; 5167 5168 ret = rtw89_mac_check_mac_en(rtwdev, plt->band, RTW89_CMAC_SEL); 5169 if (ret) 5170 return ret; 5171 5172 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_BT_PLT, plt->band); 5173 val = (plt->tx & RTW89_MAC_AX_PLT_LTE_RX ? B_AX_TX_PLT_GNT_LTE_RX : 0) | 5174 (plt->tx & RTW89_MAC_AX_PLT_GNT_BT_TX ? B_AX_TX_PLT_GNT_BT_TX : 0) | 5175 (plt->tx & RTW89_MAC_AX_PLT_GNT_BT_RX ? B_AX_TX_PLT_GNT_BT_RX : 0) | 5176 (plt->tx & RTW89_MAC_AX_PLT_GNT_WL ? B_AX_TX_PLT_GNT_WL : 0) | 5177 (plt->rx & RTW89_MAC_AX_PLT_LTE_RX ? B_AX_RX_PLT_GNT_LTE_RX : 0) | 5178 (plt->rx & RTW89_MAC_AX_PLT_GNT_BT_TX ? B_AX_RX_PLT_GNT_BT_TX : 0) | 5179 (plt->rx & RTW89_MAC_AX_PLT_GNT_BT_RX ? B_AX_RX_PLT_GNT_BT_RX : 0) | 5180 (plt->rx & RTW89_MAC_AX_PLT_GNT_WL ? B_AX_RX_PLT_GNT_WL : 0) | 5181 B_AX_PLT_EN; 5182 rtw89_write16(rtwdev, reg, val); 5183 5184 return 0; 5185 } 5186 5187 void rtw89_mac_cfg_sb(struct rtw89_dev *rtwdev, u32 val) 5188 { 5189 u32 fw_sb; 5190 5191 fw_sb = rtw89_read32(rtwdev, R_AX_SCOREBOARD); 5192 fw_sb = FIELD_GET(B_MAC_AX_SB_FW_MASK, fw_sb); 5193 fw_sb = fw_sb & ~B_MAC_AX_BTGS1_NOTIFY; 5194 if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) 5195 fw_sb = fw_sb | MAC_AX_NOTIFY_PWR_MAJOR; 5196 else 5197 fw_sb = fw_sb | MAC_AX_NOTIFY_TP_MAJOR; 5198 val = FIELD_GET(B_MAC_AX_SB_DRV_MASK, val); 5199 val = B_AX_TOGGLE | 5200 FIELD_PREP(B_MAC_AX_SB_DRV_MASK, val) | 5201 FIELD_PREP(B_MAC_AX_SB_FW_MASK, fw_sb); 5202 rtw89_write32(rtwdev, R_AX_SCOREBOARD, val); 5203 fsleep(1000); /* avoid BT FW loss information */ 5204 } 5205 5206 u32 rtw89_mac_get_sb(struct rtw89_dev *rtwdev) 5207 { 5208 return rtw89_read32(rtwdev, R_AX_SCOREBOARD); 5209 } 5210 5211 int rtw89_mac_cfg_ctrl_path(struct rtw89_dev *rtwdev, bool wl) 5212 { 5213 u8 val = rtw89_read8(rtwdev, R_AX_SYS_SDIO_CTRL + 3); 5214 5215 val = wl ? 
val | BIT(2) : val & ~BIT(2); 5216 rtw89_write8(rtwdev, R_AX_SYS_SDIO_CTRL + 3, val); 5217 5218 return 0; 5219 } 5220 EXPORT_SYMBOL(rtw89_mac_cfg_ctrl_path); 5221 5222 int rtw89_mac_cfg_ctrl_path_v1(struct rtw89_dev *rtwdev, bool wl) 5223 { 5224 struct rtw89_btc *btc = &rtwdev->btc; 5225 struct rtw89_btc_dm *dm = &btc->dm; 5226 struct rtw89_mac_ax_gnt *g = dm->gnt.band; 5227 int i; 5228 5229 if (wl) 5230 return 0; 5231 5232 for (i = 0; i < RTW89_PHY_MAX; i++) { 5233 g[i].gnt_bt_sw_en = 1; 5234 g[i].gnt_bt = 1; 5235 g[i].gnt_wl_sw_en = 1; 5236 g[i].gnt_wl = 0; 5237 } 5238 5239 return rtw89_mac_cfg_gnt_v1(rtwdev, &dm->gnt); 5240 } 5241 EXPORT_SYMBOL(rtw89_mac_cfg_ctrl_path_v1); 5242 5243 bool rtw89_mac_get_ctrl_path(struct rtw89_dev *rtwdev) 5244 { 5245 const struct rtw89_chip_info *chip = rtwdev->chip; 5246 u8 val = 0; 5247 5248 if (chip->chip_id == RTL8852C) 5249 return false; 5250 else if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B) 5251 val = rtw89_read8_mask(rtwdev, R_AX_SYS_SDIO_CTRL + 3, 5252 B_AX_LTE_MUX_CTRL_PATH >> 24); 5253 5254 return !!val; 5255 } 5256 5257 u16 rtw89_mac_get_plt_cnt(struct rtw89_dev *rtwdev, u8 band) 5258 { 5259 u32 reg; 5260 u16 cnt; 5261 5262 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_BT_PLT, band); 5263 cnt = rtw89_read32_mask(rtwdev, reg, B_AX_BT_PLT_PKT_CNT_MASK); 5264 rtw89_write16_set(rtwdev, reg, B_AX_BT_PLT_RST); 5265 5266 return cnt; 5267 } 5268 5269 static void rtw89_mac_bfee_standby_timer(struct rtw89_dev *rtwdev, u8 mac_idx, 5270 bool keep) 5271 { 5272 u32 reg; 5273 5274 if (rtwdev->chip->chip_gen != RTW89_CHIP_AX) 5275 return; 5276 5277 rtw89_debug(rtwdev, RTW89_DBG_BF, "set bfee standby_timer to %d\n", keep); 5278 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_BFMEE_RESP_OPTION, mac_idx); 5279 if (keep) { 5280 set_bit(RTW89_FLAG_BFEE_TIMER_KEEP, rtwdev->flags); 5281 rtw89_write32_mask(rtwdev, reg, B_AX_BFMEE_BFRP_RX_STANDBY_TIMER_MASK, 5282 BFRP_RX_STANDBY_TIMER_KEEP); 5283 } else { 5284 clear_bit(RTW89_FLAG_BFEE_TIMER_KEEP, rtwdev->flags); 5285 rtw89_write32_mask(rtwdev, reg, B_AX_BFMEE_BFRP_RX_STANDBY_TIMER_MASK, 5286 BFRP_RX_STANDBY_TIMER_RELEASE); 5287 } 5288 } 5289 5290 void rtw89_mac_bfee_ctrl(struct rtw89_dev *rtwdev, u8 mac_idx, bool en) 5291 { 5292 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 5293 u32 reg; 5294 u32 mask = mac->bfee_ctrl.mask; 5295 5296 rtw89_debug(rtwdev, RTW89_DBG_BF, "set bfee ndpa_en to %d\n", en); 5297 reg = rtw89_mac_reg_by_idx(rtwdev, mac->bfee_ctrl.addr, mac_idx); 5298 if (en) { 5299 set_bit(RTW89_FLAG_BFEE_EN, rtwdev->flags); 5300 rtw89_write32_set(rtwdev, reg, mask); 5301 } else { 5302 clear_bit(RTW89_FLAG_BFEE_EN, rtwdev->flags); 5303 rtw89_write32_clr(rtwdev, reg, mask); 5304 } 5305 } 5306 5307 static int rtw89_mac_init_bfee_ax(struct rtw89_dev *rtwdev, u8 mac_idx) 5308 { 5309 u32 reg; 5310 u32 val32; 5311 int ret; 5312 5313 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 5314 if (ret) 5315 return ret; 5316 5317 /* AP mode set tx gid to 63 */ 5318 /* STA mode set tx gid to 0(default) */ 5319 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_BFMER_CTRL_0, mac_idx); 5320 rtw89_write32_set(rtwdev, reg, B_AX_BFMER_NDP_BFEN); 5321 5322 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TRXPTCL_RESP_CSI_RRSC, mac_idx); 5323 rtw89_write32(rtwdev, reg, CSI_RRSC_BMAP); 5324 5325 reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_BFMEE_RESP_OPTION, mac_idx); 5326 val32 = FIELD_PREP(B_AX_BFMEE_NDP_RX_STANDBY_TIMER_MASK, NDP_RX_STANDBY_TIMER); 5327 rtw89_write32(rtwdev, reg, val32); 5328 
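/* Keep the BFRP RX standby timer armed and enable NDPA response (bfee) by default. */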
	rtw89_mac_bfee_standby_timer(rtwdev, mac_idx, true);
	rtw89_mac_bfee_ctrl(rtwdev, mac_idx, true);

	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx);
	rtw89_write32_set(rtwdev, reg, B_AX_BFMEE_BFPARAM_SEL |
				       B_AX_BFMEE_USE_NSTS |
				       B_AX_BFMEE_CSI_GID_SEL |
				       B_AX_BFMEE_CSI_FORCE_RETE_EN);
	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TRXPTCL_RESP_CSI_RATE, mac_idx);
	rtw89_write32(rtwdev, reg,
		      u32_encode_bits(CSI_INIT_RATE_HT, B_AX_BFMEE_HT_CSI_RATE_MASK) |
		      u32_encode_bits(CSI_INIT_RATE_VHT, B_AX_BFMEE_VHT_CSI_RATE_MASK) |
		      u32_encode_bits(CSI_INIT_RATE_HE, B_AX_BFMEE_HE_CSI_RATE_MASK));

	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_CSIRPT_OPTION, mac_idx);
	rtw89_write32_set(rtwdev, reg,
			  B_AX_CSIPRT_VHTSU_AID_EN | B_AX_CSIPRT_HESU_AID_EN);

	return 0;
}

static int rtw89_mac_set_csi_para_reg_ax(struct rtw89_dev *rtwdev,
					 struct ieee80211_vif *vif,
					 struct ieee80211_sta *sta)
{
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
	u8 mac_idx = rtwvif->mac_idx;
	u8 nc = 1, nr = 3, ng = 0, cb = 1, cs = 1, ldpc_en = 1, stbc_en = 1;
	u8 port_sel = rtwvif->port;
	u8 sound_dim = 3, t;
	u8 *phy_cap = sta->deflink.he_cap.he_cap_elem.phy_cap_info;
	u32 reg;
	u16 val;
	int ret;

	ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
	if (ret)
		return ret;

	if ((phy_cap[3] & IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER) ||
	    (phy_cap[4] & IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER)) {
		ldpc_en &= !!(phy_cap[1] & IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD);
		stbc_en &= !!(phy_cap[2] & IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ);
		t = FIELD_GET(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,
			      phy_cap[5]);
		sound_dim = min(sound_dim, t);
	}
	if ((sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) ||
	    (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)) {
		ldpc_en &= !!(sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC);
		stbc_en &= !!(sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK);
		t = FIELD_GET(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK,
			      sta->deflink.vht_cap.cap);
		sound_dim = min(sound_dim, t);
	}
	nc = min(nc, sound_dim);
	nr = min(nr, sound_dim);

	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx);
	rtw89_write32_set(rtwdev, reg, B_AX_BFMEE_BFPARAM_SEL);

	val = FIELD_PREP(B_AX_BFMEE_CSIINFO0_NC_MASK, nc) |
	      FIELD_PREP(B_AX_BFMEE_CSIINFO0_NR_MASK, nr) |
	      FIELD_PREP(B_AX_BFMEE_CSIINFO0_NG_MASK, ng) |
	      FIELD_PREP(B_AX_BFMEE_CSIINFO0_CB_MASK, cb) |
	      FIELD_PREP(B_AX_BFMEE_CSIINFO0_CS_MASK, cs) |
	      FIELD_PREP(B_AX_BFMEE_CSIINFO0_LDPC_EN, ldpc_en) |
	      FIELD_PREP(B_AX_BFMEE_CSIINFO0_STBC_EN, stbc_en);

	if (port_sel == 0)
		reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx);
	else
		reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TRXPTCL_RESP_CSI_CTRL_1, mac_idx);

	rtw89_write16(rtwdev, reg, val);

	return 0;
}

static int rtw89_mac_csi_rrsc_ax(struct rtw89_dev *rtwdev,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta)
{
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
	u32 rrsc = BIT(RTW89_MAC_BF_RRSC_6M) | BIT(RTW89_MAC_BF_RRSC_24M);
	u32 reg;
	u8 mac_idx = rtwvif->mac_idx;
	int ret;

	ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
	if (ret)
		return ret;

	if (sta->deflink.he_cap.has_he) {
		rrsc |= (BIT(RTW89_MAC_BF_RRSC_HE_MSC0) |
			 BIT(RTW89_MAC_BF_RRSC_HE_MSC3) |
			 BIT(RTW89_MAC_BF_RRSC_HE_MSC5));
	}
	if (sta->deflink.vht_cap.vht_supported) {
		rrsc |= (BIT(RTW89_MAC_BF_RRSC_VHT_MSC0) |
			 BIT(RTW89_MAC_BF_RRSC_VHT_MSC3) |
			 BIT(RTW89_MAC_BF_RRSC_VHT_MSC5));
	}
	if (sta->deflink.ht_cap.ht_supported) {
		rrsc |= (BIT(RTW89_MAC_BF_RRSC_HT_MSC0) |
			 BIT(RTW89_MAC_BF_RRSC_HT_MSC3) |
			 BIT(RTW89_MAC_BF_RRSC_HT_MSC5));
	}
	reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx);
	rtw89_write32_set(rtwdev, reg, B_AX_BFMEE_BFPARAM_SEL);
	rtw89_write32_clr(rtwdev, reg, B_AX_BFMEE_CSI_FORCE_RETE_EN);
	rtw89_write32(rtwdev,
		      rtw89_mac_reg_by_idx(rtwdev, R_AX_TRXPTCL_RESP_CSI_RRSC, mac_idx),
		      rrsc);

	return 0;
}

static void rtw89_mac_bf_assoc_ax(struct rtw89_dev *rtwdev,
				  struct ieee80211_vif *vif,
				  struct ieee80211_sta *sta)
{
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;

	if (rtw89_sta_has_beamformer_cap(sta)) {
		rtw89_debug(rtwdev, RTW89_DBG_BF,
			    "initialize bfee for new association\n");
		rtw89_mac_init_bfee_ax(rtwdev, rtwvif->mac_idx);
		rtw89_mac_set_csi_para_reg_ax(rtwdev, vif, sta);
		rtw89_mac_csi_rrsc_ax(rtwdev, vif, sta);
	}
}

void rtw89_mac_bf_disassoc(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta)
{
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;

	rtw89_mac_bfee_ctrl(rtwdev, rtwvif->mac_idx, false);
}

void rtw89_mac_bf_set_gid_table(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
				struct ieee80211_bss_conf *conf)
{
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
	u8 mac_idx = rtwvif->mac_idx;
	__le32 *p;

	rtw89_debug(rtwdev, RTW89_DBG_BF, "update bf GID table\n");

	p = (__le32 *)conf->mu_group.membership;
	rtw89_write32(rtwdev,
		      rtw89_mac_reg_by_idx(rtwdev, R_AX_GID_POSITION_EN0, mac_idx),
		      le32_to_cpu(p[0]));
	rtw89_write32(rtwdev,
		      rtw89_mac_reg_by_idx(rtwdev, R_AX_GID_POSITION_EN1, mac_idx),
		      le32_to_cpu(p[1]));

	p = (__le32 *)conf->mu_group.position;
	rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(rtwdev, R_AX_GID_POSITION0, mac_idx),
		      le32_to_cpu(p[0]));
	rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(rtwdev, R_AX_GID_POSITION1, mac_idx),
		      le32_to_cpu(p[1]));
	rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(rtwdev, R_AX_GID_POSITION2, mac_idx),
		      le32_to_cpu(p[2]));
	rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(rtwdev, R_AX_GID_POSITION3, mac_idx),
		      le32_to_cpu(p[3]));
}

struct rtw89_mac_bf_monitor_iter_data {
	struct rtw89_dev *rtwdev;
	struct ieee80211_sta *down_sta;
	int count;
};

static
void rtw89_mac_bf_monitor_calc_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw89_mac_bf_monitor_iter_data *iter_data =
		(struct rtw89_mac_bf_monitor_iter_data *)data;
	struct ieee80211_sta *down_sta = iter_data->down_sta;
	int *count = &iter_data->count;

	if (down_sta == sta)
		return;

	if (rtw89_sta_has_beamformer_cap(sta))
		(*count)++;
}

void rtw89_mac_bf_monitor_calc(struct rtw89_dev *rtwdev,
			       struct ieee80211_sta *sta, bool disconnect)
{
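	/* Descriptive note (added): re-count associated peers that advertise
	 * beamformer capability; on a disconnect the departing STA is excluded
	 * via down_sta so RTW89_FLAG_BFEE_MON reflects the remaining peers.
	 */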
	struct rtw89_mac_bf_monitor_iter_data data;

	data.rtwdev = rtwdev;
	data.down_sta = disconnect ? sta : NULL;
	data.count = 0;
	ieee80211_iterate_stations_atomic(rtwdev->hw,
					  rtw89_mac_bf_monitor_calc_iter,
					  &data);

	rtw89_debug(rtwdev, RTW89_DBG_BF, "bfee STA count=%d\n", data.count);
	if (data.count)
		set_bit(RTW89_FLAG_BFEE_MON, rtwdev->flags);
	else
		clear_bit(RTW89_FLAG_BFEE_MON, rtwdev->flags);
}

void _rtw89_mac_bf_monitor_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_traffic_stats *stats = &rtwdev->stats;
	struct rtw89_vif *rtwvif;
	bool en = stats->tx_tfc_lv <= stats->rx_tfc_lv;
	bool old = test_bit(RTW89_FLAG_BFEE_EN, rtwdev->flags);
	bool keep_timer = true;
	bool old_keep_timer;

	old_keep_timer = test_bit(RTW89_FLAG_BFEE_TIMER_KEEP, rtwdev->flags);

	if (stats->tx_tfc_lv <= RTW89_TFC_LOW && stats->rx_tfc_lv <= RTW89_TFC_LOW)
		keep_timer = false;

	if (keep_timer != old_keep_timer) {
		rtw89_for_each_rtwvif(rtwdev, rtwvif)
			rtw89_mac_bfee_standby_timer(rtwdev, rtwvif->mac_idx,
						     keep_timer);
	}

	if (en == old)
		return;

	rtw89_for_each_rtwvif(rtwdev, rtwvif)
		rtw89_mac_bfee_ctrl(rtwdev, rtwvif->mac_idx, en);
}

static int
__rtw89_mac_set_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
			u32 tx_time)
{
#define MAC_AX_DFLT_TX_TIME 5280
	u8 mac_idx = rtwsta->rtwvif->mac_idx;
	u32 max_tx_time = tx_time == 0 ? MAC_AX_DFLT_TX_TIME : tx_time;
	u32 reg;
	int ret = 0;

	if (rtwsta->cctl_tx_time) {
		rtwsta->ampdu_max_time = (max_tx_time - 512) >> 9;
		ret = rtw89_fw_h2c_txtime_cmac_tbl(rtwdev, rtwsta);
	} else {
		ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
		if (ret) {
			rtw89_warn(rtwdev, "failed to check cmac in set txtime\n");
			return ret;
		}

		reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_AMPDU_AGG_LIMIT, mac_idx);
		rtw89_write32_mask(rtwdev, reg, B_AX_AMPDU_MAX_TIME_MASK,
				   max_tx_time >> 5);
	}

	return ret;
}

int rtw89_mac_set_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
			  bool resume, u32 tx_time)
{
	int ret = 0;

	if (!resume) {
		rtwsta->cctl_tx_time = true;
		ret = __rtw89_mac_set_tx_time(rtwdev, rtwsta, tx_time);
	} else {
		ret = __rtw89_mac_set_tx_time(rtwdev, rtwsta, tx_time);
		rtwsta->cctl_tx_time = false;
	}

	return ret;
}

int rtw89_mac_get_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
			  u32 *tx_time)
{
	u8 mac_idx = rtwsta->rtwvif->mac_idx;
	u32 reg;
	int ret = 0;

	if (rtwsta->cctl_tx_time) {
		*tx_time = (rtwsta->ampdu_max_time + 1) << 9;
	} else {
		ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
		if (ret) {
			rtw89_warn(rtwdev, "failed to check cmac in tx_time\n");
			return ret;
		}

		reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_AMPDU_AGG_LIMIT, mac_idx);
		*tx_time = rtw89_read32_mask(rtwdev, reg, B_AX_AMPDU_MAX_TIME_MASK) << 5;
	}

	return ret;
}

int rtw89_mac_set_tx_retry_limit(struct rtw89_dev *rtwdev,
				 struct rtw89_sta *rtwsta,
				 bool resume, u8 tx_retry)
{
	int ret = 0;

	rtwsta->data_tx_cnt_lmt = tx_retry;

	if (!resume) {
		rtwsta->cctl_tx_retry_limit = true;
		ret = rtw89_fw_h2c_txtime_cmac_tbl(rtwdev, rtwsta);
	} else {
		ret = rtw89_fw_h2c_txtime_cmac_tbl(rtwdev, rtwsta);
		rtwsta->cctl_tx_retry_limit = false;
	}

	return ret;
}

int rtw89_mac_get_tx_retry_limit(struct rtw89_dev *rtwdev,
				 struct rtw89_sta *rtwsta, u8 *tx_retry)
{
	u8 mac_idx = rtwsta->rtwvif->mac_idx;
	u32 reg;
	int ret = 0;

	if (rtwsta->cctl_tx_retry_limit) {
		*tx_retry = rtwsta->data_tx_cnt_lmt;
	} else {
		ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
		if (ret) {
			rtw89_warn(rtwdev, "failed to check cmac in rty_lmt\n");
			return ret;
		}

		reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TXCNT, mac_idx);
		*tx_retry = rtw89_read32_mask(rtwdev, reg, B_AX_L_TXCNT_LMT_MASK);
	}

	return ret;
}

int rtw89_mac_set_hw_muedca_ctrl(struct rtw89_dev *rtwdev,
				 struct rtw89_vif *rtwvif, bool en)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	u8 mac_idx = rtwvif->mac_idx;
	u16 set = mac->muedca_ctrl.mask;
	u32 reg;
	u32 ret;

	ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
	if (ret)
		return ret;

	reg = rtw89_mac_reg_by_idx(rtwdev, mac->muedca_ctrl.addr, mac_idx);
	if (en)
		rtw89_write16_set(rtwdev, reg, set);
	else
		rtw89_write16_clr(rtwdev, reg, set);

	return 0;
}

int rtw89_mac_write_xtal_si(struct rtw89_dev *rtwdev, u8 offset, u8 val, u8 mask)
{
	u32 val32;
	int ret;

	val32 = FIELD_PREP(B_AX_WL_XTAL_SI_ADDR_MASK, offset) |
		FIELD_PREP(B_AX_WL_XTAL_SI_DATA_MASK, val) |
		FIELD_PREP(B_AX_WL_XTAL_SI_BITMASK_MASK, mask) |
		FIELD_PREP(B_AX_WL_XTAL_SI_MODE_MASK, XTAL_SI_NORMAL_WRITE) |
		FIELD_PREP(B_AX_WL_XTAL_SI_CMD_POLL, 1);
	rtw89_write32(rtwdev, R_AX_WLAN_XTAL_SI_CTRL, val32);

	ret = read_poll_timeout(rtw89_read32, val32, !(val32 & B_AX_WL_XTAL_SI_CMD_POLL),
				50, 50000, false, rtwdev, R_AX_WLAN_XTAL_SI_CTRL);
	if (ret) {
		rtw89_warn(rtwdev, "xtal si not ready(W): offset=%x val=%x mask=%x\n",
			   offset, val, mask);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(rtw89_mac_write_xtal_si);

int rtw89_mac_read_xtal_si(struct rtw89_dev *rtwdev, u8 offset, u8 *val)
{
	u32 val32;
	int ret;

	val32 = FIELD_PREP(B_AX_WL_XTAL_SI_ADDR_MASK, offset) |
		FIELD_PREP(B_AX_WL_XTAL_SI_DATA_MASK, 0x00) |
		FIELD_PREP(B_AX_WL_XTAL_SI_BITMASK_MASK, 0x00) |
		FIELD_PREP(B_AX_WL_XTAL_SI_MODE_MASK, XTAL_SI_NORMAL_READ) |
		FIELD_PREP(B_AX_WL_XTAL_SI_CMD_POLL, 1);
	rtw89_write32(rtwdev, R_AX_WLAN_XTAL_SI_CTRL, val32);

	ret = read_poll_timeout(rtw89_read32, val32, !(val32 & B_AX_WL_XTAL_SI_CMD_POLL),
				50, 50000, false, rtwdev, R_AX_WLAN_XTAL_SI_CTRL);
	if (ret) {
		rtw89_warn(rtwdev, "xtal si not ready(R): offset=%x\n", offset);
		return ret;
	}

	*val = rtw89_read8(rtwdev, R_AX_WLAN_XTAL_SI_CTRL + 1);

	return 0;
}
EXPORT_SYMBOL(rtw89_mac_read_xtal_si);

static
void rtw89_mac_pkt_drop_sta(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta)
{
	static const enum rtw89_pkt_drop_sel sels[] = {
		RTW89_PKT_DROP_SEL_MACID_BE_ONCE,
		RTW89_PKT_DROP_SEL_MACID_BK_ONCE,
		RTW89_PKT_DROP_SEL_MACID_VI_ONCE,
		RTW89_PKT_DROP_SEL_MACID_VO_ONCE,
	};
	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
	struct rtw89_pkt_drop_params params = {0};
	int i;

	params.mac_band = RTW89_MAC_0;
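	/* Descriptive note (added): fill the remaining drop parameters, then
	 * issue one drop request per access category (BE/BK/VI/VO) so every
	 * data queue owned by this MACID is flushed by firmware.
	 */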
	params.macid = rtwsta->mac_id;
	params.port = rtwvif->port;
	params.mbssid = 0;
	params.tf_trs = rtwvif->trigger;

	for (i = 0; i < ARRAY_SIZE(sels); i++) {
		params.sel = sels[i];
		rtw89_fw_h2c_pkt_drop(rtwdev, &params);
	}
}

static void rtw89_mac_pkt_drop_vif_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
	struct rtw89_dev *rtwdev = rtwvif->rtwdev;
	struct rtw89_vif *target = data;

	if (rtwvif != target)
		return;

	rtw89_mac_pkt_drop_sta(rtwdev, rtwsta);
}

void rtw89_mac_pkt_drop_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
	ieee80211_iterate_stations_atomic(rtwdev->hw,
					  rtw89_mac_pkt_drop_vif_iter,
					  rtwvif);
}

int rtw89_mac_ptk_drop_by_band_and_wait(struct rtw89_dev *rtwdev,
					enum rtw89_mac_idx band)
{
	struct rtw89_pkt_drop_params params = {0};
	bool empty;
	int i, ret = 0, try_cnt = 3;

	params.mac_band = band;
	params.sel = RTW89_PKT_DROP_SEL_BAND_ONCE;

	for (i = 0; i < try_cnt; i++) {
		ret = read_poll_timeout(mac_is_txq_empty, empty, empty, 50,
					50000, false, rtwdev);
		if (ret && !RTW89_CHK_FW_FEATURE(NO_PACKET_DROP, &rtwdev->fw))
			rtw89_fw_h2c_pkt_drop(rtwdev, &params);
		else
			return 0;
	}
	return ret;
}

static u8 rtw89_fw_get_rdy_ax(struct rtw89_dev *rtwdev, enum rtw89_fwdl_check_type type)
{
	u8 val = rtw89_read8(rtwdev, R_AX_WCPU_FW_CTRL);

	return FIELD_GET(B_AX_WCPU_FWDL_STS_MASK, val);
}

static
int rtw89_fwdl_check_path_ready_ax(struct rtw89_dev *rtwdev,
				   bool h2c_or_fwdl)
{
	u8 check = h2c_or_fwdl ? B_AX_H2C_PATH_RDY : B_AX_FWDL_PATH_RDY;
	u8 val;

	return read_poll_timeout_atomic(rtw89_read8, val, val & check,
					1, FWDL_WAIT_CNT, false,
					rtwdev, R_AX_WCPU_FW_CTRL);
}

const struct rtw89_mac_gen_def rtw89_mac_gen_ax = {
	.band1_offset = RTW89_MAC_AX_BAND_REG_OFFSET,
	.filter_model_addr = R_AX_FILTER_MODEL_ADDR,
	.indir_access_addr = R_AX_INDIR_ACCESS_ENTRY,
	.mem_base_addrs = rtw89_mac_mem_base_addrs_ax,
	.rx_fltr = R_AX_RX_FLTR_OPT,
	.port_base = &rtw89_port_base_ax,
	.agg_len_ht = R_AX_AGG_LEN_HT_0,

	.muedca_ctrl = {
		.addr = R_AX_MUEDCA_EN,
		.mask = B_AX_MUEDCA_EN_0 | B_AX_SET_MUEDCATIMER_TF_0,
	},
	.bfee_ctrl = {
		.addr = R_AX_BFMEE_RESP_OPTION,
		.mask = B_AX_BFMEE_HT_NDPA_EN | B_AX_BFMEE_VHT_NDPA_EN |
			B_AX_BFMEE_HE_NDPA_EN,
	},

	.bf_assoc = rtw89_mac_bf_assoc_ax,

	.disable_cpu = rtw89_mac_disable_cpu_ax,
	.fwdl_enable_wcpu = rtw89_mac_enable_cpu_ax,
	.fwdl_get_status = rtw89_fw_get_rdy_ax,
	.fwdl_check_path_ready = rtw89_fwdl_check_path_ready_ax,
	.parse_efuse_map = rtw89_parse_efuse_map_ax,
	.parse_phycap_map = rtw89_parse_phycap_map_ax,
	.cnv_efuse_state = rtw89_cnv_efuse_state_ax,

	.get_txpwr_cr = rtw89_mac_get_txpwr_cr_ax,
};
EXPORT_SYMBOL(rtw89_mac_gen_ax);
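
/* Illustrative sketch (added, not part of the upstream file): AX-generation
 * chip descriptors are expected to reference this exported table through
 * their ->mac_def pointer, roughly along these lines:
 *
 *	const struct rtw89_chip_info some_ax_chip_info = {
 *		...
 *		.mac_def = &rtw89_mac_gen_ax,
 *		...
 *	};
 *
 * "some_ax_chip_info" is a placeholder name; the real definitions live in
 * the per-chip sources, and generic code reaches this table as
 * rtwdev->chip->mac_def.
 */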