1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* Copyright(c) 2019-2020 Realtek Corporation 3 */ 4 5 #include "cam.h" 6 #include "debug.h" 7 #include "fw.h" 8 #include "mac.h" 9 #include "ps.h" 10 #include "reg.h" 11 #include "util.h" 12 13 int rtw89_mac_check_mac_en(struct rtw89_dev *rtwdev, u8 mac_idx, 14 enum rtw89_mac_hwmod_sel sel) 15 { 16 u32 val, r_val; 17 18 if (sel == RTW89_DMAC_SEL) { 19 r_val = rtw89_read32(rtwdev, R_AX_DMAC_FUNC_EN); 20 val = (B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN); 21 } else if (sel == RTW89_CMAC_SEL && mac_idx == 0) { 22 r_val = rtw89_read32(rtwdev, R_AX_CMAC_FUNC_EN); 23 val = B_AX_CMAC_EN; 24 } else if (sel == RTW89_CMAC_SEL && mac_idx == 1) { 25 r_val = rtw89_read32(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND); 26 val = B_AX_CMAC1_FEN; 27 } else { 28 return -EINVAL; 29 } 30 if (r_val == RTW89_R32_EA || r_val == RTW89_R32_DEAD || 31 (val & r_val) != val) 32 return -EFAULT; 33 34 return 0; 35 } 36 37 int rtw89_mac_write_lte(struct rtw89_dev *rtwdev, const u32 offset, u32 val) 38 { 39 u8 lte_ctrl; 40 int ret; 41 42 ret = read_poll_timeout(rtw89_read8, lte_ctrl, (lte_ctrl & BIT(5)) != 0, 43 50, 50000, false, rtwdev, R_AX_LTE_CTRL + 3); 44 if (ret) 45 rtw89_err(rtwdev, "[ERR]lte not ready(W)\n"); 46 47 rtw89_write32(rtwdev, R_AX_LTE_WDATA, val); 48 rtw89_write32(rtwdev, R_AX_LTE_CTRL, 0xC00F0000 | offset); 49 50 return ret; 51 } 52 53 int rtw89_mac_read_lte(struct rtw89_dev *rtwdev, const u32 offset, u32 *val) 54 { 55 u8 lte_ctrl; 56 int ret; 57 58 ret = read_poll_timeout(rtw89_read8, lte_ctrl, (lte_ctrl & BIT(5)) != 0, 59 50, 50000, false, rtwdev, R_AX_LTE_CTRL + 3); 60 if (ret) 61 rtw89_err(rtwdev, "[ERR]lte not ready(W)\n"); 62 63 rtw89_write32(rtwdev, R_AX_LTE_CTRL, 0x800F0000 | offset); 64 *val = rtw89_read32(rtwdev, R_AX_LTE_RDATA); 65 66 return ret; 67 } 68 69 static 70 int dle_dfi_ctrl(struct rtw89_dev *rtwdev, struct rtw89_mac_dle_dfi_ctrl *ctrl) 71 { 72 u32 ctrl_reg, data_reg, ctrl_data; 73 u32 val; 74 int ret; 75 76 switch 
(ctrl->type) { 77 case DLE_CTRL_TYPE_WDE: 78 ctrl_reg = R_AX_WDE_DBG_FUN_INTF_CTL; 79 data_reg = R_AX_WDE_DBG_FUN_INTF_DATA; 80 ctrl_data = FIELD_PREP(B_AX_WDE_DFI_TRGSEL_MASK, ctrl->target) | 81 FIELD_PREP(B_AX_WDE_DFI_ADDR_MASK, ctrl->addr) | 82 B_AX_WDE_DFI_ACTIVE; 83 break; 84 case DLE_CTRL_TYPE_PLE: 85 ctrl_reg = R_AX_PLE_DBG_FUN_INTF_CTL; 86 data_reg = R_AX_PLE_DBG_FUN_INTF_DATA; 87 ctrl_data = FIELD_PREP(B_AX_PLE_DFI_TRGSEL_MASK, ctrl->target) | 88 FIELD_PREP(B_AX_PLE_DFI_ADDR_MASK, ctrl->addr) | 89 B_AX_PLE_DFI_ACTIVE; 90 break; 91 default: 92 rtw89_warn(rtwdev, "[ERR] dfi ctrl type %d\n", ctrl->type); 93 return -EINVAL; 94 } 95 96 rtw89_write32(rtwdev, ctrl_reg, ctrl_data); 97 98 ret = read_poll_timeout_atomic(rtw89_read32, val, !(val & B_AX_WDE_DFI_ACTIVE), 99 1, 1000, false, rtwdev, ctrl_reg); 100 if (ret) { 101 rtw89_warn(rtwdev, "[ERR] dle dfi ctrl 0x%X set 0x%X timeout\n", 102 ctrl_reg, ctrl_data); 103 return ret; 104 } 105 106 ctrl->out_data = rtw89_read32(rtwdev, data_reg); 107 return 0; 108 } 109 110 static int dle_dfi_quota(struct rtw89_dev *rtwdev, 111 struct rtw89_mac_dle_dfi_quota *quota) 112 { 113 struct rtw89_mac_dle_dfi_ctrl ctrl; 114 int ret; 115 116 ctrl.type = quota->dle_type; 117 ctrl.target = DLE_DFI_TYPE_QUOTA; 118 ctrl.addr = quota->qtaid; 119 ret = dle_dfi_ctrl(rtwdev, &ctrl); 120 if (ret) { 121 rtw89_warn(rtwdev, "[ERR]dle_dfi_ctrl %d\n", ret); 122 return ret; 123 } 124 125 quota->rsv_pgnum = FIELD_GET(B_AX_DLE_RSV_PGNUM, ctrl.out_data); 126 quota->use_pgnum = FIELD_GET(B_AX_DLE_USE_PGNUM, ctrl.out_data); 127 return 0; 128 } 129 130 static int dle_dfi_qempty(struct rtw89_dev *rtwdev, 131 struct rtw89_mac_dle_dfi_qempty *qempty) 132 { 133 struct rtw89_mac_dle_dfi_ctrl ctrl; 134 u32 ret; 135 136 ctrl.type = qempty->dle_type; 137 ctrl.target = DLE_DFI_TYPE_QEMPTY; 138 ctrl.addr = qempty->grpsel; 139 ret = dle_dfi_ctrl(rtwdev, &ctrl); 140 if (ret) { 141 rtw89_warn(rtwdev, "[ERR]dle_dfi_ctrl %d\n", ret); 142 return ret; 143 } 144 145 
qempty->qempty = FIELD_GET(B_AX_DLE_QEMPTY_GRP, ctrl.out_data); 146 return 0; 147 } 148 149 static void dump_err_status_dispatcher(struct rtw89_dev *rtwdev) 150 { 151 rtw89_info(rtwdev, "R_AX_HOST_DISPATCHER_ALWAYS_IMR=0x%08x ", 152 rtw89_read32(rtwdev, R_AX_HOST_DISPATCHER_ERR_IMR)); 153 rtw89_info(rtwdev, "R_AX_HOST_DISPATCHER_ALWAYS_ISR=0x%08x\n", 154 rtw89_read32(rtwdev, R_AX_HOST_DISPATCHER_ERR_ISR)); 155 rtw89_info(rtwdev, "R_AX_CPU_DISPATCHER_ALWAYS_IMR=0x%08x ", 156 rtw89_read32(rtwdev, R_AX_CPU_DISPATCHER_ERR_IMR)); 157 rtw89_info(rtwdev, "R_AX_CPU_DISPATCHER_ALWAYS_ISR=0x%08x\n", 158 rtw89_read32(rtwdev, R_AX_CPU_DISPATCHER_ERR_ISR)); 159 rtw89_info(rtwdev, "R_AX_OTHER_DISPATCHER_ALWAYS_IMR=0x%08x ", 160 rtw89_read32(rtwdev, R_AX_OTHER_DISPATCHER_ERR_IMR)); 161 rtw89_info(rtwdev, "R_AX_OTHER_DISPATCHER_ALWAYS_ISR=0x%08x\n", 162 rtw89_read32(rtwdev, R_AX_OTHER_DISPATCHER_ERR_ISR)); 163 } 164 165 static void rtw89_mac_dump_qta_lost(struct rtw89_dev *rtwdev) 166 { 167 struct rtw89_mac_dle_dfi_qempty qempty; 168 struct rtw89_mac_dle_dfi_quota quota; 169 struct rtw89_mac_dle_dfi_ctrl ctrl; 170 u32 val, not_empty, i; 171 int ret; 172 173 qempty.dle_type = DLE_CTRL_TYPE_PLE; 174 qempty.grpsel = 0; 175 qempty.qempty = ~(u32)0; 176 ret = dle_dfi_qempty(rtwdev, &qempty); 177 if (ret) 178 rtw89_warn(rtwdev, "%s: query DLE fail\n", __func__); 179 else 180 rtw89_info(rtwdev, "DLE group0 empty: 0x%x\n", qempty.qempty); 181 182 for (not_empty = ~qempty.qempty, i = 0; not_empty != 0; not_empty >>= 1, i++) { 183 if (!(not_empty & BIT(0))) 184 continue; 185 ctrl.type = DLE_CTRL_TYPE_PLE; 186 ctrl.target = DLE_DFI_TYPE_QLNKTBL; 187 ctrl.addr = (QLNKTBL_ADDR_INFO_SEL_0 ? 
QLNKTBL_ADDR_INFO_SEL : 0) | 188 FIELD_PREP(QLNKTBL_ADDR_TBL_IDX_MASK, i); 189 ret = dle_dfi_ctrl(rtwdev, &ctrl); 190 if (ret) 191 rtw89_warn(rtwdev, "%s: query DLE fail\n", __func__); 192 else 193 rtw89_info(rtwdev, "qidx%d pktcnt = %ld\n", i, 194 FIELD_GET(QLNKTBL_DATA_SEL1_PKT_CNT_MASK, 195 ctrl.out_data)); 196 } 197 198 quota.dle_type = DLE_CTRL_TYPE_PLE; 199 quota.qtaid = 6; 200 ret = dle_dfi_quota(rtwdev, "a); 201 if (ret) 202 rtw89_warn(rtwdev, "%s: query DLE fail\n", __func__); 203 else 204 rtw89_info(rtwdev, "quota6 rsv/use: 0x%x/0x%x\n", 205 quota.rsv_pgnum, quota.use_pgnum); 206 207 val = rtw89_read32(rtwdev, R_AX_PLE_QTA6_CFG); 208 rtw89_info(rtwdev, "[PLE][CMAC0_RX]min_pgnum=0x%lx\n", 209 FIELD_GET(B_AX_PLE_Q6_MIN_SIZE_MASK, val)); 210 rtw89_info(rtwdev, "[PLE][CMAC0_RX]max_pgnum=0x%lx\n", 211 FIELD_GET(B_AX_PLE_Q6_MAX_SIZE_MASK, val)); 212 213 dump_err_status_dispatcher(rtwdev); 214 } 215 216 static void rtw89_mac_dump_l0_to_l1(struct rtw89_dev *rtwdev, 217 enum mac_ax_err_info err) 218 { 219 u32 dbg, event; 220 221 dbg = rtw89_read32(rtwdev, R_AX_SER_DBG_INFO); 222 event = FIELD_GET(B_AX_L0_TO_L1_EVENT_MASK, dbg); 223 224 switch (event) { 225 case MAC_AX_L0_TO_L1_RX_QTA_LOST: 226 rtw89_info(rtwdev, "quota lost!\n"); 227 rtw89_mac_dump_qta_lost(rtwdev); 228 break; 229 default: 230 break; 231 } 232 } 233 234 static void rtw89_mac_dump_err_status(struct rtw89_dev *rtwdev, 235 enum mac_ax_err_info err) 236 { 237 u32 dmac_err, cmac_err; 238 239 if (err != MAC_AX_ERR_L1_ERR_DMAC && 240 err != MAC_AX_ERR_L0_PROMOTE_TO_L1) 241 return; 242 243 rtw89_info(rtwdev, "--->\nerr=0x%x\n", err); 244 rtw89_info(rtwdev, "R_AX_SER_DBG_INFO =0x%08x\n", 245 rtw89_read32(rtwdev, R_AX_SER_DBG_INFO)); 246 247 cmac_err = rtw89_read32(rtwdev, R_AX_CMAC_ERR_ISR); 248 rtw89_info(rtwdev, "R_AX_CMAC_ERR_ISR =0x%08x\n", cmac_err); 249 dmac_err = rtw89_read32(rtwdev, R_AX_DMAC_ERR_ISR); 250 rtw89_info(rtwdev, "R_AX_DMAC_ERR_ISR =0x%08x\n", dmac_err); 251 252 if (dmac_err) { 253 
rtw89_info(rtwdev, "R_AX_WDE_ERR_FLAG_CFG =0x%08x ", 254 rtw89_read32(rtwdev, R_AX_WDE_ERR_FLAG_CFG)); 255 rtw89_info(rtwdev, "R_AX_PLE_ERR_FLAG_CFG =0x%08x\n", 256 rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_CFG)); 257 } 258 259 if (dmac_err & B_AX_WDRLS_ERR_FLAG) { 260 rtw89_info(rtwdev, "R_AX_WDRLS_ERR_IMR =0x%08x ", 261 rtw89_read32(rtwdev, R_AX_WDRLS_ERR_IMR)); 262 rtw89_info(rtwdev, "R_AX_WDRLS_ERR_ISR =0x%08x\n", 263 rtw89_read32(rtwdev, R_AX_WDRLS_ERR_ISR)); 264 } 265 266 if (dmac_err & B_AX_WSEC_ERR_FLAG) { 267 rtw89_info(rtwdev, "R_AX_SEC_ERR_IMR_ISR =0x%08x\n", 268 rtw89_read32(rtwdev, R_AX_SEC_DEBUG)); 269 rtw89_info(rtwdev, "SEC_local_Register 0x9D00 =0x%08x\n", 270 rtw89_read32(rtwdev, R_AX_SEC_ENG_CTRL)); 271 rtw89_info(rtwdev, "SEC_local_Register 0x9D04 =0x%08x\n", 272 rtw89_read32(rtwdev, R_AX_SEC_MPDU_PROC)); 273 rtw89_info(rtwdev, "SEC_local_Register 0x9D10 =0x%08x\n", 274 rtw89_read32(rtwdev, R_AX_SEC_CAM_ACCESS)); 275 rtw89_info(rtwdev, "SEC_local_Register 0x9D14 =0x%08x\n", 276 rtw89_read32(rtwdev, R_AX_SEC_CAM_RDATA)); 277 rtw89_info(rtwdev, "SEC_local_Register 0x9D18 =0x%08x\n", 278 rtw89_read32(rtwdev, R_AX_SEC_CAM_WDATA)); 279 rtw89_info(rtwdev, "SEC_local_Register 0x9D20 =0x%08x\n", 280 rtw89_read32(rtwdev, R_AX_SEC_TX_DEBUG)); 281 rtw89_info(rtwdev, "SEC_local_Register 0x9D24 =0x%08x\n", 282 rtw89_read32(rtwdev, R_AX_SEC_RX_DEBUG)); 283 rtw89_info(rtwdev, "SEC_local_Register 0x9D28 =0x%08x\n", 284 rtw89_read32(rtwdev, R_AX_SEC_TRX_PKT_CNT)); 285 rtw89_info(rtwdev, "SEC_local_Register 0x9D2C =0x%08x\n", 286 rtw89_read32(rtwdev, R_AX_SEC_TRX_BLK_CNT)); 287 } 288 289 if (dmac_err & B_AX_MPDU_ERR_FLAG) { 290 rtw89_info(rtwdev, "R_AX_MPDU_TX_ERR_IMR =0x%08x ", 291 rtw89_read32(rtwdev, R_AX_MPDU_TX_ERR_IMR)); 292 rtw89_info(rtwdev, "R_AX_MPDU_TX_ERR_ISR =0x%08x\n", 293 rtw89_read32(rtwdev, R_AX_MPDU_TX_ERR_ISR)); 294 rtw89_info(rtwdev, "R_AX_MPDU_RX_ERR_IMR =0x%08x ", 295 rtw89_read32(rtwdev, R_AX_MPDU_RX_ERR_IMR)); 296 rtw89_info(rtwdev, 
"R_AX_MPDU_RX_ERR_ISR =0x%08x\n", 297 rtw89_read32(rtwdev, R_AX_MPDU_RX_ERR_ISR)); 298 } 299 300 if (dmac_err & B_AX_STA_SCHEDULER_ERR_FLAG) { 301 rtw89_info(rtwdev, "R_AX_STA_SCHEDULER_ERR_IMR =0x%08x ", 302 rtw89_read32(rtwdev, R_AX_STA_SCHEDULER_ERR_IMR)); 303 rtw89_info(rtwdev, "R_AX_STA_SCHEDULER_ERR_ISR= 0x%08x\n", 304 rtw89_read32(rtwdev, R_AX_STA_SCHEDULER_ERR_ISR)); 305 } 306 307 if (dmac_err & B_AX_WDE_DLE_ERR_FLAG) { 308 rtw89_info(rtwdev, "R_AX_WDE_ERR_IMR=0x%08x ", 309 rtw89_read32(rtwdev, R_AX_WDE_ERR_IMR)); 310 rtw89_info(rtwdev, "R_AX_WDE_ERR_ISR=0x%08x\n", 311 rtw89_read32(rtwdev, R_AX_WDE_ERR_ISR)); 312 rtw89_info(rtwdev, "R_AX_PLE_ERR_IMR=0x%08x ", 313 rtw89_read32(rtwdev, R_AX_PLE_ERR_IMR)); 314 rtw89_info(rtwdev, "R_AX_PLE_ERR_FLAG_ISR=0x%08x\n", 315 rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_ISR)); 316 dump_err_status_dispatcher(rtwdev); 317 } 318 319 if (dmac_err & B_AX_TXPKTCTRL_ERR_FLAG) { 320 rtw89_info(rtwdev, "R_AX_TXPKTCTL_ERR_IMR_ISR=0x%08x\n", 321 rtw89_read32(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR)); 322 rtw89_info(rtwdev, "R_AX_TXPKTCTL_ERR_IMR_ISR_B1=0x%08x\n", 323 rtw89_read32(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR_B1)); 324 } 325 326 if (dmac_err & B_AX_PLE_DLE_ERR_FLAG) { 327 rtw89_info(rtwdev, "R_AX_WDE_ERR_IMR=0x%08x ", 328 rtw89_read32(rtwdev, R_AX_WDE_ERR_IMR)); 329 rtw89_info(rtwdev, "R_AX_WDE_ERR_ISR=0x%08x\n", 330 rtw89_read32(rtwdev, R_AX_WDE_ERR_ISR)); 331 rtw89_info(rtwdev, "R_AX_PLE_ERR_IMR=0x%08x ", 332 rtw89_read32(rtwdev, R_AX_PLE_ERR_IMR)); 333 rtw89_info(rtwdev, "R_AX_PLE_ERR_FLAG_ISR=0x%08x\n", 334 rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_ISR)); 335 rtw89_info(rtwdev, "R_AX_WD_CPUQ_OP_0=0x%08x\n", 336 rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_0)); 337 rtw89_info(rtwdev, "R_AX_WD_CPUQ_OP_1=0x%08x\n", 338 rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_1)); 339 rtw89_info(rtwdev, "R_AX_WD_CPUQ_OP_2=0x%08x\n", 340 rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_2)); 341 rtw89_info(rtwdev, "R_AX_WD_CPUQ_OP_STATUS=0x%08x\n", 342 rtw89_read32(rtwdev, 
R_AX_WD_CPUQ_OP_STATUS)); 343 rtw89_info(rtwdev, "R_AX_PL_CPUQ_OP_0=0x%08x\n", 344 rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_0)); 345 rtw89_info(rtwdev, "R_AX_PL_CPUQ_OP_1=0x%08x\n", 346 rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_1)); 347 rtw89_info(rtwdev, "R_AX_PL_CPUQ_OP_2=0x%08x\n", 348 rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_2)); 349 rtw89_info(rtwdev, "R_AX_PL_CPUQ_OP_STATUS=0x%08x\n", 350 rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_STATUS)); 351 rtw89_info(rtwdev, "R_AX_RXDMA_PKT_INFO_0=0x%08x\n", 352 rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_0)); 353 rtw89_info(rtwdev, "R_AX_RXDMA_PKT_INFO_1=0x%08x\n", 354 rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_1)); 355 rtw89_info(rtwdev, "R_AX_RXDMA_PKT_INFO_2=0x%08x\n", 356 rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_2)); 357 dump_err_status_dispatcher(rtwdev); 358 } 359 360 if (dmac_err & B_AX_PKTIN_ERR_FLAG) { 361 rtw89_info(rtwdev, "R_AX_PKTIN_ERR_IMR =0x%08x ", 362 rtw89_read32(rtwdev, R_AX_PKTIN_ERR_IMR)); 363 rtw89_info(rtwdev, "R_AX_PKTIN_ERR_ISR =0x%08x\n", 364 rtw89_read32(rtwdev, R_AX_PKTIN_ERR_ISR)); 365 rtw89_info(rtwdev, "R_AX_PKTIN_ERR_IMR =0x%08x ", 366 rtw89_read32(rtwdev, R_AX_PKTIN_ERR_IMR)); 367 rtw89_info(rtwdev, "R_AX_PKTIN_ERR_ISR =0x%08x\n", 368 rtw89_read32(rtwdev, R_AX_PKTIN_ERR_ISR)); 369 } 370 371 if (dmac_err & B_AX_DISPATCH_ERR_FLAG) 372 dump_err_status_dispatcher(rtwdev); 373 374 if (dmac_err & B_AX_DLE_CPUIO_ERR_FLAG) { 375 rtw89_info(rtwdev, "R_AX_CPUIO_ERR_IMR=0x%08x ", 376 rtw89_read32(rtwdev, R_AX_CPUIO_ERR_IMR)); 377 rtw89_info(rtwdev, "R_AX_CPUIO_ERR_ISR=0x%08x\n", 378 rtw89_read32(rtwdev, R_AX_CPUIO_ERR_ISR)); 379 } 380 381 if (dmac_err & BIT(11)) { 382 rtw89_info(rtwdev, "R_AX_BBRPT_COM_ERR_IMR_ISR=0x%08x\n", 383 rtw89_read32(rtwdev, R_AX_BBRPT_COM_ERR_IMR_ISR)); 384 } 385 386 if (cmac_err & B_AX_SCHEDULE_TOP_ERR_IND) { 387 rtw89_info(rtwdev, "R_AX_SCHEDULE_ERR_IMR=0x%08x ", 388 rtw89_read32(rtwdev, R_AX_SCHEDULE_ERR_IMR)); 389 rtw89_info(rtwdev, "R_AX_SCHEDULE_ERR_ISR=0x%04x\n", 390 rtw89_read16(rtwdev, 
R_AX_SCHEDULE_ERR_ISR)); 391 } 392 393 if (cmac_err & B_AX_PTCL_TOP_ERR_IND) { 394 rtw89_info(rtwdev, "R_AX_PTCL_IMR0=0x%08x ", 395 rtw89_read32(rtwdev, R_AX_PTCL_IMR0)); 396 rtw89_info(rtwdev, "R_AX_PTCL_ISR0=0x%08x\n", 397 rtw89_read32(rtwdev, R_AX_PTCL_ISR0)); 398 } 399 400 if (cmac_err & B_AX_DMA_TOP_ERR_IND) { 401 rtw89_info(rtwdev, "R_AX_DLE_CTRL=0x%08x\n", 402 rtw89_read32(rtwdev, R_AX_DLE_CTRL)); 403 } 404 405 if (cmac_err & B_AX_PHYINTF_ERR_IND) { 406 rtw89_info(rtwdev, "R_AX_PHYINFO_ERR_IMR=0x%08x\n", 407 rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_IMR)); 408 } 409 410 if (cmac_err & B_AX_TXPWR_CTRL_ERR_IND) { 411 rtw89_info(rtwdev, "R_AX_TXPWR_IMR=0x%08x ", 412 rtw89_read32(rtwdev, R_AX_TXPWR_IMR)); 413 rtw89_info(rtwdev, "R_AX_TXPWR_ISR=0x%08x\n", 414 rtw89_read32(rtwdev, R_AX_TXPWR_ISR)); 415 } 416 417 if (cmac_err & B_AX_WMAC_RX_ERR_IND) { 418 rtw89_info(rtwdev, "R_AX_DBGSEL_TRXPTCL=0x%08x ", 419 rtw89_read32(rtwdev, R_AX_DBGSEL_TRXPTCL)); 420 rtw89_info(rtwdev, "R_AX_PHYINFO_ERR_ISR=0x%08x\n", 421 rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_ISR)); 422 } 423 424 if (cmac_err & B_AX_WMAC_TX_ERR_IND) { 425 rtw89_info(rtwdev, "R_AX_TMAC_ERR_IMR_ISR=0x%08x ", 426 rtw89_read32(rtwdev, R_AX_TMAC_ERR_IMR_ISR)); 427 rtw89_info(rtwdev, "R_AX_DBGSEL_TRXPTCL=0x%08x\n", 428 rtw89_read32(rtwdev, R_AX_DBGSEL_TRXPTCL)); 429 } 430 431 rtwdev->hci.ops->dump_err_status(rtwdev); 432 433 if (err == MAC_AX_ERR_L0_PROMOTE_TO_L1) 434 rtw89_mac_dump_l0_to_l1(rtwdev, err); 435 436 rtw89_info(rtwdev, "<---\n"); 437 } 438 439 u32 rtw89_mac_get_err_status(struct rtw89_dev *rtwdev) 440 { 441 u32 err; 442 int ret; 443 444 ret = read_poll_timeout(rtw89_read32, err, (err != 0), 1000, 100000, 445 false, rtwdev, R_AX_HALT_C2H_CTRL); 446 if (ret) { 447 rtw89_warn(rtwdev, "Polling FW err status fail\n"); 448 return ret; 449 } 450 451 err = rtw89_read32(rtwdev, R_AX_HALT_C2H); 452 rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0); 453 454 rtw89_fw_st_dbg_dump(rtwdev); 455 
rtw89_mac_dump_err_status(rtwdev, err); 456 457 return err; 458 } 459 EXPORT_SYMBOL(rtw89_mac_get_err_status); 460 461 int rtw89_mac_set_err_status(struct rtw89_dev *rtwdev, u32 err) 462 { 463 u32 halt; 464 int ret = 0; 465 466 if (err > MAC_AX_SET_ERR_MAX) { 467 rtw89_err(rtwdev, "Bad set-err-status value 0x%08x\n", err); 468 return -EINVAL; 469 } 470 471 ret = read_poll_timeout(rtw89_read32, halt, (halt == 0x0), 1000, 472 100000, false, rtwdev, R_AX_HALT_H2C_CTRL); 473 if (ret) { 474 rtw89_err(rtwdev, "FW doesn't receive previous msg\n"); 475 return -EFAULT; 476 } 477 478 rtw89_write32(rtwdev, R_AX_HALT_H2C, err); 479 rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, B_AX_HALT_H2C_TRIGGER); 480 481 return 0; 482 } 483 EXPORT_SYMBOL(rtw89_mac_set_err_status); 484 485 const struct rtw89_hfc_prec_cfg rtw89_hfc_preccfg_pcie = { 486 2, 40, 0, 0, 1, 0, 0, 0 487 }; 488 EXPORT_SYMBOL(rtw89_hfc_preccfg_pcie); 489 490 static int hfc_reset_param(struct rtw89_dev *rtwdev) 491 { 492 struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param; 493 struct rtw89_hfc_param_ini param_ini = {NULL}; 494 u8 qta_mode = rtwdev->mac.dle_info.qta_mode; 495 496 switch (rtwdev->hci.type) { 497 case RTW89_HCI_TYPE_PCIE: 498 param_ini = rtwdev->chip->hfc_param_ini[qta_mode]; 499 param->en = 0; 500 break; 501 default: 502 return -EINVAL; 503 } 504 505 if (param_ini.pub_cfg) 506 param->pub_cfg = *param_ini.pub_cfg; 507 508 if (param_ini.prec_cfg) { 509 param->prec_cfg = *param_ini.prec_cfg; 510 rtwdev->hal.sw_amsdu_max_size = 511 param->prec_cfg.wp_ch07_prec * HFC_PAGE_UNIT; 512 } 513 514 if (param_ini.ch_cfg) 515 param->ch_cfg = param_ini.ch_cfg; 516 517 memset(¶m->ch_info, 0, sizeof(param->ch_info)); 518 memset(¶m->pub_info, 0, sizeof(param->pub_info)); 519 param->mode = param_ini.mode; 520 521 return 0; 522 } 523 524 static int hfc_ch_cfg_chk(struct rtw89_dev *rtwdev, u8 ch) 525 { 526 struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param; 527 const struct rtw89_hfc_ch_cfg *ch_cfg = param->ch_cfg; 528 
const struct rtw89_hfc_pub_cfg *pub_cfg = ¶m->pub_cfg; 529 const struct rtw89_hfc_prec_cfg *prec_cfg = ¶m->prec_cfg; 530 531 if (ch >= RTW89_DMA_CH_NUM) 532 return -EINVAL; 533 534 if ((ch_cfg[ch].min && ch_cfg[ch].min < prec_cfg->ch011_prec) || 535 ch_cfg[ch].max > pub_cfg->pub_max) 536 return -EINVAL; 537 if (ch_cfg[ch].grp >= grp_num) 538 return -EINVAL; 539 540 return 0; 541 } 542 543 static int hfc_pub_info_chk(struct rtw89_dev *rtwdev) 544 { 545 struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param; 546 const struct rtw89_hfc_pub_cfg *cfg = ¶m->pub_cfg; 547 struct rtw89_hfc_pub_info *info = ¶m->pub_info; 548 549 if (info->g0_used + info->g1_used + info->pub_aval != cfg->pub_max) { 550 if (rtwdev->chip->chip_id == RTL8852A) 551 return 0; 552 else 553 return -EFAULT; 554 } 555 556 return 0; 557 } 558 559 static int hfc_pub_cfg_chk(struct rtw89_dev *rtwdev) 560 { 561 struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param; 562 const struct rtw89_hfc_pub_cfg *pub_cfg = ¶m->pub_cfg; 563 564 if (pub_cfg->grp0 + pub_cfg->grp1 != pub_cfg->pub_max) 565 return -EFAULT; 566 567 return 0; 568 } 569 570 static int hfc_ch_ctrl(struct rtw89_dev *rtwdev, u8 ch) 571 { 572 struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param; 573 const struct rtw89_hfc_ch_cfg *cfg = param->ch_cfg; 574 int ret = 0; 575 u32 val = 0; 576 577 ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL); 578 if (ret) 579 return ret; 580 581 ret = hfc_ch_cfg_chk(rtwdev, ch); 582 if (ret) 583 return ret; 584 585 if (ch > RTW89_DMA_B1HI) 586 return -EINVAL; 587 588 val = u32_encode_bits(cfg[ch].min, B_AX_MIN_PG_MASK) | 589 u32_encode_bits(cfg[ch].max, B_AX_MAX_PG_MASK) | 590 (cfg[ch].grp ? 
B_AX_GRP : 0); 591 rtw89_write32(rtwdev, R_AX_ACH0_PAGE_CTRL + ch * 4, val); 592 593 return 0; 594 } 595 596 static int hfc_upd_ch_info(struct rtw89_dev *rtwdev, u8 ch) 597 { 598 struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param; 599 struct rtw89_hfc_ch_info *info = param->ch_info; 600 const struct rtw89_hfc_ch_cfg *cfg = param->ch_cfg; 601 u32 val; 602 u32 ret; 603 604 ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL); 605 if (ret) 606 return ret; 607 608 if (ch > RTW89_DMA_H2C) 609 return -EINVAL; 610 611 val = rtw89_read32(rtwdev, R_AX_ACH0_PAGE_INFO + ch * 4); 612 info[ch].aval = u32_get_bits(val, B_AX_AVAL_PG_MASK); 613 if (ch < RTW89_DMA_H2C) 614 info[ch].used = u32_get_bits(val, B_AX_USE_PG_MASK); 615 else 616 info[ch].used = cfg[ch].min - info[ch].aval; 617 618 return 0; 619 } 620 621 static int hfc_pub_ctrl(struct rtw89_dev *rtwdev) 622 { 623 const struct rtw89_hfc_pub_cfg *cfg = &rtwdev->mac.hfc_param.pub_cfg; 624 u32 val; 625 int ret; 626 627 ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL); 628 if (ret) 629 return ret; 630 631 ret = hfc_pub_cfg_chk(rtwdev); 632 if (ret) 633 return ret; 634 635 val = u32_encode_bits(cfg->grp0, B_AX_PUBPG_G0_MASK) | 636 u32_encode_bits(cfg->grp1, B_AX_PUBPG_G1_MASK); 637 rtw89_write32(rtwdev, R_AX_PUB_PAGE_CTRL1, val); 638 639 val = u32_encode_bits(cfg->wp_thrd, B_AX_WP_THRD_MASK); 640 rtw89_write32(rtwdev, R_AX_WP_PAGE_CTRL2, val); 641 642 return 0; 643 } 644 645 static int hfc_upd_mix_info(struct rtw89_dev *rtwdev) 646 { 647 struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param; 648 struct rtw89_hfc_pub_cfg *pub_cfg = ¶m->pub_cfg; 649 struct rtw89_hfc_prec_cfg *prec_cfg = ¶m->prec_cfg; 650 struct rtw89_hfc_pub_info *info = ¶m->pub_info; 651 u32 val; 652 int ret; 653 654 ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL); 655 if (ret) 656 return ret; 657 658 val = rtw89_read32(rtwdev, R_AX_PUB_PAGE_INFO1); 659 info->g0_used = u32_get_bits(val, B_AX_G0_USE_PG_MASK); 
660 info->g1_used = u32_get_bits(val, B_AX_G1_USE_PG_MASK); 661 val = rtw89_read32(rtwdev, R_AX_PUB_PAGE_INFO3); 662 info->g0_aval = u32_get_bits(val, B_AX_G0_AVAL_PG_MASK); 663 info->g1_aval = u32_get_bits(val, B_AX_G1_AVAL_PG_MASK); 664 info->pub_aval = 665 u32_get_bits(rtw89_read32(rtwdev, R_AX_PUB_PAGE_INFO2), 666 B_AX_PUB_AVAL_PG_MASK); 667 info->wp_aval = 668 u32_get_bits(rtw89_read32(rtwdev, R_AX_WP_PAGE_INFO1), 669 B_AX_WP_AVAL_PG_MASK); 670 671 val = rtw89_read32(rtwdev, R_AX_HCI_FC_CTRL); 672 param->en = val & B_AX_HCI_FC_EN ? 1 : 0; 673 param->h2c_en = val & B_AX_HCI_FC_CH12_EN ? 1 : 0; 674 param->mode = u32_get_bits(val, B_AX_HCI_FC_MODE_MASK); 675 prec_cfg->ch011_full_cond = 676 u32_get_bits(val, B_AX_HCI_FC_WD_FULL_COND_MASK); 677 prec_cfg->h2c_full_cond = 678 u32_get_bits(val, B_AX_HCI_FC_CH12_FULL_COND_MASK); 679 prec_cfg->wp_ch07_full_cond = 680 u32_get_bits(val, B_AX_HCI_FC_WP_CH07_FULL_COND_MASK); 681 prec_cfg->wp_ch811_full_cond = 682 u32_get_bits(val, B_AX_HCI_FC_WP_CH811_FULL_COND_MASK); 683 684 val = rtw89_read32(rtwdev, R_AX_CH_PAGE_CTRL); 685 prec_cfg->ch011_prec = u32_get_bits(val, B_AX_PREC_PAGE_CH011_MASK); 686 prec_cfg->h2c_prec = u32_get_bits(val, B_AX_PREC_PAGE_CH12_MASK); 687 688 val = rtw89_read32(rtwdev, R_AX_PUB_PAGE_CTRL2); 689 pub_cfg->pub_max = u32_get_bits(val, B_AX_PUBPG_ALL_MASK); 690 691 val = rtw89_read32(rtwdev, R_AX_WP_PAGE_CTRL1); 692 prec_cfg->wp_ch07_prec = u32_get_bits(val, B_AX_PREC_PAGE_WP_CH07_MASK); 693 prec_cfg->wp_ch811_prec = u32_get_bits(val, B_AX_PREC_PAGE_WP_CH811_MASK); 694 695 val = rtw89_read32(rtwdev, R_AX_WP_PAGE_CTRL2); 696 pub_cfg->wp_thrd = u32_get_bits(val, B_AX_WP_THRD_MASK); 697 698 val = rtw89_read32(rtwdev, R_AX_PUB_PAGE_CTRL1); 699 pub_cfg->grp0 = u32_get_bits(val, B_AX_PUBPG_G0_MASK); 700 pub_cfg->grp1 = u32_get_bits(val, B_AX_PUBPG_G1_MASK); 701 702 ret = hfc_pub_info_chk(rtwdev); 703 if (param->en && ret) 704 return ret; 705 706 return 0; 707 } 708 709 static void hfc_h2c_cfg(struct 
rtw89_dev *rtwdev) 710 { 711 struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param; 712 const struct rtw89_hfc_prec_cfg *prec_cfg = ¶m->prec_cfg; 713 u32 val; 714 715 val = u32_encode_bits(prec_cfg->h2c_prec, B_AX_PREC_PAGE_CH12_MASK); 716 rtw89_write32(rtwdev, R_AX_CH_PAGE_CTRL, val); 717 718 rtw89_write32_mask(rtwdev, R_AX_HCI_FC_CTRL, 719 B_AX_HCI_FC_CH12_FULL_COND_MASK, 720 prec_cfg->h2c_full_cond); 721 } 722 723 static void hfc_mix_cfg(struct rtw89_dev *rtwdev) 724 { 725 struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param; 726 const struct rtw89_hfc_pub_cfg *pub_cfg = ¶m->pub_cfg; 727 const struct rtw89_hfc_prec_cfg *prec_cfg = ¶m->prec_cfg; 728 u32 val; 729 730 val = u32_encode_bits(prec_cfg->ch011_prec, B_AX_PREC_PAGE_CH011_MASK) | 731 u32_encode_bits(prec_cfg->h2c_prec, B_AX_PREC_PAGE_CH12_MASK); 732 rtw89_write32(rtwdev, R_AX_CH_PAGE_CTRL, val); 733 734 val = u32_encode_bits(pub_cfg->pub_max, B_AX_PUBPG_ALL_MASK); 735 rtw89_write32(rtwdev, R_AX_PUB_PAGE_CTRL2, val); 736 737 val = u32_encode_bits(prec_cfg->wp_ch07_prec, 738 B_AX_PREC_PAGE_WP_CH07_MASK) | 739 u32_encode_bits(prec_cfg->wp_ch811_prec, 740 B_AX_PREC_PAGE_WP_CH811_MASK); 741 rtw89_write32(rtwdev, R_AX_WP_PAGE_CTRL1, val); 742 743 val = u32_replace_bits(rtw89_read32(rtwdev, R_AX_HCI_FC_CTRL), 744 param->mode, B_AX_HCI_FC_MODE_MASK); 745 val = u32_replace_bits(val, prec_cfg->ch011_full_cond, 746 B_AX_HCI_FC_WD_FULL_COND_MASK); 747 val = u32_replace_bits(val, prec_cfg->h2c_full_cond, 748 B_AX_HCI_FC_CH12_FULL_COND_MASK); 749 val = u32_replace_bits(val, prec_cfg->wp_ch07_full_cond, 750 B_AX_HCI_FC_WP_CH07_FULL_COND_MASK); 751 val = u32_replace_bits(val, prec_cfg->wp_ch811_full_cond, 752 B_AX_HCI_FC_WP_CH811_FULL_COND_MASK); 753 rtw89_write32(rtwdev, R_AX_HCI_FC_CTRL, val); 754 } 755 756 static void hfc_func_en(struct rtw89_dev *rtwdev, bool en, bool h2c_en) 757 { 758 struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param; 759 u32 val; 760 761 val = rtw89_read32(rtwdev, R_AX_HCI_FC_CTRL); 762 
param->en = en; 763 param->h2c_en = h2c_en; 764 val = en ? (val | B_AX_HCI_FC_EN) : (val & ~B_AX_HCI_FC_EN); 765 val = h2c_en ? (val | B_AX_HCI_FC_CH12_EN) : 766 (val & ~B_AX_HCI_FC_CH12_EN); 767 rtw89_write32(rtwdev, R_AX_HCI_FC_CTRL, val); 768 } 769 770 static int hfc_init(struct rtw89_dev *rtwdev, bool reset, bool en, bool h2c_en) 771 { 772 u8 ch; 773 u32 ret = 0; 774 775 if (reset) 776 ret = hfc_reset_param(rtwdev); 777 if (ret) 778 return ret; 779 780 ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL); 781 if (ret) 782 return ret; 783 784 hfc_func_en(rtwdev, false, false); 785 786 if (!en && h2c_en) { 787 hfc_h2c_cfg(rtwdev); 788 hfc_func_en(rtwdev, en, h2c_en); 789 return ret; 790 } 791 792 for (ch = RTW89_DMA_ACH0; ch < RTW89_DMA_H2C; ch++) { 793 ret = hfc_ch_ctrl(rtwdev, ch); 794 if (ret) 795 return ret; 796 } 797 798 ret = hfc_pub_ctrl(rtwdev); 799 if (ret) 800 return ret; 801 802 hfc_mix_cfg(rtwdev); 803 if (en || h2c_en) { 804 hfc_func_en(rtwdev, en, h2c_en); 805 udelay(10); 806 } 807 for (ch = RTW89_DMA_ACH0; ch < RTW89_DMA_H2C; ch++) { 808 ret = hfc_upd_ch_info(rtwdev, ch); 809 if (ret) 810 return ret; 811 } 812 ret = hfc_upd_mix_info(rtwdev); 813 814 return ret; 815 } 816 817 #define PWR_POLL_CNT 2000 818 static int pwr_cmd_poll(struct rtw89_dev *rtwdev, 819 const struct rtw89_pwr_cfg *cfg) 820 { 821 u8 val = 0; 822 int ret; 823 u32 addr = cfg->base == PWR_INTF_MSK_SDIO ? 
824 cfg->addr | SDIO_LOCAL_BASE_ADDR : cfg->addr; 825 826 ret = read_poll_timeout(rtw89_read8, val, !((val ^ cfg->val) & cfg->msk), 827 1000, 1000 * PWR_POLL_CNT, false, rtwdev, addr); 828 829 if (!ret) 830 return 0; 831 832 rtw89_warn(rtwdev, "[ERR] Polling timeout\n"); 833 rtw89_warn(rtwdev, "[ERR] addr: %X, %X\n", addr, cfg->addr); 834 rtw89_warn(rtwdev, "[ERR] val: %X, %X\n", val, cfg->val); 835 836 return -EBUSY; 837 } 838 839 static int rtw89_mac_sub_pwr_seq(struct rtw89_dev *rtwdev, u8 cv_msk, 840 u8 intf_msk, const struct rtw89_pwr_cfg *cfg) 841 { 842 const struct rtw89_pwr_cfg *cur_cfg; 843 u32 addr; 844 u8 val; 845 846 for (cur_cfg = cfg; cur_cfg->cmd != PWR_CMD_END; cur_cfg++) { 847 if (!(cur_cfg->intf_msk & intf_msk) || 848 !(cur_cfg->cv_msk & cv_msk)) 849 continue; 850 851 switch (cur_cfg->cmd) { 852 case PWR_CMD_WRITE: 853 addr = cur_cfg->addr; 854 855 if (cur_cfg->base == PWR_BASE_SDIO) 856 addr |= SDIO_LOCAL_BASE_ADDR; 857 858 val = rtw89_read8(rtwdev, addr); 859 val &= ~(cur_cfg->msk); 860 val |= (cur_cfg->val & cur_cfg->msk); 861 862 rtw89_write8(rtwdev, addr, val); 863 break; 864 case PWR_CMD_POLL: 865 if (pwr_cmd_poll(rtwdev, cur_cfg)) 866 return -EBUSY; 867 break; 868 case PWR_CMD_DELAY: 869 if (cur_cfg->val == PWR_DELAY_US) 870 udelay(cur_cfg->addr); 871 else 872 fsleep(cur_cfg->addr * 1000); 873 break; 874 default: 875 return -EINVAL; 876 } 877 } 878 879 return 0; 880 } 881 882 static int rtw89_mac_pwr_seq(struct rtw89_dev *rtwdev, 883 const struct rtw89_pwr_cfg * const *cfg_seq) 884 { 885 int ret; 886 887 for (; *cfg_seq; cfg_seq++) { 888 ret = rtw89_mac_sub_pwr_seq(rtwdev, BIT(rtwdev->hal.cv), 889 PWR_INTF_MSK_PCIE, *cfg_seq); 890 if (ret) 891 return -EBUSY; 892 } 893 894 return 0; 895 } 896 897 static enum rtw89_rpwm_req_pwr_state 898 rtw89_mac_get_req_pwr_state(struct rtw89_dev *rtwdev) 899 { 900 enum rtw89_rpwm_req_pwr_state state; 901 902 switch (rtwdev->ps_mode) { 903 case RTW89_PS_MODE_RFOFF: 904 state = 
RTW89_MAC_RPWM_REQ_PWR_STATE_BAND0_RFOFF; 905 break; 906 case RTW89_PS_MODE_CLK_GATED: 907 state = RTW89_MAC_RPWM_REQ_PWR_STATE_CLK_GATED; 908 break; 909 case RTW89_PS_MODE_PWR_GATED: 910 state = RTW89_MAC_RPWM_REQ_PWR_STATE_PWR_GATED; 911 break; 912 default: 913 state = RTW89_MAC_RPWM_REQ_PWR_STATE_ACTIVE; 914 break; 915 } 916 return state; 917 } 918 919 static void rtw89_mac_send_rpwm(struct rtw89_dev *rtwdev, 920 enum rtw89_rpwm_req_pwr_state req_pwr_state) 921 { 922 u16 request; 923 924 request = rtw89_read16(rtwdev, R_AX_RPWM); 925 request ^= request | PS_RPWM_TOGGLE; 926 927 rtwdev->mac.rpwm_seq_num = (rtwdev->mac.rpwm_seq_num + 1) & 928 RPWM_SEQ_NUM_MAX; 929 request |= FIELD_PREP(PS_RPWM_SEQ_NUM, rtwdev->mac.rpwm_seq_num); 930 931 request |= req_pwr_state; 932 933 if (req_pwr_state < RTW89_MAC_RPWM_REQ_PWR_STATE_CLK_GATED) 934 request |= PS_RPWM_ACK; 935 936 rtw89_write16(rtwdev, rtwdev->hci.rpwm_addr, request); 937 } 938 939 static int rtw89_mac_check_cpwm_state(struct rtw89_dev *rtwdev, 940 enum rtw89_rpwm_req_pwr_state req_pwr_state) 941 { 942 bool request_deep_mode; 943 bool in_deep_mode; 944 u8 rpwm_req_num; 945 u8 cpwm_rsp_seq; 946 u8 cpwm_seq; 947 u8 cpwm_status; 948 949 if (req_pwr_state >= RTW89_MAC_RPWM_REQ_PWR_STATE_CLK_GATED) 950 request_deep_mode = true; 951 else 952 request_deep_mode = false; 953 954 if (rtw89_read32_mask(rtwdev, R_AX_LDM, B_AX_EN_32K)) 955 in_deep_mode = true; 956 else 957 in_deep_mode = false; 958 959 if (request_deep_mode != in_deep_mode) 960 return -EPERM; 961 962 if (request_deep_mode) 963 return 0; 964 965 rpwm_req_num = rtwdev->mac.rpwm_seq_num; 966 cpwm_rsp_seq = rtw89_read16_mask(rtwdev, R_AX_CPWM, 967 PS_CPWM_RSP_SEQ_NUM); 968 969 if (rpwm_req_num != cpwm_rsp_seq) 970 return -EPERM; 971 972 rtwdev->mac.cpwm_seq_num = (rtwdev->mac.cpwm_seq_num + 1) & 973 CPWM_SEQ_NUM_MAX; 974 975 cpwm_seq = rtw89_read16_mask(rtwdev, R_AX_CPWM, PS_CPWM_SEQ_NUM); 976 if (cpwm_seq != rtwdev->mac.cpwm_seq_num) 977 return -EPERM; 978 979 
cpwm_status = rtw89_read16_mask(rtwdev, R_AX_CPWM, PS_CPWM_STATE); 980 if (cpwm_status != req_pwr_state) 981 return -EPERM; 982 983 return 0; 984 } 985 986 void rtw89_mac_power_mode_change(struct rtw89_dev *rtwdev, bool enter) 987 { 988 enum rtw89_rpwm_req_pwr_state state; 989 int ret; 990 991 if (enter) 992 state = rtw89_mac_get_req_pwr_state(rtwdev); 993 else 994 state = RTW89_MAC_RPWM_REQ_PWR_STATE_ACTIVE; 995 996 rtw89_mac_send_rpwm(rtwdev, state); 997 ret = read_poll_timeout_atomic(rtw89_mac_check_cpwm_state, ret, !ret, 998 1000, 15000, false, rtwdev, state); 999 if (ret) 1000 rtw89_err(rtwdev, "firmware failed to ack for %s ps mode\n", 1001 enter ? "entering" : "leaving"); 1002 } 1003 1004 static int rtw89_mac_power_switch(struct rtw89_dev *rtwdev, bool on) 1005 { 1006 #define PWR_ACT 1 1007 const struct rtw89_chip_info *chip = rtwdev->chip; 1008 const struct rtw89_pwr_cfg * const *cfg_seq; 1009 struct rtw89_hal *hal = &rtwdev->hal; 1010 int ret; 1011 u8 val; 1012 1013 if (on) 1014 cfg_seq = chip->pwr_on_seq; 1015 else 1016 cfg_seq = chip->pwr_off_seq; 1017 1018 if (test_bit(RTW89_FLAG_FW_RDY, rtwdev->flags)) 1019 __rtw89_leave_ps_mode(rtwdev); 1020 1021 val = rtw89_read32_mask(rtwdev, R_AX_IC_PWR_STATE, B_AX_WLMAC_PWR_STE_MASK); 1022 if (on && val == PWR_ACT) { 1023 rtw89_err(rtwdev, "MAC has already powered on\n"); 1024 return -EBUSY; 1025 } 1026 1027 ret = rtw89_mac_pwr_seq(rtwdev, cfg_seq); 1028 if (ret) 1029 return ret; 1030 1031 if (on) { 1032 set_bit(RTW89_FLAG_POWERON, rtwdev->flags); 1033 rtw89_write8(rtwdev, R_AX_SCOREBOARD + 3, MAC_AX_NOTIFY_TP_MAJOR); 1034 } else { 1035 clear_bit(RTW89_FLAG_POWERON, rtwdev->flags); 1036 clear_bit(RTW89_FLAG_FW_RDY, rtwdev->flags); 1037 rtw89_write8(rtwdev, R_AX_SCOREBOARD + 3, MAC_AX_NOTIFY_PWR_MAJOR); 1038 hal->current_channel = 0; 1039 } 1040 1041 return 0; 1042 #undef PWR_ACT 1043 } 1044 1045 void rtw89_mac_pwr_off(struct rtw89_dev *rtwdev) 1046 { 1047 rtw89_mac_power_switch(rtwdev, false); 1048 } 1049 1050 
/* Enable/disable one CMAC's function and clock bits. CMAC1 additionally
 * needs its power cells enabled and its isolation released (reverse order
 * on disable).
 */
static int cmac_func_en(struct rtw89_dev *rtwdev, u8 mac_idx, bool en)
{
	u32 func_en = 0;
	u32 ck_en = 0;
	u32 c1pc_en = 0;
	/* per-band register addresses, indexed by mac_idx (0 or 1) */
	u32 addrl_func_en[] = {R_AX_CMAC_FUNC_EN, R_AX_CMAC_FUNC_EN_C1};
	u32 addrl_ck_en[] = {R_AX_CK_EN, R_AX_CK_EN_C1};

	func_en = B_AX_CMAC_EN | B_AX_CMAC_TXEN | B_AX_CMAC_RXEN |
		  B_AX_PHYINTF_EN | B_AX_CMAC_DMA_EN | B_AX_PTCLTOP_EN |
		  B_AX_SCHEDULER_EN | B_AX_TMAC_EN | B_AX_RMAC_EN;
	ck_en = B_AX_CMAC_CKEN | B_AX_PHYINTF_CKEN | B_AX_CMAC_DMA_CKEN |
		B_AX_PTCLTOP_CKEN | B_AX_SCHEDULER_CKEN | B_AX_TMAC_CKEN |
		B_AX_RMAC_CKEN;
	c1pc_en = B_AX_R_SYM_WLCMAC1_PC_EN |
		  B_AX_R_SYM_WLCMAC1_P1_PC_EN |
		  B_AX_R_SYM_WLCMAC1_P2_PC_EN |
		  B_AX_R_SYM_WLCMAC1_P3_PC_EN |
		  B_AX_R_SYM_WLCMAC1_P4_PC_EN;

	if (en) {
		if (mac_idx == RTW89_MAC_1) {
			/* power on CMAC1 cells, then drop isolation */
			rtw89_write32_set(rtwdev, R_AX_AFE_CTRL1, c1pc_en);
			rtw89_write32_clr(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND,
					  B_AX_R_SYM_ISO_CMAC12PP);
			rtw89_write32_set(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND,
					  B_AX_CMAC1_FEN);
		}
		rtw89_write32_set(rtwdev, addrl_ck_en[mac_idx], ck_en);
		rtw89_write32_set(rtwdev, addrl_func_en[mac_idx], func_en);
	} else {
		rtw89_write32_clr(rtwdev, addrl_func_en[mac_idx], func_en);
		rtw89_write32_clr(rtwdev, addrl_ck_en[mac_idx], ck_en);
		if (mac_idx == RTW89_MAC_1) {
			/* re-isolate before removing CMAC1 power */
			rtw89_write32_clr(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND,
					  B_AX_CMAC1_FEN);
			rtw89_write32_set(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND,
					  B_AX_R_SYM_ISO_CMAC12PP);
			rtw89_write32_clr(rtwdev, R_AX_AFE_CTRL1, c1pc_en);
		}
	}

	return 0;
}

/* Enable all DMAC sub-block function and clock gates. */
static int dmac_func_en(struct rtw89_dev *rtwdev)
{
	u32 val32;

	val32 = (B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN | B_AX_MAC_SEC_EN |
		 B_AX_DISPATCHER_EN | B_AX_DLE_CPUIO_EN | B_AX_PKT_IN_EN |
		 B_AX_DMAC_TBL_EN | B_AX_PKT_BUF_EN | B_AX_STA_SCH_EN |
		 B_AX_TXPKT_CTRL_EN | B_AX_WD_RLS_EN | B_AX_MPDU_PROC_EN);
	rtw89_write32(rtwdev, R_AX_DMAC_FUNC_EN, val32);

	val32 = (B_AX_MAC_SEC_CLK_EN | B_AX_DISPATCHER_CLK_EN |
		 B_AX_DLE_CPUIO_CLK_EN | B_AX_PKT_IN_CLK_EN |
		 B_AX_STA_SCH_CLK_EN | B_AX_TXPKT_CTRL_CLK_EN |
		 B_AX_WD_RLS_CLK_EN);
	rtw89_write32(rtwdev, R_AX_DMAC_CLK_EN, val32);

	return 0;
}

/* Chip-level misc function enable (over-current protection mask). */
static int chip_func_en(struct rtw89_dev *rtwdev)
{
	rtw89_write32_set(rtwdev, R_AX_SPSLDO_ON_CTRL0, B_AX_OCP_L1_MASK);

	return 0;
}

/* Bring up DMAC, CMAC0 and chip-level functions in order. */
static int rtw89_mac_sys_init(struct rtw89_dev *rtwdev)
{
	int ret;

	ret = dmac_func_en(rtwdev);
	if (ret)
		return ret;

	ret = cmac_func_en(rtwdev, 0, true);
	if (ret)
		return ret;

	ret = chip_func_en(rtwdev);
	if (ret)
		return ret;

	return ret;
}

/* DLE (data link engine) size/quota tables, selected per HCI/QTA mode. */

/* PCIE 64 */
const struct rtw89_dle_size rtw89_wde_size0 = {
	RTW89_WDE_PG_64, 4095, 1,
};
EXPORT_SYMBOL(rtw89_wde_size0);

/* DLFW */
const struct rtw89_dle_size rtw89_wde_size4 = {
	RTW89_WDE_PG_64, 0, 4096,
};
EXPORT_SYMBOL(rtw89_wde_size4);

/* PCIE */
const struct rtw89_dle_size rtw89_ple_size0 = {
	RTW89_PLE_PG_128, 1520, 16,
};
EXPORT_SYMBOL(rtw89_ple_size0);

/* DLFW */
const struct rtw89_dle_size rtw89_ple_size4 = {
	RTW89_PLE_PG_128, 64, 1472,
};
EXPORT_SYMBOL(rtw89_ple_size4);

/* PCIE 64 */
const struct rtw89_wde_quota rtw89_wde_qt0 = {
	3792, 196, 0, 107,
};
EXPORT_SYMBOL(rtw89_wde_qt0);

/* DLFW */
const struct rtw89_wde_quota rtw89_wde_qt4 = {
	0, 0, 0, 0,
};
EXPORT_SYMBOL(rtw89_wde_qt4);

/* PCIE SCC */
const struct rtw89_ple_quota rtw89_ple_qt4 = {
	264, 0, 16, 20, 26, 13, 356, 0, 32, 40, 8,
};
EXPORT_SYMBOL(rtw89_ple_qt4);

/* PCIE SCC */
const struct rtw89_ple_quota rtw89_ple_qt5 = {
	264, 0, 32, 20, 64, 13, 1101, 0, 64, 128,
	120,
};
EXPORT_SYMBOL(rtw89_ple_qt5);

/* DLFW */
const struct rtw89_ple_quota rtw89_ple_qt13 = {
	0, 0, 16, 48, 0, 0, 0, 0, 0, 0, 0
};
EXPORT_SYMBOL(rtw89_ple_qt13);

/* Look up the DLE memory layout for a QTA mode and cache the page sizes
 * and band RX quotas in mac.dle_info. Returns NULL on a mode mismatch.
 */
static const struct rtw89_dle_mem *get_dle_mem_cfg(struct rtw89_dev *rtwdev,
						   enum rtw89_qta_mode mode)
{
	struct rtw89_mac_info *mac = &rtwdev->mac;
	const struct rtw89_dle_mem *cfg;

	cfg = &rtwdev->chip->dle_mem[mode];
	/* NOTE(review): &array[mode] can never be NULL; this check looks
	 * like it intends to catch an unpopulated entry — confirm.
	 */
	if (!cfg)
		return NULL;

	if (cfg->mode != mode) {
		rtw89_warn(rtwdev, "qta mode unmatch!\n");
		return NULL;
	}

	mac->dle_info.wde_pg_size = cfg->wde_size->pge_size;
	mac->dle_info.ple_pg_size = cfg->ple_size->pge_size;
	mac->dle_info.qta_mode = mode;
	mac->dle_info.c0_rx_qta = cfg->ple_min_qt->cma0_dma;
	mac->dle_info.c1_rx_qta = cfg->ple_min_qt->cma1_dma;

	return cfg;
}

/* Total buffer bytes consumed by a WDE + PLE configuration. */
static inline u32 dle_used_size(const struct rtw89_dle_size *wde,
				const struct rtw89_dle_size *ple)
{
	return wde->pge_size * (wde->lnk_pge_num + wde->unlnk_pge_num) +
	       ple->pge_size * (ple->lnk_pge_num + ple->unlnk_pge_num);
}

/* Gate/ungate the WDE and PLE engines. */
static void dle_func_en(struct rtw89_dev *rtwdev, bool enable)
{
	if (enable)
		rtw89_write32_set(rtwdev, R_AX_DMAC_FUNC_EN,
				  B_AX_DLE_WDE_EN | B_AX_DLE_PLE_EN);
	else
		rtw89_write32_clr(rtwdev, R_AX_DMAC_FUNC_EN,
				  B_AX_DLE_WDE_EN | B_AX_DLE_PLE_EN);
}

/* Gate/ungate the WDE and PLE clocks. */
static void dle_clk_en(struct rtw89_dev *rtwdev, bool enable)
{
	if (enable)
		rtw89_write32_set(rtwdev, R_AX_DMAC_CLK_EN,
				  B_AX_DLE_WDE_CLK_EN | B_AX_DLE_PLE_CLK_EN);
	else
		rtw89_write32_clr(rtwdev, R_AX_DMAC_CLK_EN,
				  B_AX_DLE_WDE_CLK_EN | B_AX_DLE_PLE_CLK_EN);
}

/* Program WDE/PLE page size, start bound and free-page count. */
static int dle_mix_cfg(struct rtw89_dev *rtwdev, const struct rtw89_dle_mem *cfg)
{
	const struct rtw89_dle_size *size_cfg;
	u32 val;
	u8 bound = 0;

	val = rtw89_read32(rtwdev, R_AX_WDE_PKTBUF_CFG);
	size_cfg = cfg->wde_size;

	switch (size_cfg->pge_size) {
	default:
	case RTW89_WDE_PG_64:
		val = u32_replace_bits(val, S_AX_WDE_PAGE_SEL_64,
				       B_AX_WDE_PAGE_SEL_MASK);
		break;
	case RTW89_WDE_PG_128:
		val = u32_replace_bits(val, S_AX_WDE_PAGE_SEL_128,
				       B_AX_WDE_PAGE_SEL_MASK);
		break;
	case RTW89_WDE_PG_256:
		rtw89_err(rtwdev, "[ERR]WDE DLE doesn't support 256 byte!\n");
		return -EINVAL;
	}

	/* WDE region starts at offset 0 */
	val = u32_replace_bits(val, bound, B_AX_WDE_START_BOUND_MASK);
	val = u32_replace_bits(val, size_cfg->lnk_pge_num,
			       B_AX_WDE_FREE_PAGE_NUM_MASK);
	rtw89_write32(rtwdev, R_AX_WDE_PKTBUF_CFG, val);

	val = rtw89_read32(rtwdev, R_AX_PLE_PKTBUF_CFG);
	/* PLE starts right after the WDE region: bound is computed from the
	 * WDE size_cfg *before* size_cfg is repointed at the PLE config.
	 */
	bound = (size_cfg->lnk_pge_num + size_cfg->unlnk_pge_num)
				* size_cfg->pge_size / DLE_BOUND_UNIT;
	size_cfg = cfg->ple_size;

	switch (size_cfg->pge_size) {
	default:
	case RTW89_PLE_PG_64:
		rtw89_err(rtwdev, "[ERR]PLE DLE doesn't support 64 byte!\n");
		return -EINVAL;
	case RTW89_PLE_PG_128:
		val = u32_replace_bits(val, S_AX_PLE_PAGE_SEL_128,
				       B_AX_PLE_PAGE_SEL_MASK);
		break;
	case RTW89_PLE_PG_256:
		val = u32_replace_bits(val, S_AX_PLE_PAGE_SEL_256,
				       B_AX_PLE_PAGE_SEL_MASK);
		break;
	}

	val = u32_replace_bits(val, bound, B_AX_PLE_START_BOUND_MASK);
	val = u32_replace_bits(val, size_cfg->lnk_pge_num,
			       B_AX_PLE_FREE_PAGE_NUM_MASK);
	rtw89_write32(rtwdev, R_AX_PLE_PKTBUF_CFG, val);

	return 0;
}

#define INVALID_QT_WCPU U16_MAX
/* Write one min/max quota pair into R_AX_<module>_QTA<idx>_CFG
 * (min in bits 0..15, max in bits 16..31).
 */
#define SET_QUOTA_VAL(_min_x, _max_x, _module, _idx)			\
	do {								\
		val = ((_min_x) &					\
		       B_AX_ ## _module ## _MIN_SIZE_MASK) |		\
		      (((_max_x) << 16) &				\
		       B_AX_ ## _module ## _MAX_SIZE_MASK);		\
		rtw89_write32(rtwdev,					\
			      R_AX_ ## _module ## _QTA ## _idx ## _CFG,	\
			      val);					\
	} while (0)
#define SET_QUOTA(_x, _module, _idx)					\
	SET_QUOTA_VAL(min_cfg->_x, max_cfg->_x, _module, _idx)

/* Program WDE per-client quotas. The WCPU minimum may be overridden
 * (used while downloading firmware) via ext_wde_min_qt_wcpu.
 */
static void wde_quota_cfg(struct rtw89_dev *rtwdev,
			  const struct rtw89_wde_quota *min_cfg,
			  const struct rtw89_wde_quota *max_cfg,
			  u16 ext_wde_min_qt_wcpu)
{
	u16 min_qt_wcpu = ext_wde_min_qt_wcpu != INVALID_QT_WCPU ?
			  ext_wde_min_qt_wcpu : min_cfg->wcpu;
	u32 val;

	SET_QUOTA(hif, WDE, 0);
	SET_QUOTA_VAL(min_qt_wcpu, max_cfg->wcpu, WDE, 1);
	/* index 2 is intentionally skipped (unused quota slot) */
	SET_QUOTA(pkt_in, WDE, 3);
	SET_QUOTA(cpu_io, WDE, 4);
}

/* Program all eleven PLE per-client quotas. */
static void ple_quota_cfg(struct rtw89_dev *rtwdev,
			  const struct rtw89_ple_quota *min_cfg,
			  const struct rtw89_ple_quota *max_cfg)
{
	u32 val;

	SET_QUOTA(cma0_tx, PLE, 0);
	SET_QUOTA(cma1_tx, PLE, 1);
	SET_QUOTA(c2h, PLE, 2);
	SET_QUOTA(h2c, PLE, 3);
	SET_QUOTA(wcpu, PLE, 4);
	SET_QUOTA(mpdu_proc, PLE, 5);
	SET_QUOTA(cma0_dma, PLE, 6);
	SET_QUOTA(cma1_dma, PLE, 7);
	SET_QUOTA(bb_rpt, PLE, 8);
	SET_QUOTA(wd_rel, PLE, 9);
	SET_QUOTA(cpu_io, PLE, 10);
}

#undef SET_QUOTA

/* Apply the WDE and PLE quota tables of one DLE memory layout. */
static void dle_quota_cfg(struct rtw89_dev *rtwdev,
			  const struct rtw89_dle_mem *cfg,
			  u16 ext_wde_min_qt_wcpu)
{
	wde_quota_cfg(rtwdev, cfg->wde_min_qt, cfg->wde_max_qt, ext_wde_min_qt_wcpu);
	ple_quota_cfg(rtwdev, cfg->ple_min_qt, cfg->ple_max_qt);
}

/* Initialize the DLE for a QTA mode: validate the layout against the chip
 * FIFO size, program sizes and quotas with the engines gated, then enable
 * them and wait for WDE/PLE init-ready. In DLFW mode the WCPU minimum
 * quota is borrowed from ext_mode's layout.
 */
static int dle_init(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode,
		    enum rtw89_qta_mode ext_mode)
{
	const struct rtw89_dle_mem *cfg, *ext_cfg;
	u16 ext_wde_min_qt_wcpu = INVALID_QT_WCPU;
	int ret = 0;
	u32 ini;

	ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
	if (ret)
		return ret;

	cfg = get_dle_mem_cfg(rtwdev, mode);
	if (!cfg) {
		rtw89_err(rtwdev, "[ERR]get_dle_mem_cfg\n");
		ret = -EINVAL;
		goto error;
	}

	if (mode == RTW89_QTA_DLFW) {
		ext_cfg = get_dle_mem_cfg(rtwdev, ext_mode);
		if (!ext_cfg) {
			rtw89_err(rtwdev, "[ERR]get_dle_ext_mem_cfg %d\n",
				  ext_mode);
			ret = -EINVAL;
			goto error;
		}
		ext_wde_min_qt_wcpu = ext_cfg->wde_min_qt->wcpu;
	}

	if (dle_used_size(cfg->wde_size, cfg->ple_size) != rtwdev->chip->fifo_size) {
		rtw89_err(rtwdev, "[ERR]wd/dle mem cfg\n");
		ret = -EINVAL;
		goto error;
	}

	/* configure with the engines off but clocked */
	dle_func_en(rtwdev, false);
	dle_clk_en(rtwdev, true);

	ret = dle_mix_cfg(rtwdev, cfg);
	if (ret) {
		rtw89_err(rtwdev, "[ERR] dle mix cfg\n");
		goto error;
	}
	dle_quota_cfg(rtwdev, cfg, ext_wde_min_qt_wcpu);

	dle_func_en(rtwdev, true);

	ret = read_poll_timeout(rtw89_read32, ini,
				(ini & WDE_MGN_INI_RDY) == WDE_MGN_INI_RDY, 1,
				2000, false, rtwdev, R_AX_WDE_INI_STATUS);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]WDE cfg ready\n");
		return ret;
	}

	ret = read_poll_timeout(rtw89_read32, ini,
				(ini & WDE_MGN_INI_RDY) == WDE_MGN_INI_RDY, 1,
				2000, false, rtwdev, R_AX_PLE_INI_STATUS);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]PLE cfg ready\n");
		return ret;
	}

	return 0;
error:
	dle_func_en(rtwdev, false);
	rtw89_err(rtwdev, "[ERR]trxcfg wde 0x8900 = %x\n",
		  rtw89_read32(rtwdev, R_AX_WDE_INI_STATUS));
	rtw89_err(rtwdev, "[ERR]trxcfg ple 0x8D00 = %x\n",
		  rtw89_read32(rtwdev, R_AX_PLE_INI_STATUS));

	return ret;
}

/* True when every TX-related WDE/PLE queue reports empty. */
static bool dle_is_txq_empty(struct rtw89_dev *rtwdev)
{
	u32 msk32;
	u32 val32;

	msk32 = B_AX_WDE_EMPTY_QUE_CMAC0_ALL_AC | B_AX_WDE_EMPTY_QUE_CMAC0_MBH |
		B_AX_WDE_EMPTY_QUE_CMAC1_MBH | B_AX_WDE_EMPTY_QUE_CMAC0_WMM0 |
		B_AX_WDE_EMPTY_QUE_CMAC0_WMM1 | B_AX_WDE_EMPTY_QUE_OTHERS |
		B_AX_PLE_EMPTY_QUE_DMAC_MPDU_TX | B_AX_PLE_EMPTY_QTA_DMAC_H2C |
		B_AX_PLE_EMPTY_QUE_DMAC_SEC_TX | B_AX_WDE_EMPTY_QUE_DMAC_PKTIN |
		B_AX_WDE_EMPTY_QTA_DMAC_HIF | B_AX_WDE_EMPTY_QTA_DMAC_WLAN_CPU |
		B_AX_WDE_EMPTY_QTA_DMAC_PKTIN | B_AX_WDE_EMPTY_QTA_DMAC_CPUIO |
		B_AX_PLE_EMPTY_QTA_DMAC_B0_TXPL |
		B_AX_PLE_EMPTY_QTA_DMAC_B1_TXPL |
		B_AX_PLE_EMPTY_QTA_DMAC_MPDU_TX |
		B_AX_PLE_EMPTY_QTA_DMAC_CPUIO |
		B_AX_WDE_EMPTY_QTA_DMAC_DATA_CPU |
		B_AX_PLE_EMPTY_QTA_DMAC_WLAN_CPU;
	val32 = rtw89_read32(rtwdev, R_AX_DLE_EMPTY0);

	/* all monitored empty bits must be set simultaneously */
	if ((val32 & msk32) == msk32)
		return true;

	return false;
}

/* Enable the STA scheduler and wait for its init-done flag, then mark
 * warm-init so later resets keep the table contents.
 */
static int sta_sch_init(struct rtw89_dev *rtwdev)
{
	u32 p_val;
	u8 val;
	int ret;

	ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
	if (ret)
		return ret;

	val = rtw89_read8(rtwdev, R_AX_SS_CTRL);
	val |= B_AX_SS_EN;
	rtw89_write8(rtwdev, R_AX_SS_CTRL, val);

	ret = read_poll_timeout(rtw89_read32, p_val, p_val & B_AX_SS_INIT_DONE_1,
				1, TRXCFG_WAIT_CNT, false, rtwdev, R_AX_SS_CTRL);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]STA scheduler init\n");
		return ret;
	}

	rtw89_write32_set(rtwdev, R_AX_SS_CTRL, B_AX_SS_WARM_INIT_FLG);

	return 0;
}

/* Configure MPDU processor forwarding rules and cut-AMSDU control. */
static int mpdu_proc_init(struct rtw89_dev *rtwdev)
{
	int ret;

	ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
	if (ret)
		return ret;

	rtw89_write32(rtwdev, R_AX_ACTION_FWD0, TRXCFG_MPDU_PROC_ACT_FRWD);
	rtw89_write32(rtwdev, R_AX_TF_FWD, TRXCFG_MPDU_PROC_TF_FRWD);
	rtw89_write32_set(rtwdev, R_AX_MPDU_PROC,
			  B_AX_APPEND_FCS | B_AX_A_ICV_ERR);
	rtw89_write32(rtwdev, R_AX_CUT_AMSDU_CTRL, TRXCFG_MPDU_PROC_CUT_CTRL);

	return 0;
}

/* Initialize the security engine: clocks, TX/RX cipher paths and
 * MIC/ICV append options.
 */
static int sec_eng_init(struct rtw89_dev *rtwdev)
{
	u32 val = 0;
	int ret;

	ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
	if (ret)
		return ret;

	val = rtw89_read32(rtwdev, R_AX_SEC_ENG_CTRL);
	/* init clock */
	val |= (B_AX_CLK_EN_CGCMP | B_AX_CLK_EN_WAPI | B_AX_CLK_EN_WEP_TKIP);
1516 /* init TX encryption */ 1517 val |= (B_AX_SEC_TX_ENC | B_AX_SEC_RX_DEC); 1518 val |= (B_AX_MC_DEC | B_AX_BC_DEC); 1519 val &= ~B_AX_TX_PARTIAL_MODE; 1520 rtw89_write32(rtwdev, R_AX_SEC_ENG_CTRL, val); 1521 1522 /* init MIC ICV append */ 1523 val = rtw89_read32(rtwdev, R_AX_SEC_MPDU_PROC); 1524 val |= (B_AX_APPEND_ICV | B_AX_APPEND_MIC); 1525 1526 /* option init */ 1527 rtw89_write32(rtwdev, R_AX_SEC_MPDU_PROC, val); 1528 1529 return 0; 1530 } 1531 1532 static int dmac_init(struct rtw89_dev *rtwdev, u8 mac_idx) 1533 { 1534 int ret; 1535 1536 ret = dle_init(rtwdev, rtwdev->mac.qta_mode, RTW89_QTA_INVALID); 1537 if (ret) { 1538 rtw89_err(rtwdev, "[ERR]DLE init %d\n", ret); 1539 return ret; 1540 } 1541 1542 ret = hfc_init(rtwdev, true, true, true); 1543 if (ret) { 1544 rtw89_err(rtwdev, "[ERR]HCI FC init %d\n", ret); 1545 return ret; 1546 } 1547 1548 ret = sta_sch_init(rtwdev); 1549 if (ret) { 1550 rtw89_err(rtwdev, "[ERR]STA SCH init %d\n", ret); 1551 return ret; 1552 } 1553 1554 ret = mpdu_proc_init(rtwdev); 1555 if (ret) { 1556 rtw89_err(rtwdev, "[ERR]MPDU Proc init %d\n", ret); 1557 return ret; 1558 } 1559 1560 ret = sec_eng_init(rtwdev); 1561 if (ret) { 1562 rtw89_err(rtwdev, "[ERR]Security Engine init %d\n", ret); 1563 return ret; 1564 } 1565 1566 return ret; 1567 } 1568 1569 static int addr_cam_init(struct rtw89_dev *rtwdev, u8 mac_idx) 1570 { 1571 u32 val, reg; 1572 u16 p_val; 1573 int ret; 1574 1575 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 1576 if (ret) 1577 return ret; 1578 1579 reg = rtw89_mac_reg_by_idx(R_AX_ADDR_CAM_CTRL, mac_idx); 1580 1581 val = rtw89_read32(rtwdev, reg); 1582 val |= u32_encode_bits(0x7f, B_AX_ADDR_CAM_RANGE_MASK) | 1583 B_AX_ADDR_CAM_CLR | B_AX_ADDR_CAM_EN; 1584 rtw89_write32(rtwdev, reg, val); 1585 1586 ret = read_poll_timeout(rtw89_read16, p_val, !(p_val & B_AX_ADDR_CAM_CLR), 1587 1, TRXCFG_WAIT_CNT, false, rtwdev, B_AX_ADDR_CAM_CLR); 1588 if (ret) { 1589 rtw89_err(rtwdev, "[ERR]ADDR_CAM reset\n"); 1590 
return ret; 1591 } 1592 1593 return 0; 1594 } 1595 1596 static int scheduler_init(struct rtw89_dev *rtwdev, u8 mac_idx) 1597 { 1598 u32 ret; 1599 u32 reg; 1600 1601 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 1602 if (ret) 1603 return ret; 1604 1605 reg = rtw89_mac_reg_by_idx(R_AX_PREBKF_CFG_0, mac_idx); 1606 rtw89_write32_mask(rtwdev, reg, B_AX_PREBKF_TIME_MASK, SCH_PREBKF_24US); 1607 1608 return 0; 1609 } 1610 1611 static int rtw89_mac_typ_fltr_opt(struct rtw89_dev *rtwdev, 1612 enum rtw89_machdr_frame_type type, 1613 enum rtw89_mac_fwd_target fwd_target, 1614 u8 mac_idx) 1615 { 1616 u32 reg; 1617 u32 val; 1618 1619 switch (fwd_target) { 1620 case RTW89_FWD_DONT_CARE: 1621 val = RX_FLTR_FRAME_DROP; 1622 break; 1623 case RTW89_FWD_TO_HOST: 1624 val = RX_FLTR_FRAME_TO_HOST; 1625 break; 1626 case RTW89_FWD_TO_WLAN_CPU: 1627 val = RX_FLTR_FRAME_TO_WLCPU; 1628 break; 1629 default: 1630 rtw89_err(rtwdev, "[ERR]set rx filter fwd target err\n"); 1631 return -EINVAL; 1632 } 1633 1634 switch (type) { 1635 case RTW89_MGNT: 1636 reg = rtw89_mac_reg_by_idx(R_AX_MGNT_FLTR, mac_idx); 1637 break; 1638 case RTW89_CTRL: 1639 reg = rtw89_mac_reg_by_idx(R_AX_CTRL_FLTR, mac_idx); 1640 break; 1641 case RTW89_DATA: 1642 reg = rtw89_mac_reg_by_idx(R_AX_DATA_FLTR, mac_idx); 1643 break; 1644 default: 1645 rtw89_err(rtwdev, "[ERR]set rx filter type err\n"); 1646 return -EINVAL; 1647 } 1648 rtw89_write32(rtwdev, reg, val); 1649 1650 return 0; 1651 } 1652 1653 static int rx_fltr_init(struct rtw89_dev *rtwdev, u8 mac_idx) 1654 { 1655 int ret, i; 1656 u32 mac_ftlr, plcp_ftlr; 1657 1658 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 1659 if (ret) 1660 return ret; 1661 1662 for (i = RTW89_MGNT; i <= RTW89_DATA; i++) { 1663 ret = rtw89_mac_typ_fltr_opt(rtwdev, i, RTW89_FWD_TO_HOST, 1664 mac_idx); 1665 if (ret) 1666 return ret; 1667 } 1668 mac_ftlr = rtwdev->hal.rx_fltr; 1669 plcp_ftlr = B_AX_CCK_CRC_CHK | B_AX_CCK_SIG_CHK | 1670 B_AX_LSIG_PARITY_CHK_EN | 
		    B_AX_SIGA_CRC_CHK |
		    B_AX_VHT_SU_SIGB_CRC_CHK | B_AX_VHT_MU_SIGB_CRC_CHK |
		    B_AX_HE_SIGB_CRC_CHK;
	rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, mac_idx),
		      mac_ftlr);
	rtw89_write16(rtwdev, rtw89_mac_reg_by_idx(R_AX_PLCP_HDR_FLTR, mac_idx),
		      plcp_ftlr);

	return 0;
}

/* Chip-specific patch: 8852A/B disable NAV/CCA checks before sending a
 * response frame; other chips enable them.
 */
static void _patch_dis_resp_chk(struct rtw89_dev *rtwdev, u8 mac_idx)
{
	u32 reg, val32;
	u32 b_rsp_chk_nav, b_rsp_chk_cca;

	b_rsp_chk_nav = B_AX_RSP_CHK_TXNAV | B_AX_RSP_CHK_INTRA_NAV |
			B_AX_RSP_CHK_BASIC_NAV;
	b_rsp_chk_cca = B_AX_RSP_CHK_SEC_CCA_80 | B_AX_RSP_CHK_SEC_CCA_40 |
			B_AX_RSP_CHK_SEC_CCA_20 | B_AX_RSP_CHK_BTCCA |
			B_AX_RSP_CHK_EDCCA | B_AX_RSP_CHK_CCA;

	switch (rtwdev->chip->chip_id) {
	case RTL8852A:
	case RTL8852B:
		reg = rtw89_mac_reg_by_idx(R_AX_RSP_CHK_SIG, mac_idx);
		val32 = rtw89_read32(rtwdev, reg) & ~b_rsp_chk_nav;
		rtw89_write32(rtwdev, reg, val32);

		reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_0, mac_idx);
		val32 = rtw89_read32(rtwdev, reg) & ~b_rsp_chk_cca;
		rtw89_write32(rtwdev, reg, val32);
		break;
	default:
		reg = rtw89_mac_reg_by_idx(R_AX_RSP_CHK_SIG, mac_idx);
		val32 = rtw89_read32(rtwdev, reg) | b_rsp_chk_nav;
		rtw89_write32(rtwdev, reg, val32);

		reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_0, mac_idx);
		val32 = rtw89_read32(rtwdev, reg) | b_rsp_chk_cca;
		rtw89_write32(rtwdev, reg, val32);
		break;
	}
}

/* Select which CCA/NAV sources gate TB, SIFS and contention TX. */
static int cca_ctrl_init(struct rtw89_dev *rtwdev, u8 mac_idx)
{
	u32 val, reg;
	int ret;

	ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
	if (ret)
		return ret;

	reg = rtw89_mac_reg_by_idx(R_AX_CCA_CONTROL, mac_idx);
	val = rtw89_read32(rtwdev, reg);
	val |= (B_AX_TB_CHK_BASIC_NAV | B_AX_TB_CHK_BTCCA |
		B_AX_TB_CHK_EDCCA | B_AX_TB_CHK_CCA_P20 |
		B_AX_SIFS_CHK_BTCCA | B_AX_SIFS_CHK_CCA_P20 |
		B_AX_CTN_CHK_INTRA_NAV |
		B_AX_CTN_CHK_BASIC_NAV | B_AX_CTN_CHK_BTCCA |
		B_AX_CTN_CHK_EDCCA | B_AX_CTN_CHK_CCA_S80 |
		B_AX_CTN_CHK_CCA_S40 | B_AX_CTN_CHK_CCA_S20 |
		B_AX_CTN_CHK_CCA_P20 | B_AX_SIFS_CHK_EDCCA);
	val &= ~(B_AX_TB_CHK_TX_NAV | B_AX_TB_CHK_CCA_S80 |
		 B_AX_TB_CHK_CCA_S40 | B_AX_TB_CHK_CCA_S20 |
		 B_AX_SIFS_CHK_CCA_S80 | B_AX_SIFS_CHK_CCA_S40 |
		 B_AX_SIFS_CHK_CCA_S20 | B_AX_CTN_CHK_TXNAV);

	rtw89_write32(rtwdev, reg, val);

	_patch_dis_resp_chk(rtwdev, mac_idx);

	return 0;
}

/* Disable spatial-reuse RX on this band. */
static int spatial_reuse_init(struct rtw89_dev *rtwdev, u8 mac_idx)
{
	u32 reg;
	int ret;

	ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
	if (ret)
		return ret;
	reg = rtw89_mac_reg_by_idx(R_AX_RX_SR_CTRL, mac_idx);
	rtw89_write8_clr(rtwdev, reg, B_AX_SR_EN);

	return 0;
}

/* TMAC init: make sure MAC loopback is off. */
static int tmac_init(struct rtw89_dev *rtwdev, u8 mac_idx)
{
	u32 reg;
	int ret;

	ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
	if (ret)
		return ret;

	reg = rtw89_mac_reg_by_idx(R_AX_MAC_LOOPBACK, mac_idx);
	rtw89_write32_clr(rtwdev, reg, B_AX_MACLBK_EN);

	return 0;
}

/* TRX protocol init: program CCK/OFDM SIFS times (OFDM value is
 * chip-dependent) and enable FCS check on RX trigger frames.
 */
static int trxptcl_init(struct rtw89_dev *rtwdev, u8 mac_idx)
{
	u32 reg, val, sifs;
	int ret;

	ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
	if (ret)
		return ret;

	reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_0, mac_idx);
	val = rtw89_read32(rtwdev, reg);
	val &= ~B_AX_WMAC_SPEC_SIFS_CCK_MASK;
	val |= FIELD_PREP(B_AX_WMAC_SPEC_SIFS_CCK_MASK, WMAC_SPEC_SIFS_CCK);

	switch (rtwdev->chip->chip_id) {
	case RTL8852A:
		sifs = WMAC_SPEC_SIFS_OFDM_52A;
		break;
	case RTL8852B:
		sifs = WMAC_SPEC_SIFS_OFDM_52B;
		break;
	default:
		sifs = WMAC_SPEC_SIFS_OFDM_52C;
		break;
	}
	val &= ~B_AX_WMAC_SPEC_SIFS_OFDM_MASK;
	val |= FIELD_PREP(B_AX_WMAC_SPEC_SIFS_OFDM_MASK, sifs);
	rtw89_write32(rtwdev, reg, val);

	reg = rtw89_mac_reg_by_idx(R_AX_RXTRIG_TEST_USER_2, mac_idx);
	rtw89_write32_set(rtwdev, reg, B_AX_RXTRIG_FCSCHK_EN);

	return 0;
}

/* RMAC init: deadlock-protection timeouts, RX channel enable and the
 * maximum MPDU length derived from this band's PLE RX quota.
 */
static int rmac_init(struct rtw89_dev *rtwdev, u8 mac_idx)
{
#define TRXCFG_RMAC_CCA_TO	32
#define TRXCFG_RMAC_DATA_TO	15
#define RX_MAX_LEN_UNIT 512
#define PLD_RLS_MAX_PG 127
	int ret;
	u32 reg, rx_max_len, rx_qta;
	u16 val;

	ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
	if (ret)
		return ret;

	reg = rtw89_mac_reg_by_idx(R_AX_RESPBA_CAM_CTRL, mac_idx);
	rtw89_write8_set(rtwdev, reg, B_AX_SSN_SEL);

	reg = rtw89_mac_reg_by_idx(R_AX_DLK_PROTECT_CTL, mac_idx);
	val = rtw89_read16(rtwdev, reg);
	val = u16_replace_bits(val, TRXCFG_RMAC_DATA_TO,
			       B_AX_RX_DLK_DATA_TIME_MASK);
	val = u16_replace_bits(val, TRXCFG_RMAC_CCA_TO,
			       B_AX_RX_DLK_CCA_TIME_MASK);
	rtw89_write16(rtwdev, reg, val);

	reg = rtw89_mac_reg_by_idx(R_AX_RCR, mac_idx);
	rtw89_write8_mask(rtwdev, reg, B_AX_CH_EN_MASK, 0x1);

	reg = rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, mac_idx);
	if (mac_idx == RTW89_MAC_0)
		rx_qta = rtwdev->mac.dle_info.c0_rx_qta;
	else
		rx_qta = rtwdev->mac.dle_info.c1_rx_qta;
	/* clamp quota, convert pages to 512-byte max-length units */
	rx_qta = rx_qta > PLD_RLS_MAX_PG ? PLD_RLS_MAX_PG : rx_qta;
	rx_max_len = (rx_qta - 1) * rtwdev->mac.dle_info.ple_pg_size /
		     RX_MAX_LEN_UNIT;
	rx_max_len = rx_max_len > B_AX_RX_MPDU_MAX_LEN_SIZE ?
		     B_AX_RX_MPDU_MAX_LEN_SIZE : rx_max_len;
	rtw89_write32_mask(rtwdev, reg, B_AX_RX_MPDU_MAX_LEN_MASK, rx_max_len);

	/* workaround for 8852A CBV silicon */
	if (rtwdev->chip->chip_id == RTL8852A &&
	    rtwdev->hal.cv == CHIP_CBV) {
		rtw89_write16_mask(rtwdev,
				   rtw89_mac_reg_by_idx(R_AX_DLK_PROTECT_CTL, mac_idx),
				   B_AX_RX_DLK_CCA_TIME_MASK, 0);
		rtw89_write16_set(rtwdev, rtw89_mac_reg_by_idx(R_AX_RCR, mac_idx),
				  BIT(12));
	}

	reg = rtw89_mac_reg_by_idx(R_AX_PLCP_HDR_FLTR, mac_idx);
	rtw89_write8_clr(rtwdev, reg, B_AX_VHT_SU_SIGB_CRC_CHK);

	return ret;
}

/* CMAC common init: zero the TX sub-carrier values for all bandwidths. */
static int cmac_com_init(struct rtw89_dev *rtwdev, u8 mac_idx)
{
	u32 val, reg;
	int ret;

	ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
	if (ret)
		return ret;

	reg = rtw89_mac_reg_by_idx(R_AX_TX_SUB_CARRIER_VALUE, mac_idx);
	val = rtw89_read32(rtwdev, reg);
	val = u32_replace_bits(val, 0, B_AX_TXSC_20M_MASK);
	val = u32_replace_bits(val, 0, B_AX_TXSC_40M_MASK);
	val = u32_replace_bits(val, 0, B_AX_TXSC_80M_MASK);
	rtw89_write32(rtwdev, reg, val);

	return 0;
}

/* A QTA mode supports DBCC when it reserves CMAC1 RX DMA quota. */
static bool is_qta_dbcc(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode)
{
	const struct rtw89_dle_mem *cfg;

	cfg = get_dle_mem_cfg(rtwdev, mode);
	if (!cfg) {
		rtw89_err(rtwdev, "[ERR]get_dle_mem_cfg\n");
		return false;
	}

	return (cfg->ple_min_qt->cma1_dma && cfg->ple_max_qt->cma1_dma);
}

/* Protocol controller init: HW CTS-to-self thresholds and, on PCIe,
 * the TX arbiter timeout.
 */
static int ptcl_init(struct rtw89_dev *rtwdev, u8 mac_idx)
{
	u32 val, reg;
	int ret;

	ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
	if (ret)
		return ret;

	if (rtwdev->hci.type == RTW89_HCI_TYPE_PCIE) {
		reg = rtw89_mac_reg_by_idx(R_AX_SIFS_SETTING, mac_idx);
		val = rtw89_read32(rtwdev, reg);
		val = u32_replace_bits(val, S_AX_CTS2S_TH_1K,
				       B_AX_HW_CTS2SELF_PKT_LEN_TH_MASK);
		val |= B_AX_HW_CTS2SELF_EN;
1912 rtw89_write32(rtwdev, reg, val); 1913 1914 reg = rtw89_mac_reg_by_idx(R_AX_PTCL_FSM_MON, mac_idx); 1915 val = rtw89_read32(rtwdev, reg); 1916 val = u32_replace_bits(val, S_AX_PTCL_TO_2MS, B_AX_PTCL_TX_ARB_TO_THR_MASK); 1917 val &= ~B_AX_PTCL_TX_ARB_TO_MODE; 1918 rtw89_write32(rtwdev, reg, val); 1919 } 1920 1921 reg = rtw89_mac_reg_by_idx(R_AX_SIFS_SETTING, mac_idx); 1922 val = rtw89_read32(rtwdev, reg); 1923 val = u32_replace_bits(val, S_AX_CTS2S_TH_SEC_256B, B_AX_HW_CTS2SELF_PKT_LEN_TH_TWW_MASK); 1924 val |= B_AX_HW_CTS2SELF_EN; 1925 rtw89_write32(rtwdev, reg, val); 1926 1927 return 0; 1928 } 1929 1930 static int cmac_init(struct rtw89_dev *rtwdev, u8 mac_idx) 1931 { 1932 int ret; 1933 1934 ret = scheduler_init(rtwdev, mac_idx); 1935 if (ret) { 1936 rtw89_err(rtwdev, "[ERR]CMAC%d SCH init %d\n", mac_idx, ret); 1937 return ret; 1938 } 1939 1940 ret = addr_cam_init(rtwdev, mac_idx); 1941 if (ret) { 1942 rtw89_err(rtwdev, "[ERR]CMAC%d ADDR_CAM reset %d\n", mac_idx, 1943 ret); 1944 return ret; 1945 } 1946 1947 ret = rx_fltr_init(rtwdev, mac_idx); 1948 if (ret) { 1949 rtw89_err(rtwdev, "[ERR]CMAC%d RX filter init %d\n", mac_idx, 1950 ret); 1951 return ret; 1952 } 1953 1954 ret = cca_ctrl_init(rtwdev, mac_idx); 1955 if (ret) { 1956 rtw89_err(rtwdev, "[ERR]CMAC%d CCA CTRL init %d\n", mac_idx, 1957 ret); 1958 return ret; 1959 } 1960 1961 ret = spatial_reuse_init(rtwdev, mac_idx); 1962 if (ret) { 1963 rtw89_err(rtwdev, "[ERR]CMAC%d Spatial Reuse init %d\n", 1964 mac_idx, ret); 1965 return ret; 1966 } 1967 1968 ret = tmac_init(rtwdev, mac_idx); 1969 if (ret) { 1970 rtw89_err(rtwdev, "[ERR]CMAC%d TMAC init %d\n", mac_idx, ret); 1971 return ret; 1972 } 1973 1974 ret = trxptcl_init(rtwdev, mac_idx); 1975 if (ret) { 1976 rtw89_err(rtwdev, "[ERR]CMAC%d TRXPTCL init %d\n", mac_idx, ret); 1977 return ret; 1978 } 1979 1980 ret = rmac_init(rtwdev, mac_idx); 1981 if (ret) { 1982 rtw89_err(rtwdev, "[ERR]CMAC%d RMAC init %d\n", mac_idx, ret); 1983 return ret; 1984 } 1985 1986 ret 
= cmac_com_init(rtwdev, mac_idx); 1987 if (ret) { 1988 rtw89_err(rtwdev, "[ERR]CMAC%d Com init %d\n", mac_idx, ret); 1989 return ret; 1990 } 1991 1992 ret = ptcl_init(rtwdev, mac_idx); 1993 if (ret) { 1994 rtw89_err(rtwdev, "[ERR]CMAC%d PTCL init %d\n", mac_idx, ret); 1995 return ret; 1996 } 1997 1998 return ret; 1999 } 2000 2001 static int rtw89_mac_read_phycap(struct rtw89_dev *rtwdev, 2002 struct rtw89_mac_c2h_info *c2h_info) 2003 { 2004 struct rtw89_mac_h2c_info h2c_info = {0}; 2005 u32 ret; 2006 2007 h2c_info.id = RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE; 2008 h2c_info.content_len = 0; 2009 2010 ret = rtw89_fw_msg_reg(rtwdev, &h2c_info, c2h_info); 2011 if (ret) 2012 return ret; 2013 2014 if (c2h_info->id != RTW89_FWCMD_C2HREG_FUNC_PHY_CAP) 2015 return -EINVAL; 2016 2017 return 0; 2018 } 2019 2020 int rtw89_mac_setup_phycap(struct rtw89_dev *rtwdev) 2021 { 2022 struct rtw89_hal *hal = &rtwdev->hal; 2023 const struct rtw89_chip_info *chip = rtwdev->chip; 2024 struct rtw89_mac_c2h_info c2h_info = {0}; 2025 struct rtw89_c2h_phy_cap *cap = 2026 (struct rtw89_c2h_phy_cap *)&c2h_info.c2hreg[0]; 2027 u32 ret; 2028 2029 ret = rtw89_mac_read_phycap(rtwdev, &c2h_info); 2030 if (ret) 2031 return ret; 2032 2033 hal->tx_nss = cap->tx_nss ? 2034 min_t(u8, cap->tx_nss, chip->tx_nss) : chip->tx_nss; 2035 hal->rx_nss = cap->rx_nss ? 
2036 min_t(u8, cap->rx_nss, chip->rx_nss) : chip->rx_nss; 2037 2038 rtw89_debug(rtwdev, RTW89_DBG_FW, 2039 "phycap hal/phy/chip: tx_nss=0x%x/0x%x/0x%x rx_nss=0x%x/0x%x/0x%x\n", 2040 hal->tx_nss, cap->tx_nss, chip->tx_nss, 2041 hal->rx_nss, cap->rx_nss, chip->rx_nss); 2042 2043 return 0; 2044 } 2045 2046 static int rtw89_hw_sch_tx_en_h2c(struct rtw89_dev *rtwdev, u8 band, 2047 u16 tx_en_u16, u16 mask_u16) 2048 { 2049 u32 ret; 2050 struct rtw89_mac_c2h_info c2h_info = {0}; 2051 struct rtw89_mac_h2c_info h2c_info = {0}; 2052 struct rtw89_h2creg_sch_tx_en *h2creg = 2053 (struct rtw89_h2creg_sch_tx_en *)h2c_info.h2creg; 2054 2055 h2c_info.id = RTW89_FWCMD_H2CREG_FUNC_SCH_TX_EN; 2056 h2c_info.content_len = sizeof(*h2creg) - RTW89_H2CREG_HDR_LEN; 2057 h2creg->tx_en = tx_en_u16; 2058 h2creg->mask = mask_u16; 2059 h2creg->band = band; 2060 2061 ret = rtw89_fw_msg_reg(rtwdev, &h2c_info, &c2h_info); 2062 if (ret) 2063 return ret; 2064 2065 if (c2h_info.id != RTW89_FWCMD_C2HREG_FUNC_TX_PAUSE_RPT) 2066 return -EINVAL; 2067 2068 return 0; 2069 } 2070 2071 static int rtw89_set_hw_sch_tx_en(struct rtw89_dev *rtwdev, u8 mac_idx, 2072 u16 tx_en, u16 tx_en_mask) 2073 { 2074 u32 reg = rtw89_mac_reg_by_idx(R_AX_CTN_TXEN, mac_idx); 2075 u16 val; 2076 int ret; 2077 2078 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 2079 if (ret) 2080 return ret; 2081 2082 if (test_bit(RTW89_FLAG_FW_RDY, rtwdev->flags)) 2083 return rtw89_hw_sch_tx_en_h2c(rtwdev, mac_idx, 2084 tx_en, tx_en_mask); 2085 2086 val = rtw89_read16(rtwdev, reg); 2087 val = (val & ~tx_en_mask) | (tx_en & tx_en_mask); 2088 rtw89_write16(rtwdev, reg, val); 2089 2090 return 0; 2091 } 2092 2093 int rtw89_mac_stop_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, 2094 u16 *tx_en, enum rtw89_sch_tx_sel sel) 2095 { 2096 int ret; 2097 2098 *tx_en = rtw89_read16(rtwdev, 2099 rtw89_mac_reg_by_idx(R_AX_CTN_TXEN, mac_idx)); 2100 2101 switch (sel) { 2102 case RTW89_SCH_TX_SEL_ALL: 2103 ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, 
0, 0xffff); 2104 if (ret) 2105 return ret; 2106 break; 2107 case RTW89_SCH_TX_SEL_HIQ: 2108 ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, 2109 0, B_AX_CTN_TXEN_HGQ); 2110 if (ret) 2111 return ret; 2112 break; 2113 case RTW89_SCH_TX_SEL_MG0: 2114 ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, 2115 0, B_AX_CTN_TXEN_MGQ); 2116 if (ret) 2117 return ret; 2118 break; 2119 case RTW89_SCH_TX_SEL_MACID: 2120 ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, 0, 0xffff); 2121 if (ret) 2122 return ret; 2123 break; 2124 default: 2125 return 0; 2126 } 2127 2128 return 0; 2129 } 2130 EXPORT_SYMBOL(rtw89_mac_stop_sch_tx); 2131 2132 int rtw89_mac_resume_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, u16 tx_en) 2133 { 2134 int ret; 2135 2136 ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, tx_en, 0xffff); 2137 if (ret) 2138 return ret; 2139 2140 return 0; 2141 } 2142 EXPORT_SYMBOL(rtw89_mac_resume_sch_tx); 2143 2144 static u16 rtw89_mac_dle_buf_req(struct rtw89_dev *rtwdev, u16 buf_len, 2145 bool wd) 2146 { 2147 u32 val, reg; 2148 int ret; 2149 2150 reg = wd ? R_AX_WD_BUF_REQ : R_AX_PL_BUF_REQ; 2151 val = buf_len; 2152 val |= B_AX_WD_BUF_REQ_EXEC; 2153 rtw89_write32(rtwdev, reg, val); 2154 2155 reg = wd ? R_AX_WD_BUF_STATUS : R_AX_PL_BUF_STATUS; 2156 2157 ret = read_poll_timeout(rtw89_read32, val, val & B_AX_WD_BUF_STAT_DONE, 2158 1, 2000, false, rtwdev, reg); 2159 if (ret) 2160 return 0xffff; 2161 2162 return FIELD_GET(B_AX_WD_BUF_STAT_PKTID_MASK, val); 2163 } 2164 2165 static int rtw89_mac_set_cpuio(struct rtw89_dev *rtwdev, 2166 struct rtw89_cpuio_ctrl *ctrl_para, 2167 bool wd) 2168 { 2169 u32 val, cmd_type, reg; 2170 int ret; 2171 2172 cmd_type = ctrl_para->cmd_type; 2173 2174 reg = wd ? R_AX_WD_CPUQ_OP_2 : R_AX_PL_CPUQ_OP_2; 2175 val = 0; 2176 val = u32_replace_bits(val, ctrl_para->start_pktid, 2177 B_AX_WD_CPUQ_OP_STRT_PKTID_MASK); 2178 val = u32_replace_bits(val, ctrl_para->end_pktid, 2179 B_AX_WD_CPUQ_OP_END_PKTID_MASK); 2180 rtw89_write32(rtwdev, reg, val); 2181 2182 reg = wd ? 
R_AX_WD_CPUQ_OP_1 : R_AX_PL_CPUQ_OP_1; 2183 val = 0; 2184 val = u32_replace_bits(val, ctrl_para->src_pid, 2185 B_AX_CPUQ_OP_SRC_PID_MASK); 2186 val = u32_replace_bits(val, ctrl_para->src_qid, 2187 B_AX_CPUQ_OP_SRC_QID_MASK); 2188 val = u32_replace_bits(val, ctrl_para->dst_pid, 2189 B_AX_CPUQ_OP_DST_PID_MASK); 2190 val = u32_replace_bits(val, ctrl_para->dst_qid, 2191 B_AX_CPUQ_OP_DST_QID_MASK); 2192 rtw89_write32(rtwdev, reg, val); 2193 2194 reg = wd ? R_AX_WD_CPUQ_OP_0 : R_AX_PL_CPUQ_OP_0; 2195 val = 0; 2196 val = u32_replace_bits(val, cmd_type, 2197 B_AX_CPUQ_OP_CMD_TYPE_MASK); 2198 val = u32_replace_bits(val, ctrl_para->macid, 2199 B_AX_CPUQ_OP_MACID_MASK); 2200 val = u32_replace_bits(val, ctrl_para->pkt_num, 2201 B_AX_CPUQ_OP_PKTNUM_MASK); 2202 val |= B_AX_WD_CPUQ_OP_EXEC; 2203 rtw89_write32(rtwdev, reg, val); 2204 2205 reg = wd ? R_AX_WD_CPUQ_OP_STATUS : R_AX_PL_CPUQ_OP_STATUS; 2206 2207 ret = read_poll_timeout(rtw89_read32, val, val & B_AX_WD_CPUQ_OP_STAT_DONE, 2208 1, 2000, false, rtwdev, reg); 2209 if (ret) 2210 return ret; 2211 2212 if (cmd_type == CPUIO_OP_CMD_GET_1ST_PID || 2213 cmd_type == CPUIO_OP_CMD_GET_NEXT_PID) 2214 ctrl_para->pktid = FIELD_GET(B_AX_WD_CPUQ_OP_PKTID_MASK, val); 2215 2216 return 0; 2217 } 2218 2219 static int dle_quota_change(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode) 2220 { 2221 const struct rtw89_dle_mem *cfg; 2222 struct rtw89_cpuio_ctrl ctrl_para = {0}; 2223 u16 pkt_id; 2224 int ret; 2225 2226 cfg = get_dle_mem_cfg(rtwdev, mode); 2227 if (!cfg) { 2228 rtw89_err(rtwdev, "[ERR]wd/dle mem cfg\n"); 2229 return -EINVAL; 2230 } 2231 2232 if (dle_used_size(cfg->wde_size, cfg->ple_size) != rtwdev->chip->fifo_size) { 2233 rtw89_err(rtwdev, "[ERR]wd/dle mem cfg\n"); 2234 return -EINVAL; 2235 } 2236 2237 dle_quota_cfg(rtwdev, cfg, INVALID_QT_WCPU); 2238 2239 pkt_id = rtw89_mac_dle_buf_req(rtwdev, 0x20, true); 2240 if (pkt_id == 0xffff) { 2241 rtw89_err(rtwdev, "[ERR]WDE DLE buf req\n"); 2242 return -ENOMEM; 2243 } 2244 2245 
ctrl_para.cmd_type = CPUIO_OP_CMD_ENQ_TO_HEAD; 2246 ctrl_para.start_pktid = pkt_id; 2247 ctrl_para.end_pktid = pkt_id; 2248 ctrl_para.pkt_num = 0; 2249 ctrl_para.dst_pid = WDE_DLE_PORT_ID_WDRLS; 2250 ctrl_para.dst_qid = WDE_DLE_QUEID_NO_REPORT; 2251 ret = rtw89_mac_set_cpuio(rtwdev, &ctrl_para, true); 2252 if (ret) { 2253 rtw89_err(rtwdev, "[ERR]WDE DLE enqueue to head\n"); 2254 return -EFAULT; 2255 } 2256 2257 pkt_id = rtw89_mac_dle_buf_req(rtwdev, 0x20, false); 2258 if (pkt_id == 0xffff) { 2259 rtw89_err(rtwdev, "[ERR]PLE DLE buf req\n"); 2260 return -ENOMEM; 2261 } 2262 2263 ctrl_para.cmd_type = CPUIO_OP_CMD_ENQ_TO_HEAD; 2264 ctrl_para.start_pktid = pkt_id; 2265 ctrl_para.end_pktid = pkt_id; 2266 ctrl_para.pkt_num = 0; 2267 ctrl_para.dst_pid = PLE_DLE_PORT_ID_PLRLS; 2268 ctrl_para.dst_qid = PLE_DLE_QUEID_NO_REPORT; 2269 ret = rtw89_mac_set_cpuio(rtwdev, &ctrl_para, false); 2270 if (ret) { 2271 rtw89_err(rtwdev, "[ERR]PLE DLE enqueue to head\n"); 2272 return -EFAULT; 2273 } 2274 2275 return 0; 2276 } 2277 2278 static int band_idle_ck_b(struct rtw89_dev *rtwdev, u8 mac_idx) 2279 { 2280 int ret; 2281 u32 reg; 2282 u8 val; 2283 2284 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 2285 if (ret) 2286 return ret; 2287 2288 reg = rtw89_mac_reg_by_idx(R_AX_PTCL_TX_CTN_SEL, mac_idx); 2289 2290 ret = read_poll_timeout(rtw89_read8, val, 2291 (val & B_AX_PTCL_TX_ON_STAT) == 0, 2292 SW_CVR_DUR_US, 2293 SW_CVR_DUR_US * PTCL_IDLE_POLL_CNT, 2294 false, rtwdev, reg); 2295 if (ret) 2296 return ret; 2297 2298 return 0; 2299 } 2300 2301 static int band1_enable(struct rtw89_dev *rtwdev) 2302 { 2303 int ret, i; 2304 u32 sleep_bak[4] = {0}; 2305 u32 pause_bak[4] = {0}; 2306 u16 tx_en; 2307 2308 ret = rtw89_mac_stop_sch_tx(rtwdev, 0, &tx_en, RTW89_SCH_TX_SEL_ALL); 2309 if (ret) { 2310 rtw89_err(rtwdev, "[ERR]stop sch tx %d\n", ret); 2311 return ret; 2312 } 2313 2314 for (i = 0; i < 4; i++) { 2315 sleep_bak[i] = rtw89_read32(rtwdev, R_AX_MACID_SLEEP_0 + i * 4); 2316 
pause_bak[i] = rtw89_read32(rtwdev, R_AX_SS_MACID_PAUSE_0 + i * 4); 2317 rtw89_write32(rtwdev, R_AX_MACID_SLEEP_0 + i * 4, U32_MAX); 2318 rtw89_write32(rtwdev, R_AX_SS_MACID_PAUSE_0 + i * 4, U32_MAX); 2319 } 2320 2321 ret = band_idle_ck_b(rtwdev, 0); 2322 if (ret) { 2323 rtw89_err(rtwdev, "[ERR]tx idle poll %d\n", ret); 2324 return ret; 2325 } 2326 2327 ret = dle_quota_change(rtwdev, rtwdev->mac.qta_mode); 2328 if (ret) { 2329 rtw89_err(rtwdev, "[ERR]DLE quota change %d\n", ret); 2330 return ret; 2331 } 2332 2333 for (i = 0; i < 4; i++) { 2334 rtw89_write32(rtwdev, R_AX_MACID_SLEEP_0 + i * 4, sleep_bak[i]); 2335 rtw89_write32(rtwdev, R_AX_SS_MACID_PAUSE_0 + i * 4, pause_bak[i]); 2336 } 2337 2338 ret = rtw89_mac_resume_sch_tx(rtwdev, 0, tx_en); 2339 if (ret) { 2340 rtw89_err(rtwdev, "[ERR]CMAC1 resume sch tx %d\n", ret); 2341 return ret; 2342 } 2343 2344 ret = cmac_func_en(rtwdev, 1, true); 2345 if (ret) { 2346 rtw89_err(rtwdev, "[ERR]CMAC1 func en %d\n", ret); 2347 return ret; 2348 } 2349 2350 ret = cmac_init(rtwdev, 1); 2351 if (ret) { 2352 rtw89_err(rtwdev, "[ERR]CMAC1 init %d\n", ret); 2353 return ret; 2354 } 2355 2356 rtw89_write32_set(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND, 2357 B_AX_R_SYM_FEN_WLBBFUN_1 | B_AX_R_SYM_FEN_WLBBGLB_1); 2358 2359 return 0; 2360 } 2361 2362 static int rtw89_mac_enable_imr(struct rtw89_dev *rtwdev, u8 mac_idx, 2363 enum rtw89_mac_hwmod_sel sel) 2364 { 2365 u32 reg, val; 2366 int ret; 2367 2368 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, sel); 2369 if (ret) { 2370 rtw89_err(rtwdev, "MAC%d mac_idx%d is not ready\n", 2371 sel, mac_idx); 2372 return ret; 2373 } 2374 2375 if (sel == RTW89_DMAC_SEL) { 2376 rtw89_write32_clr(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR, 2377 B_AX_TXPKTCTL_USRCTL_RLSBMPLEN_ERR_INT_EN | 2378 B_AX_TXPKTCTL_USRCTL_RDNRLSCMD_ERR_INT_EN | 2379 B_AX_TXPKTCTL_CMDPSR_FRZTO_ERR_INT_EN); 2380 rtw89_write32_clr(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR_B1, 2381 B_AX_TXPKTCTL_USRCTL_RLSBMPLEN_ERR_INT_EN | 2382 
				  B_AX_TXPKTCTL_USRCTL_RDNRLSCMD_ERR_INT_EN);
		/* DMAC path: mask noisy dispatcher/PLE/WDRLS error interrupts
		 * and enable only the WDT platform interrupt.
		 */
		rtw89_write32_clr(rtwdev, R_AX_HOST_DISPATCHER_ERR_IMR,
				  B_AX_HDT_PKT_FAIL_DBG_INT_EN |
				  B_AX_HDT_OFFSET_UNMATCH_INT_EN);
		rtw89_write32_clr(rtwdev, R_AX_CPU_DISPATCHER_ERR_IMR,
				  B_AX_CPU_SHIFT_EN_ERR_INT_EN);
		rtw89_write32_clr(rtwdev, R_AX_PLE_ERR_IMR,
				  B_AX_PLE_GETNPG_STRPG_ERR_INT_EN);
		rtw89_write32_clr(rtwdev, R_AX_WDRLS_ERR_IMR,
				  B_AX_WDRLS_PLEBREQ_TO_ERR_INT_EN);
		rtw89_write32_set(rtwdev, R_AX_HD0IMR, B_AX_WDT_PTFM_INT_EN);
		rtw89_write32_clr(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR,
				  B_AX_TXPKTCTL_USRCTL_NOINIT_ERR_INT_EN);
	} else if (sel == RTW89_CMAC_SEL) {
		/* CMAC path: registers are per-band, resolved via
		 * rtw89_mac_reg_by_idx(..., mac_idx).
		 */
		reg = rtw89_mac_reg_by_idx(R_AX_SCHEDULE_ERR_IMR, mac_idx);
		rtw89_write32_clr(rtwdev, reg,
				  B_AX_SORT_NON_IDLE_ERR_INT_EN);

		reg = rtw89_mac_reg_by_idx(R_AX_DLE_CTRL, mac_idx);
		rtw89_write32_clr(rtwdev, reg,
				  B_AX_NO_RESERVE_PAGE_ERR_IMR |
				  B_AX_RXDATA_FSM_HANG_ERROR_IMR);

		/* PTCL IMR0 is written whole (not RMW): only these three
		 * error interrupts remain enabled.
		 */
		reg = rtw89_mac_reg_by_idx(R_AX_PTCL_IMR0, mac_idx);
		val = B_AX_F2PCMD_USER_ALLC_ERR_INT_EN |
		      B_AX_TX_RECORD_PKTID_ERR_INT_EN |
		      B_AX_FSM_TIMEOUT_ERR_INT_EN;
		rtw89_write32(rtwdev, reg, val);

		reg = rtw89_mac_reg_by_idx(R_AX_PHYINFO_ERR_IMR, mac_idx);
		rtw89_write32_set(rtwdev, reg,
				  B_AX_PHY_TXON_TIMEOUT_INT_EN |
				  B_AX_CCK_CCA_TIMEOUT_INT_EN |
				  B_AX_OFDM_CCA_TIMEOUT_INT_EN |
				  B_AX_DATA_ON_TIMEOUT_INT_EN |
				  B_AX_STS_ON_TIMEOUT_INT_EN |
				  B_AX_CSI_ON_TIMEOUT_INT_EN);

		/* RMAC: single read-modify-write so the enable/disable of
		 * the timeout interrupts lands atomically in one register
		 * update.
		 */
		reg = rtw89_mac_reg_by_idx(R_AX_RMAC_ERR_ISR, mac_idx);
		val = rtw89_read32(rtwdev, reg);
		val |= (B_AX_RMAC_RX_CSI_TIMEOUT_INT_EN |
			B_AX_RMAC_RX_TIMEOUT_INT_EN |
			B_AX_RMAC_CSI_TIMEOUT_INT_EN);
		val &= ~(B_AX_RMAC_CCA_TO_IDLE_TIMEOUT_INT_EN |
			 B_AX_RMAC_DATA_ON_TO_IDLE_TIMEOUT_INT_EN |
			 B_AX_RMAC_CCA_TIMEOUT_INT_EN |
			 B_AX_RMAC_DATA_ON_TIMEOUT_INT_EN);
		rtw89_write32(rtwdev, reg, val);
	} else {
		return -EINVAL;
	}
2434 return 0; 2435 } 2436 2437 static int rtw89_mac_dbcc_enable(struct rtw89_dev *rtwdev, bool enable) 2438 { 2439 int ret = 0; 2440 2441 if (enable) { 2442 ret = band1_enable(rtwdev); 2443 if (ret) { 2444 rtw89_err(rtwdev, "[ERR] band1_enable %d\n", ret); 2445 return ret; 2446 } 2447 2448 ret = rtw89_mac_enable_imr(rtwdev, RTW89_MAC_1, RTW89_CMAC_SEL); 2449 if (ret) { 2450 rtw89_err(rtwdev, "[ERR] enable CMAC1 IMR %d\n", ret); 2451 return ret; 2452 } 2453 } else { 2454 rtw89_err(rtwdev, "[ERR] disable dbcc is not implemented not\n"); 2455 return -EINVAL; 2456 } 2457 2458 return 0; 2459 } 2460 2461 static int set_host_rpr(struct rtw89_dev *rtwdev) 2462 { 2463 if (rtwdev->hci.type == RTW89_HCI_TYPE_PCIE) { 2464 rtw89_write32_mask(rtwdev, R_AX_WDRLS_CFG, 2465 B_AX_WDRLS_MODE_MASK, RTW89_RPR_MODE_POH); 2466 rtw89_write32_set(rtwdev, R_AX_RLSRPT0_CFG0, 2467 B_AX_RLSRPT0_FLTR_MAP_MASK); 2468 } else { 2469 rtw89_write32_mask(rtwdev, R_AX_WDRLS_CFG, 2470 B_AX_WDRLS_MODE_MASK, RTW89_RPR_MODE_STF); 2471 rtw89_write32_clr(rtwdev, R_AX_RLSRPT0_CFG0, 2472 B_AX_RLSRPT0_FLTR_MAP_MASK); 2473 } 2474 2475 rtw89_write32_mask(rtwdev, R_AX_RLSRPT0_CFG1, B_AX_RLSRPT0_AGGNUM_MASK, 30); 2476 rtw89_write32_mask(rtwdev, R_AX_RLSRPT0_CFG1, B_AX_RLSRPT0_TO_MASK, 255); 2477 2478 return 0; 2479 } 2480 2481 static int rtw89_mac_trx_init(struct rtw89_dev *rtwdev) 2482 { 2483 enum rtw89_qta_mode qta_mode = rtwdev->mac.qta_mode; 2484 int ret; 2485 2486 ret = dmac_init(rtwdev, 0); 2487 if (ret) { 2488 rtw89_err(rtwdev, "[ERR]DMAC init %d\n", ret); 2489 return ret; 2490 } 2491 2492 ret = cmac_init(rtwdev, 0); 2493 if (ret) { 2494 rtw89_err(rtwdev, "[ERR]CMAC%d init %d\n", 0, ret); 2495 return ret; 2496 } 2497 2498 if (is_qta_dbcc(rtwdev, qta_mode)) { 2499 ret = rtw89_mac_dbcc_enable(rtwdev, true); 2500 if (ret) { 2501 rtw89_err(rtwdev, "[ERR]dbcc_enable init %d\n", ret); 2502 return ret; 2503 } 2504 } 2505 2506 ret = rtw89_mac_enable_imr(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL); 2507 if (ret) { 2508 
		rtw89_err(rtwdev, "[ERR] enable DMAC IMR %d\n", ret);
		return ret;
	}

	ret = rtw89_mac_enable_imr(rtwdev, RTW89_MAC_0, RTW89_CMAC_SEL);
	if (ret) {
		rtw89_err(rtwdev, "[ERR] to enable CMAC0 IMR %d\n", ret);
		return ret;
	}

	ret = set_host_rpr(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "[ERR] set host rpr %d\n", ret);
		return ret;
	}

	return 0;
}

/* Halt the WCPU: drop the firmware-ready flag, then gate the WCPU enable
 * bit and its clock. Counterpart of rtw89_mac_enable_cpu().
 */
static void rtw89_mac_disable_cpu(struct rtw89_dev *rtwdev)
{
	clear_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);

	rtw89_write32_clr(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_WCPU_EN);
	rtw89_write32_clr(rtwdev, R_AX_SYS_CLK_CTRL, B_AX_CPU_CLK_EN);
}

/* Boot the WCPU.
 * @boot_reason: value written into B_AX_BOOT_REASON_MASK for the firmware.
 * @dlfw: true when a firmware download will follow; sets B_AX_WCPU_FWDL_EN
 *        and skips the immediate firmware-ready poll.
 *
 * Returns -EFAULT if the WCPU is already enabled, otherwise 0 or the
 * error from rtw89_fw_check_rdy(). Note the ordering: clock on, FWDL
 * state programmed, boot reason set, and only then B_AX_WCPU_EN raised.
 */
static int rtw89_mac_enable_cpu(struct rtw89_dev *rtwdev, u8 boot_reason,
				bool dlfw)
{
	u32 val;
	int ret;

	if (rtw89_read32(rtwdev, R_AX_PLATFORM_ENABLE) & B_AX_WCPU_EN)
		return -EFAULT;

	/* clear any stale halt-handshake state before boot */
	rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0);
	rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0);

	rtw89_write32_set(rtwdev, R_AX_SYS_CLK_CTRL, B_AX_CPU_CLK_EN);

	val = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL);
	val &= ~(B_AX_WCPU_FWDL_EN | B_AX_H2C_PATH_RDY | B_AX_FWDL_PATH_RDY);
	val = u32_replace_bits(val, RTW89_FWDL_INITIAL_STATE,
			       B_AX_WCPU_FWDL_STS_MASK);

	if (dlfw)
		val |= B_AX_WCPU_FWDL_EN;

	rtw89_write32(rtwdev, R_AX_WCPU_FW_CTRL, val);
	rtw89_write16_mask(rtwdev, R_AX_BOOT_REASON, B_AX_BOOT_REASON_MASK,
			   boot_reason);
	rtw89_write32_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_WCPU_EN);

	if (!dlfw) {
		/* no download phase: give the CPU a moment, then poll ready */
		mdelay(5);

		ret = rtw89_fw_check_rdy(rtwdev);
		if (ret)
			return ret;
	}

	return 0;
}

/* Minimal DMAC bring-up required before firmware download: enable MAC/
 * DMAC/dispatcher/packet-buffer functions (DLE and HFC init follow on
 * the next lines of this function).
 */
static int rtw89_mac_fw_dl_pre_init(struct rtw89_dev *rtwdev)
{
	u32 val;
	int ret;

	val = B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN | B_AX_DISPATCHER_EN |
	      B_AX_PKT_BUF_EN;
	rtw89_write32(rtwdev, R_AX_DMAC_FUNC_EN, val);
2581 2582 val = B_AX_DISPATCHER_CLK_EN; 2583 rtw89_write32(rtwdev, R_AX_DMAC_CLK_EN, val); 2584 2585 ret = dle_init(rtwdev, RTW89_QTA_DLFW, rtwdev->mac.qta_mode); 2586 if (ret) { 2587 rtw89_err(rtwdev, "[ERR]DLE pre init %d\n", ret); 2588 return ret; 2589 } 2590 2591 ret = hfc_init(rtwdev, true, false, true); 2592 if (ret) { 2593 rtw89_err(rtwdev, "[ERR]HCI FC pre init %d\n", ret); 2594 return ret; 2595 } 2596 2597 return ret; 2598 } 2599 2600 static void rtw89_mac_hci_func_en(struct rtw89_dev *rtwdev) 2601 { 2602 rtw89_write32_set(rtwdev, R_AX_HCI_FUNC_EN, 2603 B_AX_HCI_TXDMA_EN | B_AX_HCI_RXDMA_EN); 2604 } 2605 2606 void rtw89_mac_enable_bb_rf(struct rtw89_dev *rtwdev) 2607 { 2608 rtw89_write8_set(rtwdev, R_AX_SYS_FUNC_EN, 2609 B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN); 2610 rtw89_write32_set(rtwdev, R_AX_WLRF_CTRL, 2611 B_AX_WLRF1_CTRL_7 | B_AX_WLRF1_CTRL_1 | 2612 B_AX_WLRF_CTRL_7 | B_AX_WLRF_CTRL_1); 2613 rtw89_write8_set(rtwdev, R_AX_PHYREG_SET, PHYREG_SET_ALL_CYCLE); 2614 } 2615 2616 void rtw89_mac_disable_bb_rf(struct rtw89_dev *rtwdev) 2617 { 2618 rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN, 2619 B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN); 2620 rtw89_write32_clr(rtwdev, R_AX_WLRF_CTRL, 2621 B_AX_WLRF1_CTRL_7 | B_AX_WLRF1_CTRL_1 | 2622 B_AX_WLRF_CTRL_7 | B_AX_WLRF_CTRL_1); 2623 rtw89_write8_clr(rtwdev, R_AX_PHYREG_SET, PHYREG_SET_ALL_CYCLE); 2624 } 2625 2626 int rtw89_mac_partial_init(struct rtw89_dev *rtwdev) 2627 { 2628 int ret; 2629 2630 ret = rtw89_mac_power_switch(rtwdev, true); 2631 if (ret) { 2632 rtw89_mac_power_switch(rtwdev, false); 2633 ret = rtw89_mac_power_switch(rtwdev, true); 2634 if (ret) 2635 return ret; 2636 } 2637 2638 rtw89_mac_hci_func_en(rtwdev); 2639 2640 if (rtwdev->hci.ops->mac_pre_init) { 2641 ret = rtwdev->hci.ops->mac_pre_init(rtwdev); 2642 if (ret) 2643 return ret; 2644 } 2645 2646 ret = rtw89_mac_fw_dl_pre_init(rtwdev); 2647 if (ret) 2648 return ret; 2649 2650 rtw89_mac_disable_cpu(rtwdev); 2651 ret = rtw89_mac_enable_cpu(rtwdev, 0, 
true); 2652 if (ret) 2653 return ret; 2654 2655 ret = rtw89_fw_download(rtwdev, RTW89_FW_NORMAL); 2656 if (ret) 2657 return ret; 2658 2659 return 0; 2660 } 2661 2662 int rtw89_mac_init(struct rtw89_dev *rtwdev) 2663 { 2664 int ret; 2665 2666 ret = rtw89_mac_partial_init(rtwdev); 2667 if (ret) 2668 goto fail; 2669 2670 rtw89_mac_enable_bb_rf(rtwdev); 2671 2672 ret = rtw89_mac_sys_init(rtwdev); 2673 if (ret) 2674 goto fail; 2675 2676 ret = rtw89_mac_trx_init(rtwdev); 2677 if (ret) 2678 goto fail; 2679 2680 if (rtwdev->hci.ops->mac_post_init) { 2681 ret = rtwdev->hci.ops->mac_post_init(rtwdev); 2682 if (ret) 2683 goto fail; 2684 } 2685 2686 rtw89_fw_send_all_early_h2c(rtwdev); 2687 rtw89_fw_h2c_set_ofld_cfg(rtwdev); 2688 2689 return ret; 2690 fail: 2691 rtw89_mac_power_switch(rtwdev, false); 2692 2693 return ret; 2694 } 2695 2696 static void rtw89_mac_dmac_tbl_init(struct rtw89_dev *rtwdev, u8 macid) 2697 { 2698 u8 i; 2699 2700 for (i = 0; i < 4; i++) { 2701 rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, 2702 DMAC_TBL_BASE_ADDR + (macid << 4) + (i << 2)); 2703 rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY, 0); 2704 } 2705 } 2706 2707 static void rtw89_mac_cmac_tbl_init(struct rtw89_dev *rtwdev, u8 macid) 2708 { 2709 rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, 2710 CMAC_TBL_BASE_ADDR + macid * CCTL_INFO_SIZE); 2711 rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY, 0x4); 2712 rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 4, 0x400A0004); 2713 rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 8, 0); 2714 rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 12, 0); 2715 rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 16, 0); 2716 rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 20, 0xE43000B); 2717 rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 24, 0); 2718 rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 28, 0xB8109); 2719 } 2720 2721 int rtw89_mac_set_macid_pause(struct rtw89_dev *rtwdev, u8 macid, bool pause) 2722 { 2723 u8 sh = FIELD_GET(GENMASK(4, 0), macid); 2724 u8 grp = 
macid >> 5; 2725 int ret; 2726 2727 ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_CMAC_SEL); 2728 if (ret) 2729 return ret; 2730 2731 rtw89_fw_h2c_macid_pause(rtwdev, sh, grp, pause); 2732 2733 return 0; 2734 } 2735 2736 static const struct rtw89_port_reg rtw_port_base = { 2737 .port_cfg = R_AX_PORT_CFG_P0, 2738 .tbtt_prohib = R_AX_TBTT_PROHIB_P0, 2739 .bcn_area = R_AX_BCN_AREA_P0, 2740 .bcn_early = R_AX_BCNERLYINT_CFG_P0, 2741 .tbtt_early = R_AX_TBTTERLYINT_CFG_P0, 2742 .tbtt_agg = R_AX_TBTT_AGG_P0, 2743 .bcn_space = R_AX_BCN_SPACE_CFG_P0, 2744 .bcn_forcetx = R_AX_BCN_FORCETX_P0, 2745 .bcn_err_cnt = R_AX_BCN_ERR_CNT_P0, 2746 .bcn_err_flag = R_AX_BCN_ERR_FLAG_P0, 2747 .dtim_ctrl = R_AX_DTIM_CTRL_P0, 2748 .tbtt_shift = R_AX_TBTT_SHIFT_P0, 2749 .bcn_cnt_tmr = R_AX_BCN_CNT_TMR_P0, 2750 .tsftr_l = R_AX_TSFTR_LOW_P0, 2751 .tsftr_h = R_AX_TSFTR_HIGH_P0 2752 }; 2753 2754 #define BCN_INTERVAL 100 2755 #define BCN_ERLY_DEF 160 2756 #define BCN_SETUP_DEF 2 2757 #define BCN_HOLD_DEF 200 2758 #define BCN_MASK_DEF 0 2759 #define TBTT_ERLY_DEF 5 2760 #define BCN_SET_UNIT 32 2761 #define BCN_ERLY_SET_DLY (10 * 2) 2762 2763 static void rtw89_mac_port_cfg_func_sw(struct rtw89_dev *rtwdev, 2764 struct rtw89_vif *rtwvif) 2765 { 2766 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 2767 const struct rtw89_port_reg *p = &rtw_port_base; 2768 2769 if (!rtw89_read32_port_mask(rtwdev, rtwvif, p->port_cfg, B_AX_PORT_FUNC_EN)) 2770 return; 2771 2772 rtw89_write32_port_clr(rtwdev, rtwvif, p->tbtt_prohib, B_AX_TBTT_SETUP_MASK); 2773 rtw89_write32_port_mask(rtwdev, rtwvif, p->tbtt_prohib, B_AX_TBTT_HOLD_MASK, 1); 2774 rtw89_write16_port_clr(rtwdev, rtwvif, p->tbtt_early, B_AX_TBTTERLY_MASK); 2775 rtw89_write16_port_clr(rtwdev, rtwvif, p->bcn_early, B_AX_BCNERLY_MASK); 2776 2777 msleep(vif->bss_conf.beacon_int + 1); 2778 2779 rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_PORT_FUNC_EN | 2780 B_AX_BRK_SETUP); 2781 rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, 
B_AX_TSFTR_RST); 2782 rtw89_write32_port(rtwdev, rtwvif, p->bcn_cnt_tmr, 0); 2783 } 2784 2785 static void rtw89_mac_port_cfg_tx_rpt(struct rtw89_dev *rtwdev, 2786 struct rtw89_vif *rtwvif, bool en) 2787 { 2788 const struct rtw89_port_reg *p = &rtw_port_base; 2789 2790 if (en) 2791 rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_TXBCN_RPT_EN); 2792 else 2793 rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_TXBCN_RPT_EN); 2794 } 2795 2796 static void rtw89_mac_port_cfg_rx_rpt(struct rtw89_dev *rtwdev, 2797 struct rtw89_vif *rtwvif, bool en) 2798 { 2799 const struct rtw89_port_reg *p = &rtw_port_base; 2800 2801 if (en) 2802 rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_RXBCN_RPT_EN); 2803 else 2804 rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_RXBCN_RPT_EN); 2805 } 2806 2807 static void rtw89_mac_port_cfg_net_type(struct rtw89_dev *rtwdev, 2808 struct rtw89_vif *rtwvif) 2809 { 2810 const struct rtw89_port_reg *p = &rtw_port_base; 2811 2812 rtw89_write32_port_mask(rtwdev, rtwvif, p->port_cfg, B_AX_NET_TYPE_MASK, 2813 rtwvif->net_type); 2814 } 2815 2816 static void rtw89_mac_port_cfg_bcn_prct(struct rtw89_dev *rtwdev, 2817 struct rtw89_vif *rtwvif) 2818 { 2819 const struct rtw89_port_reg *p = &rtw_port_base; 2820 bool en = rtwvif->net_type != RTW89_NET_TYPE_NO_LINK; 2821 u32 bits = B_AX_TBTT_PROHIB_EN | B_AX_BRK_SETUP; 2822 2823 if (en) 2824 rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, bits); 2825 else 2826 rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, bits); 2827 } 2828 2829 static void rtw89_mac_port_cfg_rx_sw(struct rtw89_dev *rtwdev, 2830 struct rtw89_vif *rtwvif) 2831 { 2832 const struct rtw89_port_reg *p = &rtw_port_base; 2833 bool en = rtwvif->net_type == RTW89_NET_TYPE_INFRA || 2834 rtwvif->net_type == RTW89_NET_TYPE_AD_HOC; 2835 u32 bit = B_AX_RX_BSSID_FIT_EN; 2836 2837 if (en) 2838 rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, bit); 2839 else 2840 rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, 
bit); 2841 } 2842 2843 static void rtw89_mac_port_cfg_rx_sync(struct rtw89_dev *rtwdev, 2844 struct rtw89_vif *rtwvif) 2845 { 2846 const struct rtw89_port_reg *p = &rtw_port_base; 2847 bool en = rtwvif->net_type == RTW89_NET_TYPE_INFRA || 2848 rtwvif->net_type == RTW89_NET_TYPE_AD_HOC; 2849 2850 if (en) 2851 rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_TSF_UDT_EN); 2852 else 2853 rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_TSF_UDT_EN); 2854 } 2855 2856 static void rtw89_mac_port_cfg_tx_sw(struct rtw89_dev *rtwdev, 2857 struct rtw89_vif *rtwvif) 2858 { 2859 const struct rtw89_port_reg *p = &rtw_port_base; 2860 bool en = rtwvif->net_type == RTW89_NET_TYPE_AP_MODE || 2861 rtwvif->net_type == RTW89_NET_TYPE_AD_HOC; 2862 2863 if (en) 2864 rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_BCNTX_EN); 2865 else 2866 rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_BCNTX_EN); 2867 } 2868 2869 static void rtw89_mac_port_cfg_bcn_intv(struct rtw89_dev *rtwdev, 2870 struct rtw89_vif *rtwvif) 2871 { 2872 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 2873 const struct rtw89_port_reg *p = &rtw_port_base; 2874 u16 bcn_int = vif->bss_conf.beacon_int ? vif->bss_conf.beacon_int : BCN_INTERVAL; 2875 2876 rtw89_write32_port_mask(rtwdev, rtwvif, p->bcn_space, B_AX_BCN_SPACE_MASK, 2877 bcn_int); 2878 } 2879 2880 static void rtw89_mac_port_cfg_hiq_win(struct rtw89_dev *rtwdev, 2881 struct rtw89_vif *rtwvif) 2882 { 2883 static const u32 hiq_win_addr[RTW89_PORT_NUM] = { 2884 R_AX_P0MB_HGQ_WINDOW_CFG_0, R_AX_PORT_HGQ_WINDOW_CFG, 2885 R_AX_PORT_HGQ_WINDOW_CFG + 1, R_AX_PORT_HGQ_WINDOW_CFG + 2, 2886 R_AX_PORT_HGQ_WINDOW_CFG + 3, 2887 }; 2888 u8 win = rtwvif->net_type == RTW89_NET_TYPE_AP_MODE ? 
16 : 0; 2889 u8 port = rtwvif->port; 2890 u32 reg; 2891 2892 reg = rtw89_mac_reg_by_idx(hiq_win_addr[port], rtwvif->mac_idx); 2893 rtw89_write8(rtwdev, reg, win); 2894 } 2895 2896 static void rtw89_mac_port_cfg_hiq_dtim(struct rtw89_dev *rtwdev, 2897 struct rtw89_vif *rtwvif) 2898 { 2899 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 2900 const struct rtw89_port_reg *p = &rtw_port_base; 2901 u32 addr; 2902 2903 addr = rtw89_mac_reg_by_idx(R_AX_MD_TSFT_STMP_CTL, rtwvif->mac_idx); 2904 rtw89_write8_set(rtwdev, addr, B_AX_UPD_HGQMD | B_AX_UPD_TIMIE); 2905 2906 rtw89_write16_port_mask(rtwdev, rtwvif, p->dtim_ctrl, B_AX_DTIM_NUM_MASK, 2907 vif->bss_conf.dtim_period); 2908 } 2909 2910 static void rtw89_mac_port_cfg_bcn_setup_time(struct rtw89_dev *rtwdev, 2911 struct rtw89_vif *rtwvif) 2912 { 2913 const struct rtw89_port_reg *p = &rtw_port_base; 2914 2915 rtw89_write32_port_mask(rtwdev, rtwvif, p->tbtt_prohib, 2916 B_AX_TBTT_SETUP_MASK, BCN_SETUP_DEF); 2917 } 2918 2919 static void rtw89_mac_port_cfg_bcn_hold_time(struct rtw89_dev *rtwdev, 2920 struct rtw89_vif *rtwvif) 2921 { 2922 const struct rtw89_port_reg *p = &rtw_port_base; 2923 2924 rtw89_write32_port_mask(rtwdev, rtwvif, p->tbtt_prohib, 2925 B_AX_TBTT_HOLD_MASK, BCN_HOLD_DEF); 2926 } 2927 2928 static void rtw89_mac_port_cfg_bcn_mask_area(struct rtw89_dev *rtwdev, 2929 struct rtw89_vif *rtwvif) 2930 { 2931 const struct rtw89_port_reg *p = &rtw_port_base; 2932 2933 rtw89_write32_port_mask(rtwdev, rtwvif, p->bcn_area, 2934 B_AX_BCN_MSK_AREA_MASK, BCN_MASK_DEF); 2935 } 2936 2937 static void rtw89_mac_port_cfg_tbtt_early(struct rtw89_dev *rtwdev, 2938 struct rtw89_vif *rtwvif) 2939 { 2940 const struct rtw89_port_reg *p = &rtw_port_base; 2941 2942 rtw89_write16_port_mask(rtwdev, rtwvif, p->tbtt_early, 2943 B_AX_TBTTERLY_MASK, TBTT_ERLY_DEF); 2944 } 2945 2946 static void rtw89_mac_port_cfg_bss_color(struct rtw89_dev *rtwdev, 2947 struct rtw89_vif *rtwvif) 2948 { 2949 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 
2950 static const u32 masks[RTW89_PORT_NUM] = { 2951 B_AX_BSS_COLOB_AX_PORT_0_MASK, B_AX_BSS_COLOB_AX_PORT_1_MASK, 2952 B_AX_BSS_COLOB_AX_PORT_2_MASK, B_AX_BSS_COLOB_AX_PORT_3_MASK, 2953 B_AX_BSS_COLOB_AX_PORT_4_MASK, 2954 }; 2955 u8 port = rtwvif->port; 2956 u32 reg_base; 2957 u32 reg; 2958 u8 bss_color; 2959 2960 bss_color = vif->bss_conf.he_bss_color.color; 2961 reg_base = port >= 4 ? R_AX_PTCL_BSS_COLOR_1 : R_AX_PTCL_BSS_COLOR_0; 2962 reg = rtw89_mac_reg_by_idx(reg_base, rtwvif->mac_idx); 2963 rtw89_write32_mask(rtwdev, reg, masks[port], bss_color); 2964 } 2965 2966 static void rtw89_mac_port_cfg_mbssid(struct rtw89_dev *rtwdev, 2967 struct rtw89_vif *rtwvif) 2968 { 2969 u8 port = rtwvif->port; 2970 u32 reg; 2971 2972 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) 2973 return; 2974 2975 if (port == 0) { 2976 reg = rtw89_mac_reg_by_idx(R_AX_MBSSID_CTRL, rtwvif->mac_idx); 2977 rtw89_write32_clr(rtwdev, reg, B_AX_P0MB_ALL_MASK); 2978 } 2979 } 2980 2981 static void rtw89_mac_port_cfg_hiq_drop(struct rtw89_dev *rtwdev, 2982 struct rtw89_vif *rtwvif) 2983 { 2984 u8 port = rtwvif->port; 2985 u32 reg; 2986 u32 val; 2987 2988 reg = rtw89_mac_reg_by_idx(R_AX_MBSSID_DROP_0, rtwvif->mac_idx); 2989 val = rtw89_read32(rtwdev, reg); 2990 val &= ~FIELD_PREP(B_AX_PORT_DROP_4_0_MASK, BIT(port)); 2991 if (port == 0) 2992 val &= ~BIT(0); 2993 rtw89_write32(rtwdev, reg, val); 2994 } 2995 2996 static void rtw89_mac_port_cfg_func_en(struct rtw89_dev *rtwdev, 2997 struct rtw89_vif *rtwvif) 2998 { 2999 const struct rtw89_port_reg *p = &rtw_port_base; 3000 3001 rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_PORT_FUNC_EN); 3002 } 3003 3004 static void rtw89_mac_port_cfg_bcn_early(struct rtw89_dev *rtwdev, 3005 struct rtw89_vif *rtwvif) 3006 { 3007 const struct rtw89_port_reg *p = &rtw_port_base; 3008 3009 rtw89_write32_port_mask(rtwdev, rtwvif, p->bcn_early, B_AX_BCNERLY_MASK, 3010 BCN_ERLY_DEF); 3011 } 3012 3013 int rtw89_mac_vif_init(struct rtw89_dev *rtwdev, struct 
rtw89_vif *rtwvif) 3014 { 3015 int ret; 3016 3017 ret = rtw89_mac_port_update(rtwdev, rtwvif); 3018 if (ret) 3019 return ret; 3020 3021 rtw89_mac_dmac_tbl_init(rtwdev, rtwvif->mac_id); 3022 rtw89_mac_cmac_tbl_init(rtwdev, rtwvif->mac_id); 3023 3024 ret = rtw89_mac_set_macid_pause(rtwdev, rtwvif->mac_id, false); 3025 if (ret) 3026 return ret; 3027 3028 ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, NULL, RTW89_ROLE_CREATE); 3029 if (ret) 3030 return ret; 3031 3032 ret = rtw89_cam_init(rtwdev, rtwvif); 3033 if (ret) 3034 return ret; 3035 3036 ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL); 3037 if (ret) 3038 return ret; 3039 3040 ret = rtw89_fw_h2c_default_cmac_tbl(rtwdev, rtwvif); 3041 if (ret) 3042 return ret; 3043 3044 return 0; 3045 } 3046 3047 int rtw89_mac_vif_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 3048 { 3049 int ret; 3050 3051 ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, NULL, RTW89_ROLE_REMOVE); 3052 if (ret) 3053 return ret; 3054 3055 rtw89_cam_deinit(rtwdev, rtwvif); 3056 3057 ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL); 3058 if (ret) 3059 return ret; 3060 3061 return 0; 3062 } 3063 3064 int rtw89_mac_port_update(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 3065 { 3066 u8 port = rtwvif->port; 3067 3068 if (port >= RTW89_PORT_NUM) 3069 return -EINVAL; 3070 3071 rtw89_mac_port_cfg_func_sw(rtwdev, rtwvif); 3072 rtw89_mac_port_cfg_tx_rpt(rtwdev, rtwvif, false); 3073 rtw89_mac_port_cfg_rx_rpt(rtwdev, rtwvif, false); 3074 rtw89_mac_port_cfg_net_type(rtwdev, rtwvif); 3075 rtw89_mac_port_cfg_bcn_prct(rtwdev, rtwvif); 3076 rtw89_mac_port_cfg_rx_sw(rtwdev, rtwvif); 3077 rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif); 3078 rtw89_mac_port_cfg_tx_sw(rtwdev, rtwvif); 3079 rtw89_mac_port_cfg_bcn_intv(rtwdev, rtwvif); 3080 rtw89_mac_port_cfg_hiq_win(rtwdev, rtwvif); 3081 rtw89_mac_port_cfg_hiq_dtim(rtwdev, rtwvif); 3082 rtw89_mac_port_cfg_hiq_drop(rtwdev, rtwvif); 3083 rtw89_mac_port_cfg_bcn_setup_time(rtwdev, rtwvif); 3084 
rtw89_mac_port_cfg_bcn_hold_time(rtwdev, rtwvif); 3085 rtw89_mac_port_cfg_bcn_mask_area(rtwdev, rtwvif); 3086 rtw89_mac_port_cfg_tbtt_early(rtwdev, rtwvif); 3087 rtw89_mac_port_cfg_bss_color(rtwdev, rtwvif); 3088 rtw89_mac_port_cfg_mbssid(rtwdev, rtwvif); 3089 rtw89_mac_port_cfg_func_en(rtwdev, rtwvif); 3090 fsleep(BCN_ERLY_SET_DLY); 3091 rtw89_mac_port_cfg_bcn_early(rtwdev, rtwvif); 3092 3093 return 0; 3094 } 3095 3096 int rtw89_mac_add_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 3097 { 3098 int ret; 3099 3100 rtwvif->mac_id = rtw89_core_acquire_bit_map(rtwdev->mac_id_map, 3101 RTW89_MAX_MAC_ID_NUM); 3102 if (rtwvif->mac_id == RTW89_MAX_MAC_ID_NUM) 3103 return -ENOSPC; 3104 3105 ret = rtw89_mac_vif_init(rtwdev, rtwvif); 3106 if (ret) 3107 goto release_mac_id; 3108 3109 return 0; 3110 3111 release_mac_id: 3112 rtw89_core_release_bit_map(rtwdev->mac_id_map, rtwvif->mac_id); 3113 3114 return ret; 3115 } 3116 3117 int rtw89_mac_remove_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 3118 { 3119 int ret; 3120 3121 ret = rtw89_mac_vif_deinit(rtwdev, rtwvif); 3122 rtw89_core_release_bit_map(rtwdev->mac_id_map, rtwvif->mac_id); 3123 3124 return ret; 3125 } 3126 3127 static void 3128 rtw89_mac_c2h_macid_pause(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 3129 { 3130 } 3131 3132 static void 3133 rtw89_mac_c2h_rec_ack(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 3134 { 3135 rtw89_debug(rtwdev, RTW89_DBG_FW, 3136 "C2H rev ack recv, cat: %d, class: %d, func: %d, seq : %d\n", 3137 RTW89_GET_MAC_C2H_REV_ACK_CAT(c2h->data), 3138 RTW89_GET_MAC_C2H_REV_ACK_CLASS(c2h->data), 3139 RTW89_GET_MAC_C2H_REV_ACK_FUNC(c2h->data), 3140 RTW89_GET_MAC_C2H_REV_ACK_H2C_SEQ(c2h->data)); 3141 } 3142 3143 static void 3144 rtw89_mac_c2h_done_ack(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 3145 { 3146 rtw89_debug(rtwdev, RTW89_DBG_FW, 3147 "C2H done ack recv, cat: %d, class: %d, func: %d, ret: %d, seq : %d\n", 3148 
RTW89_GET_MAC_C2H_DONE_ACK_CAT(c2h->data), 3149 RTW89_GET_MAC_C2H_DONE_ACK_CLASS(c2h->data), 3150 RTW89_GET_MAC_C2H_DONE_ACK_FUNC(c2h->data), 3151 RTW89_GET_MAC_C2H_DONE_ACK_H2C_RETURN(c2h->data), 3152 RTW89_GET_MAC_C2H_DONE_ACK_H2C_SEQ(c2h->data)); 3153 } 3154 3155 static void 3156 rtw89_mac_c2h_log(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 3157 { 3158 rtw89_info(rtwdev, "%*s", RTW89_GET_C2H_LOG_LEN(len), 3159 RTW89_GET_C2H_LOG_SRT_PRT(c2h->data)); 3160 } 3161 3162 static void 3163 rtw89_mac_c2h_bcn_cnt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) 3164 { 3165 } 3166 3167 static 3168 void (* const rtw89_mac_c2h_ofld_handler[])(struct rtw89_dev *rtwdev, 3169 struct sk_buff *c2h, u32 len) = { 3170 [RTW89_MAC_C2H_FUNC_EFUSE_DUMP] = NULL, 3171 [RTW89_MAC_C2H_FUNC_READ_RSP] = NULL, 3172 [RTW89_MAC_C2H_FUNC_PKT_OFLD_RSP] = NULL, 3173 [RTW89_MAC_C2H_FUNC_BCN_RESEND] = NULL, 3174 [RTW89_MAC_C2H_FUNC_MACID_PAUSE] = rtw89_mac_c2h_macid_pause, 3175 }; 3176 3177 static 3178 void (* const rtw89_mac_c2h_info_handler[])(struct rtw89_dev *rtwdev, 3179 struct sk_buff *c2h, u32 len) = { 3180 [RTW89_MAC_C2H_FUNC_REC_ACK] = rtw89_mac_c2h_rec_ack, 3181 [RTW89_MAC_C2H_FUNC_DONE_ACK] = rtw89_mac_c2h_done_ack, 3182 [RTW89_MAC_C2H_FUNC_C2H_LOG] = rtw89_mac_c2h_log, 3183 [RTW89_MAC_C2H_FUNC_BCN_CNT] = rtw89_mac_c2h_bcn_cnt, 3184 }; 3185 3186 void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb, 3187 u32 len, u8 class, u8 func) 3188 { 3189 void (*handler)(struct rtw89_dev *rtwdev, 3190 struct sk_buff *c2h, u32 len) = NULL; 3191 3192 switch (class) { 3193 case RTW89_MAC_C2H_CLASS_INFO: 3194 if (func < RTW89_MAC_C2H_FUNC_INFO_MAX) 3195 handler = rtw89_mac_c2h_info_handler[func]; 3196 break; 3197 case RTW89_MAC_C2H_CLASS_OFLD: 3198 if (func < RTW89_MAC_C2H_FUNC_OFLD_MAX) 3199 handler = rtw89_mac_c2h_ofld_handler[func]; 3200 break; 3201 case RTW89_MAC_C2H_CLASS_FWDBG: 3202 return; 3203 default: 3204 rtw89_info(rtwdev, "c2h class %d not support\n", 
class); 3205 return; 3206 } 3207 if (!handler) { 3208 rtw89_info(rtwdev, "c2h class %d func %d not support\n", class, 3209 func); 3210 return; 3211 } 3212 handler(rtwdev, skb, len); 3213 } 3214 3215 bool rtw89_mac_get_txpwr_cr(struct rtw89_dev *rtwdev, 3216 enum rtw89_phy_idx phy_idx, 3217 u32 reg_base, u32 *cr) 3218 { 3219 const struct rtw89_dle_mem *dle_mem = rtwdev->chip->dle_mem; 3220 enum rtw89_qta_mode mode = dle_mem->mode; 3221 u32 addr = rtw89_mac_reg_by_idx(reg_base, phy_idx); 3222 3223 if (addr < R_AX_PWR_RATE_CTRL || addr > CMAC1_END_ADDR) { 3224 rtw89_err(rtwdev, "[TXPWR] addr=0x%x exceed txpwr cr\n", 3225 addr); 3226 goto error; 3227 } 3228 3229 if (addr >= CMAC1_START_ADDR && addr <= CMAC1_END_ADDR) 3230 if (mode == RTW89_QTA_SCC) { 3231 rtw89_err(rtwdev, 3232 "[TXPWR] addr=0x%x but hw not enable\n", 3233 addr); 3234 goto error; 3235 } 3236 3237 *cr = addr; 3238 return true; 3239 3240 error: 3241 rtw89_err(rtwdev, "[TXPWR] check txpwr cr 0x%x(phy%d) fail\n", 3242 addr, phy_idx); 3243 3244 return false; 3245 } 3246 EXPORT_SYMBOL(rtw89_mac_get_txpwr_cr); 3247 3248 int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable) 3249 { 3250 u32 reg = rtw89_mac_reg_by_idx(R_AX_PPDU_STAT, mac_idx); 3251 int ret = 0; 3252 3253 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 3254 if (ret) 3255 return ret; 3256 3257 if (!enable) { 3258 rtw89_write32_clr(rtwdev, reg, B_AX_PPDU_STAT_RPT_EN); 3259 return ret; 3260 } 3261 3262 rtw89_write32(rtwdev, reg, B_AX_PPDU_STAT_RPT_EN | 3263 B_AX_APP_MAC_INFO_RPT | 3264 B_AX_APP_RX_CNT_RPT | B_AX_APP_PLCP_HDR_RPT | 3265 B_AX_PPDU_STAT_RPT_CRC32); 3266 rtw89_write32_mask(rtwdev, R_AX_HW_RPT_FWD, B_AX_FWD_PPDU_STAT_MASK, 3267 RTW89_PRPT_DEST_HOST); 3268 3269 return ret; 3270 } 3271 EXPORT_SYMBOL(rtw89_mac_cfg_ppdu_status); 3272 3273 void rtw89_mac_update_rts_threshold(struct rtw89_dev *rtwdev, u8 mac_idx) 3274 { 3275 #define MAC_AX_TIME_TH_SH 5 3276 #define MAC_AX_LEN_TH_SH 4 3277 #define 
MAC_AX_TIME_TH_MAX 255 3278 #define MAC_AX_LEN_TH_MAX 255 3279 #define MAC_AX_TIME_TH_DEF 88 3280 #define MAC_AX_LEN_TH_DEF 4080 3281 struct ieee80211_hw *hw = rtwdev->hw; 3282 u32 rts_threshold = hw->wiphy->rts_threshold; 3283 u32 time_th, len_th; 3284 u32 reg; 3285 3286 if (rts_threshold == (u32)-1) { 3287 time_th = MAC_AX_TIME_TH_DEF; 3288 len_th = MAC_AX_LEN_TH_DEF; 3289 } else { 3290 time_th = MAC_AX_TIME_TH_MAX << MAC_AX_TIME_TH_SH; 3291 len_th = rts_threshold; 3292 } 3293 3294 time_th = min_t(u32, time_th >> MAC_AX_TIME_TH_SH, MAC_AX_TIME_TH_MAX); 3295 len_th = min_t(u32, len_th >> MAC_AX_LEN_TH_SH, MAC_AX_LEN_TH_MAX); 3296 3297 reg = rtw89_mac_reg_by_idx(R_AX_AGG_LEN_HT_0, mac_idx); 3298 rtw89_write16_mask(rtwdev, reg, B_AX_RTS_TXTIME_TH_MASK, time_th); 3299 rtw89_write16_mask(rtwdev, reg, B_AX_RTS_LEN_TH_MASK, len_th); 3300 } 3301 3302 void rtw89_mac_flush_txq(struct rtw89_dev *rtwdev, u32 queues, bool drop) 3303 { 3304 bool empty; 3305 int ret; 3306 3307 if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) 3308 return; 3309 3310 ret = read_poll_timeout(dle_is_txq_empty, empty, empty, 3311 10000, 200000, false, rtwdev); 3312 if (ret && !drop && (rtwdev->total_sta_assoc || rtwdev->scanning)) 3313 rtw89_info(rtwdev, "timed out to flush queues\n"); 3314 } 3315 3316 int rtw89_mac_coex_init(struct rtw89_dev *rtwdev, const struct rtw89_mac_ax_coex *coex) 3317 { 3318 u8 val; 3319 u16 val16; 3320 u32 val32; 3321 int ret; 3322 3323 rtw89_write8_set(rtwdev, R_AX_GPIO_MUXCFG, B_AX_ENBT); 3324 rtw89_write8_set(rtwdev, R_AX_BTC_FUNC_EN, B_AX_PTA_WL_TX_EN); 3325 rtw89_write8_set(rtwdev, R_AX_BT_COEX_CFG_2 + 1, B_AX_GNT_BT_POLARITY >> 8); 3326 rtw89_write8_set(rtwdev, R_AX_CSR_MODE, B_AX_STATIS_BT_EN | B_AX_WL_ACT_MSK); 3327 rtw89_write8_set(rtwdev, R_AX_CSR_MODE + 2, B_AX_BT_CNT_RST >> 16); 3328 rtw89_write8_clr(rtwdev, R_AX_TRXPTCL_RESP_0 + 3, B_AX_RSP_CHK_BTCCA >> 24); 3329 3330 val16 = rtw89_read16(rtwdev, R_AX_CCA_CFG_0); 3331 val16 = (val16 | B_AX_BTCCA_EN) & 
~B_AX_BTCCA_BRK_TXOP_EN; 3332 rtw89_write16(rtwdev, R_AX_CCA_CFG_0, val16); 3333 3334 ret = rtw89_mac_read_lte(rtwdev, R_AX_LTE_SW_CFG_2, &val32); 3335 if (ret) { 3336 rtw89_err(rtwdev, "Read R_AX_LTE_SW_CFG_2 fail!\n"); 3337 return ret; 3338 } 3339 val32 = val32 & B_AX_WL_RX_CTRL; 3340 ret = rtw89_mac_write_lte(rtwdev, R_AX_LTE_SW_CFG_2, val32); 3341 if (ret) { 3342 rtw89_err(rtwdev, "Write R_AX_LTE_SW_CFG_2 fail!\n"); 3343 return ret; 3344 } 3345 3346 switch (coex->pta_mode) { 3347 case RTW89_MAC_AX_COEX_RTK_MODE: 3348 val = rtw89_read8(rtwdev, R_AX_GPIO_MUXCFG); 3349 val &= ~B_AX_BTMODE_MASK; 3350 val |= FIELD_PREP(B_AX_BTMODE_MASK, MAC_AX_BT_MODE_0_3); 3351 rtw89_write8(rtwdev, R_AX_GPIO_MUXCFG, val); 3352 3353 val = rtw89_read8(rtwdev, R_AX_TDMA_MODE); 3354 rtw89_write8(rtwdev, R_AX_TDMA_MODE, val | B_AX_RTK_BT_ENABLE); 3355 3356 val = rtw89_read8(rtwdev, R_AX_BT_COEX_CFG_5); 3357 val &= ~B_AX_BT_RPT_SAMPLE_RATE_MASK; 3358 val |= FIELD_PREP(B_AX_BT_RPT_SAMPLE_RATE_MASK, MAC_AX_RTK_RATE); 3359 rtw89_write8(rtwdev, R_AX_BT_COEX_CFG_5, val); 3360 break; 3361 case RTW89_MAC_AX_COEX_CSR_MODE: 3362 val = rtw89_read8(rtwdev, R_AX_GPIO_MUXCFG); 3363 val &= ~B_AX_BTMODE_MASK; 3364 val |= FIELD_PREP(B_AX_BTMODE_MASK, MAC_AX_BT_MODE_2); 3365 rtw89_write8(rtwdev, R_AX_GPIO_MUXCFG, val); 3366 3367 val16 = rtw89_read16(rtwdev, R_AX_CSR_MODE); 3368 val16 &= ~B_AX_BT_PRI_DETECT_TO_MASK; 3369 val16 |= FIELD_PREP(B_AX_BT_PRI_DETECT_TO_MASK, MAC_AX_CSR_PRI_TO); 3370 val16 &= ~B_AX_BT_TRX_INIT_DETECT_MASK; 3371 val16 |= FIELD_PREP(B_AX_BT_TRX_INIT_DETECT_MASK, MAC_AX_CSR_TRX_TO); 3372 val16 &= ~B_AX_BT_STAT_DELAY_MASK; 3373 val16 |= FIELD_PREP(B_AX_BT_STAT_DELAY_MASK, MAC_AX_CSR_DELAY); 3374 val16 |= B_AX_ENHANCED_BT; 3375 rtw89_write16(rtwdev, R_AX_CSR_MODE, val16); 3376 3377 rtw89_write8(rtwdev, R_AX_BT_COEX_CFG_2, MAC_AX_CSR_RATE); 3378 break; 3379 default: 3380 return -EINVAL; 3381 } 3382 3383 switch (coex->direction) { 3384 case RTW89_MAC_AX_COEX_INNER: 3385 val = 
rtw89_read8(rtwdev, R_AX_GPIO_MUXCFG + 1); 3386 val = (val & ~BIT(2)) | BIT(1); 3387 rtw89_write8(rtwdev, R_AX_GPIO_MUXCFG + 1, val); 3388 break; 3389 case RTW89_MAC_AX_COEX_OUTPUT: 3390 val = rtw89_read8(rtwdev, R_AX_GPIO_MUXCFG + 1); 3391 val = val | BIT(1) | BIT(0); 3392 rtw89_write8(rtwdev, R_AX_GPIO_MUXCFG + 1, val); 3393 break; 3394 case RTW89_MAC_AX_COEX_INPUT: 3395 val = rtw89_read8(rtwdev, R_AX_GPIO_MUXCFG + 1); 3396 val = val & ~(BIT(2) | BIT(1)); 3397 rtw89_write8(rtwdev, R_AX_GPIO_MUXCFG + 1, val); 3398 break; 3399 default: 3400 return -EINVAL; 3401 } 3402 3403 return 0; 3404 } 3405 EXPORT_SYMBOL(rtw89_mac_coex_init); 3406 3407 int rtw89_mac_cfg_gnt(struct rtw89_dev *rtwdev, 3408 const struct rtw89_mac_ax_coex_gnt *gnt_cfg) 3409 { 3410 u32 val, ret; 3411 3412 ret = rtw89_mac_read_lte(rtwdev, R_AX_LTE_SW_CFG_1, &val); 3413 if (ret) { 3414 rtw89_err(rtwdev, "Read LTE fail!\n"); 3415 return ret; 3416 } 3417 val = (gnt_cfg->band[0].gnt_bt ? 3418 B_AX_GNT_BT_RFC_S0_SW_VAL | B_AX_GNT_BT_BB_S0_SW_VAL : 0) | 3419 (gnt_cfg->band[0].gnt_bt_sw_en ? 3420 B_AX_GNT_BT_RFC_S0_SW_CTRL | B_AX_GNT_BT_BB_S0_SW_CTRL : 0) | 3421 (gnt_cfg->band[0].gnt_wl ? 3422 B_AX_GNT_WL_RFC_S0_SW_VAL | B_AX_GNT_WL_BB_S0_SW_VAL : 0) | 3423 (gnt_cfg->band[0].gnt_wl_sw_en ? 3424 B_AX_GNT_WL_RFC_S0_SW_CTRL | B_AX_GNT_WL_BB_S0_SW_CTRL : 0) | 3425 (gnt_cfg->band[1].gnt_bt ? 3426 B_AX_GNT_BT_RFC_S1_SW_VAL | B_AX_GNT_BT_BB_S1_SW_VAL : 0) | 3427 (gnt_cfg->band[1].gnt_bt_sw_en ? 3428 B_AX_GNT_BT_RFC_S1_SW_CTRL | B_AX_GNT_BT_BB_S1_SW_CTRL : 0) | 3429 (gnt_cfg->band[1].gnt_wl ? 3430 B_AX_GNT_WL_RFC_S1_SW_VAL | B_AX_GNT_WL_BB_S1_SW_VAL : 0) | 3431 (gnt_cfg->band[1].gnt_wl_sw_en ? 
3432 B_AX_GNT_WL_RFC_S1_SW_CTRL | B_AX_GNT_WL_BB_S1_SW_CTRL : 0); 3433 ret = rtw89_mac_write_lte(rtwdev, R_AX_LTE_SW_CFG_1, val); 3434 if (ret) { 3435 rtw89_err(rtwdev, "Write LTE fail!\n"); 3436 return ret; 3437 } 3438 3439 return 0; 3440 } 3441 3442 int rtw89_mac_cfg_plt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt) 3443 { 3444 u32 reg; 3445 u16 val; 3446 int ret; 3447 3448 ret = rtw89_mac_check_mac_en(rtwdev, plt->band, RTW89_CMAC_SEL); 3449 if (ret) 3450 return ret; 3451 3452 reg = rtw89_mac_reg_by_idx(R_AX_BT_PLT, plt->band); 3453 val = (plt->tx & RTW89_MAC_AX_PLT_LTE_RX ? B_AX_TX_PLT_GNT_LTE_RX : 0) | 3454 (plt->tx & RTW89_MAC_AX_PLT_GNT_BT_TX ? B_AX_TX_PLT_GNT_BT_TX : 0) | 3455 (plt->tx & RTW89_MAC_AX_PLT_GNT_BT_RX ? B_AX_TX_PLT_GNT_BT_RX : 0) | 3456 (plt->tx & RTW89_MAC_AX_PLT_GNT_WL ? B_AX_TX_PLT_GNT_WL : 0) | 3457 (plt->rx & RTW89_MAC_AX_PLT_LTE_RX ? B_AX_RX_PLT_GNT_LTE_RX : 0) | 3458 (plt->rx & RTW89_MAC_AX_PLT_GNT_BT_TX ? B_AX_RX_PLT_GNT_BT_TX : 0) | 3459 (plt->rx & RTW89_MAC_AX_PLT_GNT_BT_RX ? B_AX_RX_PLT_GNT_BT_RX : 0) | 3460 (plt->rx & RTW89_MAC_AX_PLT_GNT_WL ? 
B_AX_RX_PLT_GNT_WL : 0) | 3461 B_AX_PLT_EN; 3462 rtw89_write16(rtwdev, reg, val); 3463 3464 return 0; 3465 } 3466 3467 void rtw89_mac_cfg_sb(struct rtw89_dev *rtwdev, u32 val) 3468 { 3469 u32 fw_sb; 3470 3471 fw_sb = rtw89_read32(rtwdev, R_AX_SCOREBOARD); 3472 fw_sb = FIELD_GET(B_MAC_AX_SB_FW_MASK, fw_sb); 3473 fw_sb = fw_sb & ~B_MAC_AX_BTGS1_NOTIFY; 3474 if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) 3475 fw_sb = fw_sb | MAC_AX_NOTIFY_PWR_MAJOR; 3476 else 3477 fw_sb = fw_sb | MAC_AX_NOTIFY_TP_MAJOR; 3478 val = FIELD_GET(B_MAC_AX_SB_DRV_MASK, val); 3479 val = B_AX_TOGGLE | 3480 FIELD_PREP(B_MAC_AX_SB_DRV_MASK, val) | 3481 FIELD_PREP(B_MAC_AX_SB_FW_MASK, fw_sb); 3482 rtw89_write32(rtwdev, R_AX_SCOREBOARD, val); 3483 fsleep(1000); /* avoid BT FW loss information */ 3484 } 3485 3486 u32 rtw89_mac_get_sb(struct rtw89_dev *rtwdev) 3487 { 3488 return rtw89_read32(rtwdev, R_AX_SCOREBOARD); 3489 } 3490 3491 int rtw89_mac_cfg_ctrl_path(struct rtw89_dev *rtwdev, bool wl) 3492 { 3493 u8 val = rtw89_read8(rtwdev, R_AX_SYS_SDIO_CTRL + 3); 3494 3495 val = wl ? 
val | BIT(2) : val & ~BIT(2); 3496 rtw89_write8(rtwdev, R_AX_SYS_SDIO_CTRL + 3, val); 3497 3498 return 0; 3499 } 3500 3501 bool rtw89_mac_get_ctrl_path(struct rtw89_dev *rtwdev) 3502 { 3503 u8 val = rtw89_read8(rtwdev, R_AX_SYS_SDIO_CTRL + 3); 3504 3505 return FIELD_GET(B_AX_LTE_MUX_CTRL_PATH >> 24, val); 3506 } 3507 3508 u16 rtw89_mac_get_plt_cnt(struct rtw89_dev *rtwdev, u8 band) 3509 { 3510 u32 reg; 3511 u16 cnt; 3512 3513 reg = rtw89_mac_reg_by_idx(R_AX_BT_PLT, band); 3514 cnt = rtw89_read32_mask(rtwdev, reg, B_AX_BT_PLT_PKT_CNT_MASK); 3515 rtw89_write16_set(rtwdev, reg, B_AX_BT_PLT_RST); 3516 3517 return cnt; 3518 } 3519 3520 static void rtw89_mac_bfee_ctrl(struct rtw89_dev *rtwdev, u8 mac_idx, bool en) 3521 { 3522 u32 reg; 3523 u32 mask = B_AX_BFMEE_HT_NDPA_EN | B_AX_BFMEE_VHT_NDPA_EN | 3524 B_AX_BFMEE_HE_NDPA_EN; 3525 3526 rtw89_debug(rtwdev, RTW89_DBG_BF, "set bfee ndpa_en to %d\n", en); 3527 reg = rtw89_mac_reg_by_idx(R_AX_BFMEE_RESP_OPTION, mac_idx); 3528 if (en) { 3529 set_bit(RTW89_FLAG_BFEE_EN, rtwdev->flags); 3530 rtw89_write32_set(rtwdev, reg, mask); 3531 } else { 3532 clear_bit(RTW89_FLAG_BFEE_EN, rtwdev->flags); 3533 rtw89_write32_clr(rtwdev, reg, mask); 3534 } 3535 } 3536 3537 static int rtw89_mac_init_bfee(struct rtw89_dev *rtwdev, u8 mac_idx) 3538 { 3539 u32 reg; 3540 u32 val32; 3541 int ret; 3542 3543 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 3544 if (ret) 3545 return ret; 3546 3547 /* AP mode set tx gid to 63 */ 3548 /* STA mode set tx gid to 0(default) */ 3549 reg = rtw89_mac_reg_by_idx(R_AX_BFMER_CTRL_0, mac_idx); 3550 rtw89_write32_set(rtwdev, reg, B_AX_BFMER_NDP_BFEN); 3551 3552 reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_RRSC, mac_idx); 3553 rtw89_write32(rtwdev, reg, CSI_RRSC_BMAP); 3554 3555 reg = rtw89_mac_reg_by_idx(R_AX_BFMEE_RESP_OPTION, mac_idx); 3556 val32 = FIELD_PREP(B_AX_BFMEE_BFRP_RX_STANDBY_TIMER_MASK, BFRP_RX_STANDBY_TIMER); 3557 val32 |= FIELD_PREP(B_AX_BFMEE_NDP_RX_STANDBY_TIMER_MASK, 
NDP_RX_STANDBY_TIMER); 3558 rtw89_write32(rtwdev, reg, val32); 3559 rtw89_mac_bfee_ctrl(rtwdev, mac_idx, true); 3560 3561 reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx); 3562 rtw89_write32_set(rtwdev, reg, B_AX_BFMEE_BFPARAM_SEL | 3563 B_AX_BFMEE_USE_NSTS | 3564 B_AX_BFMEE_CSI_GID_SEL | 3565 B_AX_BFMEE_CSI_FORCE_RETE_EN); 3566 reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_RATE, mac_idx); 3567 rtw89_write32(rtwdev, reg, 3568 u32_encode_bits(CSI_INIT_RATE_HT, B_AX_BFMEE_HT_CSI_RATE_MASK) | 3569 u32_encode_bits(CSI_INIT_RATE_VHT, B_AX_BFMEE_VHT_CSI_RATE_MASK) | 3570 u32_encode_bits(CSI_INIT_RATE_HE, B_AX_BFMEE_HE_CSI_RATE_MASK)); 3571 3572 return 0; 3573 } 3574 3575 static int rtw89_mac_set_csi_para_reg(struct rtw89_dev *rtwdev, 3576 struct ieee80211_vif *vif, 3577 struct ieee80211_sta *sta) 3578 { 3579 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 3580 u8 mac_idx = rtwvif->mac_idx; 3581 u8 nc = 1, nr = 3, ng = 0, cb = 1, cs = 1, ldpc_en = 1, stbc_en = 1; 3582 u8 port_sel = rtwvif->port; 3583 u8 sound_dim = 3, t; 3584 u8 *phy_cap = sta->he_cap.he_cap_elem.phy_cap_info; 3585 u32 reg; 3586 u16 val; 3587 int ret; 3588 3589 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 3590 if (ret) 3591 return ret; 3592 3593 if ((phy_cap[3] & IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER) || 3594 (phy_cap[4] & IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER)) { 3595 ldpc_en &= !!(phy_cap[1] & IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD); 3596 stbc_en &= !!(phy_cap[2] & IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ); 3597 t = FIELD_GET(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK, 3598 phy_cap[5]); 3599 sound_dim = min(sound_dim, t); 3600 } 3601 if ((sta->vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) || 3602 (sta->vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)) { 3603 ldpc_en &= !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC); 3604 stbc_en &= !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK); 3605 t = 
FIELD_GET(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK, 3606 sta->vht_cap.cap); 3607 sound_dim = min(sound_dim, t); 3608 } 3609 nc = min(nc, sound_dim); 3610 nr = min(nr, sound_dim); 3611 3612 reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx); 3613 rtw89_write32_set(rtwdev, reg, B_AX_BFMEE_BFPARAM_SEL); 3614 3615 val = FIELD_PREP(B_AX_BFMEE_CSIINFO0_NC_MASK, nc) | 3616 FIELD_PREP(B_AX_BFMEE_CSIINFO0_NR_MASK, nr) | 3617 FIELD_PREP(B_AX_BFMEE_CSIINFO0_NG_MASK, ng) | 3618 FIELD_PREP(B_AX_BFMEE_CSIINFO0_CB_MASK, cb) | 3619 FIELD_PREP(B_AX_BFMEE_CSIINFO0_CS_MASK, cs) | 3620 FIELD_PREP(B_AX_BFMEE_CSIINFO0_LDPC_EN, ldpc_en) | 3621 FIELD_PREP(B_AX_BFMEE_CSIINFO0_STBC_EN, stbc_en); 3622 3623 if (port_sel == 0) 3624 reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx); 3625 else 3626 reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_CTRL_1, mac_idx); 3627 3628 rtw89_write16(rtwdev, reg, val); 3629 3630 return 0; 3631 } 3632 3633 static int rtw89_mac_csi_rrsc(struct rtw89_dev *rtwdev, 3634 struct ieee80211_vif *vif, 3635 struct ieee80211_sta *sta) 3636 { 3637 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 3638 u32 rrsc = BIT(RTW89_MAC_BF_RRSC_6M) | BIT(RTW89_MAC_BF_RRSC_24M); 3639 u32 reg; 3640 u8 mac_idx = rtwvif->mac_idx; 3641 int ret; 3642 3643 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 3644 if (ret) 3645 return ret; 3646 3647 if (sta->he_cap.has_he) { 3648 rrsc |= (BIT(RTW89_MAC_BF_RRSC_HE_MSC0) | 3649 BIT(RTW89_MAC_BF_RRSC_HE_MSC3) | 3650 BIT(RTW89_MAC_BF_RRSC_HE_MSC5)); 3651 } 3652 if (sta->vht_cap.vht_supported) { 3653 rrsc |= (BIT(RTW89_MAC_BF_RRSC_VHT_MSC0) | 3654 BIT(RTW89_MAC_BF_RRSC_VHT_MSC3) | 3655 BIT(RTW89_MAC_BF_RRSC_VHT_MSC5)); 3656 } 3657 if (sta->ht_cap.ht_supported) { 3658 rrsc |= (BIT(RTW89_MAC_BF_RRSC_HT_MSC0) | 3659 BIT(RTW89_MAC_BF_RRSC_HT_MSC3) | 3660 BIT(RTW89_MAC_BF_RRSC_HT_MSC5)); 3661 } 3662 reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx); 3663 rtw89_write32_set(rtwdev, 
reg, B_AX_BFMEE_BFPARAM_SEL); 3664 rtw89_write32_clr(rtwdev, reg, B_AX_BFMEE_CSI_FORCE_RETE_EN); 3665 rtw89_write32(rtwdev, 3666 rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_RRSC, mac_idx), 3667 rrsc); 3668 3669 return 0; 3670 } 3671 3672 void rtw89_mac_bf_assoc(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 3673 struct ieee80211_sta *sta) 3674 { 3675 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 3676 3677 if (rtw89_sta_has_beamformer_cap(sta)) { 3678 rtw89_debug(rtwdev, RTW89_DBG_BF, 3679 "initialize bfee for new association\n"); 3680 rtw89_mac_init_bfee(rtwdev, rtwvif->mac_idx); 3681 rtw89_mac_set_csi_para_reg(rtwdev, vif, sta); 3682 rtw89_mac_csi_rrsc(rtwdev, vif, sta); 3683 } 3684 } 3685 3686 void rtw89_mac_bf_disassoc(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 3687 struct ieee80211_sta *sta) 3688 { 3689 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 3690 3691 rtw89_mac_bfee_ctrl(rtwdev, rtwvif->mac_idx, false); 3692 } 3693 3694 void rtw89_mac_bf_set_gid_table(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 3695 struct ieee80211_bss_conf *conf) 3696 { 3697 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 3698 u8 mac_idx = rtwvif->mac_idx; 3699 __le32 *p; 3700 3701 rtw89_debug(rtwdev, RTW89_DBG_BF, "update bf GID table\n"); 3702 3703 p = (__le32 *)conf->mu_group.membership; 3704 rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION_EN0, mac_idx), 3705 le32_to_cpu(p[0])); 3706 rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION_EN1, mac_idx), 3707 le32_to_cpu(p[1])); 3708 3709 p = (__le32 *)conf->mu_group.position; 3710 rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION0, mac_idx), 3711 le32_to_cpu(p[0])); 3712 rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION1, mac_idx), 3713 le32_to_cpu(p[1])); 3714 rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION2, mac_idx), 3715 le32_to_cpu(p[2])); 3716 rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION3, 
mac_idx), 3717 le32_to_cpu(p[3])); 3718 } 3719 3720 struct rtw89_mac_bf_monitor_iter_data { 3721 struct rtw89_dev *rtwdev; 3722 struct ieee80211_sta *down_sta; 3723 int count; 3724 }; 3725 3726 static 3727 void rtw89_mac_bf_monitor_calc_iter(void *data, struct ieee80211_sta *sta) 3728 { 3729 struct rtw89_mac_bf_monitor_iter_data *iter_data = 3730 (struct rtw89_mac_bf_monitor_iter_data *)data; 3731 struct ieee80211_sta *down_sta = iter_data->down_sta; 3732 int *count = &iter_data->count; 3733 3734 if (down_sta == sta) 3735 return; 3736 3737 if (rtw89_sta_has_beamformer_cap(sta)) 3738 (*count)++; 3739 } 3740 3741 void rtw89_mac_bf_monitor_calc(struct rtw89_dev *rtwdev, 3742 struct ieee80211_sta *sta, bool disconnect) 3743 { 3744 struct rtw89_mac_bf_monitor_iter_data data; 3745 3746 data.rtwdev = rtwdev; 3747 data.down_sta = disconnect ? sta : NULL; 3748 data.count = 0; 3749 ieee80211_iterate_stations_atomic(rtwdev->hw, 3750 rtw89_mac_bf_monitor_calc_iter, 3751 &data); 3752 3753 rtw89_debug(rtwdev, RTW89_DBG_BF, "bfee STA count=%d\n", data.count); 3754 if (data.count) 3755 set_bit(RTW89_FLAG_BFEE_MON, rtwdev->flags); 3756 else 3757 clear_bit(RTW89_FLAG_BFEE_MON, rtwdev->flags); 3758 } 3759 3760 void _rtw89_mac_bf_monitor_track(struct rtw89_dev *rtwdev) 3761 { 3762 struct rtw89_traffic_stats *stats = &rtwdev->stats; 3763 struct rtw89_vif *rtwvif; 3764 bool en = stats->tx_tfc_lv <= stats->rx_tfc_lv; 3765 bool old = test_bit(RTW89_FLAG_BFEE_EN, rtwdev->flags); 3766 3767 if (en == old) 3768 return; 3769 3770 rtw89_for_each_rtwvif(rtwdev, rtwvif) 3771 rtw89_mac_bfee_ctrl(rtwdev, rtwvif->mac_idx, en); 3772 } 3773 3774 static int 3775 __rtw89_mac_set_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta, 3776 u32 tx_time) 3777 { 3778 #define MAC_AX_DFLT_TX_TIME 5280 3779 u8 mac_idx = rtwsta->rtwvif->mac_idx; 3780 u32 max_tx_time = tx_time == 0 ? 
MAC_AX_DFLT_TX_TIME : tx_time; 3781 u32 reg; 3782 int ret = 0; 3783 3784 if (rtwsta->cctl_tx_time) { 3785 rtwsta->ampdu_max_time = (max_tx_time - 512) >> 9; 3786 ret = rtw89_fw_h2c_txtime_cmac_tbl(rtwdev, rtwsta); 3787 } else { 3788 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 3789 if (ret) { 3790 rtw89_warn(rtwdev, "failed to check cmac in set txtime\n"); 3791 return ret; 3792 } 3793 3794 reg = rtw89_mac_reg_by_idx(R_AX_AMPDU_AGG_LIMIT, mac_idx); 3795 rtw89_write32_mask(rtwdev, reg, B_AX_AMPDU_MAX_TIME_MASK, 3796 max_tx_time >> 5); 3797 } 3798 3799 return ret; 3800 } 3801 3802 int rtw89_mac_set_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta, 3803 bool resume, u32 tx_time) 3804 { 3805 int ret = 0; 3806 3807 if (!resume) { 3808 rtwsta->cctl_tx_time = true; 3809 ret = __rtw89_mac_set_tx_time(rtwdev, rtwsta, tx_time); 3810 } else { 3811 ret = __rtw89_mac_set_tx_time(rtwdev, rtwsta, tx_time); 3812 rtwsta->cctl_tx_time = false; 3813 } 3814 3815 return ret; 3816 } 3817 3818 int rtw89_mac_get_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta, 3819 u32 *tx_time) 3820 { 3821 u8 mac_idx = rtwsta->rtwvif->mac_idx; 3822 u32 reg; 3823 int ret = 0; 3824 3825 if (rtwsta->cctl_tx_time) { 3826 *tx_time = (rtwsta->ampdu_max_time + 1) << 9; 3827 } else { 3828 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 3829 if (ret) { 3830 rtw89_warn(rtwdev, "failed to check cmac in tx_time\n"); 3831 return ret; 3832 } 3833 3834 reg = rtw89_mac_reg_by_idx(R_AX_AMPDU_AGG_LIMIT, mac_idx); 3835 *tx_time = rtw89_read32_mask(rtwdev, reg, B_AX_AMPDU_MAX_TIME_MASK) << 5; 3836 } 3837 3838 return ret; 3839 } 3840 3841 int rtw89_mac_set_tx_retry_limit(struct rtw89_dev *rtwdev, 3842 struct rtw89_sta *rtwsta, 3843 bool resume, u8 tx_retry) 3844 { 3845 int ret = 0; 3846 3847 rtwsta->data_tx_cnt_lmt = tx_retry; 3848 3849 if (!resume) { 3850 rtwsta->cctl_tx_retry_limit = true; 3851 ret = rtw89_fw_h2c_txtime_cmac_tbl(rtwdev, rtwsta); 3852 } else { 3853 ret = 
rtw89_fw_h2c_txtime_cmac_tbl(rtwdev, rtwsta); 3854 rtwsta->cctl_tx_retry_limit = false; 3855 } 3856 3857 return ret; 3858 } 3859 3860 int rtw89_mac_get_tx_retry_limit(struct rtw89_dev *rtwdev, 3861 struct rtw89_sta *rtwsta, u8 *tx_retry) 3862 { 3863 u8 mac_idx = rtwsta->rtwvif->mac_idx; 3864 u32 reg; 3865 int ret = 0; 3866 3867 if (rtwsta->cctl_tx_retry_limit) { 3868 *tx_retry = rtwsta->data_tx_cnt_lmt; 3869 } else { 3870 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 3871 if (ret) { 3872 rtw89_warn(rtwdev, "failed to check cmac in rty_lmt\n"); 3873 return ret; 3874 } 3875 3876 reg = rtw89_mac_reg_by_idx(R_AX_TXCNT, mac_idx); 3877 *tx_retry = rtw89_read32_mask(rtwdev, reg, B_AX_L_TXCNT_LMT_MASK); 3878 } 3879 3880 return ret; 3881 } 3882 3883 int rtw89_mac_set_hw_muedca_ctrl(struct rtw89_dev *rtwdev, 3884 struct rtw89_vif *rtwvif, bool en) 3885 { 3886 u8 mac_idx = rtwvif->mac_idx; 3887 u16 set = B_AX_MUEDCA_EN_0 | B_AX_SET_MUEDCATIMER_TF_0; 3888 u32 reg; 3889 u32 ret; 3890 3891 ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 3892 if (ret) 3893 return ret; 3894 3895 reg = rtw89_mac_reg_by_idx(R_AX_MUEDCA_EN, mac_idx); 3896 if (en) 3897 rtw89_write16_set(rtwdev, reg, set); 3898 else 3899 rtw89_write16_clr(rtwdev, reg, set); 3900 3901 return 0; 3902 } 3903