1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* Copyright(c) 2020 Realtek Corporation 3 */ 4 5 #if defined(__FreeBSD__) 6 #define LINUXKPI_PARAM_PREFIX rtw89_pci_ 7 #endif 8 9 #include <linux/pci.h> 10 #if defined(__FreeBSD__) 11 #include <sys/rman.h> 12 #endif 13 14 #include "mac.h" 15 #include "pci.h" 16 #include "reg.h" 17 #include "ser.h" 18 19 static bool rtw89_pci_disable_clkreq; 20 static bool rtw89_pci_disable_aspm_l1; 21 static bool rtw89_pci_disable_l1ss; 22 module_param_named(disable_clkreq, rtw89_pci_disable_clkreq, bool, 0644); 23 module_param_named(disable_aspm_l1, rtw89_pci_disable_aspm_l1, bool, 0644); 24 module_param_named(disable_aspm_l1ss, rtw89_pci_disable_l1ss, bool, 0644); 25 MODULE_PARM_DESC(disable_clkreq, "Set Y to disable PCI clkreq support"); 26 MODULE_PARM_DESC(disable_aspm_l1, "Set Y to disable PCI ASPM L1 support"); 27 MODULE_PARM_DESC(disable_aspm_l1ss, "Set Y to disable PCI L1SS support"); 28 29 static int rtw89_pci_get_phy_offset_by_link_speed(struct rtw89_dev *rtwdev, 30 u32 *phy_offset) 31 { 32 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 33 struct pci_dev *pdev = rtwpci->pdev; 34 u32 val; 35 int ret; 36 37 ret = pci_read_config_dword(pdev, RTW89_PCIE_L1_STS_V1, &val); 38 if (ret) 39 return ret; 40 41 val = u32_get_bits(val, RTW89_BCFG_LINK_SPEED_MASK); 42 if (val == RTW89_PCIE_GEN1_SPEED) { 43 *phy_offset = R_RAC_DIRECT_OFFSET_G1; 44 } else if (val == RTW89_PCIE_GEN2_SPEED) { 45 *phy_offset = R_RAC_DIRECT_OFFSET_G2; 46 } else { 47 rtw89_warn(rtwdev, "Unknown PCI link speed %d\n", val); 48 return -EFAULT; 49 } 50 51 return 0; 52 } 53 54 static int rtw89_pci_rst_bdram_ax(struct rtw89_dev *rtwdev) 55 { 56 u32 val; 57 int ret; 58 59 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RST_BDRAM); 60 61 ret = read_poll_timeout_atomic(rtw89_read32, val, !(val & B_AX_RST_BDRAM), 62 1, RTW89_PCI_POLL_BDRAM_RST_CNT, false, 63 rtwdev, R_AX_PCIE_INIT_CFG1); 64 65 return ret; 66 } 67 68 static u32 rtw89_pci_dma_recalc(struct rtw89_dev *rtwdev, 69 struct rtw89_pci_dma_ring *bd_ring, 70 u32 cur_idx, bool tx) 71 { 72 const struct rtw89_pci_info *info = rtwdev->pci_info; 73 u32 cnt, cur_rp, wp, rp, len; 74 75 rp = bd_ring->rp; 76 wp = bd_ring->wp; 77 len = bd_ring->len; 78 79 cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx); 80 if (tx) { 81 cnt = cur_rp >= rp ? cur_rp - rp : len - (rp - cur_rp); 82 } else { 83 if (info->rx_ring_eq_is_full) 84 wp += 1; 85 86 cnt = cur_rp >= wp ? cur_rp - wp : len - (wp - cur_rp); 87 } 88 89 bd_ring->rp = cur_rp; 90 91 return cnt; 92 } 93 94 static u32 rtw89_pci_txbd_recalc(struct rtw89_dev *rtwdev, 95 struct rtw89_pci_tx_ring *tx_ring) 96 { 97 struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring; 98 u32 addr_idx = bd_ring->addr.idx; 99 u32 cnt, idx; 100 101 idx = rtw89_read32(rtwdev, addr_idx); 102 cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, true); 103 104 return cnt; 105 } 106 107 static void rtw89_pci_release_fwcmd(struct rtw89_dev *rtwdev, 108 struct rtw89_pci *rtwpci, 109 u32 cnt, bool release_all) 110 { 111 struct rtw89_pci_tx_data *tx_data; 112 struct sk_buff *skb; 113 u32 qlen; 114 115 while (cnt--) { 116 skb = skb_dequeue(&rtwpci->h2c_queue); 117 if (!skb) { 118 rtw89_err(rtwdev, "failed to pre-release fwcmd\n"); 119 return; 120 } 121 skb_queue_tail(&rtwpci->h2c_release_queue, skb); 122 } 123 124 qlen = skb_queue_len(&rtwpci->h2c_release_queue); 125 if (!release_all) 126 qlen = qlen > RTW89_PCI_MULTITAG ? qlen - RTW89_PCI_MULTITAG : 0; 127 128 while (qlen--) { 129 skb = skb_dequeue(&rtwpci->h2c_release_queue); 130 if (!skb) { 131 rtw89_err(rtwdev, "failed to release fwcmd\n"); 132 return; 133 } 134 tx_data = RTW89_PCI_TX_SKB_CB(skb); 135 dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len, 136 DMA_TO_DEVICE); 137 dev_kfree_skb_any(skb); 138 } 139 } 140 141 static void rtw89_pci_reclaim_tx_fwcmd(struct rtw89_dev *rtwdev, 142 struct rtw89_pci *rtwpci) 143 { 144 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12]; 145 u32 cnt; 146 147 cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring); 148 if (!cnt) 149 return; 150 rtw89_pci_release_fwcmd(rtwdev, rtwpci, cnt, false); 151 } 152 153 static u32 rtw89_pci_rxbd_recalc(struct rtw89_dev *rtwdev, 154 struct rtw89_pci_rx_ring *rx_ring) 155 { 156 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; 157 u32 addr_idx = bd_ring->addr.idx; 158 u32 cnt, idx; 159 160 idx = rtw89_read32(rtwdev, addr_idx); 161 cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, false); 162 163 return cnt; 164 } 165 166 static void rtw89_pci_sync_skb_for_cpu(struct rtw89_dev *rtwdev, 167 struct sk_buff *skb) 168 { 169 struct rtw89_pci_rx_info *rx_info; 170 dma_addr_t dma; 171 172 rx_info = RTW89_PCI_RX_SKB_CB(skb); 173 dma = rx_info->dma; 174 dma_sync_single_for_cpu(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE, 175 DMA_FROM_DEVICE); 176 } 177 178 static void rtw89_pci_sync_skb_for_device(struct rtw89_dev *rtwdev, 179 struct sk_buff *skb) 180 { 181 struct rtw89_pci_rx_info *rx_info; 182 dma_addr_t dma; 183 184 rx_info = RTW89_PCI_RX_SKB_CB(skb); 185 dma = rx_info->dma; 186 dma_sync_single_for_device(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE, 187 DMA_FROM_DEVICE); 188 } 189 190 static void rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev, 191 struct sk_buff *skb) 192 { 193 struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb); 194 struct rtw89_pci_rxbd_info *rxbd_info; 195 __le32 info; 196 197 rxbd_info = (struct rtw89_pci_rxbd_info *)skb->data; 198 info = rxbd_info->dword; 199 200 rx_info->fs = le32_get_bits(info, RTW89_PCI_RXBD_FS); 201 rx_info->ls = le32_get_bits(info, RTW89_PCI_RXBD_LS); 202 rx_info->len = le32_get_bits(info, RTW89_PCI_RXBD_WRITE_SIZE); 203 rx_info->tag = le32_get_bits(info, RTW89_PCI_RXBD_TAG); 204 } 205 206 static int rtw89_pci_validate_rx_tag(struct rtw89_dev *rtwdev, 207 struct rtw89_pci_rx_ring *rx_ring, 208 struct sk_buff *skb) 209 { 210 struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb); 211 const struct rtw89_pci_info *info = rtwdev->pci_info; 212 u32 target_rx_tag; 213 214 if (!info->check_rx_tag) 215 return 0; 216 217 /* valid range is 1 ~ 0x1FFF */ 218 if (rx_ring->target_rx_tag == 0) 219 target_rx_tag = 1; 220 else 221 target_rx_tag = rx_ring->target_rx_tag; 222 223 if (rx_info->tag != target_rx_tag) { 224 rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "mismatch RX tag 0x%x 0x%x\n", 225 rx_info->tag, target_rx_tag); 226 return -EAGAIN; 227 } 228 229 return 0; 230 } 231 232 static 233 int rtw89_pci_sync_skb_for_device_and_validate_rx_info(struct rtw89_dev *rtwdev, 234 struct rtw89_pci_rx_ring *rx_ring, 235 struct sk_buff *skb) 236 { 237 struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb); 238 int rx_tag_retry = 100; 239 int ret; 240 241 do { 242 rtw89_pci_sync_skb_for_cpu(rtwdev, skb); 243 rtw89_pci_rxbd_info_update(rtwdev, skb); 244 245 ret = rtw89_pci_validate_rx_tag(rtwdev, rx_ring, skb); 246 if (ret != -EAGAIN) 247 break; 248 } while (rx_tag_retry--); 249 250 /* update target rx_tag for next RX */ 251 rx_ring->target_rx_tag = rx_info->tag + 1; 252 253 return ret; 254 } 255 256 static void rtw89_pci_ctrl_txdma_ch_ax(struct rtw89_dev *rtwdev, bool enable) 257 { 258 const struct rtw89_pci_info *info = rtwdev->pci_info; 259 const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1; 260 const struct rtw89_reg_def *dma_stop2 = &info->dma_stop2; 261 262 if (enable) { 263 rtw89_write32_clr(rtwdev, dma_stop1->addr, dma_stop1->mask); 264 if (dma_stop2->addr) 265 rtw89_write32_clr(rtwdev, dma_stop2->addr, dma_stop2->mask); 266 } else { 267 rtw89_write32_set(rtwdev, dma_stop1->addr, dma_stop1->mask); 268 if (dma_stop2->addr) 269 rtw89_write32_set(rtwdev, dma_stop2->addr, dma_stop2->mask); 270 } 271 } 272 273 static void rtw89_pci_ctrl_txdma_fw_ch_ax(struct rtw89_dev *rtwdev, bool enable) 274 { 275 const struct rtw89_pci_info *info = rtwdev->pci_info; 276 const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1; 277 278 if (enable) 279 rtw89_write32_clr(rtwdev, dma_stop1->addr, B_AX_STOP_CH12); 280 else 281 rtw89_write32_set(rtwdev, dma_stop1->addr, B_AX_STOP_CH12); 282 } 283 284 static bool 285 rtw89_skb_put_rx_data(struct rtw89_dev *rtwdev, bool fs, bool ls, 286 struct sk_buff *new, 287 const struct sk_buff *skb, u32 offset, 288 const struct rtw89_pci_rx_info *rx_info, 289 const struct rtw89_rx_desc_info *desc_info) 290 { 291 u32 copy_len = rx_info->len - offset; 292 293 if (unlikely(skb_tailroom(new) < copy_len)) { 294 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 295 "invalid rx data length bd_len=%d desc_len=%d offset=%d (fs=%d ls=%d)\n", 296 rx_info->len, desc_info->pkt_size, offset, fs, ls); 297 rtw89_hex_dump(rtwdev, RTW89_DBG_TXRX, "rx_data: ", 298 skb->data, rx_info->len); 299 /* length of a single segment skb is desc_info->pkt_size */ 300 if (fs && ls) { 301 copy_len = desc_info->pkt_size; 302 } else { 303 rtw89_info(rtwdev, "drop rx data due to invalid length\n"); 304 return false; 305 } 306 } 307 308 skb_put_data(new, skb->data + offset, copy_len); 309 310 return true; 311 } 312 313 static u32 rtw89_pci_get_rx_skb_idx(struct rtw89_dev *rtwdev, 314 struct rtw89_pci_dma_ring *bd_ring) 315 { 316 const struct rtw89_pci_info *info = rtwdev->pci_info; 317 u32 wp = bd_ring->wp; 318 319 if (!info->rx_ring_eq_is_full) 320 return wp; 321 322 if (++wp >= bd_ring->len) 323 wp = 0; 324 325 return wp; 326 } 327 328 static u32 rtw89_pci_rxbd_deliver_skbs(struct rtw89_dev *rtwdev, 329 struct rtw89_pci_rx_ring *rx_ring) 330 { 331 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; 332 struct rtw89_pci_rx_info *rx_info; 333 struct rtw89_rx_desc_info *desc_info = &rx_ring->diliver_desc; 334 struct sk_buff *new = rx_ring->diliver_skb; 335 struct sk_buff *skb; 336 u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info); 337 u32 skb_idx; 338 u32 offset; 339 u32 cnt = 1; 340 bool fs, ls; 341 int ret; 342 343 skb_idx = rtw89_pci_get_rx_skb_idx(rtwdev, bd_ring); 344 skb = rx_ring->buf[skb_idx]; 345 346 ret = rtw89_pci_sync_skb_for_device_and_validate_rx_info(rtwdev, rx_ring, skb); 347 if (ret) { 348 rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n", 349 bd_ring->wp, ret); 350 goto err_sync_device; 351 } 352 353 rx_info = RTW89_PCI_RX_SKB_CB(skb); 354 fs = rx_info->fs; 355 ls = rx_info->ls; 356 357 if (fs) { 358 if (new) { 359 rtw89_debug(rtwdev, RTW89_DBG_UNEXP, 360 "skb should not be ready before first segment start\n"); 361 goto err_sync_device; 362 } 363 if (desc_info->ready) { 364 rtw89_warn(rtwdev, "desc info should not be ready before first segment start\n"); 365 goto err_sync_device; 366 } 367 368 rtw89_chip_query_rxdesc(rtwdev, desc_info, skb->data, rxinfo_size); 369 370 new = rtw89_alloc_skb_for_rx(rtwdev, desc_info->pkt_size); 371 if (!new) 372 goto err_sync_device; 373 374 rx_ring->diliver_skb = new; 375 376 /* first segment has RX desc */ 377 offset = desc_info->offset + desc_info->rxd_len; 378 } else { 379 offset = sizeof(struct rtw89_pci_rxbd_info); 380 if (!new) { 381 rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "no last skb\n"); 382 goto err_sync_device; 383 } 384 } 385 if (!rtw89_skb_put_rx_data(rtwdev, fs, ls, new, skb, offset, rx_info, desc_info)) 386 goto err_sync_device; 387 rtw89_pci_sync_skb_for_device(rtwdev, skb); 388 rtw89_pci_rxbd_increase(rx_ring, 1); 389 390 if (!desc_info->ready) { 391 rtw89_warn(rtwdev, "no rx desc information\n"); 392 goto err_free_resource; 393 } 394 if (ls) { 395 rtw89_core_rx(rtwdev, desc_info, new); 396 rx_ring->diliver_skb = NULL; 397 desc_info->ready = false; 398 } 399 400 return cnt; 401 402 err_sync_device: 403 rtw89_pci_sync_skb_for_device(rtwdev, skb); 404 rtw89_pci_rxbd_increase(rx_ring, 1); 405 err_free_resource: 406 if (new) 407 dev_kfree_skb_any(new); 408 rx_ring->diliver_skb = NULL; 409 desc_info->ready = false; 410 411 return cnt; 412 } 413 414 static void rtw89_pci_rxbd_deliver(struct rtw89_dev *rtwdev, 415 struct rtw89_pci_rx_ring *rx_ring, 416 u32 cnt) 417 { 418 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; 419 u32 rx_cnt; 420 421 while (cnt && rtwdev->napi_budget_countdown > 0) { 422 rx_cnt = rtw89_pci_rxbd_deliver_skbs(rtwdev, rx_ring); 423 if (!rx_cnt) { 424 rtw89_err(rtwdev, "failed to deliver RXBD skb\n"); 425 426 /* skip the rest RXBD bufs */ 427 rtw89_pci_rxbd_increase(rx_ring, cnt); 428 break; 429 } 430 431 cnt -= rx_cnt; 432 } 433 434 rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp); 435 } 436 437 static int rtw89_pci_poll_rxq_dma(struct rtw89_dev *rtwdev, 438 struct rtw89_pci *rtwpci, int budget) 439 { 440 struct rtw89_pci_rx_ring *rx_ring; 441 int countdown = rtwdev->napi_budget_countdown; 442 u32 cnt; 443 444 rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RXQ]; 445 446 cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring); 447 if (!cnt) 448 return 0; 449 450 cnt = min_t(u32, budget, cnt); 451 452 rtw89_pci_rxbd_deliver(rtwdev, rx_ring, cnt); 453 454 /* In case of flushing pending SKBs, the countdown may exceed. */ 455 if (rtwdev->napi_budget_countdown <= 0) 456 return budget; 457 458 return budget - countdown; 459 } 460 461 static void rtw89_pci_tx_status(struct rtw89_dev *rtwdev, 462 struct rtw89_pci_tx_ring *tx_ring, 463 struct sk_buff *skb, u8 tx_status) 464 { 465 struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb); 466 struct ieee80211_tx_info *info; 467 468 rtw89_core_tx_wait_complete(rtwdev, skb_data, tx_status == RTW89_TX_DONE); 469 470 info = IEEE80211_SKB_CB(skb); 471 ieee80211_tx_info_clear_status(info); 472 473 if (info->flags & IEEE80211_TX_CTL_NO_ACK) 474 info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; 475 if (tx_status == RTW89_TX_DONE) { 476 info->flags |= IEEE80211_TX_STAT_ACK; 477 tx_ring->tx_acked++; 478 } else { 479 if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) 480 rtw89_debug(rtwdev, RTW89_DBG_FW, 481 "failed to TX of status %x\n", tx_status); 482 switch (tx_status) { 483 case RTW89_TX_RETRY_LIMIT: 484 tx_ring->tx_retry_lmt++; 485 break; 486 case RTW89_TX_LIFE_TIME: 487 tx_ring->tx_life_time++; 488 break; 489 case RTW89_TX_MACID_DROP: 490 tx_ring->tx_mac_id_drop++; 491 break; 492 default: 493 rtw89_warn(rtwdev, "invalid TX status %x\n", tx_status); 494 break; 495 } 496 } 497 498 ieee80211_tx_status_ni(rtwdev->hw, skb); 499 } 500 501 static void rtw89_pci_reclaim_txbd(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring) 502 { 503 struct rtw89_pci_tx_wd *txwd; 504 u32 cnt; 505 506 cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring); 507 while (cnt--) { 508 txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list); 509 if (!txwd) { 510 rtw89_warn(rtwdev, "No busy txwd pages available\n"); 511 break; 512 } 513 514 list_del_init(&txwd->list); 515 516 /* this skb has been freed by RPP */ 517 if (skb_queue_len(&txwd->queue) == 0) 518 rtw89_pci_enqueue_txwd(tx_ring, txwd); 519 } 520 } 521 522 static void rtw89_pci_release_busy_txwd(struct rtw89_dev *rtwdev, 523 struct rtw89_pci_tx_ring *tx_ring) 524 { 525 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 526 struct rtw89_pci_tx_wd *txwd; 527 int i; 528 529 for (i = 0; i < wd_ring->page_num; i++) { 530 txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list); 531 if (!txwd) 532 break; 533 534 list_del_init(&txwd->list); 535 } 536 } 537 538 static void rtw89_pci_release_txwd_skb(struct rtw89_dev *rtwdev, 539 struct rtw89_pci_tx_ring *tx_ring, 540 struct rtw89_pci_tx_wd *txwd, u16 seq, 541 u8 tx_status) 542 { 543 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 544 struct rtw89_pci_tx_data *tx_data; 545 struct sk_buff *skb, *tmp; 546 u8 txch = tx_ring->txch; 547 548 if (!list_empty(&txwd->list)) { 549 rtw89_pci_reclaim_txbd(rtwdev, tx_ring); 550 /* In low power mode, RPP can receive before updating of TX BD. 551 * In normal mode, it should not happen so give it a warning. 552 */ 553 if (!rtwpci->low_power && !list_empty(&txwd->list)) 554 rtw89_warn(rtwdev, "queue %d txwd %d is not idle\n", 555 txch, seq); 556 } 557 558 skb_queue_walk_safe(&txwd->queue, skb, tmp) { 559 skb_unlink(skb, &txwd->queue); 560 561 tx_data = RTW89_PCI_TX_SKB_CB(skb); 562 dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len, 563 DMA_TO_DEVICE); 564 565 rtw89_pci_tx_status(rtwdev, tx_ring, skb, tx_status); 566 } 567 568 if (list_empty(&txwd->list)) 569 rtw89_pci_enqueue_txwd(tx_ring, txwd); 570 } 571 572 static void rtw89_pci_release_rpp(struct rtw89_dev *rtwdev, 573 struct rtw89_pci_rpp_fmt *rpp) 574 { 575 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 576 struct rtw89_pci_tx_ring *tx_ring; 577 struct rtw89_pci_tx_wd_ring *wd_ring; 578 struct rtw89_pci_tx_wd *txwd; 579 u16 seq; 580 u8 qsel, tx_status, txch; 581 582 seq = le32_get_bits(rpp->dword, RTW89_PCI_RPP_SEQ); 583 qsel = le32_get_bits(rpp->dword, RTW89_PCI_RPP_QSEL); 584 tx_status = le32_get_bits(rpp->dword, RTW89_PCI_RPP_TX_STATUS); 585 txch = rtw89_core_get_ch_dma(rtwdev, qsel); 586 587 if (txch == RTW89_TXCH_CH12) { 588 rtw89_warn(rtwdev, "should no fwcmd release report\n"); 589 return; 590 } 591 592 tx_ring = &rtwpci->tx_rings[txch]; 593 wd_ring = &tx_ring->wd_ring; 594 txwd = &wd_ring->pages[seq]; 595 596 rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, seq, tx_status); 597 } 598 599 static void rtw89_pci_release_pending_txwd_skb(struct rtw89_dev *rtwdev, 600 struct rtw89_pci_tx_ring *tx_ring) 601 { 602 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 603 struct rtw89_pci_tx_wd *txwd; 604 int i; 605 606 for (i = 0; i < wd_ring->page_num; i++) { 607 txwd = &wd_ring->pages[i]; 608 609 if (!list_empty(&txwd->list)) 610 continue; 611 612 rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, i, RTW89_TX_MACID_DROP); 613 } 614 } 615 616 static u32 rtw89_pci_release_tx_skbs(struct rtw89_dev *rtwdev, 617 struct rtw89_pci_rx_ring *rx_ring, 618 u32 max_cnt) 619 { 620 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; 621 struct rtw89_pci_rx_info *rx_info; 622 struct rtw89_pci_rpp_fmt *rpp; 623 struct rtw89_rx_desc_info desc_info = {}; 624 struct sk_buff *skb; 625 u32 cnt = 0; 626 u32 rpp_size = sizeof(struct rtw89_pci_rpp_fmt); 627 u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info); 628 u32 skb_idx; 629 u32 offset; 630 int ret; 631 632 skb_idx = rtw89_pci_get_rx_skb_idx(rtwdev, bd_ring); 633 skb = rx_ring->buf[skb_idx]; 634 635 ret = rtw89_pci_sync_skb_for_device_and_validate_rx_info(rtwdev, rx_ring, skb); 636 if (ret) { 637 rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n", 638 bd_ring->wp, ret); 639 goto err_sync_device; 640 } 641 642 rx_info = RTW89_PCI_RX_SKB_CB(skb); 643 if (!rx_info->fs || !rx_info->ls) { 644 rtw89_err(rtwdev, "cannot process RP frame not set FS/LS\n"); 645 return cnt; 646 } 647 648 rtw89_chip_query_rxdesc(rtwdev, &desc_info, skb->data, rxinfo_size); 649 650 /* first segment has RX desc */ 651 offset = desc_info.offset + desc_info.rxd_len; 652 for (; offset + rpp_size <= rx_info->len; offset += rpp_size) { 653 rpp = (struct rtw89_pci_rpp_fmt *)(skb->data + offset); 654 rtw89_pci_release_rpp(rtwdev, rpp); 655 } 656 657 rtw89_pci_sync_skb_for_device(rtwdev, skb); 658 rtw89_pci_rxbd_increase(rx_ring, 1); 659 cnt++; 660 661 return cnt; 662 663 err_sync_device: 664 rtw89_pci_sync_skb_for_device(rtwdev, skb); 665 return 0; 666 } 667 668 static void rtw89_pci_release_tx(struct rtw89_dev *rtwdev, 669 struct rtw89_pci_rx_ring *rx_ring, 670 u32 cnt) 671 { 672 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; 673 u32 release_cnt; 674 675 while (cnt) { 676 release_cnt = rtw89_pci_release_tx_skbs(rtwdev, rx_ring, cnt); 677 if (!release_cnt) { 678 rtw89_err(rtwdev, "failed to release TX skbs\n"); 679 680 /* skip the rest RXBD bufs */ 681 rtw89_pci_rxbd_increase(rx_ring, cnt); 682 break; 683 } 684 685 cnt -= release_cnt; 686 } 687 688 rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp); 689 } 690 691 static int rtw89_pci_poll_rpq_dma(struct rtw89_dev *rtwdev, 692 struct rtw89_pci *rtwpci, int budget) 693 { 694 struct rtw89_pci_rx_ring *rx_ring; 695 u32 cnt; 696 int work_done; 697 698 rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ]; 699 700 spin_lock_bh(&rtwpci->trx_lock); 701 702 cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring); 703 if (cnt == 0) 704 goto out_unlock; 705 706 rtw89_pci_release_tx(rtwdev, rx_ring, cnt); 707 708 out_unlock: 709 spin_unlock_bh(&rtwpci->trx_lock); 710 711 /* always release all RPQ */ 712 work_done = min_t(int, cnt, budget); 713 rtwdev->napi_budget_countdown -= work_done; 714 715 return work_done; 716 } 717 718 static void rtw89_pci_isr_rxd_unavail(struct rtw89_dev *rtwdev, 719 struct rtw89_pci *rtwpci) 720 { 721 struct rtw89_pci_rx_ring *rx_ring; 722 struct rtw89_pci_dma_ring *bd_ring; 723 u32 reg_idx; 724 u16 hw_idx, hw_idx_next, host_idx; 725 int i; 726 727 for (i = 0; i < RTW89_RXCH_NUM; i++) { 728 rx_ring = &rtwpci->rx_rings[i]; 729 bd_ring = &rx_ring->bd_ring; 730 731 reg_idx = rtw89_read32(rtwdev, bd_ring->addr.idx); 732 hw_idx = FIELD_GET(TXBD_HW_IDX_MASK, reg_idx); 733 host_idx = FIELD_GET(TXBD_HOST_IDX_MASK, reg_idx); 734 hw_idx_next = (hw_idx + 1) % bd_ring->len; 735 736 if (hw_idx_next == host_idx) 737 rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "%d RXD unavailable\n", i); 738 739 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 740 "%d RXD unavailable, idx=0x%08x, len=%d\n", 741 i, reg_idx, bd_ring->len); 742 } 743 } 744 745 void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev, 746 struct rtw89_pci *rtwpci, 747 struct rtw89_pci_isrs *isrs) 748 { 749 isrs->halt_c2h_isrs = rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs; 750 isrs->isrs[0] = rtw89_read32(rtwdev, R_AX_PCIE_HISR00) & rtwpci->intrs[0]; 751 isrs->isrs[1] = rtw89_read32(rtwdev, R_AX_PCIE_HISR10) & rtwpci->intrs[1]; 752 753 rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs); 754 rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isrs->isrs[0]); 755 rtw89_write32(rtwdev, R_AX_PCIE_HISR10, isrs->isrs[1]); 756 } 757 EXPORT_SYMBOL(rtw89_pci_recognize_intrs); 758 759 void rtw89_pci_recognize_intrs_v1(struct rtw89_dev *rtwdev, 760 struct rtw89_pci *rtwpci, 761 struct rtw89_pci_isrs *isrs) 762 { 763 isrs->ind_isrs = rtw89_read32(rtwdev, R_AX_PCIE_HISR00_V1) & rtwpci->ind_intrs; 764 isrs->halt_c2h_isrs = isrs->ind_isrs & B_AX_HS0ISR_IND_INT_EN ? 765 rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs : 0; 766 isrs->isrs[0] = isrs->ind_isrs & B_AX_HCI_AXIDMA_INT_EN ? 767 rtw89_read32(rtwdev, R_AX_HAXI_HISR00) & rtwpci->intrs[0] : 0; 768 isrs->isrs[1] = isrs->ind_isrs & B_AX_HS1ISR_IND_INT_EN ? 769 rtw89_read32(rtwdev, R_AX_HISR1) & rtwpci->intrs[1] : 0; 770 771 if (isrs->halt_c2h_isrs) 772 rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs); 773 if (isrs->isrs[0]) 774 rtw89_write32(rtwdev, R_AX_HAXI_HISR00, isrs->isrs[0]); 775 if (isrs->isrs[1]) 776 rtw89_write32(rtwdev, R_AX_HISR1, isrs->isrs[1]); 777 } 778 EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v1); 779 780 void rtw89_pci_recognize_intrs_v2(struct rtw89_dev *rtwdev, 781 struct rtw89_pci *rtwpci, 782 struct rtw89_pci_isrs *isrs) 783 { 784 isrs->ind_isrs = rtw89_read32(rtwdev, R_BE_PCIE_HISR) & rtwpci->ind_intrs; 785 isrs->halt_c2h_isrs = isrs->ind_isrs & B_BE_HS0ISR_IND_INT ? 786 rtw89_read32(rtwdev, R_BE_HISR0) & rtwpci->halt_c2h_intrs : 0; 787 isrs->isrs[0] = isrs->ind_isrs & B_BE_HCI_AXIDMA_INT ? 788 rtw89_read32(rtwdev, R_BE_HAXI_HISR00) & rtwpci->intrs[0] : 0; 789 isrs->isrs[1] = rtw89_read32(rtwdev, R_BE_PCIE_DMA_ISR) & rtwpci->intrs[1]; 790 791 if (isrs->halt_c2h_isrs) 792 rtw89_write32(rtwdev, R_BE_HISR0, isrs->halt_c2h_isrs); 793 if (isrs->isrs[0]) 794 rtw89_write32(rtwdev, R_BE_HAXI_HISR00, isrs->isrs[0]); 795 if (isrs->isrs[1]) 796 rtw89_write32(rtwdev, R_BE_PCIE_DMA_ISR, isrs->isrs[1]); 797 rtw89_write32(rtwdev, R_BE_PCIE_HISR, isrs->ind_isrs); 798 } 799 EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v2); 800 801 void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) 802 { 803 rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs); 804 rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, rtwpci->intrs[0]); 805 rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, rtwpci->intrs[1]); 806 } 807 EXPORT_SYMBOL(rtw89_pci_enable_intr); 808 809 void rtw89_pci_disable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) 810 { 811 rtw89_write32(rtwdev, R_AX_HIMR0, 0); 812 rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, 0); 813 rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, 0); 814 } 815 EXPORT_SYMBOL(rtw89_pci_disable_intr); 816 817 void rtw89_pci_enable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) 818 { 819 rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, rtwpci->ind_intrs); 820 rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs); 821 rtw89_write32(rtwdev, R_AX_HAXI_HIMR00, rtwpci->intrs[0]); 822 rtw89_write32(rtwdev, R_AX_HIMR1, rtwpci->intrs[1]); 823 } 824 EXPORT_SYMBOL(rtw89_pci_enable_intr_v1); 825 826 void rtw89_pci_disable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) 827 { 828 rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, 0); 829 } 830 EXPORT_SYMBOL(rtw89_pci_disable_intr_v1); 831 832 void rtw89_pci_enable_intr_v2(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) 833 { 834 rtw89_write32(rtwdev, R_BE_HIMR0, rtwpci->halt_c2h_intrs); 835 rtw89_write32(rtwdev, R_BE_HAXI_HIMR00, rtwpci->intrs[0]); 836 rtw89_write32(rtwdev, R_BE_PCIE_DMA_IMR_0_V1, rtwpci->intrs[1]); 837 rtw89_write32(rtwdev, R_BE_PCIE_HIMR0, rtwpci->ind_intrs); 838 } 839 EXPORT_SYMBOL(rtw89_pci_enable_intr_v2); 840 841 void rtw89_pci_disable_intr_v2(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) 842 { 843 rtw89_write32(rtwdev, R_BE_PCIE_HIMR0, 0); 844 rtw89_write32(rtwdev, R_BE_PCIE_DMA_IMR_0_V1, 0); 845 } 846 EXPORT_SYMBOL(rtw89_pci_disable_intr_v2); 847 848 static void rtw89_pci_ops_recovery_start(struct rtw89_dev *rtwdev) 849 { 850 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 851 unsigned long flags; 852 853 spin_lock_irqsave(&rtwpci->irq_lock, flags); 854 rtw89_chip_disable_intr(rtwdev, rtwpci); 855 rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_START); 856 rtw89_chip_enable_intr(rtwdev, rtwpci); 857 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 858 } 859 860 static void rtw89_pci_ops_recovery_complete(struct rtw89_dev *rtwdev) 861 { 862 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 863 unsigned long flags; 864 865 spin_lock_irqsave(&rtwpci->irq_lock, flags); 866 rtw89_chip_disable_intr(rtwdev, rtwpci); 867 rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_COMPLETE); 868 rtw89_chip_enable_intr(rtwdev, rtwpci); 869 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 870 } 871 872 static void rtw89_pci_low_power_interrupt_handler(struct rtw89_dev *rtwdev) 873 { 874 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 875 int budget = NAPI_POLL_WEIGHT; 876 877 /* To prevent RXQ get stuck due to run out of budget. */ 878 rtwdev->napi_budget_countdown = budget; 879 880 rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, budget); 881 rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, budget); 882 } 883 884 static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev) 885 { 886 struct rtw89_dev *rtwdev = dev; 887 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 888 const struct rtw89_pci_info *info = rtwdev->pci_info; 889 const struct rtw89_pci_gen_def *gen_def = info->gen_def; 890 struct rtw89_pci_isrs isrs; 891 unsigned long flags; 892 893 spin_lock_irqsave(&rtwpci->irq_lock, flags); 894 rtw89_chip_recognize_intrs(rtwdev, rtwpci, &isrs); 895 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 896 897 if (unlikely(isrs.isrs[0] & gen_def->isr_rdu)) 898 rtw89_pci_isr_rxd_unavail(rtwdev, rtwpci); 899 900 if (unlikely(isrs.halt_c2h_isrs & gen_def->isr_halt_c2h)) 901 rtw89_ser_notify(rtwdev, rtw89_mac_get_err_status(rtwdev)); 902 903 if (unlikely(isrs.halt_c2h_isrs & gen_def->isr_wdt_timeout)) 904 rtw89_ser_notify(rtwdev, MAC_AX_ERR_L2_ERR_WDT_TIMEOUT_INT); 905 906 if (unlikely(rtwpci->under_recovery)) 907 goto enable_intr; 908 909 if (unlikely(rtwpci->low_power)) { 910 rtw89_pci_low_power_interrupt_handler(rtwdev); 911 goto enable_intr; 912 } 913 914 if (likely(rtwpci->running)) { 915 local_bh_disable(); 916 napi_schedule(&rtwdev->napi); 917 local_bh_enable(); 918 } 919 920 return IRQ_HANDLED; 921 922 enable_intr: 923 spin_lock_irqsave(&rtwpci->irq_lock, flags); 924 if (likely(rtwpci->running)) 925 rtw89_chip_enable_intr(rtwdev, rtwpci); 926 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 927 return IRQ_HANDLED; 928 } 929 930 static irqreturn_t rtw89_pci_interrupt_handler(int irq, void *dev) 931 { 932 struct rtw89_dev *rtwdev = dev; 933 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 934 unsigned long flags; 935 irqreturn_t irqret = IRQ_WAKE_THREAD; 936 937 spin_lock_irqsave(&rtwpci->irq_lock, flags); 938 939 /* If interrupt event is on the road, it is still trigger interrupt 940 * even we have done pci_stop() to turn off IMR. 941 */ 942 if (unlikely(!rtwpci->running)) { 943 irqret = IRQ_HANDLED; 944 goto exit; 945 } 946 947 rtw89_chip_disable_intr(rtwdev, rtwpci); 948 exit: 949 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 950 951 return irqret; 952 } 953 954 #define DEF_TXCHADDRS_TYPE2(gen, ch_idx, txch, v...) \ 955 [RTW89_TXCH_##ch_idx] = { \ 956 .num = R_##gen##_##txch##_TXBD_NUM ##v, \ 957 .idx = R_##gen##_##txch##_TXBD_IDX ##v, \ 958 .bdram = 0, \ 959 .desa_l = R_##gen##_##txch##_TXBD_DESA_L ##v, \ 960 .desa_h = R_##gen##_##txch##_TXBD_DESA_H ##v, \ 961 } 962 963 #define DEF_TXCHADDRS_TYPE1(info, txch, v...) \ 964 [RTW89_TXCH_##txch] = { \ 965 .num = R_AX_##txch##_TXBD_NUM ##v, \ 966 .idx = R_AX_##txch##_TXBD_IDX ##v, \ 967 .bdram = R_AX_##txch##_BDRAM_CTRL ##v, \ 968 .desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \ 969 .desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \ 970 } 971 972 #define DEF_TXCHADDRS(info, txch, v...) \ 973 [RTW89_TXCH_##txch] = { \ 974 .num = R_AX_##txch##_TXBD_NUM, \ 975 .idx = R_AX_##txch##_TXBD_IDX, \ 976 .bdram = R_AX_##txch##_BDRAM_CTRL ##v, \ 977 .desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \ 978 .desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \ 979 } 980 981 #define DEF_RXCHADDRS(gen, ch_idx, rxch, v...) \ 982 [RTW89_RXCH_##ch_idx] = { \ 983 .num = R_##gen##_##rxch##_RXBD_NUM ##v, \ 984 .idx = R_##gen##_##rxch##_RXBD_IDX ##v, \ 985 .desa_l = R_##gen##_##rxch##_RXBD_DESA_L ##v, \ 986 .desa_h = R_##gen##_##rxch##_RXBD_DESA_H ##v, \ 987 } 988 989 const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set = { 990 .tx = { 991 DEF_TXCHADDRS(info, ACH0), 992 DEF_TXCHADDRS(info, ACH1), 993 DEF_TXCHADDRS(info, ACH2), 994 DEF_TXCHADDRS(info, ACH3), 995 DEF_TXCHADDRS(info, ACH4), 996 DEF_TXCHADDRS(info, ACH5), 997 DEF_TXCHADDRS(info, ACH6), 998 DEF_TXCHADDRS(info, ACH7), 999 DEF_TXCHADDRS(info, CH8), 1000 DEF_TXCHADDRS(info, CH9), 1001 DEF_TXCHADDRS_TYPE1(info, CH10), 1002 DEF_TXCHADDRS_TYPE1(info, CH11), 1003 DEF_TXCHADDRS(info, CH12), 1004 }, 1005 .rx = { 1006 DEF_RXCHADDRS(AX, RXQ, RXQ), 1007 DEF_RXCHADDRS(AX, RPQ, RPQ), 1008 }, 1009 }; 1010 EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set); 1011 1012 const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1 = { 1013 .tx = { 1014 DEF_TXCHADDRS(info, ACH0, _V1), 1015 DEF_TXCHADDRS(info, ACH1, _V1), 1016 DEF_TXCHADDRS(info, ACH2, _V1), 1017 DEF_TXCHADDRS(info, ACH3, _V1), 1018 DEF_TXCHADDRS(info, ACH4, _V1), 1019 DEF_TXCHADDRS(info, ACH5, _V1), 1020 DEF_TXCHADDRS(info, ACH6, _V1), 1021 DEF_TXCHADDRS(info, ACH7, _V1), 1022 DEF_TXCHADDRS(info, CH8, _V1), 1023 DEF_TXCHADDRS(info, CH9, _V1), 1024 DEF_TXCHADDRS_TYPE1(info, CH10, _V1), 1025 DEF_TXCHADDRS_TYPE1(info, CH11, _V1), 1026 DEF_TXCHADDRS(info, CH12, _V1), 1027 }, 1028 .rx = { 1029 DEF_RXCHADDRS(AX, RXQ, RXQ, _V1), 1030 DEF_RXCHADDRS(AX, RPQ, RPQ, _V1), 1031 }, 1032 }; 1033 EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_v1); 1034 1035 const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_be = { 1036 .tx = { 1037 DEF_TXCHADDRS_TYPE2(BE, ACH0, CH0, _V1), 1038 DEF_TXCHADDRS_TYPE2(BE, ACH1, CH1, _V1), 1039 DEF_TXCHADDRS_TYPE2(BE, ACH2, CH2, _V1), 1040 DEF_TXCHADDRS_TYPE2(BE, ACH3, CH3, _V1), 1041 DEF_TXCHADDRS_TYPE2(BE, ACH4, CH4, _V1), 1042 DEF_TXCHADDRS_TYPE2(BE, ACH5, CH5, _V1), 1043 DEF_TXCHADDRS_TYPE2(BE, ACH6, CH6, _V1), 1044 DEF_TXCHADDRS_TYPE2(BE, ACH7, CH7, _V1), 1045 DEF_TXCHADDRS_TYPE2(BE, CH8, CH8, _V1), 1046 DEF_TXCHADDRS_TYPE2(BE, CH9, CH9, _V1), 1047 DEF_TXCHADDRS_TYPE2(BE, CH10, CH10, _V1), 1048 DEF_TXCHADDRS_TYPE2(BE, CH11, CH11, _V1), 1049 DEF_TXCHADDRS_TYPE2(BE, CH12, CH12, _V1), 1050 }, 1051 .rx = { 1052 DEF_RXCHADDRS(BE, RXQ, RXQ0, _V1), 1053 DEF_RXCHADDRS(BE, RPQ, RPQ0, _V1), 1054 }, 1055 }; 1056 EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_be); 1057 1058 #undef DEF_TXCHADDRS_TYPE1 1059 #undef DEF_TXCHADDRS 1060 #undef DEF_RXCHADDRS 1061 1062 static int rtw89_pci_get_txch_addrs(struct rtw89_dev *rtwdev, 1063 enum rtw89_tx_channel txch, 1064 const struct rtw89_pci_ch_dma_addr **addr) 1065 { 1066 const struct rtw89_pci_info *info = rtwdev->pci_info; 1067 1068 if (txch >= RTW89_TXCH_NUM) 1069 return -EINVAL; 1070 1071 *addr = &info->dma_addr_set->tx[txch]; 1072 1073 return 0; 1074 } 1075 1076 static int rtw89_pci_get_rxch_addrs(struct rtw89_dev *rtwdev, 1077 enum rtw89_rx_channel rxch, 1078 const struct rtw89_pci_ch_dma_addr **addr) 1079 { 1080 const struct rtw89_pci_info *info = rtwdev->pci_info; 1081 1082 if (rxch >= RTW89_RXCH_NUM) 1083 return -EINVAL; 1084 1085 *addr = &info->dma_addr_set->rx[rxch]; 1086 1087 return 0; 1088 } 1089 1090 static u32 rtw89_pci_get_avail_txbd_num(struct rtw89_pci_tx_ring *ring) 1091 { 1092 struct rtw89_pci_dma_ring *bd_ring = &ring->bd_ring; 1093 1094 /* reserved 1 desc check ring is full or not */ 1095 if (bd_ring->rp > bd_ring->wp) 1096 return bd_ring->rp - bd_ring->wp - 1; 1097 1098 return bd_ring->len - (bd_ring->wp - bd_ring->rp) - 1; 1099 } 1100 1101 static 1102 u32 __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(struct rtw89_dev *rtwdev) 1103 { 1104 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1105 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12]; 1106 u32 cnt; 1107 1108 spin_lock_bh(&rtwpci->trx_lock); 1109 rtw89_pci_reclaim_tx_fwcmd(rtwdev, rtwpci); 1110 cnt = rtw89_pci_get_avail_txbd_num(tx_ring); 1111 spin_unlock_bh(&rtwpci->trx_lock); 1112 1113 return cnt; 1114 } 1115 1116 static 1117 u32 __rtw89_pci_check_and_reclaim_tx_resource_noio(struct rtw89_dev *rtwdev, 1118 u8 txch) 1119 { 1120 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1121 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch]; 1122 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 1123 u32 cnt; 1124 1125 spin_lock_bh(&rtwpci->trx_lock); 1126 cnt = rtw89_pci_get_avail_txbd_num(tx_ring); 1127 if (txch != RTW89_TXCH_CH12) 1128 cnt = min(cnt, wd_ring->curr_num); 1129 spin_unlock_bh(&rtwpci->trx_lock); 1130 1131 return cnt; 1132 } 1133 1134 static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev, 1135 u8 txch) 1136 { 1137 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1138 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch]; 1139 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 1140 const struct rtw89_chip_info *chip = rtwdev->chip; 1141 u32 bd_cnt, wd_cnt, min_cnt = 0; 1142 struct rtw89_pci_rx_ring *rx_ring; 1143 enum rtw89_debug_mask debug_mask; 1144 u32 cnt; 1145 1146 rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ]; 1147 1148 spin_lock_bh(&rtwpci->trx_lock); 1149 bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring); 1150 wd_cnt = wd_ring->curr_num; 1151 1152 if (wd_cnt == 0 || bd_cnt == 0) { 1153 cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring); 1154 if (cnt) 1155 rtw89_pci_release_tx(rtwdev, rx_ring, cnt); 1156 else if (wd_cnt == 0) 1157 goto out_unlock; 1158 1159 bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring); 1160 if (bd_cnt == 0) 1161 rtw89_pci_reclaim_txbd(rtwdev, tx_ring); 1162 } 1163 1164 bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring); 1165 wd_cnt = wd_ring->curr_num; 1166 min_cnt = min(bd_cnt, wd_cnt); 1167 if (min_cnt == 0) { 1168 /* This message can be frequently shown in low power mode or 1169 * high traffic with small FIFO chips, and we have recognized it as normal 1170 * behavior, so print with mask RTW89_DBG_TXRX in these situations. 1171 */ 1172 if (rtwpci->low_power || chip->small_fifo_size) 1173 debug_mask = RTW89_DBG_TXRX; 1174 else 1175 debug_mask = RTW89_DBG_UNEXP; 1176 1177 rtw89_debug(rtwdev, debug_mask, 1178 "still no tx resource after reclaim: wd_cnt=%d bd_cnt=%d\n", 1179 wd_cnt, bd_cnt); 1180 } 1181 1182 out_unlock: 1183 spin_unlock_bh(&rtwpci->trx_lock); 1184 1185 return min_cnt; 1186 } 1187 1188 static u32 rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev, 1189 u8 txch) 1190 { 1191 if (rtwdev->hci.paused) 1192 return __rtw89_pci_check_and_reclaim_tx_resource_noio(rtwdev, txch); 1193 1194 if (txch == RTW89_TXCH_CH12) 1195 return __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(rtwdev); 1196 1197 return __rtw89_pci_check_and_reclaim_tx_resource(rtwdev, txch); 1198 } 1199 1200 static void __rtw89_pci_tx_kick_off(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring) 1201 { 1202 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1203 struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring; 1204 u32 host_idx, addr; 1205 1206 spin_lock_bh(&rtwpci->trx_lock); 1207 1208 addr = bd_ring->addr.idx; 1209 host_idx = bd_ring->wp; 1210 rtw89_write16(rtwdev, addr, host_idx); 1211 1212 spin_unlock_bh(&rtwpci->trx_lock); 1213 } 1214 1215 static void rtw89_pci_tx_bd_ring_update(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring, 1216 int n_txbd) 1217 { 1218 struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring; 1219 u32 host_idx, len; 1220 1221 len = bd_ring->len; 1222 host_idx = bd_ring->wp + n_txbd; 1223 host_idx = host_idx < len ? host_idx : host_idx - len; 1224 1225 bd_ring->wp = host_idx; 1226 } 1227 1228 static void rtw89_pci_ops_tx_kick_off(struct rtw89_dev *rtwdev, u8 txch) 1229 { 1230 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1231 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch]; 1232 1233 if (rtwdev->hci.paused) { 1234 set_bit(txch, rtwpci->kick_map); 1235 return; 1236 } 1237 1238 __rtw89_pci_tx_kick_off(rtwdev, tx_ring); 1239 } 1240 1241 static void rtw89_pci_tx_kick_off_pending(struct rtw89_dev *rtwdev) 1242 { 1243 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1244 struct rtw89_pci_tx_ring *tx_ring; 1245 int txch; 1246 1247 for (txch = 0; txch < RTW89_TXCH_NUM; txch++) { 1248 if (!test_and_clear_bit(txch, rtwpci->kick_map)) 1249 continue; 1250 1251 tx_ring = &rtwpci->tx_rings[txch]; 1252 __rtw89_pci_tx_kick_off(rtwdev, tx_ring); 1253 } 1254 } 1255 1256 static void __pci_flush_txch(struct rtw89_dev *rtwdev, u8 txch, bool drop) 1257 { 1258 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1259 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch]; 1260 struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring; 1261 u32 cur_idx, cur_rp; 1262 u8 i; 1263 1264 /* Because the time taked by the I/O is a bit dynamic, it's hard to 1265 * define a reasonable fixed total timeout to use read_poll_timeout* 1266 * helper. Instead, we can ensure a reasonable polling times, so we 1267 * just use for loop with udelay here. 1268 */ 1269 for (i = 0; i < 60; i++) { 1270 cur_idx = rtw89_read32(rtwdev, bd_ring->addr.idx); 1271 cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx); 1272 if (cur_rp == bd_ring->wp) 1273 return; 1274 1275 udelay(1); 1276 } 1277 1278 if (!drop) 1279 rtw89_info(rtwdev, "timed out to flush pci txch: %d\n", txch); 1280 } 1281 1282 static void __rtw89_pci_ops_flush_txchs(struct rtw89_dev *rtwdev, u32 txchs, 1283 bool drop) 1284 { 1285 const struct rtw89_pci_info *info = rtwdev->pci_info; 1286 u8 i; 1287 1288 for (i = 0; i < RTW89_TXCH_NUM; i++) { 1289 /* It may be unnecessary to flush FWCMD queue. */ 1290 if (i == RTW89_TXCH_CH12) 1291 continue; 1292 if (info->tx_dma_ch_mask & BIT(i)) 1293 continue; 1294 1295 if (txchs & BIT(i)) 1296 __pci_flush_txch(rtwdev, i, drop); 1297 } 1298 } 1299 1300 static void rtw89_pci_ops_flush_queues(struct rtw89_dev *rtwdev, u32 queues, 1301 bool drop) 1302 { 1303 __rtw89_pci_ops_flush_txchs(rtwdev, BIT(RTW89_TXCH_NUM) - 1, drop); 1304 } 1305 1306 u32 rtw89_pci_fill_txaddr_info(struct rtw89_dev *rtwdev, 1307 void *txaddr_info_addr, u32 total_len, 1308 dma_addr_t dma, u8 *add_info_nr) 1309 { 1310 struct rtw89_pci_tx_addr_info_32 *txaddr_info = txaddr_info_addr; 1311 __le16 option; 1312 1313 txaddr_info->length = cpu_to_le16(total_len); 1314 option = cpu_to_le16(RTW89_PCI_ADDR_MSDU_LS | RTW89_PCI_ADDR_NUM(1)); 1315 option |= le16_encode_bits(upper_32_bits(dma), RTW89_PCI_ADDR_HIGH_MASK); 1316 txaddr_info->option = option; 1317 txaddr_info->dma = cpu_to_le32(dma); 1318 1319 *add_info_nr = 1; 1320 1321 return sizeof(*txaddr_info); 1322 } 1323 EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info); 1324 1325 u32 rtw89_pci_fill_txaddr_info_v1(struct rtw89_dev *rtwdev, 1326 void *txaddr_info_addr, u32 total_len, 1327 dma_addr_t dma, u8 *add_info_nr) 1328 { 1329 struct rtw89_pci_tx_addr_info_32_v1 *txaddr_info = txaddr_info_addr; 1330 u32 remain = total_len; 1331 u32 len; 1332 u16 length_option; 1333 int n; 1334 1335 for (n = 0; n < RTW89_TXADDR_INFO_NR_V1 && remain; n++) { 1336 len = remain >= TXADDR_INFO_LENTHG_V1_MAX ? 1337 TXADDR_INFO_LENTHG_V1_MAX : remain; 1338 remain -= len; 1339 1340 length_option = FIELD_PREP(B_PCIADDR_LEN_V1_MASK, len) | 1341 FIELD_PREP(B_PCIADDR_HIGH_SEL_V1_MASK, 0) | 1342 FIELD_PREP(B_PCIADDR_LS_V1_MASK, remain == 0); 1343 length_option |= u16_encode_bits(upper_32_bits(dma), 1344 B_PCIADDR_HIGH_SEL_V1_MASK); 1345 txaddr_info->length_opt = cpu_to_le16(length_option); 1346 txaddr_info->dma_low_lsb = cpu_to_le16(FIELD_GET(GENMASK(15, 0), dma)); 1347 txaddr_info->dma_low_msb = cpu_to_le16(FIELD_GET(GENMASK(31, 16), dma)); 1348 1349 dma += len; 1350 txaddr_info++; 1351 } 1352 1353 WARN_ONCE(remain, "length overflow remain=%u total_len=%u", 1354 remain, total_len); 1355 1356 *add_info_nr = n; 1357 1358 return n * sizeof(*txaddr_info); 1359 } 1360 EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info_v1); 1361 1362 static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev, 1363 struct rtw89_pci_tx_ring *tx_ring, 1364 struct rtw89_pci_tx_wd *txwd, 1365 struct rtw89_core_tx_request *tx_req) 1366 { 1367 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1368 const struct rtw89_chip_info *chip = rtwdev->chip; 1369 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; 1370 struct rtw89_pci_tx_wp_info *txwp_info; 1371 void *txaddr_info_addr; 1372 struct pci_dev *pdev = rtwpci->pdev; 1373 struct sk_buff *skb = tx_req->skb; 1374 struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb); 1375 struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb); 1376 bool en_wd_info = desc_info->en_wd_info; 1377 u32 txwd_len; 1378 u32 txwp_len; 1379 u32 txaddr_info_len; 1380 dma_addr_t dma; 1381 int ret; 1382 1383 dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE); 1384 if (dma_mapping_error(&pdev->dev, dma)) { 1385 rtw89_err(rtwdev, "failed to map skb dma data\n"); 1386 ret = -EBUSY; 1387 goto err; 1388 } 1389 1390 tx_data->dma = dma; 1391 rcu_assign_pointer(skb_data->wait, NULL); 1392 1393 txwp_len = sizeof(*txwp_info); 1394 txwd_len = chip->txwd_body_size; 1395 txwd_len += en_wd_info ? chip->txwd_info_size : 0; 1396 1397 #if defined(__linux__) 1398 txwp_info = txwd->vaddr + txwd_len; 1399 #elif defined(__FreeBSD__) 1400 txwp_info = (struct rtw89_pci_tx_wp_info *)((u8 *)txwd->vaddr + txwd_len); 1401 #endif 1402 txwp_info->seq0 = cpu_to_le16(txwd->seq | RTW89_PCI_TXWP_VALID); 1403 txwp_info->seq1 = 0; 1404 txwp_info->seq2 = 0; 1405 txwp_info->seq3 = 0; 1406 1407 tx_ring->tx_cnt++; 1408 #if defined(__linux__) 1409 txaddr_info_addr = txwd->vaddr + txwd_len + txwp_len; 1410 #elif defined(__FreeBSD__) 1411 txaddr_info_addr = (u8 *)txwd->vaddr + txwd_len + txwp_len; 1412 #endif 1413 txaddr_info_len = 1414 rtw89_chip_fill_txaddr_info(rtwdev, txaddr_info_addr, skb->len, 1415 dma, &desc_info->addr_info_nr); 1416 1417 txwd->len = txwd_len + txwp_len + txaddr_info_len; 1418 1419 rtw89_chip_fill_txdesc(rtwdev, desc_info, txwd->vaddr); 1420 1421 skb_queue_tail(&txwd->queue, skb); 1422 1423 return 0; 1424 1425 err: 1426 return ret; 1427 } 1428 1429 static int rtw89_pci_fwcmd_submit(struct rtw89_dev *rtwdev, 1430 struct rtw89_pci_tx_ring *tx_ring, 1431 struct rtw89_pci_tx_bd_32 *txbd, 1432 struct rtw89_core_tx_request *tx_req) 1433 { 1434 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1435 const struct rtw89_chip_info *chip = rtwdev->chip; 1436 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; 1437 void *txdesc; 1438 int txdesc_size = chip->h2c_desc_size; 1439 struct pci_dev *pdev = rtwpci->pdev; 1440 struct sk_buff *skb = tx_req->skb; 1441 struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb); 1442 dma_addr_t dma; 1443 __le16 opt; 1444 1445 txdesc = skb_push(skb, txdesc_size); 1446 memset(txdesc, 0, txdesc_size); 1447 rtw89_chip_fill_txdesc_fwcmd(rtwdev, desc_info, txdesc); 1448 1449 dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE); 1450 if (dma_mapping_error(&pdev->dev, dma)) { 1451 rtw89_err(rtwdev, "failed to map fwcmd dma data\n"); 1452 return -EBUSY; 1453 } 1454 1455 tx_data->dma = dma; 1456 opt = cpu_to_le16(RTW89_PCI_TXBD_OPT_LS); 1457 opt |= le16_encode_bits(upper_32_bits(dma), RTW89_PCI_TXBD_OPT_DMA_HI); 1458 txbd->opt = opt; 1459 txbd->length = cpu_to_le16(skb->len); 1460 txbd->dma = cpu_to_le32(tx_data->dma); 1461 skb_queue_tail(&rtwpci->h2c_queue, skb); 1462 1463 rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1); 1464 1465 return 0; 1466 } 1467 1468 static int rtw89_pci_txbd_submit(struct rtw89_dev *rtwdev, 1469 struct rtw89_pci_tx_ring *tx_ring, 1470 struct rtw89_pci_tx_bd_32 *txbd, 1471 struct rtw89_core_tx_request *tx_req) 1472 { 1473 struct rtw89_pci_tx_wd *txwd; 1474 __le16 opt; 1475 int ret; 1476 1477 /* FWCMD queue doesn't have wd pages. Instead, it submits the CMD 1478 * buffer with WD BODY only. So here we don't need to check the free 1479 * pages of the wd ring. 1480 */ 1481 if (tx_ring->txch == RTW89_TXCH_CH12) 1482 return rtw89_pci_fwcmd_submit(rtwdev, tx_ring, txbd, tx_req); 1483 1484 txwd = rtw89_pci_dequeue_txwd(tx_ring); 1485 if (!txwd) { 1486 rtw89_err(rtwdev, "no available TXWD\n"); 1487 ret = -ENOSPC; 1488 goto err; 1489 } 1490 1491 ret = rtw89_pci_txwd_submit(rtwdev, tx_ring, txwd, tx_req); 1492 if (ret) { 1493 rtw89_err(rtwdev, "failed to submit TXWD %d\n", txwd->seq); 1494 goto err_enqueue_wd; 1495 } 1496 1497 list_add_tail(&txwd->list, &tx_ring->busy_pages); 1498 1499 opt = cpu_to_le16(RTW89_PCI_TXBD_OPT_LS); 1500 opt |= le16_encode_bits(upper_32_bits(txwd->paddr), RTW89_PCI_TXBD_OPT_DMA_HI); 1501 txbd->opt = opt; 1502 txbd->length = cpu_to_le16(txwd->len); 1503 txbd->dma = cpu_to_le32(txwd->paddr); 1504 1505 rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1); 1506 1507 return 0; 1508 1509 err_enqueue_wd: 1510 rtw89_pci_enqueue_txwd(tx_ring, txwd); 1511 err: 1512 return ret; 1513 } 1514 1515 static int rtw89_pci_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req, 1516 u8 txch) 1517 { 1518 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1519 struct rtw89_pci_tx_ring *tx_ring; 1520 struct rtw89_pci_tx_bd_32 *txbd; 1521 u32 n_avail_txbd; 1522 int ret = 0; 1523 1524 /* check the tx type and dma channel for fw cmd queue */ 1525 if ((txch == RTW89_TXCH_CH12 || 1526 tx_req->tx_type == RTW89_CORE_TX_TYPE_FWCMD) && 1527 (txch != RTW89_TXCH_CH12 || 1528 tx_req->tx_type != RTW89_CORE_TX_TYPE_FWCMD)) { 1529 rtw89_err(rtwdev, "only fw cmd uses dma channel 12\n"); 1530 return -EINVAL; 1531 } 1532 1533 tx_ring = &rtwpci->tx_rings[txch]; 1534 spin_lock_bh(&rtwpci->trx_lock); 1535 1536 n_avail_txbd = rtw89_pci_get_avail_txbd_num(tx_ring); 1537 if (n_avail_txbd == 0) { 1538 rtw89_err(rtwdev, "no available TXBD\n"); 1539 ret = -ENOSPC; 1540 goto err_unlock; 1541 } 1542 1543 txbd = rtw89_pci_get_next_txbd(tx_ring); 1544 ret = rtw89_pci_txbd_submit(rtwdev, tx_ring, txbd, tx_req); 1545 if (ret) { 1546 rtw89_err(rtwdev, "failed to submit TXBD\n"); 1547 goto err_unlock; 1548 } 1549 1550 spin_unlock_bh(&rtwpci->trx_lock); 1551 return 0; 1552 1553 err_unlock: 1554 spin_unlock_bh(&rtwpci->trx_lock); 1555 return ret; 1556 } 1557 1558 static int rtw89_pci_ops_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req) 1559 { 1560 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; 1561 int ret; 1562 1563 ret = rtw89_pci_tx_write(rtwdev, tx_req, desc_info->ch_dma); 1564 if (ret) { 1565 rtw89_err(rtwdev, "failed to TX Queue %d\n", desc_info->ch_dma); 1566 return ret; 1567 } 1568 1569 return 0; 1570 } 1571 1572 const struct rtw89_pci_bd_ram rtw89_bd_ram_table_dual[RTW89_TXCH_NUM] = { 1573 [RTW89_TXCH_ACH0] = {.start_idx = 0, .max_num = 5, .min_num = 2}, 1574 [RTW89_TXCH_ACH1] = {.start_idx = 5, .max_num = 5, .min_num = 2}, 1575 [RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2}, 1576 [RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2}, 1577 [RTW89_TXCH_ACH4] = {.start_idx = 20, .max_num = 5, .min_num = 2}, 1578 [RTW89_TXCH_ACH5] = {.start_idx = 25, .max_num = 5, .min_num = 2}, 1579 [RTW89_TXCH_ACH6] = {.start_idx = 30, .max_num = 5, .min_num = 2}, 1580 [RTW89_TXCH_ACH7] = {.start_idx = 35, .max_num = 5, .min_num = 2}, 1581 [RTW89_TXCH_CH8] = {.start_idx = 40, .max_num = 5, .min_num = 1}, 1582 [RTW89_TXCH_CH9] = {.start_idx = 45, .max_num = 5, .min_num = 1}, 1583 [RTW89_TXCH_CH10] = {.start_idx = 50, .max_num = 5, .min_num = 1}, 1584 [RTW89_TXCH_CH11] = {.start_idx = 55, .max_num = 5, .min_num = 1}, 1585 [RTW89_TXCH_CH12] = {.start_idx = 60, .max_num = 4, .min_num = 1}, 1586 }; 1587 EXPORT_SYMBOL(rtw89_bd_ram_table_dual); 1588 1589 const struct rtw89_pci_bd_ram rtw89_bd_ram_table_single[RTW89_TXCH_NUM] = { 1590 [RTW89_TXCH_ACH0] = {.start_idx = 0, .max_num = 5, .min_num = 2}, 1591 [RTW89_TXCH_ACH1] = {.start_idx = 5, .max_num = 5, .min_num = 2}, 1592 [RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2}, 1593 [RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2}, 1594 [RTW89_TXCH_CH8] = {.start_idx = 20, .max_num = 4, .min_num = 1}, 1595 [RTW89_TXCH_CH9] = {.start_idx = 24, .max_num = 4, .min_num = 1}, 1596 [RTW89_TXCH_CH12] = {.start_idx = 28, .max_num = 4, .min_num = 1}, 1597 }; 1598 EXPORT_SYMBOL(rtw89_bd_ram_table_single); 1599 1600 static void rtw89_pci_init_wp_16sel(struct rtw89_dev *rtwdev) 1601 { 1602 const struct rtw89_pci_info *info = rtwdev->pci_info; 1603 u32 addr = info->wp_sel_addr; 1604 u32 val; 1605 int i; 1606 1607 if (!info->wp_sel_addr) 1608 return; 1609 1610 for (i = 0; i < 16; i += 4) { 1611 val = u32_encode_bits(i + 0, MASKBYTE0) | 1612 u32_encode_bits(i + 1, MASKBYTE1) | 1613 u32_encode_bits(i + 2, MASKBYTE2) | 1614 u32_encode_bits(i + 3, MASKBYTE3); 1615 rtw89_write32(rtwdev, addr + i, val); 1616 } 1617 } 1618 1619 static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev) 1620 { 1621 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1622 const struct rtw89_pci_info *info = rtwdev->pci_info; 1623 const struct rtw89_pci_bd_ram *bd_ram_table = *info->bd_ram_table; 1624 struct rtw89_pci_tx_ring *tx_ring; 1625 struct rtw89_pci_rx_ring *rx_ring; 1626 struct rtw89_pci_dma_ring *bd_ring; 1627 const struct rtw89_pci_bd_ram *bd_ram; 1628 u32 addr_num; 1629 u32 addr_idx; 1630 u32 addr_bdram; 1631 u32 addr_desa_l; 1632 u32 val32; 1633 int i; 1634 1635 for (i = 0; i < RTW89_TXCH_NUM; i++) { 1636 if (info->tx_dma_ch_mask & BIT(i)) 1637 continue; 1638 1639 tx_ring = &rtwpci->tx_rings[i]; 1640 bd_ring = &tx_ring->bd_ring; 1641 bd_ram = bd_ram_table ? &bd_ram_table[i] : NULL; 1642 addr_num = bd_ring->addr.num; 1643 addr_bdram = bd_ring->addr.bdram; 1644 addr_desa_l = bd_ring->addr.desa_l; 1645 bd_ring->wp = 0; 1646 bd_ring->rp = 0; 1647 1648 rtw89_write16(rtwdev, addr_num, bd_ring->len); 1649 if (addr_bdram && bd_ram) { 1650 val32 = FIELD_PREP(BDRAM_SIDX_MASK, bd_ram->start_idx) | 1651 FIELD_PREP(BDRAM_MAX_MASK, bd_ram->max_num) | 1652 FIELD_PREP(BDRAM_MIN_MASK, bd_ram->min_num); 1653 1654 rtw89_write32(rtwdev, addr_bdram, val32); 1655 } 1656 rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma); 1657 rtw89_write32(rtwdev, addr_desa_l + 4, upper_32_bits(bd_ring->dma)); 1658 } 1659 1660 for (i = 0; i < RTW89_RXCH_NUM; i++) { 1661 rx_ring = &rtwpci->rx_rings[i]; 1662 bd_ring = &rx_ring->bd_ring; 1663 addr_num = bd_ring->addr.num; 1664 addr_idx = bd_ring->addr.idx; 1665 addr_desa_l = bd_ring->addr.desa_l; 1666 if (info->rx_ring_eq_is_full) 1667 bd_ring->wp = bd_ring->len - 1; 1668 else 1669 bd_ring->wp = 0; 1670 bd_ring->rp = 0; 1671 rx_ring->diliver_skb = NULL; 1672 rx_ring->diliver_desc.ready = false; 1673 rx_ring->target_rx_tag = 0; 1674 1675 rtw89_write16(rtwdev, addr_num, bd_ring->len); 1676 rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma); 1677 rtw89_write32(rtwdev, addr_desa_l + 4, upper_32_bits(bd_ring->dma)); 1678 1679 if (info->rx_ring_eq_is_full) 1680 rtw89_write16(rtwdev, addr_idx, bd_ring->wp); 1681 } 1682 1683 rtw89_pci_init_wp_16sel(rtwdev); 1684 } 1685 1686 static void rtw89_pci_release_tx_ring(struct rtw89_dev *rtwdev, 1687 struct rtw89_pci_tx_ring *tx_ring) 1688 { 1689 rtw89_pci_release_busy_txwd(rtwdev, tx_ring); 1690 rtw89_pci_release_pending_txwd_skb(rtwdev, tx_ring); 1691 } 1692 1693 void rtw89_pci_ops_reset(struct rtw89_dev *rtwdev) 1694 { 1695 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1696 const struct rtw89_pci_info *info = rtwdev->pci_info; 1697 int txch; 1698 1699 rtw89_pci_reset_trx_rings(rtwdev); 1700 1701 spin_lock_bh(&rtwpci->trx_lock); 1702 for (txch = 0; txch < RTW89_TXCH_NUM; txch++) { 1703 if (info->tx_dma_ch_mask & BIT(txch)) 1704 continue; 1705 if (txch == RTW89_TXCH_CH12) { 1706 rtw89_pci_release_fwcmd(rtwdev, rtwpci, 1707 skb_queue_len(&rtwpci->h2c_queue), true); 1708 continue; 1709 } 1710 rtw89_pci_release_tx_ring(rtwdev, &rtwpci->tx_rings[txch]); 1711 } 1712 spin_unlock_bh(&rtwpci->trx_lock); 1713 } 1714 1715 static void rtw89_pci_enable_intr_lock(struct rtw89_dev *rtwdev) 1716 { 1717 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1718 unsigned long flags; 1719 1720 spin_lock_irqsave(&rtwpci->irq_lock, flags); 1721 rtwpci->running = true; 1722 rtw89_chip_enable_intr(rtwdev, rtwpci); 1723 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 1724 } 1725 1726 static void rtw89_pci_disable_intr_lock(struct rtw89_dev *rtwdev) 1727 { 1728 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1729 unsigned long flags; 1730 1731 spin_lock_irqsave(&rtwpci->irq_lock, flags); 1732 rtwpci->running = false; 1733 rtw89_chip_disable_intr(rtwdev, rtwpci); 1734 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 1735 } 1736 1737 static int rtw89_pci_ops_start(struct rtw89_dev *rtwdev) 1738 { 1739 rtw89_core_napi_start(rtwdev); 1740 rtw89_pci_enable_intr_lock(rtwdev); 1741 1742 return 0; 1743 } 1744 1745 static void rtw89_pci_ops_stop(struct rtw89_dev *rtwdev) 1746 { 1747 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1748 struct pci_dev *pdev = rtwpci->pdev; 1749 1750 rtw89_pci_disable_intr_lock(rtwdev); 1751 synchronize_irq(pdev->irq); 1752 rtw89_core_napi_stop(rtwdev); 1753 } 1754 1755 static void rtw89_pci_ops_pause(struct rtw89_dev *rtwdev, bool pause) 1756 { 1757 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1758 struct pci_dev *pdev = rtwpci->pdev; 1759 1760 if (pause) { 1761 rtw89_pci_disable_intr_lock(rtwdev); 1762 synchronize_irq(pdev->irq); 1763 if (test_bit(RTW89_FLAG_NAPI_RUNNING, rtwdev->flags)) 1764 napi_synchronize(&rtwdev->napi); 1765 } else { 1766 rtw89_pci_enable_intr_lock(rtwdev); 1767 rtw89_pci_tx_kick_off_pending(rtwdev); 1768 } 1769 } 1770 1771 static 1772 void rtw89_pci_switch_bd_idx_addr(struct rtw89_dev *rtwdev, bool low_power) 1773 { 1774 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1775 const struct rtw89_pci_info *info = rtwdev->pci_info; 1776 const struct rtw89_pci_bd_idx_addr *bd_idx_addr = info->bd_idx_addr_low_power; 1777 const struct rtw89_pci_ch_dma_addr_set *dma_addr_set = info->dma_addr_set; 1778 struct rtw89_pci_tx_ring *tx_ring; 1779 struct rtw89_pci_rx_ring *rx_ring; 1780 int i; 1781 1782 if (WARN(!bd_idx_addr, "only HCI with low power mode needs this\n")) 1783 return; 1784 1785 for (i = 0; i < RTW89_TXCH_NUM; i++) { 1786 tx_ring = &rtwpci->tx_rings[i]; 1787 tx_ring->bd_ring.addr.idx = low_power ? 1788 bd_idx_addr->tx_bd_addrs[i] : 1789 dma_addr_set->tx[i].idx; 1790 } 1791 1792 for (i = 0; i < RTW89_RXCH_NUM; i++) { 1793 rx_ring = &rtwpci->rx_rings[i]; 1794 rx_ring->bd_ring.addr.idx = low_power ? 1795 bd_idx_addr->rx_bd_addrs[i] : 1796 dma_addr_set->rx[i].idx; 1797 } 1798 } 1799 1800 static void rtw89_pci_ops_switch_mode(struct rtw89_dev *rtwdev, bool low_power) 1801 { 1802 enum rtw89_pci_intr_mask_cfg cfg; 1803 1804 WARN(!rtwdev->hci.paused, "HCI isn't paused\n"); 1805 1806 cfg = low_power ? RTW89_PCI_INTR_MASK_LOW_POWER : RTW89_PCI_INTR_MASK_NORMAL; 1807 rtw89_chip_config_intr_mask(rtwdev, cfg); 1808 rtw89_pci_switch_bd_idx_addr(rtwdev, low_power); 1809 } 1810 1811 static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data); 1812 1813 static u32 rtw89_pci_ops_read32_cmac(struct rtw89_dev *rtwdev, u32 addr) 1814 { 1815 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1816 #if defined(__linux__) 1817 u32 val = readl(rtwpci->mmap + addr); 1818 #elif defined(__FreeBSD__) 1819 u32 val; 1820 1821 val = bus_read_4((struct resource *)rtwpci->mmap, addr); 1822 rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "R32 (%#010x) -> %#010x\n", addr, val); 1823 #endif 1824 int count; 1825 1826 for (count = 0; ; count++) { 1827 if (val != RTW89_R32_DEAD) 1828 return val; 1829 if (count >= MAC_REG_POOL_COUNT) { 1830 rtw89_warn(rtwdev, "addr %#x = %#x\n", addr, val); 1831 return RTW89_R32_DEAD; 1832 } 1833 rtw89_pci_ops_write32(rtwdev, R_AX_CK_EN, B_AX_CMAC_ALLCKEN); 1834 #if defined(__linux__) 1835 val = readl(rtwpci->mmap + addr); 1836 #elif defined(__FreeBSD__) 1837 val = bus_read_4((struct resource *)rtwpci->mmap, addr); 1838 rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "R32 (%#010x) -> %#010x\n", addr, val); 1839 #endif 1840 } 1841 1842 return val; 1843 } 1844 1845 static u8 rtw89_pci_ops_read8(struct rtw89_dev *rtwdev, u32 addr) 1846 { 1847 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1848 u32 addr32, val32, shift; 1849 1850 if (!ACCESS_CMAC(addr)) 1851 #if defined(__linux__) 1852 return readb(rtwpci->mmap + addr); 1853 #elif defined(__FreeBSD__) 1854 { 1855 u8 val; 1856 1857 val = bus_read_1((struct resource *)rtwpci->mmap, addr); 1858 rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "R08 (%#010x) -> %#04x\n", addr, val); 1859 return (val); 1860 } 1861 #endif 1862 1863 addr32 = addr & ~0x3; 1864 shift = (addr & 0x3) * 8; 1865 val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32); 1866 return val32 >> shift; 1867 } 1868 1869 static u16 rtw89_pci_ops_read16(struct rtw89_dev *rtwdev, u32 addr) 1870 { 1871 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1872 u32 addr32, val32, shift; 1873 1874 if (!ACCESS_CMAC(addr)) 1875 #if defined(__linux__) 1876 return readw(rtwpci->mmap + addr); 1877 #elif defined(__FreeBSD__) 1878 { 1879 u16 val; 1880 1881 val = bus_read_2((struct resource *)rtwpci->mmap, addr); 1882 rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "R16 (%#010x) -> %#06x\n", addr, val); 1883 return (val); 1884 } 1885 #endif 1886 1887 addr32 = addr & ~0x3; 1888 shift = (addr & 0x3) * 8; 1889 val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32); 1890 return val32 >> shift; 1891 } 1892 1893 static u32 rtw89_pci_ops_read32(struct rtw89_dev *rtwdev, u32 addr) 1894 { 1895 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1896 1897 if (!ACCESS_CMAC(addr)) 1898 #if defined(__linux__) 1899 return readl(rtwpci->mmap + addr); 1900 #elif defined(__FreeBSD__) 1901 { 1902 u32 val; 1903 1904 val = bus_read_4((struct resource *)rtwpci->mmap, addr); 1905 rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "R32 (%#010x) -> %#010x\n", addr, val); 1906 return (val); 1907 } 1908 #endif 1909 1910 return rtw89_pci_ops_read32_cmac(rtwdev, addr); 1911 } 1912 1913 static void rtw89_pci_ops_write8(struct rtw89_dev *rtwdev, u32 addr, u8 data) 1914 { 1915 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1916 1917 #if defined(__linux__) 1918 writeb(data, rtwpci->mmap + addr); 1919 #elif defined(__FreeBSD__) 1920 rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "W08 (%#010x) <- %#04x\n", addr, data); 1921 return (bus_write_1((struct resource *)rtwpci->mmap, addr, data)); 1922 #endif 1923 } 1924 1925 static void rtw89_pci_ops_write16(struct rtw89_dev *rtwdev, u32 addr, u16 data) 1926 { 1927 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1928 1929 #if defined(__linux__) 1930 writew(data, rtwpci->mmap + addr); 1931 #elif defined(__FreeBSD__) 1932 rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "W16 (%#010x) <- %#06x\n", addr, data); 1933 return (bus_write_2((struct resource *)rtwpci->mmap, addr, data)); 1934 #endif 1935 } 1936 1937 static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data) 1938 { 1939 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1940 1941 #if defined(__linux__) 1942 writel(data, rtwpci->mmap + addr); 1943 #elif defined(__FreeBSD__) 1944 rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "W32 (%#010x) <- %#010x\n", addr, data); 1945 return (bus_write_4((struct resource *)rtwpci->mmap, addr, data)); 1946 #endif 1947 } 1948 1949 static void rtw89_pci_ctrl_dma_trx(struct rtw89_dev *rtwdev, bool enable) 1950 { 1951 const struct rtw89_pci_info *info = rtwdev->pci_info; 1952 1953 if (enable) 1954 rtw89_write32_set(rtwdev, info->init_cfg_reg, 1955 info->rxhci_en_bit | info->txhci_en_bit); 1956 else 1957 rtw89_write32_clr(rtwdev, info->init_cfg_reg, 1958 info->rxhci_en_bit | info->txhci_en_bit); 1959 } 1960 1961 static void rtw89_pci_ctrl_dma_io(struct rtw89_dev *rtwdev, bool enable) 1962 { 1963 const struct rtw89_pci_info *info = rtwdev->pci_info; 1964 const struct rtw89_reg_def *reg = &info->dma_io_stop; 1965 1966 if (enable) 1967 rtw89_write32_clr(rtwdev, reg->addr, reg->mask); 1968 else 1969 rtw89_write32_set(rtwdev, reg->addr, reg->mask); 1970 } 1971 1972 void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable) 1973 { 1974 rtw89_pci_ctrl_dma_io(rtwdev, enable); 1975 rtw89_pci_ctrl_dma_trx(rtwdev, enable); 1976 } 1977 1978 static int rtw89_pci_check_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 rw_bit) 1979 { 1980 u16 val; 1981 1982 rtw89_write8(rtwdev, R_AX_MDIO_CFG, addr & 0x1F); 1983 1984 val = rtw89_read16(rtwdev, R_AX_MDIO_CFG); 1985 switch (speed) { 1986 case PCIE_PHY_GEN1: 1987 if (addr < 0x20) 1988 val = u16_replace_bits(val, MDIO_PG0_G1, B_AX_MDIO_PHY_ADDR_MASK); 1989 else 1990 val = u16_replace_bits(val, MDIO_PG1_G1, B_AX_MDIO_PHY_ADDR_MASK); 1991 break; 1992 case PCIE_PHY_GEN2: 1993 if (addr < 0x20) 1994 val = u16_replace_bits(val, MDIO_PG0_G2, B_AX_MDIO_PHY_ADDR_MASK); 1995 else 1996 val = u16_replace_bits(val, MDIO_PG1_G2, B_AX_MDIO_PHY_ADDR_MASK); 1997 break; 1998 default: 1999 rtw89_err(rtwdev, "[ERR]Error Speed %d!\n", speed); 2000 return -EINVAL; 2001 } 2002 rtw89_write16(rtwdev, R_AX_MDIO_CFG, val); 2003 rtw89_write16_set(rtwdev, R_AX_MDIO_CFG, rw_bit); 2004 2005 return read_poll_timeout(rtw89_read16, val, !(val & rw_bit), 10, 2000, 2006 false, rtwdev, R_AX_MDIO_CFG); 2007 } 2008 2009 static int 2010 rtw89_read16_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 *val) 2011 { 2012 int ret; 2013 2014 ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_RFLAG); 2015 if (ret) { 2016 rtw89_err(rtwdev, "[ERR]MDIO R16 0x%X fail ret=%d!\n", addr, ret); 2017 return ret; 2018 } 2019 *val = rtw89_read16(rtwdev, R_AX_MDIO_RDATA); 2020 2021 return 0; 2022 } 2023 2024 static int 2025 rtw89_write16_mdio(struct rtw89_dev *rtwdev, u8 addr, u16 data, u8 speed) 2026 { 2027 int ret; 2028 2029 rtw89_write16(rtwdev, R_AX_MDIO_WDATA, data); 2030 ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_WFLAG); 2031 if (ret) { 2032 rtw89_err(rtwdev, "[ERR]MDIO W16 0x%X = %x fail ret=%d!\n", addr, data, ret); 2033 return ret; 2034 } 2035 2036 return 0; 2037 } 2038 2039 static int 2040 rtw89_write16_mdio_mask(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u16 data, u8 speed) 2041 { 2042 u32 shift; 2043 int ret; 2044 u16 val; 2045 2046 ret = rtw89_read16_mdio(rtwdev, addr, speed, &val); 2047 if (ret) 2048 return ret; 2049 2050 shift = __ffs(mask); 2051 val &= ~mask; 2052 val |= ((data << shift) & mask); 2053 2054 ret = rtw89_write16_mdio(rtwdev, addr, val, speed); 2055 if (ret) 2056 return ret; 2057 2058 return 0; 2059 } 2060 2061 static int rtw89_write16_mdio_set(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed) 2062 { 2063 int ret; 2064 u16 val; 2065 2066 ret = rtw89_read16_mdio(rtwdev, addr, speed, &val); 2067 if (ret) 2068 return ret; 2069 ret = rtw89_write16_mdio(rtwdev, addr, val | mask, speed); 2070 if (ret) 2071 return ret; 2072 2073 return 0; 2074 } 2075 2076 static int rtw89_write16_mdio_clr(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed) 2077 { 2078 int ret; 2079 u16 val; 2080 2081 ret = rtw89_read16_mdio(rtwdev, addr, speed, &val); 2082 if (ret) 2083 return ret; 2084 ret = rtw89_write16_mdio(rtwdev, addr, val & ~mask, speed); 2085 if (ret) 2086 return ret; 2087 2088 return 0; 2089 } 2090 2091 static int rtw89_dbi_write8(struct rtw89_dev *rtwdev, u16 addr, u8 data) 2092 { 2093 u16 addr_2lsb = addr & B_AX_DBI_2LSB; 2094 u16 write_addr; 2095 u8 flag; 2096 int ret; 2097 2098 write_addr = addr & B_AX_DBI_ADDR_MSK; 2099 write_addr |= u16_encode_bits(BIT(addr_2lsb), B_AX_DBI_WREN_MSK); 2100 rtw89_write8(rtwdev, R_AX_DBI_WDATA + addr_2lsb, data); 2101 rtw89_write16(rtwdev, R_AX_DBI_FLAG, write_addr); 2102 rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_WFLAG >> 16); 2103 2104 ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10, 2105 10 * RTW89_PCI_WR_RETRY_CNT, false, 2106 rtwdev, R_AX_DBI_FLAG + 2); 2107 if (ret) 2108 rtw89_err(rtwdev, "failed to write DBI register, addr=0x%X\n", 2109 addr); 2110 2111 return ret; 2112 } 2113 2114 static int rtw89_dbi_read8(struct rtw89_dev *rtwdev, u16 addr, u8 *value) 2115 { 2116 u16 read_addr = addr & B_AX_DBI_ADDR_MSK; 2117 u8 flag; 2118 int ret; 2119 2120 rtw89_write16(rtwdev, R_AX_DBI_FLAG, read_addr); 2121 rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_RFLAG >> 16); 2122 2123 ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10, 2124 10 * RTW89_PCI_WR_RETRY_CNT, false, 2125 rtwdev, R_AX_DBI_FLAG + 2); 2126 if (ret) { 2127 rtw89_err(rtwdev, "failed to read DBI register, addr=0x%X\n", 2128 addr); 2129 return ret; 2130 } 2131 2132 read_addr = R_AX_DBI_RDATA + (addr & 3); 2133 *value = rtw89_read8(rtwdev, read_addr); 2134 2135 return 0; 2136 } 2137 2138 static int rtw89_pci_write_config_byte(struct rtw89_dev *rtwdev, u16 addr, 2139 u8 data) 2140 { 2141 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2142 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2143 struct pci_dev *pdev = rtwpci->pdev; 2144 int ret; 2145 2146 ret = pci_write_config_byte(pdev, addr, data); 2147 if (!ret) 2148 return 0; 2149 2150 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) 2151 ret = rtw89_dbi_write8(rtwdev, addr, data); 2152 2153 return ret; 2154 } 2155 2156 static int rtw89_pci_read_config_byte(struct rtw89_dev *rtwdev, u16 addr, 2157 u8 *value) 2158 { 2159 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2160 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2161 struct pci_dev *pdev = rtwpci->pdev; 2162 int ret; 2163 2164 ret = pci_read_config_byte(pdev, addr, value); 2165 if (!ret) 2166 return 0; 2167 2168 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) 2169 ret = rtw89_dbi_read8(rtwdev, addr, value); 2170 2171 return ret; 2172 } 2173 2174 static int rtw89_pci_config_byte_set(struct rtw89_dev *rtwdev, u16 addr, 2175 u8 bit) 2176 { 2177 u8 value; 2178 int ret; 2179 2180 ret = rtw89_pci_read_config_byte(rtwdev, addr, &value); 2181 if (ret) 2182 return ret; 2183 2184 value |= bit; 2185 ret = rtw89_pci_write_config_byte(rtwdev, addr, value); 2186 2187 return ret; 2188 } 2189 2190 static int rtw89_pci_config_byte_clr(struct rtw89_dev *rtwdev, u16 addr, 2191 u8 bit) 2192 { 2193 u8 value; 2194 int ret; 2195 2196 ret = rtw89_pci_read_config_byte(rtwdev, addr, &value); 2197 if (ret) 2198 return ret; 2199 2200 value &= ~bit; 2201 ret = rtw89_pci_write_config_byte(rtwdev, addr, value); 2202 2203 return ret; 2204 } 2205 2206 static int 2207 __get_target(struct rtw89_dev *rtwdev, u16 *target, enum rtw89_pcie_phy phy_rate) 2208 { 2209 u16 val, tar; 2210 int ret; 2211 2212 /* Enable counter */ 2213 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val); 2214 if (ret) 2215 return ret; 2216 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN, 2217 phy_rate); 2218 if (ret) 2219 return ret; 2220 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val | B_AX_CLK_CALIB_EN, 2221 phy_rate); 2222 if (ret) 2223 return ret; 2224 2225 fsleep(300); 2226 2227 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &tar); 2228 if (ret) 2229 return ret; 2230 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN, 2231 phy_rate); 2232 if (ret) 2233 return ret; 2234 2235 tar = tar & 0x0FFF; 2236 if (tar == 0 || tar == 0x0FFF) { 2237 rtw89_err(rtwdev, "[ERR]Get target failed.\n"); 2238 return -EINVAL; 2239 } 2240 2241 *target = tar; 2242 2243 return 0; 2244 } 2245 2246 static int rtw89_pci_autok_x(struct rtw89_dev *rtwdev) 2247 { 2248 int ret; 2249 2250 if (!rtw89_is_rtl885xb(rtwdev)) 2251 return 0; 2252 2253 ret = rtw89_write16_mdio_mask(rtwdev, RAC_REG_FLD_0, BAC_AUTOK_N_MASK, 2254 PCIE_AUTOK_4, PCIE_PHY_GEN1); 2255 return ret; 2256 } 2257 2258 static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en) 2259 { 2260 enum rtw89_pcie_phy phy_rate; 2261 u16 val16, mgn_set, div_set, tar; 2262 u8 val8, bdr_ori; 2263 bool l1_flag = false; 2264 int ret = 0; 2265 2266 if (!rtw89_is_rtl885xb(rtwdev)) 2267 return 0; 2268 2269 ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_PHY_RATE, &val8); 2270 if (ret) { 2271 rtw89_err(rtwdev, "[ERR]pci config read %X\n", 2272 RTW89_PCIE_PHY_RATE); 2273 return ret; 2274 } 2275 2276 if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x1) { 2277 phy_rate = PCIE_PHY_GEN1; 2278 } else if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x2) { 2279 phy_rate = PCIE_PHY_GEN2; 2280 } else { 2281 rtw89_err(rtwdev, "[ERR]PCIe PHY rate %#x not support\n", val8); 2282 return -EOPNOTSUPP; 2283 } 2284 /* Disable L1BD */ 2285 ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, &bdr_ori); 2286 if (ret) { 2287 rtw89_err(rtwdev, "[ERR]pci config read %X\n", RTW89_PCIE_L1_CTRL); 2288 return ret; 2289 } 2290 2291 if (bdr_ori & RTW89_PCIE_BIT_L1) { 2292 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, 2293 bdr_ori & ~RTW89_PCIE_BIT_L1); 2294 if (ret) { 2295 rtw89_err(rtwdev, "[ERR]pci config write %X\n", 2296 RTW89_PCIE_L1_CTRL); 2297 return ret; 2298 } 2299 l1_flag = true; 2300 } 2301 2302 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16); 2303 if (ret) { 2304 rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1); 2305 goto end; 2306 } 2307 2308 if (val16 & B_AX_CALIB_EN) { 2309 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, 2310 val16 & ~B_AX_CALIB_EN, phy_rate); 2311 if (ret) { 2312 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 2313 goto end; 2314 } 2315 } 2316 2317 if (!autook_en) 2318 goto end; 2319 /* Set div */ 2320 ret = rtw89_write16_mdio_clr(rtwdev, RAC_CTRL_PPR_V1, B_AX_DIV, phy_rate); 2321 if (ret) { 2322 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 2323 goto end; 2324 } 2325 2326 /* Obtain div and margin */ 2327 ret = __get_target(rtwdev, &tar, phy_rate); 2328 if (ret) { 2329 rtw89_err(rtwdev, "[ERR]1st get target fail %d\n", ret); 2330 goto end; 2331 } 2332 2333 mgn_set = tar * INTF_INTGRA_HOSTREF_V1 / INTF_INTGRA_MINREF_V1 - tar; 2334 2335 if (mgn_set >= 128) { 2336 div_set = 0x0003; 2337 mgn_set = 0x000F; 2338 } else if (mgn_set >= 64) { 2339 div_set = 0x0003; 2340 mgn_set >>= 3; 2341 } else if (mgn_set >= 32) { 2342 div_set = 0x0002; 2343 mgn_set >>= 2; 2344 } else if (mgn_set >= 16) { 2345 div_set = 0x0001; 2346 mgn_set >>= 1; 2347 } else if (mgn_set == 0) { 2348 rtw89_err(rtwdev, "[ERR]cal mgn is 0,tar = %d\n", tar); 2349 goto end; 2350 } else { 2351 div_set = 0x0000; 2352 } 2353 2354 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16); 2355 if (ret) { 2356 rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1); 2357 goto end; 2358 } 2359 2360 val16 |= u16_encode_bits(div_set, B_AX_DIV); 2361 2362 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val16, phy_rate); 2363 if (ret) { 2364 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 2365 goto end; 2366 } 2367 2368 ret = __get_target(rtwdev, &tar, phy_rate); 2369 if (ret) { 2370 rtw89_err(rtwdev, "[ERR]2nd get target fail %d\n", ret); 2371 goto end; 2372 } 2373 2374 rtw89_debug(rtwdev, RTW89_DBG_HCI, "[TRACE]target = 0x%X, div = 0x%X, margin = 0x%X\n", 2375 tar, div_set, mgn_set); 2376 ret = rtw89_write16_mdio(rtwdev, RAC_SET_PPR_V1, 2377 (tar & 0x0FFF) | (mgn_set << 12), phy_rate); 2378 if (ret) { 2379 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_SET_PPR_V1); 2380 goto end; 2381 } 2382 2383 /* Enable function */ 2384 ret = rtw89_write16_mdio_set(rtwdev, RAC_CTRL_PPR_V1, B_AX_CALIB_EN, phy_rate); 2385 if (ret) { 2386 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 2387 goto end; 2388 } 2389 2390 /* CLK delay = 0 */ 2391 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL, 2392 PCIE_CLKDLY_HW_0); 2393 2394 end: 2395 /* Set L1BD to ori */ 2396 if (l1_flag) { 2397 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, 2398 bdr_ori); 2399 if (ret) { 2400 rtw89_err(rtwdev, "[ERR]pci config write %X\n", 2401 RTW89_PCIE_L1_CTRL); 2402 return ret; 2403 } 2404 } 2405 2406 return ret; 2407 } 2408 2409 static int rtw89_pci_deglitch_setting(struct rtw89_dev *rtwdev) 2410 { 2411 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2412 int ret; 2413 2414 if (chip_id == RTL8852A) { 2415 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH, 2416 PCIE_PHY_GEN1); 2417 if (ret) 2418 return ret; 2419 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH, 2420 PCIE_PHY_GEN2); 2421 if (ret) 2422 return ret; 2423 } else if (chip_id == RTL8852C) { 2424 rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA24 * 2, 2425 B_AX_DEGLITCH); 2426 rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA24 * 2, 2427 B_AX_DEGLITCH); 2428 } 2429 2430 return 0; 2431 } 2432 2433 static void rtw89_pci_disable_eq(struct rtw89_dev *rtwdev) 2434 { 2435 u16 g1_oobs, g2_oobs; 2436 u32 backup_aspm; 2437 u32 phy_offset; 2438 u16 oobs_val; 2439 int ret; 2440 2441 if (rtwdev->chip->chip_id != RTL8852C) 2442 return; 2443 2444 g1_oobs = rtw89_read16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G1 + 2445 RAC_ANA09 * RAC_MULT, BAC_OOBS_SEL); 2446 g2_oobs = rtw89_read16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G2 + 2447 RAC_ANA09 * RAC_MULT, BAC_OOBS_SEL); 2448 if (g1_oobs && g2_oobs) 2449 return; 2450 2451 backup_aspm = rtw89_read32(rtwdev, R_AX_PCIE_MIX_CFG_V1); 2452 rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK); 2453 2454 ret = rtw89_pci_get_phy_offset_by_link_speed(rtwdev, &phy_offset); 2455 if (ret) 2456 goto out; 2457 2458 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0D * RAC_MULT, BAC_RX_TEST_EN); 2459 rtw89_write16(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT, ADDR_SEL_PINOUT_DIS_VAL); 2460 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT, B_PCIE_BIT_RD_SEL); 2461 2462 oobs_val = rtw89_read16_mask(rtwdev, phy_offset + RAC_ANA1F * RAC_MULT, 2463 OOBS_LEVEL_MASK); 2464 2465 rtw89_write16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA03 * RAC_MULT, 2466 OOBS_SEN_MASK, oobs_val); 2467 rtw89_write16_set(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA09 * RAC_MULT, 2468 BAC_OOBS_SEL); 2469 2470 rtw89_write16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA03 * RAC_MULT, 2471 OOBS_SEN_MASK, oobs_val); 2472 rtw89_write16_set(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA09 * RAC_MULT, 2473 BAC_OOBS_SEL); 2474 2475 out: 2476 rtw89_write32(rtwdev, R_AX_PCIE_MIX_CFG_V1, backup_aspm); 2477 } 2478 2479 static void rtw89_pci_ber(struct rtw89_dev *rtwdev) 2480 { 2481 u32 phy_offset; 2482 2483 if (!test_bit(RTW89_QUIRK_PCI_BER, rtwdev->quirks)) 2484 return; 2485 2486 phy_offset = R_RAC_DIRECT_OFFSET_G1; 2487 rtw89_write16(rtwdev, phy_offset + RAC_ANA1E * RAC_MULT, RAC_ANA1E_G1_VAL); 2488 rtw89_write16(rtwdev, phy_offset + RAC_ANA2E * RAC_MULT, RAC_ANA2E_VAL); 2489 2490 phy_offset = R_RAC_DIRECT_OFFSET_G2; 2491 rtw89_write16(rtwdev, phy_offset + RAC_ANA1E * RAC_MULT, RAC_ANA1E_G2_VAL); 2492 rtw89_write16(rtwdev, phy_offset + RAC_ANA2E * RAC_MULT, RAC_ANA2E_VAL); 2493 } 2494 2495 static void rtw89_pci_rxdma_prefth(struct rtw89_dev *rtwdev) 2496 { 2497 if (rtwdev->chip->chip_id != RTL8852A) 2498 return; 2499 2500 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_DIS_RXDMA_PRE); 2501 } 2502 2503 static void rtw89_pci_l1off_pwroff(struct rtw89_dev *rtwdev) 2504 { 2505 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2506 2507 if (chip_id != RTL8852A && !rtw89_is_rtl885xb(rtwdev)) 2508 return; 2509 2510 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL, B_AX_L1OFF_PWR_OFF_EN); 2511 } 2512 2513 static u32 rtw89_pci_l2_rxen_lat(struct rtw89_dev *rtwdev) 2514 { 2515 int ret; 2516 2517 if (rtwdev->chip->chip_id != RTL8852A) 2518 return 0; 2519 2520 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN, 2521 PCIE_PHY_GEN1); 2522 if (ret) 2523 return ret; 2524 2525 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN, 2526 PCIE_PHY_GEN2); 2527 if (ret) 2528 return ret; 2529 2530 return 0; 2531 } 2532 2533 static void rtw89_pci_aphy_pwrcut(struct rtw89_dev *rtwdev) 2534 { 2535 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2536 2537 if (chip_id != RTL8852A && !rtw89_is_rtl885xb(rtwdev)) 2538 return; 2539 2540 rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_PSUS_OFF_CAPC_EN); 2541 } 2542 2543 static void rtw89_pci_hci_ldo(struct rtw89_dev *rtwdev) 2544 { 2545 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2546 2547 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 2548 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, 2549 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 2550 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, 2551 B_AX_PCIE_DIS_WLSUS_AFT_PDN); 2552 } else if (rtwdev->chip->chip_id == RTL8852C) { 2553 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, 2554 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 2555 } 2556 } 2557 2558 static int rtw89_pci_dphy_delay(struct rtw89_dev *rtwdev) 2559 { 2560 if (!rtw89_is_rtl885xb(rtwdev)) 2561 return 0; 2562 2563 return rtw89_write16_mdio_mask(rtwdev, RAC_REG_REV2, BAC_CMU_EN_DLY_MASK, 2564 PCIE_DPHY_DLY_25US, PCIE_PHY_GEN1); 2565 } 2566 2567 static void rtw89_pci_power_wake(struct rtw89_dev *rtwdev, bool pwr_up) 2568 { 2569 if (pwr_up) 2570 rtw89_write32_set(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL); 2571 else 2572 rtw89_write32_clr(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL); 2573 } 2574 2575 static void rtw89_pci_autoload_hang(struct rtw89_dev *rtwdev) 2576 { 2577 if (rtwdev->chip->chip_id != RTL8852C) 2578 return; 2579 2580 rtw89_write32_set(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3); 2581 rtw89_write32_clr(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3); 2582 } 2583 2584 static void rtw89_pci_l12_vmain(struct rtw89_dev *rtwdev) 2585 { 2586 if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV)) 2587 return; 2588 2589 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_FORCE_PWR_NGAT); 2590 } 2591 2592 static void rtw89_pci_gen2_force_ib(struct rtw89_dev *rtwdev) 2593 { 2594 if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV)) 2595 return; 2596 2597 rtw89_write32_set(rtwdev, R_AX_PMC_DBG_CTRL2, 2598 B_AX_SYSON_DIS_PMCR_AX_WRMSK); 2599 rtw89_write32_set(rtwdev, R_AX_HCI_BG_CTRL, B_AX_BG_CLR_ASYNC_M3); 2600 rtw89_write32_clr(rtwdev, R_AX_PMC_DBG_CTRL2, 2601 B_AX_SYSON_DIS_PMCR_AX_WRMSK); 2602 } 2603 2604 static void rtw89_pci_l1_ent_lat(struct rtw89_dev *rtwdev) 2605 { 2606 if (rtwdev->chip->chip_id != RTL8852C) 2607 return; 2608 2609 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_SEL_REQ_ENTR_L1); 2610 } 2611 2612 static void rtw89_pci_wd_exit_l1(struct rtw89_dev *rtwdev) 2613 { 2614 if (rtwdev->chip->chip_id != RTL8852C) 2615 return; 2616 2617 rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_DMAC0_EXIT_L1_EN); 2618 } 2619 2620 static void rtw89_pci_set_sic(struct rtw89_dev *rtwdev) 2621 { 2622 if (rtwdev->chip->chip_id == RTL8852C) 2623 return; 2624 2625 rtw89_write32_clr(rtwdev, R_AX_PCIE_EXP_CTRL, 2626 B_AX_SIC_EN_FORCE_CLKREQ); 2627 } 2628 2629 static void rtw89_pci_set_lbc(struct rtw89_dev *rtwdev) 2630 { 2631 const struct rtw89_pci_info *info = rtwdev->pci_info; 2632 u32 lbc; 2633 2634 if (rtwdev->chip->chip_id == RTL8852C) 2635 return; 2636 2637 lbc = rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG); 2638 if (info->lbc_en == MAC_AX_PCIE_ENABLE) { 2639 lbc = u32_replace_bits(lbc, info->lbc_tmr, B_AX_LBC_TIMER); 2640 lbc |= B_AX_LBC_FLAG | B_AX_LBC_EN; 2641 rtw89_write32(rtwdev, R_AX_LBC_WATCHDOG, lbc); 2642 } else { 2643 lbc &= ~B_AX_LBC_EN; 2644 } 2645 rtw89_write32_set(rtwdev, R_AX_LBC_WATCHDOG, lbc); 2646 } 2647 2648 static void rtw89_pci_set_io_rcy(struct rtw89_dev *rtwdev) 2649 { 2650 const struct rtw89_pci_info *info = rtwdev->pci_info; 2651 u32 val32; 2652 2653 if (rtwdev->chip->chip_id != RTL8852C) 2654 return; 2655 2656 if (info->io_rcy_en == MAC_AX_PCIE_ENABLE) { 2657 val32 = FIELD_PREP(B_AX_PCIE_WDT_TIMER_M1_MASK, 2658 info->io_rcy_tmr); 2659 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M1, val32); 2660 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M2, val32); 2661 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_E0, val32); 2662 2663 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1); 2664 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2); 2665 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0); 2666 } else { 2667 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1); 2668 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2); 2669 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0); 2670 } 2671 2672 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_S1, B_AX_PCIE_IO_RCY_WDT_MODE_S1); 2673 } 2674 2675 static void rtw89_pci_set_dbg(struct rtw89_dev *rtwdev) 2676 { 2677 if (rtwdev->chip->chip_id == RTL8852C) 2678 return; 2679 2680 rtw89_write32_set(rtwdev, R_AX_PCIE_DBG_CTRL, 2681 B_AX_ASFF_FULL_NO_STK | B_AX_EN_STUCK_DBG); 2682 2683 if (rtwdev->chip->chip_id == RTL8852A) 2684 rtw89_write32_set(rtwdev, R_AX_PCIE_EXP_CTRL, 2685 B_AX_EN_CHKDSC_NO_RX_STUCK); 2686 } 2687 2688 static void rtw89_pci_set_keep_reg(struct rtw89_dev *rtwdev) 2689 { 2690 if (rtwdev->chip->chip_id == RTL8852C) 2691 return; 2692 2693 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, 2694 B_AX_PCIE_TXRST_KEEP_REG | B_AX_PCIE_RXRST_KEEP_REG); 2695 } 2696 2697 static void rtw89_pci_clr_idx_all_ax(struct rtw89_dev *rtwdev) 2698 { 2699 const struct rtw89_pci_info *info = rtwdev->pci_info; 2700 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2701 u32 val = B_AX_CLR_ACH0_IDX | B_AX_CLR_ACH1_IDX | B_AX_CLR_ACH2_IDX | 2702 B_AX_CLR_ACH3_IDX | B_AX_CLR_CH8_IDX | B_AX_CLR_CH9_IDX | 2703 B_AX_CLR_CH12_IDX; 2704 u32 rxbd_rwptr_clr = info->rxbd_rwptr_clr_reg; 2705 u32 txbd_rwptr_clr2 = info->txbd_rwptr_clr2_reg; 2706 2707 if (chip_id == RTL8852A || chip_id == RTL8852C) 2708 val |= B_AX_CLR_ACH4_IDX | B_AX_CLR_ACH5_IDX | 2709 B_AX_CLR_ACH6_IDX | B_AX_CLR_ACH7_IDX; 2710 /* clear DMA indexes */ 2711 rtw89_write32_set(rtwdev, R_AX_TXBD_RWPTR_CLR1, val); 2712 if (chip_id == RTL8852A || chip_id == RTL8852C) 2713 rtw89_write32_set(rtwdev, txbd_rwptr_clr2, 2714 B_AX_CLR_CH10_IDX | B_AX_CLR_CH11_IDX); 2715 rtw89_write32_set(rtwdev, rxbd_rwptr_clr, 2716 B_AX_CLR_RXQ_IDX | B_AX_CLR_RPQ_IDX); 2717 } 2718 2719 static int rtw89_pci_poll_txdma_ch_idle_ax(struct rtw89_dev *rtwdev) 2720 { 2721 const struct rtw89_pci_info *info = rtwdev->pci_info; 2722 u32 ret, check, dma_busy; 2723 u32 dma_busy1 = info->dma_busy1.addr; 2724 u32 dma_busy2 = info->dma_busy2_reg; 2725 2726 check = info->dma_busy1.mask; 2727 2728 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, 2729 10, 100, false, rtwdev, dma_busy1); 2730 if (ret) 2731 return ret; 2732 2733 if (!dma_busy2) 2734 return 0; 2735 2736 check = B_AX_CH10_BUSY | B_AX_CH11_BUSY; 2737 2738 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, 2739 10, 100, false, rtwdev, dma_busy2); 2740 if (ret) 2741 return ret; 2742 2743 return 0; 2744 } 2745 2746 static int rtw89_pci_poll_rxdma_ch_idle_ax(struct rtw89_dev *rtwdev) 2747 { 2748 const struct rtw89_pci_info *info = rtwdev->pci_info; 2749 u32 ret, check, dma_busy; 2750 u32 dma_busy3 = info->dma_busy3_reg; 2751 2752 check = B_AX_RXQ_BUSY | B_AX_RPQ_BUSY; 2753 2754 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, 2755 10, 100, false, rtwdev, dma_busy3); 2756 if (ret) 2757 return ret; 2758 2759 return 0; 2760 } 2761 2762 static int rtw89_pci_poll_dma_all_idle(struct rtw89_dev *rtwdev) 2763 { 2764 u32 ret; 2765 2766 ret = rtw89_pci_poll_txdma_ch_idle_ax(rtwdev); 2767 if (ret) { 2768 rtw89_err(rtwdev, "txdma ch busy\n"); 2769 return ret; 2770 } 2771 2772 ret = rtw89_pci_poll_rxdma_ch_idle_ax(rtwdev); 2773 if (ret) { 2774 rtw89_err(rtwdev, "rxdma ch busy\n"); 2775 return ret; 2776 } 2777 2778 return 0; 2779 } 2780 2781 static int rtw89_pci_mode_op(struct rtw89_dev *rtwdev) 2782 { 2783 const struct rtw89_pci_info *info = rtwdev->pci_info; 2784 enum mac_ax_bd_trunc_mode txbd_trunc_mode = info->txbd_trunc_mode; 2785 enum mac_ax_bd_trunc_mode rxbd_trunc_mode = info->rxbd_trunc_mode; 2786 enum mac_ax_rxbd_mode rxbd_mode = info->rxbd_mode; 2787 enum mac_ax_tag_mode tag_mode = info->tag_mode; 2788 enum mac_ax_wd_dma_intvl wd_dma_idle_intvl = info->wd_dma_idle_intvl; 2789 enum mac_ax_wd_dma_intvl wd_dma_act_intvl = info->wd_dma_act_intvl; 2790 enum mac_ax_tx_burst tx_burst = info->tx_burst; 2791 enum mac_ax_rx_burst rx_burst = info->rx_burst; 2792 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2793 u8 cv = rtwdev->hal.cv; 2794 u32 val32; 2795 2796 if (txbd_trunc_mode == MAC_AX_BD_TRUNC) { 2797 if (chip_id == RTL8852A && cv == CHIP_CBV) 2798 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE); 2799 } else if (txbd_trunc_mode == MAC_AX_BD_NORM) { 2800 if (chip_id == RTL8852A || chip_id == RTL8852B) 2801 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE); 2802 } 2803 2804 if (rxbd_trunc_mode == MAC_AX_BD_TRUNC) { 2805 if (chip_id == RTL8852A && cv == CHIP_CBV) 2806 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE); 2807 } else if (rxbd_trunc_mode == MAC_AX_BD_NORM) { 2808 if (chip_id == RTL8852A || chip_id == RTL8852B) 2809 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE); 2810 } 2811 2812 if (rxbd_mode == MAC_AX_RXBD_PKT) { 2813 rtw89_write32_clr(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit); 2814 } else if (rxbd_mode == MAC_AX_RXBD_SEP) { 2815 rtw89_write32_set(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit); 2816 2817 if (chip_id == RTL8852A || chip_id == RTL8852B) 2818 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, 2819 B_AX_PCIE_RX_APPLEN_MASK, 0); 2820 } 2821 2822 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 2823 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_TXDMA_MASK, tx_burst); 2824 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_RXDMA_MASK, rx_burst); 2825 } else if (chip_id == RTL8852C) { 2826 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_TXDMA_MASK, tx_burst); 2827 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_RXDMA_MASK, rx_burst); 2828 } 2829 2830 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 2831 if (tag_mode == MAC_AX_TAG_SGL) { 2832 val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) & 2833 ~B_AX_LATENCY_CONTROL; 2834 rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 2835 } else if (tag_mode == MAC_AX_TAG_MULTI) { 2836 val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) | 2837 B_AX_LATENCY_CONTROL; 2838 rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 2839 } 2840 } 2841 2842 rtw89_write32_mask(rtwdev, info->exp_ctrl_reg, info->max_tag_num_mask, 2843 info->multi_tag_num); 2844 2845 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 2846 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_IDLE, 2847 wd_dma_idle_intvl); 2848 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_ACT, 2849 wd_dma_act_intvl); 2850 } else if (chip_id == RTL8852C) { 2851 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_IDLE_V1_MASK, 2852 wd_dma_idle_intvl); 2853 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_ACT_V1_MASK, 2854 wd_dma_act_intvl); 2855 } 2856 2857 if (txbd_trunc_mode == MAC_AX_BD_TRUNC) { 2858 rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, 2859 B_AX_HOST_ADDR_INFO_8B_SEL); 2860 rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); 2861 } else if (txbd_trunc_mode == MAC_AX_BD_NORM) { 2862 rtw89_write32_clr(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, 2863 B_AX_HOST_ADDR_INFO_8B_SEL); 2864 rtw89_write32_set(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); 2865 } 2866 2867 return 0; 2868 } 2869 2870 static int rtw89_pci_ops_deinit(struct rtw89_dev *rtwdev) 2871 { 2872 const struct rtw89_pci_info *info = rtwdev->pci_info; 2873 2874 if (rtwdev->chip->chip_id == RTL8852A) { 2875 /* ltr sw trigger */ 2876 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_IDLE); 2877 } 2878 info->ltr_set(rtwdev, false); 2879 rtw89_pci_ctrl_dma_all(rtwdev, false); 2880 rtw89_pci_clr_idx_all(rtwdev); 2881 2882 return 0; 2883 } 2884 2885 static int rtw89_pci_ops_mac_pre_init_ax(struct rtw89_dev *rtwdev) 2886 { 2887 const struct rtw89_pci_info *info = rtwdev->pci_info; 2888 int ret; 2889 2890 rtw89_pci_ber(rtwdev); 2891 rtw89_pci_rxdma_prefth(rtwdev); 2892 rtw89_pci_l1off_pwroff(rtwdev); 2893 rtw89_pci_deglitch_setting(rtwdev); 2894 ret = rtw89_pci_l2_rxen_lat(rtwdev); 2895 if (ret) { 2896 rtw89_err(rtwdev, "[ERR] pcie l2 rxen lat %d\n", ret); 2897 return ret; 2898 } 2899 2900 rtw89_pci_aphy_pwrcut(rtwdev); 2901 rtw89_pci_hci_ldo(rtwdev); 2902 rtw89_pci_dphy_delay(rtwdev); 2903 2904 ret = rtw89_pci_autok_x(rtwdev); 2905 if (ret) { 2906 rtw89_err(rtwdev, "[ERR] pcie autok_x fail %d\n", ret); 2907 return ret; 2908 } 2909 2910 ret = rtw89_pci_auto_refclk_cal(rtwdev, false); 2911 if (ret) { 2912 rtw89_err(rtwdev, "[ERR] pcie autok fail %d\n", ret); 2913 return ret; 2914 } 2915 2916 rtw89_pci_power_wake(rtwdev, true); 2917 rtw89_pci_autoload_hang(rtwdev); 2918 rtw89_pci_l12_vmain(rtwdev); 2919 rtw89_pci_gen2_force_ib(rtwdev); 2920 rtw89_pci_l1_ent_lat(rtwdev); 2921 rtw89_pci_wd_exit_l1(rtwdev); 2922 rtw89_pci_set_sic(rtwdev); 2923 rtw89_pci_set_lbc(rtwdev); 2924 rtw89_pci_set_io_rcy(rtwdev); 2925 rtw89_pci_set_dbg(rtwdev); 2926 rtw89_pci_set_keep_reg(rtwdev); 2927 2928 rtw89_write32_set(rtwdev, info->dma_stop1.addr, B_AX_STOP_WPDMA); 2929 2930 /* stop DMA activities */ 2931 rtw89_pci_ctrl_dma_all(rtwdev, false); 2932 2933 ret = rtw89_pci_poll_dma_all_idle(rtwdev); 2934 if (ret) { 2935 rtw89_err(rtwdev, "[ERR] poll pcie dma all idle\n"); 2936 return ret; 2937 } 2938 2939 rtw89_pci_clr_idx_all(rtwdev); 2940 rtw89_pci_mode_op(rtwdev); 2941 2942 /* fill TRX BD indexes */ 2943 rtw89_pci_ops_reset(rtwdev); 2944 2945 ret = rtw89_pci_rst_bdram_ax(rtwdev); 2946 if (ret) { 2947 rtw89_warn(rtwdev, "reset bdram busy\n"); 2948 return ret; 2949 } 2950 2951 /* disable all channels except to FW CMD channel to download firmware */ 2952 rtw89_pci_ctrl_txdma_ch_ax(rtwdev, false); 2953 rtw89_pci_ctrl_txdma_fw_ch_ax(rtwdev, true); 2954 2955 /* start DMA activities */ 2956 rtw89_pci_ctrl_dma_all(rtwdev, true); 2957 2958 return 0; 2959 } 2960 2961 int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev, bool en) 2962 { 2963 u32 val; 2964 2965 if (!en) 2966 return 0; 2967 2968 val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0); 2969 if (rtw89_pci_ltr_is_err_reg_val(val)) 2970 return -EINVAL; 2971 val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1); 2972 if (rtw89_pci_ltr_is_err_reg_val(val)) 2973 return -EINVAL; 2974 val = rtw89_read32(rtwdev, R_AX_LTR_IDLE_LATENCY); 2975 if (rtw89_pci_ltr_is_err_reg_val(val)) 2976 return -EINVAL; 2977 val = rtw89_read32(rtwdev, R_AX_LTR_ACTIVE_LATENCY); 2978 if (rtw89_pci_ltr_is_err_reg_val(val)) 2979 return -EINVAL; 2980 2981 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_HW_EN | B_AX_LTR_EN | 2982 B_AX_LTR_WD_NOEMP_CHK); 2983 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_SPACE_IDX_MASK, 2984 PCI_LTR_SPC_500US); 2985 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK, 2986 PCI_LTR_IDLE_TIMER_3_2MS); 2987 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28); 2988 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28); 2989 rtw89_write32(rtwdev, R_AX_LTR_IDLE_LATENCY, 0x90039003); 2990 rtw89_write32(rtwdev, R_AX_LTR_ACTIVE_LATENCY, 0x880b880b); 2991 2992 return 0; 2993 } 2994 EXPORT_SYMBOL(rtw89_pci_ltr_set); 2995 2996 int rtw89_pci_ltr_set_v1(struct rtw89_dev *rtwdev, bool en) 2997 { 2998 u32 dec_ctrl; 2999 u32 val32; 3000 3001 val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0); 3002 if (rtw89_pci_ltr_is_err_reg_val(val32)) 3003 return -EINVAL; 3004 val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1); 3005 if (rtw89_pci_ltr_is_err_reg_val(val32)) 3006 return -EINVAL; 3007 dec_ctrl = rtw89_read32(rtwdev, R_AX_LTR_DEC_CTRL); 3008 if (rtw89_pci_ltr_is_err_reg_val(dec_ctrl)) 3009 return -EINVAL; 3010 val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX3); 3011 if (rtw89_pci_ltr_is_err_reg_val(val32)) 3012 return -EINVAL; 3013 val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX0); 3014 if (rtw89_pci_ltr_is_err_reg_val(val32)) 3015 return -EINVAL; 3016 3017 if (!en) { 3018 dec_ctrl &= ~(LTR_EN_BITS | B_AX_LTR_IDX_DRV_MASK | B_AX_LTR_HW_DEC_EN); 3019 dec_ctrl |= FIELD_PREP(B_AX_LTR_IDX_DRV_MASK, PCIE_LTR_IDX_IDLE) | 3020 B_AX_LTR_REQ_DRV; 3021 } else { 3022 dec_ctrl |= B_AX_LTR_HW_DEC_EN; 3023 } 3024 3025 dec_ctrl &= ~B_AX_LTR_SPACE_IDX_V1_MASK; 3026 dec_ctrl |= FIELD_PREP(B_AX_LTR_SPACE_IDX_V1_MASK, PCI_LTR_SPC_500US); 3027 3028 if (en) 3029 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, 3030 B_AX_LTR_WD_NOEMP_CHK_V1 | B_AX_LTR_HW_EN); 3031 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK, 3032 PCI_LTR_IDLE_TIMER_3_2MS); 3033 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28); 3034 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28); 3035 rtw89_write32(rtwdev, R_AX_LTR_DEC_CTRL, dec_ctrl); 3036 rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX3, 0x90039003); 3037 rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX0, 0x880b880b); 3038 3039 return 0; 3040 } 3041 EXPORT_SYMBOL(rtw89_pci_ltr_set_v1); 3042 3043 static int rtw89_pci_ops_mac_post_init_ax(struct rtw89_dev *rtwdev) 3044 { 3045 const struct rtw89_pci_info *info = rtwdev->pci_info; 3046 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 3047 int ret; 3048 3049 ret = info->ltr_set(rtwdev, true); 3050 if (ret) { 3051 rtw89_err(rtwdev, "pci ltr set fail\n"); 3052 return ret; 3053 } 3054 if (chip_id == RTL8852A) { 3055 /* ltr sw trigger */ 3056 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_ACT); 3057 } 3058 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 3059 /* ADDR info 8-byte mode */ 3060 rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, 3061 B_AX_HOST_ADDR_INFO_8B_SEL); 3062 rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); 3063 } 3064 3065 /* enable DMA for all queues */ 3066 rtw89_pci_ctrl_txdma_ch_ax(rtwdev, true); 3067 3068 /* Release PCI IO */ 3069 rtw89_write32_clr(rtwdev, info->dma_stop1.addr, 3070 B_AX_STOP_WPDMA | B_AX_STOP_PCIEIO); 3071 3072 return 0; 3073 } 3074 3075 static int rtw89_pci_claim_device(struct rtw89_dev *rtwdev, 3076 struct pci_dev *pdev) 3077 { 3078 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3079 int ret; 3080 3081 ret = pci_enable_device(pdev); 3082 if (ret) { 3083 rtw89_err(rtwdev, "failed to enable pci device\n"); 3084 return ret; 3085 } 3086 3087 pci_set_master(pdev); 3088 pci_set_drvdata(pdev, rtwdev->hw); 3089 3090 rtwpci->pdev = pdev; 3091 3092 return 0; 3093 } 3094 3095 static void rtw89_pci_declaim_device(struct rtw89_dev *rtwdev, 3096 struct pci_dev *pdev) 3097 { 3098 pci_disable_device(pdev); 3099 } 3100 3101 static void rtw89_pci_cfg_dac(struct rtw89_dev *rtwdev) 3102 { 3103 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3104 const struct rtw89_chip_info *chip = rtwdev->chip; 3105 3106 if (!rtwpci->enable_dac) 3107 return; 3108 3109 switch (chip->chip_id) { 3110 case RTL8852A: 3111 case RTL8852B: 3112 case RTL8851B: 3113 case RTL8852BT: 3114 break; 3115 default: 3116 return; 3117 } 3118 3119 rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_L1_CTRL, RTW89_PCIE_BIT_EN_64BITS); 3120 } 3121 3122 static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev, 3123 struct pci_dev *pdev) 3124 { 3125 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3126 unsigned long resource_len; 3127 u8 bar_id = 2; 3128 int ret; 3129 3130 ret = pci_request_regions(pdev, KBUILD_MODNAME); 3131 if (ret) { 3132 rtw89_err(rtwdev, "failed to request pci regions\n"); 3133 goto err; 3134 } 3135 3136 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36)); 3137 if (!ret) { 3138 rtwpci->enable_dac = true; 3139 rtw89_pci_cfg_dac(rtwdev); 3140 } else { 3141 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 3142 if (ret) { 3143 rtw89_err(rtwdev, 3144 "failed to set dma and consistent mask to 32/36-bit\n"); 3145 goto err_release_regions; 3146 } 3147 } 3148 3149 #if defined(__FreeBSD__) 3150 linuxkpi_pcim_want_to_use_bus_functions(pdev); 3151 #endif 3152 resource_len = pci_resource_len(pdev, bar_id); 3153 rtwpci->mmap = pci_iomap(pdev, bar_id, resource_len); 3154 if (!rtwpci->mmap) { 3155 rtw89_err(rtwdev, "failed to map pci io\n"); 3156 ret = -EIO; 3157 goto err_release_regions; 3158 } 3159 3160 return 0; 3161 3162 err_release_regions: 3163 pci_release_regions(pdev); 3164 err: 3165 return ret; 3166 } 3167 3168 static void rtw89_pci_clear_mapping(struct rtw89_dev *rtwdev, 3169 struct pci_dev *pdev) 3170 { 3171 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3172 3173 if (rtwpci->mmap) { 3174 pci_iounmap(pdev, rtwpci->mmap); 3175 pci_release_regions(pdev); 3176 } 3177 } 3178 3179 static void rtw89_pci_free_tx_wd_ring(struct rtw89_dev *rtwdev, 3180 struct pci_dev *pdev, 3181 struct rtw89_pci_tx_ring *tx_ring) 3182 { 3183 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 3184 u8 *head = wd_ring->head; 3185 dma_addr_t dma = wd_ring->dma; 3186 u32 page_size = wd_ring->page_size; 3187 u32 page_num = wd_ring->page_num; 3188 u32 ring_sz = page_size * page_num; 3189 3190 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 3191 wd_ring->head = NULL; 3192 } 3193 3194 static void rtw89_pci_free_tx_ring(struct rtw89_dev *rtwdev, 3195 struct pci_dev *pdev, 3196 struct rtw89_pci_tx_ring *tx_ring) 3197 { 3198 int ring_sz; 3199 u8 *head; 3200 dma_addr_t dma; 3201 3202 head = tx_ring->bd_ring.head; 3203 dma = tx_ring->bd_ring.dma; 3204 ring_sz = tx_ring->bd_ring.desc_size * tx_ring->bd_ring.len; 3205 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 3206 3207 tx_ring->bd_ring.head = NULL; 3208 } 3209 3210 static void rtw89_pci_free_tx_rings(struct rtw89_dev *rtwdev, 3211 struct pci_dev *pdev) 3212 { 3213 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3214 const struct rtw89_pci_info *info = rtwdev->pci_info; 3215 struct rtw89_pci_tx_ring *tx_ring; 3216 int i; 3217 3218 for (i = 0; i < RTW89_TXCH_NUM; i++) { 3219 if (info->tx_dma_ch_mask & BIT(i)) 3220 continue; 3221 tx_ring = &rtwpci->tx_rings[i]; 3222 rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring); 3223 rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring); 3224 } 3225 } 3226 3227 static void rtw89_pci_free_rx_ring(struct rtw89_dev *rtwdev, 3228 struct pci_dev *pdev, 3229 struct rtw89_pci_rx_ring *rx_ring) 3230 { 3231 struct rtw89_pci_rx_info *rx_info; 3232 struct sk_buff *skb; 3233 dma_addr_t dma; 3234 u32 buf_sz; 3235 u8 *head; 3236 int ring_sz = rx_ring->bd_ring.desc_size * rx_ring->bd_ring.len; 3237 int i; 3238 3239 buf_sz = rx_ring->buf_sz; 3240 for (i = 0; i < rx_ring->bd_ring.len; i++) { 3241 skb = rx_ring->buf[i]; 3242 if (!skb) 3243 continue; 3244 3245 rx_info = RTW89_PCI_RX_SKB_CB(skb); 3246 dma = rx_info->dma; 3247 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE); 3248 dev_kfree_skb(skb); 3249 rx_ring->buf[i] = NULL; 3250 } 3251 3252 head = rx_ring->bd_ring.head; 3253 dma = rx_ring->bd_ring.dma; 3254 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 3255 3256 rx_ring->bd_ring.head = NULL; 3257 } 3258 3259 static void rtw89_pci_free_rx_rings(struct rtw89_dev *rtwdev, 3260 struct pci_dev *pdev) 3261 { 3262 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3263 struct rtw89_pci_rx_ring *rx_ring; 3264 int i; 3265 3266 for (i = 0; i < RTW89_RXCH_NUM; i++) { 3267 rx_ring = &rtwpci->rx_rings[i]; 3268 rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring); 3269 } 3270 } 3271 3272 static void rtw89_pci_free_trx_rings(struct rtw89_dev *rtwdev, 3273 struct pci_dev *pdev) 3274 { 3275 rtw89_pci_free_rx_rings(rtwdev, pdev); 3276 rtw89_pci_free_tx_rings(rtwdev, pdev); 3277 } 3278 3279 static int rtw89_pci_init_rx_bd(struct rtw89_dev *rtwdev, struct pci_dev *pdev, 3280 struct rtw89_pci_rx_ring *rx_ring, 3281 struct sk_buff *skb, int buf_sz, u32 idx) 3282 { 3283 struct rtw89_pci_rx_info *rx_info; 3284 struct rtw89_pci_rx_bd_32 *rx_bd; 3285 dma_addr_t dma; 3286 3287 if (!skb) 3288 return -EINVAL; 3289 3290 dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE); 3291 if (dma_mapping_error(&pdev->dev, dma)) 3292 return -EBUSY; 3293 3294 rx_info = RTW89_PCI_RX_SKB_CB(skb); 3295 rx_bd = RTW89_PCI_RX_BD(rx_ring, idx); 3296 3297 memset(rx_bd, 0, sizeof(*rx_bd)); 3298 rx_bd->buf_size = cpu_to_le16(buf_sz); 3299 rx_bd->dma = cpu_to_le32(dma); 3300 rx_bd->opt = le16_encode_bits(upper_32_bits(dma), RTW89_PCI_RXBD_OPT_DMA_HI); 3301 rx_info->dma = dma; 3302 3303 return 0; 3304 } 3305 3306 static int rtw89_pci_alloc_tx_wd_ring(struct rtw89_dev *rtwdev, 3307 struct pci_dev *pdev, 3308 struct rtw89_pci_tx_ring *tx_ring, 3309 enum rtw89_tx_channel txch) 3310 { 3311 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 3312 struct rtw89_pci_tx_wd *txwd; 3313 dma_addr_t dma; 3314 dma_addr_t cur_paddr; 3315 u8 *head; 3316 u8 *cur_vaddr; 3317 u32 page_size = RTW89_PCI_TXWD_PAGE_SIZE; 3318 u32 page_num = RTW89_PCI_TXWD_NUM_MAX; 3319 u32 ring_sz = page_size * page_num; 3320 u32 page_offset; 3321 int i; 3322 3323 /* FWCMD queue doesn't use txwd as pages */ 3324 if (txch == RTW89_TXCH_CH12) 3325 return 0; 3326 3327 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 3328 if (!head) 3329 return -ENOMEM; 3330 3331 INIT_LIST_HEAD(&wd_ring->free_pages); 3332 wd_ring->head = head; 3333 wd_ring->dma = dma; 3334 wd_ring->page_size = page_size; 3335 wd_ring->page_num = page_num; 3336 3337 page_offset = 0; 3338 for (i = 0; i < page_num; i++) { 3339 txwd = &wd_ring->pages[i]; 3340 cur_paddr = dma + page_offset; 3341 cur_vaddr = head + page_offset; 3342 3343 skb_queue_head_init(&txwd->queue); 3344 INIT_LIST_HEAD(&txwd->list); 3345 txwd->paddr = cur_paddr; 3346 txwd->vaddr = cur_vaddr; 3347 txwd->len = page_size; 3348 txwd->seq = i; 3349 rtw89_pci_enqueue_txwd(tx_ring, txwd); 3350 3351 page_offset += page_size; 3352 } 3353 3354 return 0; 3355 } 3356 3357 static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev, 3358 struct pci_dev *pdev, 3359 struct rtw89_pci_tx_ring *tx_ring, 3360 u32 desc_size, u32 len, 3361 enum rtw89_tx_channel txch) 3362 { 3363 const struct rtw89_pci_ch_dma_addr *txch_addr; 3364 int ring_sz = desc_size * len; 3365 u8 *head; 3366 dma_addr_t dma; 3367 int ret; 3368 3369 ret = rtw89_pci_alloc_tx_wd_ring(rtwdev, pdev, tx_ring, txch); 3370 if (ret) { 3371 rtw89_err(rtwdev, "failed to alloc txwd ring of txch %d\n", txch); 3372 goto err; 3373 } 3374 3375 ret = rtw89_pci_get_txch_addrs(rtwdev, txch, &txch_addr); 3376 if (ret) { 3377 rtw89_err(rtwdev, "failed to get address of txch %d", txch); 3378 goto err_free_wd_ring; 3379 } 3380 3381 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 3382 if (!head) { 3383 ret = -ENOMEM; 3384 goto err_free_wd_ring; 3385 } 3386 3387 INIT_LIST_HEAD(&tx_ring->busy_pages); 3388 tx_ring->bd_ring.head = head; 3389 tx_ring->bd_ring.dma = dma; 3390 tx_ring->bd_ring.len = len; 3391 tx_ring->bd_ring.desc_size = desc_size; 3392 tx_ring->bd_ring.addr = *txch_addr; 3393 tx_ring->bd_ring.wp = 0; 3394 tx_ring->bd_ring.rp = 0; 3395 tx_ring->txch = txch; 3396 3397 return 0; 3398 3399 err_free_wd_ring: 3400 rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring); 3401 err: 3402 return ret; 3403 } 3404 3405 static int rtw89_pci_alloc_tx_rings(struct rtw89_dev *rtwdev, 3406 struct pci_dev *pdev) 3407 { 3408 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3409 const struct rtw89_pci_info *info = rtwdev->pci_info; 3410 struct rtw89_pci_tx_ring *tx_ring; 3411 u32 desc_size; 3412 u32 len; 3413 u32 i, tx_allocated; 3414 int ret; 3415 3416 for (i = 0; i < RTW89_TXCH_NUM; i++) { 3417 if (info->tx_dma_ch_mask & BIT(i)) 3418 continue; 3419 tx_ring = &rtwpci->tx_rings[i]; 3420 desc_size = sizeof(struct rtw89_pci_tx_bd_32); 3421 len = RTW89_PCI_TXBD_NUM_MAX; 3422 ret = rtw89_pci_alloc_tx_ring(rtwdev, pdev, tx_ring, 3423 desc_size, len, i); 3424 if (ret) { 3425 #if defined(__linux__) 3426 rtw89_err(rtwdev, "failed to alloc tx ring %d\n", i); 3427 #elif defined(__FreeBSD__) 3428 rtw89_err(rtwdev, "failed to alloc tx ring %d: ret=%d\n", i, ret); 3429 #endif 3430 goto err_free; 3431 } 3432 } 3433 3434 return 0; 3435 3436 err_free: 3437 tx_allocated = i; 3438 for (i = 0; i < tx_allocated; i++) { 3439 tx_ring = &rtwpci->tx_rings[i]; 3440 rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring); 3441 } 3442 3443 return ret; 3444 } 3445 3446 static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev, 3447 struct pci_dev *pdev, 3448 struct rtw89_pci_rx_ring *rx_ring, 3449 u32 desc_size, u32 len, u32 rxch) 3450 { 3451 const struct rtw89_pci_info *info = rtwdev->pci_info; 3452 const struct rtw89_pci_ch_dma_addr *rxch_addr; 3453 struct sk_buff *skb; 3454 u8 *head; 3455 dma_addr_t dma; 3456 int ring_sz = desc_size * len; 3457 int buf_sz = RTW89_PCI_RX_BUF_SIZE; 3458 int i, allocated; 3459 int ret; 3460 3461 ret = rtw89_pci_get_rxch_addrs(rtwdev, rxch, &rxch_addr); 3462 if (ret) { 3463 rtw89_err(rtwdev, "failed to get address of rxch %d", rxch); 3464 return ret; 3465 } 3466 3467 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 3468 if (!head) { 3469 ret = -ENOMEM; 3470 goto err; 3471 } 3472 3473 rx_ring->bd_ring.head = head; 3474 rx_ring->bd_ring.dma = dma; 3475 rx_ring->bd_ring.len = len; 3476 rx_ring->bd_ring.desc_size = desc_size; 3477 rx_ring->bd_ring.addr = *rxch_addr; 3478 if (info->rx_ring_eq_is_full) 3479 rx_ring->bd_ring.wp = len - 1; 3480 else 3481 rx_ring->bd_ring.wp = 0; 3482 rx_ring->bd_ring.rp = 0; 3483 rx_ring->buf_sz = buf_sz; 3484 rx_ring->diliver_skb = NULL; 3485 rx_ring->diliver_desc.ready = false; 3486 rx_ring->target_rx_tag = 0; 3487 3488 for (i = 0; i < len; i++) { 3489 skb = dev_alloc_skb(buf_sz); 3490 if (!skb) { 3491 ret = -ENOMEM; 3492 goto err_free; 3493 } 3494 3495 memset(skb->data, 0, buf_sz); 3496 rx_ring->buf[i] = skb; 3497 ret = rtw89_pci_init_rx_bd(rtwdev, pdev, rx_ring, skb, 3498 buf_sz, i); 3499 if (ret) { 3500 #if defined(__linux__) 3501 rtw89_err(rtwdev, "failed to init rx buf %d\n", i); 3502 #elif defined(__FreeBSD__) 3503 rtw89_err(rtwdev, "failed to init rx buf %d ret=%d\n", i, ret); 3504 #endif 3505 dev_kfree_skb_any(skb); 3506 rx_ring->buf[i] = NULL; 3507 goto err_free; 3508 } 3509 } 3510 3511 return 0; 3512 3513 err_free: 3514 allocated = i; 3515 for (i = 0; i < allocated; i++) { 3516 skb = rx_ring->buf[i]; 3517 if (!skb) 3518 continue; 3519 dma = *((dma_addr_t *)skb->cb); 3520 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE); 3521 dev_kfree_skb(skb); 3522 rx_ring->buf[i] = NULL; 3523 } 3524 3525 head = rx_ring->bd_ring.head; 3526 dma = rx_ring->bd_ring.dma; 3527 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 3528 3529 rx_ring->bd_ring.head = NULL; 3530 err: 3531 return ret; 3532 } 3533 3534 static int rtw89_pci_alloc_rx_rings(struct rtw89_dev *rtwdev, 3535 struct pci_dev *pdev) 3536 { 3537 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3538 struct rtw89_pci_rx_ring *rx_ring; 3539 u32 desc_size; 3540 u32 len; 3541 int i, rx_allocated; 3542 int ret; 3543 3544 for (i = 0; i < RTW89_RXCH_NUM; i++) { 3545 rx_ring = &rtwpci->rx_rings[i]; 3546 desc_size = sizeof(struct rtw89_pci_rx_bd_32); 3547 len = RTW89_PCI_RXBD_NUM_MAX; 3548 ret = rtw89_pci_alloc_rx_ring(rtwdev, pdev, rx_ring, 3549 desc_size, len, i); 3550 if (ret) { 3551 rtw89_err(rtwdev, "failed to alloc rx ring %d\n", i); 3552 goto err_free; 3553 } 3554 } 3555 3556 return 0; 3557 3558 err_free: 3559 rx_allocated = i; 3560 for (i = 0; i < rx_allocated; i++) { 3561 rx_ring = &rtwpci->rx_rings[i]; 3562 rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring); 3563 } 3564 3565 return ret; 3566 } 3567 3568 static int rtw89_pci_alloc_trx_rings(struct rtw89_dev *rtwdev, 3569 struct pci_dev *pdev) 3570 { 3571 int ret; 3572 3573 ret = rtw89_pci_alloc_tx_rings(rtwdev, pdev); 3574 if (ret) { 3575 rtw89_err(rtwdev, "failed to alloc dma tx rings\n"); 3576 goto err; 3577 } 3578 3579 ret = rtw89_pci_alloc_rx_rings(rtwdev, pdev); 3580 if (ret) { 3581 rtw89_err(rtwdev, "failed to alloc dma rx rings\n"); 3582 goto err_free_tx_rings; 3583 } 3584 3585 return 0; 3586 3587 err_free_tx_rings: 3588 rtw89_pci_free_tx_rings(rtwdev, pdev); 3589 err: 3590 return ret; 3591 } 3592 3593 static void rtw89_pci_h2c_init(struct rtw89_dev *rtwdev, 3594 struct rtw89_pci *rtwpci) 3595 { 3596 skb_queue_head_init(&rtwpci->h2c_queue); 3597 skb_queue_head_init(&rtwpci->h2c_release_queue); 3598 } 3599 3600 static int rtw89_pci_setup_resource(struct rtw89_dev *rtwdev, 3601 struct pci_dev *pdev) 3602 { 3603 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3604 int ret; 3605 3606 ret = rtw89_pci_setup_mapping(rtwdev, pdev); 3607 if (ret) { 3608 rtw89_err(rtwdev, "failed to setup pci mapping\n"); 3609 goto err; 3610 } 3611 3612 ret = rtw89_pci_alloc_trx_rings(rtwdev, pdev); 3613 if (ret) { 3614 rtw89_err(rtwdev, "failed to alloc pci trx rings\n"); 3615 goto err_pci_unmap; 3616 } 3617 3618 rtw89_pci_h2c_init(rtwdev, rtwpci); 3619 3620 spin_lock_init(&rtwpci->irq_lock); 3621 spin_lock_init(&rtwpci->trx_lock); 3622 3623 return 0; 3624 3625 err_pci_unmap: 3626 rtw89_pci_clear_mapping(rtwdev, pdev); 3627 err: 3628 return ret; 3629 } 3630 3631 static void rtw89_pci_clear_resource(struct rtw89_dev *rtwdev, 3632 struct pci_dev *pdev) 3633 { 3634 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3635 3636 rtw89_pci_free_trx_rings(rtwdev, pdev); 3637 rtw89_pci_clear_mapping(rtwdev, pdev); 3638 rtw89_pci_release_fwcmd(rtwdev, rtwpci, 3639 skb_queue_len(&rtwpci->h2c_queue), true); 3640 } 3641 3642 void rtw89_pci_config_intr_mask(struct rtw89_dev *rtwdev) 3643 { 3644 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3645 const struct rtw89_chip_info *chip = rtwdev->chip; 3646 u32 hs0isr_ind_int_en = B_AX_HS0ISR_IND_INT_EN; 3647 3648 if (chip->chip_id == RTL8851B) 3649 hs0isr_ind_int_en = B_AX_HS0ISR_IND_INT_EN_WKARND; 3650 3651 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | 0; 3652 3653 if (rtwpci->under_recovery) { 3654 rtwpci->intrs[0] = hs0isr_ind_int_en; 3655 rtwpci->intrs[1] = 0; 3656 } else { 3657 rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN | 3658 B_AX_RXDMA_INT_EN | 3659 B_AX_RXP1DMA_INT_EN | 3660 B_AX_RPQDMA_INT_EN | 3661 B_AX_RXDMA_STUCK_INT_EN | 3662 B_AX_RDU_INT_EN | 3663 B_AX_RPQBD_FULL_INT_EN | 3664 hs0isr_ind_int_en; 3665 3666 rtwpci->intrs[1] = B_AX_HC10ISR_IND_INT_EN; 3667 } 3668 } 3669 EXPORT_SYMBOL(rtw89_pci_config_intr_mask); 3670 3671 static void rtw89_pci_recovery_intr_mask_v1(struct rtw89_dev *rtwdev) 3672 { 3673 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3674 3675 rtwpci->ind_intrs = B_AX_HS0ISR_IND_INT_EN; 3676 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; 3677 rtwpci->intrs[0] = 0; 3678 rtwpci->intrs[1] = 0; 3679 } 3680 3681 static void rtw89_pci_default_intr_mask_v1(struct rtw89_dev *rtwdev) 3682 { 3683 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3684 3685 rtwpci->ind_intrs = B_AX_HCI_AXIDMA_INT_EN | 3686 B_AX_HS1ISR_IND_INT_EN | 3687 B_AX_HS0ISR_IND_INT_EN; 3688 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; 3689 rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN | 3690 B_AX_RXDMA_INT_EN | 3691 B_AX_RXP1DMA_INT_EN | 3692 B_AX_RPQDMA_INT_EN | 3693 B_AX_RXDMA_STUCK_INT_EN | 3694 B_AX_RDU_INT_EN | 3695 B_AX_RPQBD_FULL_INT_EN; 3696 rtwpci->intrs[1] = B_AX_GPIO18_INT_EN; 3697 } 3698 3699 static void rtw89_pci_low_power_intr_mask_v1(struct rtw89_dev *rtwdev) 3700 { 3701 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3702 3703 rtwpci->ind_intrs = B_AX_HS1ISR_IND_INT_EN | 3704 B_AX_HS0ISR_IND_INT_EN; 3705 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; 3706 rtwpci->intrs[0] = 0; 3707 rtwpci->intrs[1] = B_AX_GPIO18_INT_EN; 3708 } 3709 3710 void rtw89_pci_config_intr_mask_v1(struct rtw89_dev *rtwdev) 3711 { 3712 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3713 3714 if (rtwpci->under_recovery) 3715 rtw89_pci_recovery_intr_mask_v1(rtwdev); 3716 else if (rtwpci->low_power) 3717 rtw89_pci_low_power_intr_mask_v1(rtwdev); 3718 else 3719 rtw89_pci_default_intr_mask_v1(rtwdev); 3720 } 3721 EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v1); 3722 3723 static void rtw89_pci_recovery_intr_mask_v2(struct rtw89_dev *rtwdev) 3724 { 3725 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3726 3727 rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0; 3728 rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN; 3729 rtwpci->intrs[0] = 0; 3730 rtwpci->intrs[1] = 0; 3731 } 3732 3733 static void rtw89_pci_default_intr_mask_v2(struct rtw89_dev *rtwdev) 3734 { 3735 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3736 3737 rtwpci->ind_intrs = B_BE_HCI_AXIDMA_INT_EN0 | 3738 B_BE_HS0_IND_INT_EN0; 3739 rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN; 3740 rtwpci->intrs[0] = B_BE_RDU_CH1_INT_IMR_V1 | 3741 B_BE_RDU_CH0_INT_IMR_V1; 3742 rtwpci->intrs[1] = B_BE_PCIE_RX_RX0P2_IMR0_V1 | 3743 B_BE_PCIE_RX_RPQ0_IMR0_V1; 3744 } 3745 3746 static void rtw89_pci_low_power_intr_mask_v2(struct rtw89_dev *rtwdev) 3747 { 3748 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3749 3750 rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0 | 3751 B_BE_HS1_IND_INT_EN0; 3752 rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN; 3753 rtwpci->intrs[0] = 0; 3754 rtwpci->intrs[1] = B_BE_PCIE_RX_RX0P2_IMR0_V1 | 3755 B_BE_PCIE_RX_RPQ0_IMR0_V1; 3756 } 3757 3758 void rtw89_pci_config_intr_mask_v2(struct rtw89_dev *rtwdev) 3759 { 3760 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3761 3762 if (rtwpci->under_recovery) 3763 rtw89_pci_recovery_intr_mask_v2(rtwdev); 3764 else if (rtwpci->low_power) 3765 rtw89_pci_low_power_intr_mask_v2(rtwdev); 3766 else 3767 rtw89_pci_default_intr_mask_v2(rtwdev); 3768 } 3769 EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v2); 3770 3771 static int rtw89_pci_request_irq(struct rtw89_dev *rtwdev, 3772 struct pci_dev *pdev) 3773 { 3774 unsigned long flags = 0; 3775 int ret; 3776 3777 flags |= PCI_IRQ_INTX | PCI_IRQ_MSI; 3778 ret = pci_alloc_irq_vectors(pdev, 1, 1, flags); 3779 if (ret < 0) { 3780 rtw89_err(rtwdev, "failed to alloc irq vectors, ret %d\n", ret); 3781 goto err; 3782 } 3783 3784 ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq, 3785 rtw89_pci_interrupt_handler, 3786 rtw89_pci_interrupt_threadfn, 3787 IRQF_SHARED, KBUILD_MODNAME, rtwdev); 3788 if (ret) { 3789 rtw89_err(rtwdev, "failed to request threaded irq\n"); 3790 goto err_free_vector; 3791 } 3792 3793 rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RESET); 3794 3795 return 0; 3796 3797 err_free_vector: 3798 pci_free_irq_vectors(pdev); 3799 err: 3800 return ret; 3801 } 3802 3803 static void rtw89_pci_free_irq(struct rtw89_dev *rtwdev, 3804 struct pci_dev *pdev) 3805 { 3806 devm_free_irq(rtwdev->dev, pdev->irq, rtwdev); 3807 pci_free_irq_vectors(pdev); 3808 } 3809 3810 static u16 gray_code_to_bin(u16 gray_code, u32 bit_num) 3811 { 3812 u16 bin = 0, gray_bit; 3813 u32 bit_idx; 3814 3815 for (bit_idx = 0; bit_idx < bit_num; bit_idx++) { 3816 gray_bit = (gray_code >> bit_idx) & 0x1; 3817 if (bit_num - bit_idx > 1) 3818 gray_bit ^= (gray_code >> (bit_idx + 1)) & 0x1; 3819 bin |= (gray_bit << bit_idx); 3820 } 3821 3822 return bin; 3823 } 3824 3825 static int rtw89_pci_filter_out(struct rtw89_dev *rtwdev) 3826 { 3827 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3828 struct pci_dev *pdev = rtwpci->pdev; 3829 u16 val16, filter_out_val; 3830 u32 val, phy_offset; 3831 int ret; 3832 3833 if (rtwdev->chip->chip_id != RTL8852C) 3834 return 0; 3835 3836 val = rtw89_read32_mask(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK); 3837 if (val == B_AX_ASPM_CTRL_L1) 3838 return 0; 3839 3840 ret = pci_read_config_dword(pdev, RTW89_PCIE_L1_STS_V1, &val); 3841 if (ret) 3842 return ret; 3843 3844 val = FIELD_GET(RTW89_BCFG_LINK_SPEED_MASK, val); 3845 if (val == RTW89_PCIE_GEN1_SPEED) { 3846 phy_offset = R_RAC_DIRECT_OFFSET_G1; 3847 } else if (val == RTW89_PCIE_GEN2_SPEED) { 3848 phy_offset = R_RAC_DIRECT_OFFSET_G2; 3849 val16 = rtw89_read16(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT); 3850 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT, 3851 val16 | B_PCIE_BIT_PINOUT_DIS); 3852 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT, 3853 val16 & ~B_PCIE_BIT_RD_SEL); 3854 3855 val16 = rtw89_read16_mask(rtwdev, 3856 phy_offset + RAC_ANA1F * RAC_MULT, 3857 FILTER_OUT_EQ_MASK); 3858 val16 = gray_code_to_bin(val16, hweight16(val16)); 3859 filter_out_val = rtw89_read16(rtwdev, phy_offset + RAC_ANA24 * 3860 RAC_MULT); 3861 filter_out_val &= ~REG_FILTER_OUT_MASK; 3862 filter_out_val |= FIELD_PREP(REG_FILTER_OUT_MASK, val16); 3863 3864 rtw89_write16(rtwdev, phy_offset + RAC_ANA24 * RAC_MULT, 3865 filter_out_val); 3866 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0A * RAC_MULT, 3867 B_BAC_EQ_SEL); 3868 rtw89_write16_set(rtwdev, 3869 R_RAC_DIRECT_OFFSET_G1 + RAC_ANA0C * RAC_MULT, 3870 B_PCIE_BIT_PSAVE); 3871 } else { 3872 return -EOPNOTSUPP; 3873 } 3874 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0C * RAC_MULT, 3875 B_PCIE_BIT_PSAVE); 3876 3877 return 0; 3878 } 3879 3880 static void rtw89_pci_clkreq_set(struct rtw89_dev *rtwdev, bool enable) 3881 { 3882 const struct rtw89_pci_info *info = rtwdev->pci_info; 3883 const struct rtw89_pci_gen_def *gen_def = info->gen_def; 3884 3885 if (rtw89_pci_disable_clkreq) 3886 return; 3887 3888 gen_def->clkreq_set(rtwdev, enable); 3889 } 3890 3891 static void rtw89_pci_clkreq_set_ax(struct rtw89_dev *rtwdev, bool enable) 3892 { 3893 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 3894 int ret; 3895 3896 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL, 3897 PCIE_CLKDLY_HW_30US); 3898 if (ret) 3899 rtw89_err(rtwdev, "failed to set CLKREQ Delay\n"); 3900 3901 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 3902 if (enable) 3903 ret = rtw89_pci_config_byte_set(rtwdev, 3904 RTW89_PCIE_L1_CTRL, 3905 RTW89_PCIE_BIT_CLK); 3906 else 3907 ret = rtw89_pci_config_byte_clr(rtwdev, 3908 RTW89_PCIE_L1_CTRL, 3909 RTW89_PCIE_BIT_CLK); 3910 if (ret) 3911 rtw89_err(rtwdev, "failed to %s CLKREQ_L1, ret=%d", 3912 enable ? "set" : "unset", ret); 3913 } else if (chip_id == RTL8852C) { 3914 rtw89_write32_set(rtwdev, R_AX_PCIE_LAT_CTRL, 3915 B_AX_CLK_REQ_SEL_OPT | B_AX_CLK_REQ_SEL); 3916 if (enable) 3917 rtw89_write32_set(rtwdev, R_AX_L1_CLK_CTRL, 3918 B_AX_CLK_REQ_N); 3919 else 3920 rtw89_write32_clr(rtwdev, R_AX_L1_CLK_CTRL, 3921 B_AX_CLK_REQ_N); 3922 } 3923 } 3924 3925 static void rtw89_pci_aspm_set(struct rtw89_dev *rtwdev, bool enable) 3926 { 3927 const struct rtw89_pci_info *info = rtwdev->pci_info; 3928 const struct rtw89_pci_gen_def *gen_def = info->gen_def; 3929 3930 if (rtw89_pci_disable_aspm_l1) 3931 return; 3932 3933 gen_def->aspm_set(rtwdev, enable); 3934 } 3935 3936 static void rtw89_pci_aspm_set_ax(struct rtw89_dev *rtwdev, bool enable) 3937 { 3938 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 3939 u8 value = 0; 3940 int ret; 3941 3942 ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, &value); 3943 if (ret) 3944 rtw89_warn(rtwdev, "failed to read ASPM Delay\n"); 3945 3946 u8p_replace_bits(&value, PCIE_L1DLY_16US, RTW89_L1DLY_MASK); 3947 u8p_replace_bits(&value, PCIE_L0SDLY_4US, RTW89_L0DLY_MASK); 3948 3949 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, value); 3950 if (ret) 3951 rtw89_warn(rtwdev, "failed to read ASPM Delay\n"); 3952 3953 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 3954 if (enable) 3955 ret = rtw89_pci_config_byte_set(rtwdev, 3956 RTW89_PCIE_L1_CTRL, 3957 RTW89_PCIE_BIT_L1); 3958 else 3959 ret = rtw89_pci_config_byte_clr(rtwdev, 3960 RTW89_PCIE_L1_CTRL, 3961 RTW89_PCIE_BIT_L1); 3962 } else if (chip_id == RTL8852C) { 3963 if (enable) 3964 rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1, 3965 B_AX_ASPM_CTRL_L1); 3966 else 3967 rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1, 3968 B_AX_ASPM_CTRL_L1); 3969 } 3970 if (ret) 3971 rtw89_err(rtwdev, "failed to %s ASPM L1, ret=%d", 3972 enable ? "set" : "unset", ret); 3973 } 3974 3975 static void rtw89_pci_recalc_int_mit(struct rtw89_dev *rtwdev) 3976 { 3977 enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen; 3978 const struct rtw89_pci_info *info = rtwdev->pci_info; 3979 struct rtw89_traffic_stats *stats = &rtwdev->stats; 3980 enum rtw89_tfc_lv tx_tfc_lv = stats->tx_tfc_lv; 3981 enum rtw89_tfc_lv rx_tfc_lv = stats->rx_tfc_lv; 3982 u32 val = 0; 3983 3984 if (rtwdev->scanning || 3985 (tx_tfc_lv < RTW89_TFC_HIGH && rx_tfc_lv < RTW89_TFC_HIGH)) 3986 goto out; 3987 3988 if (chip_gen == RTW89_CHIP_BE) 3989 val = B_BE_PCIE_MIT_RX0P2_EN | B_BE_PCIE_MIT_RX0P1_EN; 3990 else 3991 val = B_AX_RXMIT_RXP2_SEL | B_AX_RXMIT_RXP1_SEL | 3992 FIELD_PREP(B_AX_RXCOUNTER_MATCH_MASK, RTW89_PCI_RXBD_NUM_MAX / 2) | 3993 FIELD_PREP(B_AX_RXTIMER_UNIT_MASK, AX_RXTIMER_UNIT_64US) | 3994 FIELD_PREP(B_AX_RXTIMER_MATCH_MASK, 2048 / 64); 3995 3996 out: 3997 rtw89_write32(rtwdev, info->mit_addr, val); 3998 } 3999 4000 static void rtw89_pci_link_cfg(struct rtw89_dev *rtwdev) 4001 { 4002 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 4003 struct pci_dev *pdev = rtwpci->pdev; 4004 u16 link_ctrl; 4005 int ret; 4006 4007 /* Though there is standard PCIE configuration space to set the 4008 * link control register, but by Realtek's design, driver should 4009 * check if host supports CLKREQ/ASPM to enable the HW module. 4010 * 4011 * These functions are implemented by two HW modules associated, 4012 * one is responsible to access PCIE configuration space to 4013 * follow the host settings, and another is in charge of doing 4014 * CLKREQ/ASPM mechanisms, it is default disabled. Because sometimes 4015 * the host does not support it, and due to some reasons or wrong 4016 * settings (ex. CLKREQ# not Bi-Direction), it could lead to device 4017 * loss if HW misbehaves on the link. 4018 * 4019 * Hence it's designed that driver should first check the PCIE 4020 * configuration space is sync'ed and enabled, then driver can turn 4021 * on the other module that is actually working on the mechanism. 4022 */ 4023 ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl); 4024 if (ret) { 4025 rtw89_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret); 4026 return; 4027 } 4028 4029 if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN) 4030 rtw89_pci_clkreq_set(rtwdev, true); 4031 4032 if (link_ctrl & PCI_EXP_LNKCTL_ASPM_L1) 4033 rtw89_pci_aspm_set(rtwdev, true); 4034 } 4035 4036 static void rtw89_pci_l1ss_set(struct rtw89_dev *rtwdev, bool enable) 4037 { 4038 const struct rtw89_pci_info *info = rtwdev->pci_info; 4039 const struct rtw89_pci_gen_def *gen_def = info->gen_def; 4040 4041 if (rtw89_pci_disable_l1ss) 4042 return; 4043 4044 gen_def->l1ss_set(rtwdev, enable); 4045 } 4046 4047 static void rtw89_pci_l1ss_set_ax(struct rtw89_dev *rtwdev, bool enable) 4048 { 4049 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 4050 int ret; 4051 4052 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 4053 if (enable) 4054 ret = rtw89_pci_config_byte_set(rtwdev, 4055 RTW89_PCIE_TIMER_CTRL, 4056 RTW89_PCIE_BIT_L1SUB); 4057 else 4058 ret = rtw89_pci_config_byte_clr(rtwdev, 4059 RTW89_PCIE_TIMER_CTRL, 4060 RTW89_PCIE_BIT_L1SUB); 4061 if (ret) 4062 rtw89_err(rtwdev, "failed to %s L1SS, ret=%d", 4063 enable ? "set" : "unset", ret); 4064 } else if (chip_id == RTL8852C) { 4065 ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_L1SS_STS_V1, 4066 RTW89_PCIE_BIT_ASPM_L11 | 4067 RTW89_PCIE_BIT_PCI_L11); 4068 if (ret) 4069 rtw89_warn(rtwdev, "failed to unset ASPM L1.1, ret=%d", ret); 4070 if (enable) 4071 rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1, 4072 B_AX_L1SUB_DISABLE); 4073 else 4074 rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1, 4075 B_AX_L1SUB_DISABLE); 4076 } 4077 } 4078 4079 static void rtw89_pci_l1ss_cfg(struct rtw89_dev *rtwdev) 4080 { 4081 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 4082 struct pci_dev *pdev = rtwpci->pdev; 4083 u32 l1ss_cap_ptr, l1ss_ctrl; 4084 4085 if (rtw89_pci_disable_l1ss) 4086 return; 4087 4088 l1ss_cap_ptr = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS); 4089 if (!l1ss_cap_ptr) 4090 return; 4091 4092 pci_read_config_dword(pdev, l1ss_cap_ptr + PCI_L1SS_CTL1, &l1ss_ctrl); 4093 4094 if (l1ss_ctrl & PCI_L1SS_CTL1_L1SS_MASK) 4095 rtw89_pci_l1ss_set(rtwdev, true); 4096 } 4097 4098 static int rtw89_pci_poll_io_idle_ax(struct rtw89_dev *rtwdev) 4099 { 4100 int ret = 0; 4101 u32 sts; 4102 u32 busy = B_AX_PCIEIO_BUSY | B_AX_PCIEIO_TX_BUSY | B_AX_PCIEIO_RX_BUSY; 4103 4104 ret = read_poll_timeout_atomic(rtw89_read32, sts, (sts & busy) == 0x0, 4105 10, 1000, false, rtwdev, 4106 R_AX_PCIE_DMA_BUSY1); 4107 if (ret) { 4108 rtw89_err(rtwdev, "pci dmach busy1 0x%X\n", 4109 rtw89_read32(rtwdev, R_AX_PCIE_DMA_BUSY1)); 4110 return -EINVAL; 4111 } 4112 return ret; 4113 } 4114 4115 static int rtw89_pci_lv1rst_stop_dma_ax(struct rtw89_dev *rtwdev) 4116 { 4117 u32 val; 4118 int ret; 4119 4120 if (rtwdev->chip->chip_id == RTL8852C) 4121 return 0; 4122 4123 rtw89_pci_ctrl_dma_all(rtwdev, false); 4124 ret = rtw89_pci_poll_io_idle_ax(rtwdev); 4125 if (ret) { 4126 val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG); 4127 rtw89_debug(rtwdev, RTW89_DBG_HCI, 4128 "[PCIe] poll_io_idle fail, before 0x%08x: 0x%08x\n", 4129 R_AX_DBG_ERR_FLAG, val); 4130 if (val & B_AX_TX_STUCK || val & B_AX_PCIE_TXBD_LEN0) 4131 rtw89_mac_ctrl_hci_dma_tx(rtwdev, false); 4132 if (val & B_AX_RX_STUCK) 4133 rtw89_mac_ctrl_hci_dma_rx(rtwdev, false); 4134 rtw89_mac_ctrl_hci_dma_trx(rtwdev, true); 4135 ret = rtw89_pci_poll_io_idle_ax(rtwdev); 4136 val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG); 4137 rtw89_debug(rtwdev, RTW89_DBG_HCI, 4138 "[PCIe] poll_io_idle fail, after 0x%08x: 0x%08x\n", 4139 R_AX_DBG_ERR_FLAG, val); 4140 } 4141 4142 return ret; 4143 } 4144 4145 static int rtw89_pci_lv1rst_start_dma_ax(struct rtw89_dev *rtwdev) 4146 { 4147 u32 ret; 4148 4149 if (rtwdev->chip->chip_id == RTL8852C) 4150 return 0; 4151 4152 rtw89_mac_ctrl_hci_dma_trx(rtwdev, false); 4153 rtw89_mac_ctrl_hci_dma_trx(rtwdev, true); 4154 rtw89_pci_clr_idx_all(rtwdev); 4155 4156 ret = rtw89_pci_rst_bdram_ax(rtwdev); 4157 if (ret) 4158 return ret; 4159 4160 rtw89_pci_ctrl_dma_all(rtwdev, true); 4161 return ret; 4162 } 4163 4164 static int rtw89_pci_ops_mac_lv1_recovery(struct rtw89_dev *rtwdev, 4165 enum rtw89_lv1_rcvy_step step) 4166 { 4167 const struct rtw89_pci_info *info = rtwdev->pci_info; 4168 const struct rtw89_pci_gen_def *gen_def = info->gen_def; 4169 int ret; 4170 4171 switch (step) { 4172 case RTW89_LV1_RCVY_STEP_1: 4173 ret = gen_def->lv1rst_stop_dma(rtwdev); 4174 if (ret) 4175 rtw89_err(rtwdev, "lv1 rcvy pci stop dma fail\n"); 4176 4177 break; 4178 4179 case RTW89_LV1_RCVY_STEP_2: 4180 ret = gen_def->lv1rst_start_dma(rtwdev); 4181 if (ret) 4182 rtw89_err(rtwdev, "lv1 rcvy pci start dma fail\n"); 4183 break; 4184 4185 default: 4186 return -EINVAL; 4187 } 4188 4189 return ret; 4190 } 4191 4192 static void rtw89_pci_ops_dump_err_status(struct rtw89_dev *rtwdev) 4193 { 4194 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) 4195 return; 4196 4197 if (rtwdev->chip->chip_id == RTL8852C) { 4198 rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n", 4199 rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG_V1)); 4200 rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n", 4201 rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG_V1)); 4202 } else { 4203 rtw89_info(rtwdev, "R_AX_RPQ_RXBD_IDX =0x%08x\n", 4204 rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX)); 4205 rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n", 4206 rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG)); 4207 rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n", 4208 rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG)); 4209 } 4210 } 4211 4212 static int rtw89_pci_napi_poll(struct napi_struct *napi, int budget) 4213 { 4214 struct rtw89_dev *rtwdev = container_of(napi, struct rtw89_dev, napi); 4215 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 4216 const struct rtw89_pci_info *info = rtwdev->pci_info; 4217 const struct rtw89_pci_gen_def *gen_def = info->gen_def; 4218 unsigned long flags; 4219 int work_done; 4220 4221 rtwdev->napi_budget_countdown = budget; 4222 4223 rtw89_write32(rtwdev, gen_def->isr_clear_rpq.addr, gen_def->isr_clear_rpq.data); 4224 work_done = rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown); 4225 if (work_done == budget) 4226 return budget; 4227 4228 rtw89_write32(rtwdev, gen_def->isr_clear_rxq.addr, gen_def->isr_clear_rxq.data); 4229 work_done += rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown); 4230 if (work_done < budget && napi_complete_done(napi, work_done)) { 4231 spin_lock_irqsave(&rtwpci->irq_lock, flags); 4232 if (likely(rtwpci->running)) 4233 rtw89_chip_enable_intr(rtwdev, rtwpci); 4234 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 4235 } 4236 4237 return work_done; 4238 } 4239 4240 static int __maybe_unused rtw89_pci_suspend(struct device *dev) 4241 { 4242 struct ieee80211_hw *hw = dev_get_drvdata(dev); 4243 struct rtw89_dev *rtwdev = hw->priv; 4244 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 4245 4246 rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); 4247 rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST); 4248 rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); 4249 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 4250 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, 4251 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 4252 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, 4253 B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG); 4254 } else { 4255 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, 4256 B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN); 4257 } 4258 4259 return 0; 4260 } 4261 4262 static void rtw89_pci_l2_hci_ldo(struct rtw89_dev *rtwdev) 4263 { 4264 if (rtwdev->chip->chip_id == RTL8852C) 4265 return; 4266 4267 /* Hardware need write the reg twice to ensure the setting work */ 4268 rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE, 4269 RTW89_PCIE_BIT_CFG_RST_MSTATE); 4270 rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE, 4271 RTW89_PCIE_BIT_CFG_RST_MSTATE); 4272 } 4273 4274 static int __maybe_unused rtw89_pci_resume(struct device *dev) 4275 { 4276 struct ieee80211_hw *hw = dev_get_drvdata(dev); 4277 struct rtw89_dev *rtwdev = hw->priv; 4278 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 4279 4280 rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); 4281 rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST); 4282 rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); 4283 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 4284 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, 4285 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 4286 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, 4287 B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG); 4288 } else { 4289 rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1, 4290 B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN); 4291 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, 4292 B_AX_SEL_REQ_ENTR_L1); 4293 } 4294 rtw89_pci_l2_hci_ldo(rtwdev); 4295 rtw89_pci_disable_eq(rtwdev); 4296 rtw89_pci_cfg_dac(rtwdev); 4297 rtw89_pci_filter_out(rtwdev); 4298 rtw89_pci_link_cfg(rtwdev); 4299 rtw89_pci_l1ss_cfg(rtwdev); 4300 4301 return 0; 4302 } 4303 4304 SIMPLE_DEV_PM_OPS(rtw89_pm_ops, rtw89_pci_suspend, rtw89_pci_resume); 4305 EXPORT_SYMBOL(rtw89_pm_ops); 4306 4307 const struct rtw89_pci_gen_def rtw89_pci_gen_ax = { 4308 .isr_rdu = B_AX_RDU_INT, 4309 .isr_halt_c2h = B_AX_HALT_C2H_INT_EN, 4310 .isr_wdt_timeout = B_AX_WDT_TIMEOUT_INT_EN, 4311 .isr_clear_rpq = {R_AX_PCIE_HISR00, B_AX_RPQDMA_INT | B_AX_RPQBD_FULL_INT}, 4312 .isr_clear_rxq = {R_AX_PCIE_HISR00, B_AX_RXP1DMA_INT | B_AX_RXDMA_INT | 4313 B_AX_RDU_INT}, 4314 4315 .mac_pre_init = rtw89_pci_ops_mac_pre_init_ax, 4316 .mac_pre_deinit = NULL, 4317 .mac_post_init = rtw89_pci_ops_mac_post_init_ax, 4318 4319 .clr_idx_all = rtw89_pci_clr_idx_all_ax, 4320 .rst_bdram = rtw89_pci_rst_bdram_ax, 4321 4322 .lv1rst_stop_dma = rtw89_pci_lv1rst_stop_dma_ax, 4323 .lv1rst_start_dma = rtw89_pci_lv1rst_start_dma_ax, 4324 4325 .ctrl_txdma_ch = rtw89_pci_ctrl_txdma_ch_ax, 4326 .ctrl_txdma_fw_ch = rtw89_pci_ctrl_txdma_fw_ch_ax, 4327 .poll_txdma_ch_idle = rtw89_pci_poll_txdma_ch_idle_ax, 4328 4329 .aspm_set = rtw89_pci_aspm_set_ax, 4330 .clkreq_set = rtw89_pci_clkreq_set_ax, 4331 .l1ss_set = rtw89_pci_l1ss_set_ax, 4332 }; 4333 EXPORT_SYMBOL(rtw89_pci_gen_ax); 4334 4335 static const struct rtw89_hci_ops rtw89_pci_ops = { 4336 .tx_write = rtw89_pci_ops_tx_write, 4337 .tx_kick_off = rtw89_pci_ops_tx_kick_off, 4338 .flush_queues = rtw89_pci_ops_flush_queues, 4339 .reset = rtw89_pci_ops_reset, 4340 .start = rtw89_pci_ops_start, 4341 .stop = rtw89_pci_ops_stop, 4342 .pause = rtw89_pci_ops_pause, 4343 .switch_mode = rtw89_pci_ops_switch_mode, 4344 .recalc_int_mit = rtw89_pci_recalc_int_mit, 4345 4346 .read8 = rtw89_pci_ops_read8, 4347 .read16 = rtw89_pci_ops_read16, 4348 .read32 = rtw89_pci_ops_read32, 4349 .write8 = rtw89_pci_ops_write8, 4350 .write16 = rtw89_pci_ops_write16, 4351 .write32 = rtw89_pci_ops_write32, 4352 4353 .mac_pre_init = rtw89_pci_ops_mac_pre_init, 4354 .mac_pre_deinit = rtw89_pci_ops_mac_pre_deinit, 4355 .mac_post_init = rtw89_pci_ops_mac_post_init, 4356 .deinit = rtw89_pci_ops_deinit, 4357 4358 .check_and_reclaim_tx_resource = rtw89_pci_check_and_reclaim_tx_resource, 4359 .mac_lv1_rcvy = rtw89_pci_ops_mac_lv1_recovery, 4360 .dump_err_status = rtw89_pci_ops_dump_err_status, 4361 .napi_poll = rtw89_pci_napi_poll, 4362 4363 .recovery_start = rtw89_pci_ops_recovery_start, 4364 .recovery_complete = rtw89_pci_ops_recovery_complete, 4365 4366 .ctrl_txdma_ch = rtw89_pci_ctrl_txdma_ch, 4367 .ctrl_txdma_fw_ch = rtw89_pci_ctrl_txdma_fw_ch, 4368 .ctrl_trxhci = rtw89_pci_ctrl_dma_trx, 4369 .poll_txdma_ch_idle = rtw89_pci_poll_txdma_ch_idle, 4370 4371 .clr_idx_all = rtw89_pci_clr_idx_all, 4372 .clear = rtw89_pci_clear_resource, 4373 .disable_intr = rtw89_pci_disable_intr_lock, 4374 .enable_intr = rtw89_pci_enable_intr_lock, 4375 .rst_bdram = rtw89_pci_reset_bdram, 4376 }; 4377 4378 int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) 4379 { 4380 struct rtw89_dev *rtwdev; 4381 const struct rtw89_driver_info *info; 4382 const struct rtw89_pci_info *pci_info; 4383 int ret; 4384 4385 info = (const struct rtw89_driver_info *)id->driver_data; 4386 4387 rtwdev = rtw89_alloc_ieee80211_hw(&pdev->dev, 4388 sizeof(struct rtw89_pci), 4389 info->chip); 4390 if (!rtwdev) { 4391 dev_err(&pdev->dev, "failed to allocate hw\n"); 4392 return -ENOMEM; 4393 } 4394 4395 pci_info = info->bus.pci; 4396 4397 rtwdev->pci_info = info->bus.pci; 4398 rtwdev->hci.ops = &rtw89_pci_ops; 4399 rtwdev->hci.type = RTW89_HCI_TYPE_PCIE; 4400 rtwdev->hci.rpwm_addr = pci_info->rpwm_addr; 4401 rtwdev->hci.cpwm_addr = pci_info->cpwm_addr; 4402 4403 rtw89_check_quirks(rtwdev, info->quirks); 4404 4405 SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev); 4406 4407 ret = rtw89_core_init(rtwdev); 4408 if (ret) { 4409 rtw89_err(rtwdev, "failed to initialise core\n"); 4410 goto err_release_hw; 4411 } 4412 4413 ret = rtw89_pci_claim_device(rtwdev, pdev); 4414 if (ret) { 4415 rtw89_err(rtwdev, "failed to claim pci device\n"); 4416 goto err_core_deinit; 4417 } 4418 4419 ret = rtw89_pci_setup_resource(rtwdev, pdev); 4420 if (ret) { 4421 rtw89_err(rtwdev, "failed to setup pci resource\n"); 4422 goto err_declaim_pci; 4423 } 4424 4425 ret = rtw89_chip_info_setup(rtwdev); 4426 if (ret) { 4427 rtw89_err(rtwdev, "failed to setup chip information\n"); 4428 goto err_clear_resource; 4429 } 4430 4431 rtw89_pci_disable_eq(rtwdev); 4432 rtw89_pci_filter_out(rtwdev); 4433 rtw89_pci_link_cfg(rtwdev); 4434 rtw89_pci_l1ss_cfg(rtwdev); 4435 4436 ret = rtw89_core_napi_init(rtwdev); 4437 if (ret) { 4438 rtw89_err(rtwdev, "failed to init napi\n"); 4439 goto err_clear_resource; 4440 } 4441 4442 ret = rtw89_pci_request_irq(rtwdev, pdev); 4443 if (ret) { 4444 rtw89_err(rtwdev, "failed to request pci irq\n"); 4445 goto err_deinit_napi; 4446 } 4447 4448 ret = rtw89_core_register(rtwdev); 4449 if (ret) { 4450 rtw89_err(rtwdev, "failed to register core\n"); 4451 goto err_free_irq; 4452 } 4453 4454 set_bit(RTW89_FLAG_PROBE_DONE, rtwdev->flags); 4455 4456 return 0; 4457 4458 err_free_irq: 4459 rtw89_pci_free_irq(rtwdev, pdev); 4460 err_deinit_napi: 4461 rtw89_core_napi_deinit(rtwdev); 4462 err_clear_resource: 4463 rtw89_pci_clear_resource(rtwdev, pdev); 4464 err_declaim_pci: 4465 rtw89_pci_declaim_device(rtwdev, pdev); 4466 err_core_deinit: 4467 rtw89_core_deinit(rtwdev); 4468 err_release_hw: 4469 rtw89_free_ieee80211_hw(rtwdev); 4470 4471 return ret; 4472 } 4473 EXPORT_SYMBOL(rtw89_pci_probe); 4474 4475 void rtw89_pci_remove(struct pci_dev *pdev) 4476 { 4477 struct ieee80211_hw *hw = pci_get_drvdata(pdev); 4478 struct rtw89_dev *rtwdev; 4479 4480 rtwdev = hw->priv; 4481 4482 rtw89_pci_free_irq(rtwdev, pdev); 4483 rtw89_core_napi_deinit(rtwdev); 4484 rtw89_core_unregister(rtwdev); 4485 rtw89_pci_clear_resource(rtwdev, pdev); 4486 rtw89_pci_declaim_device(rtwdev, pdev); 4487 rtw89_core_deinit(rtwdev); 4488 rtw89_free_ieee80211_hw(rtwdev); 4489 } 4490 EXPORT_SYMBOL(rtw89_pci_remove); 4491 4492 MODULE_AUTHOR("Realtek Corporation"); 4493 MODULE_DESCRIPTION("Realtek PCI 802.11ax wireless driver"); 4494 MODULE_LICENSE("Dual BSD/GPL"); 4495 #if defined(__FreeBSD__) 4496 MODULE_VERSION(rtw89_pci, 1); 4497 MODULE_DEPEND(rtw89_pci, linuxkpi, 1, 1, 1); 4498 MODULE_DEPEND(rtw89_pci, linuxkpi_wlan, 1, 1, 1); 4499 #ifdef CONFIG_RTW89_DEBUGFS 4500 MODULE_DEPEND(rtw89_pci, lindebugfs, 1, 1, 1); 4501 #endif 4502 #endif 4503