// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2020 Realtek Corporation
 */

#include <linux/pci.h>

#include "mac.h"
#include "pci.h"
#include "reg.h"
#include "ser.h"

static bool rtw89_pci_disable_clkreq;
static bool rtw89_pci_disable_aspm_l1;
static bool rtw89_pci_disable_l1ss;
module_param_named(disable_clkreq, rtw89_pci_disable_clkreq, bool, 0644);
module_param_named(disable_aspm_l1, rtw89_pci_disable_aspm_l1, bool, 0644);
module_param_named(disable_aspm_l1ss, rtw89_pci_disable_l1ss, bool, 0644);
MODULE_PARM_DESC(disable_clkreq, "Set Y to disable PCI clkreq support");
MODULE_PARM_DESC(disable_aspm_l1, "Set Y to disable PCI ASPM L1 support");
MODULE_PARM_DESC(disable_aspm_l1ss, "Set Y to disable PCI L1SS support");

static int rtw89_pci_get_phy_offset_by_link_speed(struct rtw89_dev *rtwdev,
						  u32 *phy_offset)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;
	u32 val;
	int ret;

	ret = pci_read_config_dword(pdev, RTW89_PCIE_L1_STS_V1, &val);
	if (ret)
		return ret;

	val = u32_get_bits(val, RTW89_BCFG_LINK_SPEED_MASK);
	if (val == RTW89_PCIE_GEN1_SPEED) {
		*phy_offset = R_RAC_DIRECT_OFFSET_G1;
	} else if (val == RTW89_PCIE_GEN2_SPEED) {
		*phy_offset = R_RAC_DIRECT_OFFSET_G2;
	} else {
		rtw89_warn(rtwdev, "Unknown PCI link speed %d\n", val);
		return -EFAULT;
	}

	return 0;
}

static int rtw89_pci_rst_bdram_ax(struct rtw89_dev *rtwdev)
{
	u32 val;
	int ret;

	rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RST_BDRAM);

	ret = read_poll_timeout_atomic(rtw89_read32, val, !(val & B_AX_RST_BDRAM),
				       1, RTW89_PCI_POLL_BDRAM_RST_CNT, false,
				       rtwdev, R_AX_PCIE_INIT_CFG1);

	return ret;
}
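/* Note: rtw89_pci_dma_recalc() below computes how many bus descriptors the
 * hardware has consumed since the last poll, treating the ring as a modular
 * counter. A worked example (illustrative, not from the original source):
 * with len = 256, software rp = 250 and hardware cur_rp = 4, cur_rp < rp,
 * so the count wraps: cnt = len - (rp - cur_rp) = 256 - 246 = 10. For RX
 * rings on chips where rx_ring_eq_is_full is set, wp is biased by one
 * because such rings are kept completely full.
 */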
static u32 rtw89_pci_dma_recalc(struct rtw89_dev *rtwdev,
				struct rtw89_pci_dma_ring *bd_ring,
				u32 cur_idx, bool tx)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	u32 cnt, cur_rp, wp, rp, len;

	rp = bd_ring->rp;
	wp = bd_ring->wp;
	len = bd_ring->len;

	cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
	if (tx) {
		cnt = cur_rp >= rp ? cur_rp - rp : len - (rp - cur_rp);
	} else {
		if (info->rx_ring_eq_is_full)
			wp += 1;

		cnt = cur_rp >= wp ? cur_rp - wp : len - (wp - cur_rp);
	}

	bd_ring->rp = cur_rp;

	return cnt;
}

static u32 rtw89_pci_txbd_recalc(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 addr_idx = bd_ring->addr.idx;
	u32 cnt, idx;

	idx = rtw89_read32(rtwdev, addr_idx);
	cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, true);

	return cnt;
}

static void rtw89_pci_release_fwcmd(struct rtw89_dev *rtwdev,
				    struct rtw89_pci *rtwpci,
				    u32 cnt, bool release_all)
{
	struct rtw89_pci_tx_data *tx_data;
	struct sk_buff *skb;
	u32 qlen;

	while (cnt--) {
		skb = skb_dequeue(&rtwpci->h2c_queue);
		if (!skb) {
			rtw89_err(rtwdev, "failed to pre-release fwcmd\n");
			return;
		}
		skb_queue_tail(&rtwpci->h2c_release_queue, skb);
	}

	/* Unless releasing everything, keep the newest RTW89_PCI_MULTITAG
	 * entries queued for a later round (the hardware may still be
	 * consuming the most recent commands).
	 */
	qlen = skb_queue_len(&rtwpci->h2c_release_queue);
	if (!release_all)
		qlen = qlen > RTW89_PCI_MULTITAG ? qlen - RTW89_PCI_MULTITAG : 0;

	while (qlen--) {
		skb = skb_dequeue(&rtwpci->h2c_release_queue);
		if (!skb) {
			rtw89_err(rtwdev, "failed to release fwcmd\n");
			return;
		}
		tx_data = RTW89_PCI_TX_SKB_CB(skb);
		dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

static void rtw89_pci_reclaim_tx_fwcmd(struct rtw89_dev *rtwdev,
				       struct rtw89_pci *rtwpci)
{
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];
	u32 cnt;

	cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring);
	if (!cnt)
		return;
	rtw89_pci_release_fwcmd(rtwdev, rtwpci, cnt, false);
}

static u32 rtw89_pci_rxbd_recalc(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_rx_ring *rx_ring)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	u32 addr_idx = bd_ring->addr.idx;
	u32 cnt, idx;

	idx = rtw89_read32(rtwdev, addr_idx);
	cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, false);

	return cnt;
}

static void rtw89_pci_sync_skb_for_cpu(struct rtw89_dev *rtwdev,
				       struct sk_buff *skb)
{
	struct rtw89_pci_rx_info *rx_info;
	dma_addr_t dma;

	rx_info = RTW89_PCI_RX_SKB_CB(skb);
	dma = rx_info->dma;
	dma_sync_single_for_cpu(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE,
				DMA_FROM_DEVICE);
}

static void rtw89_pci_sync_skb_for_device(struct rtw89_dev *rtwdev,
					  struct sk_buff *skb)
{
	struct rtw89_pci_rx_info *rx_info;
	dma_addr_t dma;

	rx_info = RTW89_PCI_RX_SKB_CB(skb);
	dma = rx_info->dma;
	dma_sync_single_for_device(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE,
				   DMA_FROM_DEVICE);
}

static void rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev,
				       struct sk_buff *skb)
{
	struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
	struct rtw89_pci_rxbd_info *rxbd_info;
	__le32 info;

	rxbd_info = (struct rtw89_pci_rxbd_info *)skb->data;
	info = rxbd_info->dword;

	rx_info->fs = le32_get_bits(info, RTW89_PCI_RXBD_FS);
	rx_info->ls = le32_get_bits(info, RTW89_PCI_RXBD_LS);
	rx_info->len = le32_get_bits(info, RTW89_PCI_RXBD_WRITE_SIZE);
	rx_info->tag = le32_get_bits(info, RTW89_PCI_RXBD_TAG);
}
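/* Note: the RX tag checked by rtw89_pci_validate_rx_tag() below is a
 * sequence number the DMA engine stamps into each RX buffer descriptor;
 * valid tags run from 1 to 0x1FFF, and after a wrap a target of 0 (also
 * the post-reset state) is bumped to 1. A mismatch most likely means the
 * descriptor has not been written back yet, which is why the caller
 * retries after re-syncing the buffer rather than failing outright.
 */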
static int rtw89_pci_validate_rx_tag(struct rtw89_dev *rtwdev,
				     struct rtw89_pci_rx_ring *rx_ring,
				     struct sk_buff *skb)
{
	struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	u32 target_rx_tag;

	if (!info->check_rx_tag)
		return 0;

	/* valid range is 1 ~ 0x1FFF */
	if (rx_ring->target_rx_tag == 0)
		target_rx_tag = 1;
	else
		target_rx_tag = rx_ring->target_rx_tag;

	if (rx_info->tag != target_rx_tag) {
		rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "mismatch RX tag 0x%x 0x%x\n",
			    rx_info->tag, target_rx_tag);
		return -EAGAIN;
	}

	return 0;
}

static
int rtw89_pci_sync_skb_for_device_and_validate_rx_info(struct rtw89_dev *rtwdev,
							struct rtw89_pci_rx_ring *rx_ring,
							struct sk_buff *skb)
{
	struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
	int rx_tag_retry = 100;
	int ret;

	do {
		rtw89_pci_sync_skb_for_cpu(rtwdev, skb);
		rtw89_pci_rxbd_info_update(rtwdev, skb);

		ret = rtw89_pci_validate_rx_tag(rtwdev, rx_ring, skb);
		if (ret != -EAGAIN)
			break;
	} while (rx_tag_retry--);

	/* update target rx_tag for next RX */
	rx_ring->target_rx_tag = rx_info->tag + 1;

	return ret;
}

static void rtw89_pci_ctrl_txdma_ch_ax(struct rtw89_dev *rtwdev, bool enable)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1;
	const struct rtw89_reg_def *dma_stop2 = &info->dma_stop2;

	if (enable) {
		rtw89_write32_clr(rtwdev, dma_stop1->addr, dma_stop1->mask);
		if (dma_stop2->addr)
			rtw89_write32_clr(rtwdev, dma_stop2->addr, dma_stop2->mask);
	} else {
		rtw89_write32_set(rtwdev, dma_stop1->addr, dma_stop1->mask);
		if (dma_stop2->addr)
			rtw89_write32_set(rtwdev, dma_stop2->addr, dma_stop2->mask);
	}
}

static void rtw89_pci_ctrl_txdma_fw_ch_ax(struct rtw89_dev *rtwdev, bool enable)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1;

	if (enable)
		rtw89_write32_clr(rtwdev, dma_stop1->addr, B_AX_STOP_CH12);
	else
		rtw89_write32_set(rtwdev, dma_stop1->addr, B_AX_STOP_CH12);
}

static bool
rtw89_skb_put_rx_data(struct rtw89_dev *rtwdev, bool fs, bool ls,
		      struct sk_buff *new,
		      const struct sk_buff *skb, u32 offset,
		      const struct rtw89_pci_rx_info *rx_info,
		      const struct rtw89_rx_desc_info *desc_info)
{
	u32 copy_len = rx_info->len - offset;

	if (unlikely(skb_tailroom(new) < copy_len)) {
		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
			    "invalid rx data length bd_len=%d desc_len=%d offset=%d (fs=%d ls=%d)\n",
			    rx_info->len, desc_info->pkt_size, offset, fs, ls);
		rtw89_hex_dump(rtwdev, RTW89_DBG_TXRX, "rx_data: ",
			       skb->data, rx_info->len);
		/* length of a single segment skb is desc_info->pkt_size */
		if (fs && ls) {
			copy_len = desc_info->pkt_size;
		} else {
			rtw89_info(rtwdev, "drop rx data due to invalid length\n");
			return false;
		}
	}

	skb_put_data(new, skb->data + offset, copy_len);

	return true;
}

static u32 rtw89_pci_get_rx_skb_idx(struct rtw89_dev *rtwdev,
				    struct rtw89_pci_dma_ring *bd_ring)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	u32 wp = bd_ring->wp;

	if (!info->rx_ring_eq_is_full)
		return wp;

	if (++wp >= bd_ring->len)
		wp = 0;

	return wp;
}
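/* Note: a received frame may span several RX buffer descriptors. The
 * delivery path below reassembles it using the FS (first segment) and LS
 * (last segment) flags: on FS it parses the RX descriptor, allocates the
 * target skb and records it in rx_ring->diliver_skb; middle segments only
 * carry an rtw89_pci_rxbd_info header before payload; on LS the completed
 * skb is handed to rtw89_core_rx(). A single-BD frame has both FS and LS
 * set.
 */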
static u32 rtw89_pci_rxbd_deliver_skbs(struct rtw89_dev *rtwdev,
				       struct rtw89_pci_rx_ring *rx_ring)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	struct rtw89_pci_rx_info *rx_info;
	struct rtw89_rx_desc_info *desc_info = &rx_ring->diliver_desc;
	struct sk_buff *new = rx_ring->diliver_skb;
	struct sk_buff *skb;
	u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
	u32 skb_idx;
	u32 offset;
	u32 cnt = 1;
	bool fs, ls;
	int ret;

	skb_idx = rtw89_pci_get_rx_skb_idx(rtwdev, bd_ring);
	skb = rx_ring->buf[skb_idx];

	ret = rtw89_pci_sync_skb_for_device_and_validate_rx_info(rtwdev, rx_ring, skb);
	if (ret) {
		rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
			  bd_ring->wp, ret);
		goto err_sync_device;
	}

	rx_info = RTW89_PCI_RX_SKB_CB(skb);
	fs = rx_info->fs;
	ls = rx_info->ls;

	if (fs) {
		if (new) {
			rtw89_debug(rtwdev, RTW89_DBG_UNEXP,
				    "skb should not be ready before first segment start\n");
			goto err_sync_device;
		}
		if (desc_info->ready) {
			rtw89_warn(rtwdev, "desc info should not be ready before first segment start\n");
			goto err_sync_device;
		}

		rtw89_chip_query_rxdesc(rtwdev, desc_info, skb->data, rxinfo_size);

		new = rtw89_alloc_skb_for_rx(rtwdev, desc_info->pkt_size);
		if (!new)
			goto err_sync_device;

		rx_ring->diliver_skb = new;

		/* first segment has RX desc */
		offset = desc_info->offset + desc_info->rxd_len;
	} else {
		offset = sizeof(struct rtw89_pci_rxbd_info);
		if (!new) {
			rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "no last skb\n");
			goto err_sync_device;
		}
	}
	if (!rtw89_skb_put_rx_data(rtwdev, fs, ls, new, skb, offset, rx_info, desc_info))
		goto err_sync_device;
	rtw89_pci_sync_skb_for_device(rtwdev, skb);
	rtw89_pci_rxbd_increase(rx_ring, 1);

	if (!desc_info->ready) {
		rtw89_warn(rtwdev, "no rx desc information\n");
		goto err_free_resource;
	}
	if (ls) {
		rtw89_core_rx(rtwdev, desc_info, new);
		rx_ring->diliver_skb = NULL;
		desc_info->ready = false;
	}

	return cnt;

err_sync_device:
	rtw89_pci_sync_skb_for_device(rtwdev, skb);
	rtw89_pci_rxbd_increase(rx_ring, 1);
err_free_resource:
	if (new)
		dev_kfree_skb_any(new);
	rx_ring->diliver_skb = NULL;
	desc_info->ready = false;

	return cnt;
}

static void rtw89_pci_rxbd_deliver(struct rtw89_dev *rtwdev,
				   struct rtw89_pci_rx_ring *rx_ring,
				   u32 cnt)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	u32 rx_cnt;

	while (cnt && rtwdev->napi_budget_countdown > 0) {
		rx_cnt = rtw89_pci_rxbd_deliver_skbs(rtwdev, rx_ring);
		if (!rx_cnt) {
			rtw89_err(rtwdev, "failed to deliver RXBD skb\n");

			/* skip the remaining RXBD bufs */
			rtw89_pci_rxbd_increase(rx_ring, cnt);
			break;
		}

		cnt -= rx_cnt;
	}

	rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
}
static int rtw89_pci_poll_rxq_dma(struct rtw89_dev *rtwdev,
				  struct rtw89_pci *rtwpci, int budget)
{
	struct rtw89_pci_rx_ring *rx_ring;
	int countdown = rtwdev->napi_budget_countdown;
	u32 cnt;

	rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RXQ];

	cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
	if (!cnt)
		return 0;

	cnt = min_t(u32, budget, cnt);

	rtw89_pci_rxbd_deliver(rtwdev, rx_ring, cnt);

	/* In case of flushing pending SKBs, the countdown may be exceeded. */
	if (rtwdev->napi_budget_countdown <= 0)
		return budget;

	return budget - countdown;
}

static void rtw89_pci_tx_status(struct rtw89_dev *rtwdev,
				struct rtw89_pci_tx_ring *tx_ring,
				struct sk_buff *skb, u8 tx_status)
{
	struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
	struct ieee80211_tx_info *info;

	rtw89_core_tx_wait_complete(rtwdev, skb_data, tx_status == RTW89_TX_DONE);

	info = IEEE80211_SKB_CB(skb);
	ieee80211_tx_info_clear_status(info);

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
	if (tx_status == RTW89_TX_DONE) {
		info->flags |= IEEE80211_TX_STAT_ACK;
		tx_ring->tx_acked++;
	} else {
		if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)
			rtw89_debug(rtwdev, RTW89_DBG_FW,
				    "failed to TX of status %x\n", tx_status);
		switch (tx_status) {
		case RTW89_TX_RETRY_LIMIT:
			tx_ring->tx_retry_lmt++;
			break;
		case RTW89_TX_LIFE_TIME:
			tx_ring->tx_life_time++;
			break;
		case RTW89_TX_MACID_DROP:
			tx_ring->tx_mac_id_drop++;
			break;
		default:
			rtw89_warn(rtwdev, "invalid TX status %x\n", tx_status);
			break;
		}
	}

	ieee80211_tx_status_ni(rtwdev->hw, skb);
}

static void rtw89_pci_reclaim_txbd(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_tx_wd *txwd;
	u32 cnt;

	cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring);
	while (cnt--) {
		txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list);
		if (!txwd) {
			rtw89_warn(rtwdev, "No busy txwd pages available\n");
			break;
		}

		list_del_init(&txwd->list);

		/* this skb has been freed by RPP */
		if (skb_queue_len(&txwd->queue) == 0)
			rtw89_pci_enqueue_txwd(tx_ring, txwd);
	}
}

static void rtw89_pci_release_busy_txwd(struct rtw89_dev *rtwdev,
					struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
	struct rtw89_pci_tx_wd *txwd;
	int i;

	for (i = 0; i < wd_ring->page_num; i++) {
		txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list);
		if (!txwd)
			break;

		list_del_init(&txwd->list);
	}
}
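/* Note on the TXWD lifecycle handled below: a TX work descriptor page moves
 * from the free ring to tx_ring->busy_pages when submitted, and is recycled
 * once two things have happened: the hardware consumed its TX BD (see
 * rtw89_pci_reclaim_txbd()) and the release report (RPP) freed all skbs
 * queued on it. Whichever completes last returns the page via
 * rtw89_pci_enqueue_txwd().
 */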
static void rtw89_pci_release_txwd_skb(struct rtw89_dev *rtwdev,
				       struct rtw89_pci_tx_ring *tx_ring,
				       struct rtw89_pci_tx_wd *txwd, u16 seq,
				       u8 tx_status)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_data *tx_data;
	struct sk_buff *skb, *tmp;
	u8 txch = tx_ring->txch;

	if (!list_empty(&txwd->list)) {
		rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
		/* In low power mode, an RPP can be received before the TX BD
		 * is updated. In normal mode this should not happen, so warn
		 * about it.
		 */
		if (!rtwpci->low_power && !list_empty(&txwd->list))
			rtw89_warn(rtwdev, "queue %d txwd %d is not idle\n",
				   txch, seq);
	}

	skb_queue_walk_safe(&txwd->queue, skb, tmp) {
		skb_unlink(skb, &txwd->queue);

		tx_data = RTW89_PCI_TX_SKB_CB(skb);
		dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
				 DMA_TO_DEVICE);

		rtw89_pci_tx_status(rtwdev, tx_ring, skb, tx_status);
	}

	if (list_empty(&txwd->list))
		rtw89_pci_enqueue_txwd(tx_ring, txwd);
}

static void rtw89_pci_release_rpp(struct rtw89_dev *rtwdev,
				  struct rtw89_pci_rpp_fmt *rpp)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring;
	struct rtw89_pci_tx_wd_ring *wd_ring;
	struct rtw89_pci_tx_wd *txwd;
	u16 seq;
	u8 qsel, tx_status, txch;

	seq = le32_get_bits(rpp->dword, RTW89_PCI_RPP_SEQ);
	qsel = le32_get_bits(rpp->dword, RTW89_PCI_RPP_QSEL);
	tx_status = le32_get_bits(rpp->dword, RTW89_PCI_RPP_TX_STATUS);
	txch = rtw89_core_get_ch_dma(rtwdev, qsel);

	if (txch == RTW89_TXCH_CH12) {
		rtw89_warn(rtwdev, "should no fwcmd release report\n");
		return;
	}

	tx_ring = &rtwpci->tx_rings[txch];
	wd_ring = &tx_ring->wd_ring;
	txwd = &wd_ring->pages[seq];

	rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, seq, tx_status);
}

static void rtw89_pci_release_pending_txwd_skb(struct rtw89_dev *rtwdev,
					       struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
	struct rtw89_pci_tx_wd *txwd;
	int i;

	for (i = 0; i < wd_ring->page_num; i++) {
		txwd = &wd_ring->pages[i];

		if (!list_empty(&txwd->list))
			continue;

		rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, i, RTW89_TX_MACID_DROP);
	}
}

static u32 rtw89_pci_release_tx_skbs(struct rtw89_dev *rtwdev,
				     struct rtw89_pci_rx_ring *rx_ring,
				     u32 max_cnt)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	struct rtw89_pci_rx_info *rx_info;
	struct rtw89_pci_rpp_fmt *rpp;
	struct rtw89_rx_desc_info desc_info = {};
	struct sk_buff *skb;
	u32 cnt = 0;
	u32 rpp_size = sizeof(struct rtw89_pci_rpp_fmt);
	u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
	u32 skb_idx;
	u32 offset;
	int ret;

	skb_idx = rtw89_pci_get_rx_skb_idx(rtwdev, bd_ring);
	skb = rx_ring->buf[skb_idx];

	ret = rtw89_pci_sync_skb_for_device_and_validate_rx_info(rtwdev, rx_ring, skb);
	if (ret) {
		rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
			  bd_ring->wp, ret);
		goto err_sync_device;
	}

	rx_info = RTW89_PCI_RX_SKB_CB(skb);
	if (!rx_info->fs || !rx_info->ls) {
		rtw89_err(rtwdev, "cannot process RP frame not set FS/LS\n");
		return cnt;
	}

	rtw89_chip_query_rxdesc(rtwdev, &desc_info, skb->data, rxinfo_size);

	/* first segment has RX desc */
	offset = desc_info.offset + desc_info.rxd_len;
	for (; offset + rpp_size <= rx_info->len; offset += rpp_size) {
		rpp = (struct rtw89_pci_rpp_fmt *)(skb->data + offset);
		rtw89_pci_release_rpp(rtwdev, rpp);
	}

	rtw89_pci_sync_skb_for_device(rtwdev, skb);
	rtw89_pci_rxbd_increase(rx_ring, 1);
	cnt++;

	return cnt;

err_sync_device:
	rtw89_pci_sync_skb_for_device(rtwdev, skb);
	return 0;
}
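/* Note: the RPQ carries release reports rather than frames. Each RX buffer
 * holds one RX descriptor followed by an array of rtw89_pci_rpp_fmt
 * entries; rtw89_pci_release_tx_skbs() above walks that array and feeds
 * every entry to rtw89_pci_release_rpp(), which decodes the WD sequence,
 * queue selection and TX status to complete the matching skbs. The
 * release_tx() loop below drains whole buffers at a time.
 */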
static void rtw89_pci_release_tx(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_rx_ring *rx_ring,
				 u32 cnt)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	u32 release_cnt;

	while (cnt) {
		release_cnt = rtw89_pci_release_tx_skbs(rtwdev, rx_ring, cnt);
		if (!release_cnt) {
			rtw89_err(rtwdev, "failed to release TX skbs\n");

			/* skip the remaining RXBD bufs */
			rtw89_pci_rxbd_increase(rx_ring, cnt);
			break;
		}

		cnt -= release_cnt;
	}

	rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
}

static int rtw89_pci_poll_rpq_dma(struct rtw89_dev *rtwdev,
				  struct rtw89_pci *rtwpci, int budget)
{
	struct rtw89_pci_rx_ring *rx_ring;
	u32 cnt;
	int work_done;

	rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];

	spin_lock_bh(&rtwpci->trx_lock);

	cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
	if (cnt == 0)
		goto out_unlock;

	rtw89_pci_release_tx(rtwdev, rx_ring, cnt);

out_unlock:
	spin_unlock_bh(&rtwpci->trx_lock);

	/* always release all RPQ */
	work_done = min_t(int, cnt, budget);
	rtwdev->napi_budget_countdown -= work_done;

	return work_done;
}

static void rtw89_pci_isr_rxd_unavail(struct rtw89_dev *rtwdev,
				      struct rtw89_pci *rtwpci)
{
	struct rtw89_pci_rx_ring *rx_ring;
	struct rtw89_pci_dma_ring *bd_ring;
	u32 reg_idx;
	u16 hw_idx, hw_idx_next, host_idx;
	int i;

	for (i = 0; i < RTW89_RXCH_NUM; i++) {
		rx_ring = &rtwpci->rx_rings[i];
		bd_ring = &rx_ring->bd_ring;

		reg_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
		hw_idx = FIELD_GET(TXBD_HW_IDX_MASK, reg_idx);
		host_idx = FIELD_GET(TXBD_HOST_IDX_MASK, reg_idx);
		hw_idx_next = (hw_idx + 1) % bd_ring->len;

		if (hw_idx_next == host_idx)
			rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "%d RXD unavailable\n", i);

		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
			    "%d RXD unavailable, idx=0x%08x, len=%d\n",
			    i, reg_idx, bd_ring->len);
	}
}

void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev,
			       struct rtw89_pci *rtwpci,
			       struct rtw89_pci_isrs *isrs)
{
	isrs->halt_c2h_isrs = rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs;
	isrs->isrs[0] = rtw89_read32(rtwdev, R_AX_PCIE_HISR00) & rtwpci->intrs[0];
	isrs->isrs[1] = rtw89_read32(rtwdev, R_AX_PCIE_HISR10) & rtwpci->intrs[1];

	rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs);
	rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isrs->isrs[0]);
	rtw89_write32(rtwdev, R_AX_PCIE_HISR10, isrs->isrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_recognize_intrs);
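/* Note: the v1/v2 interrupt flows below are hierarchical. An indicator
 * register is read first, and each sub-ISR register (halt C2H, AXIDMA,
 * HISR1 on v1) is only read and acknowledged when its indicator bit is
 * set, which skips sub-registers that cannot have fired.
 */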
void rtw89_pci_recognize_intrs_v1(struct rtw89_dev *rtwdev,
				  struct rtw89_pci *rtwpci,
				  struct rtw89_pci_isrs *isrs)
{
	isrs->ind_isrs = rtw89_read32(rtwdev, R_AX_PCIE_HISR00_V1) & rtwpci->ind_intrs;
	isrs->halt_c2h_isrs = isrs->ind_isrs & B_AX_HS0ISR_IND_INT_EN ?
			      rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs : 0;
	isrs->isrs[0] = isrs->ind_isrs & B_AX_HCI_AXIDMA_INT_EN ?
			rtw89_read32(rtwdev, R_AX_HAXI_HISR00) & rtwpci->intrs[0] : 0;
	isrs->isrs[1] = isrs->ind_isrs & B_AX_HS1ISR_IND_INT_EN ?
			rtw89_read32(rtwdev, R_AX_HISR1) & rtwpci->intrs[1] : 0;

	if (isrs->halt_c2h_isrs)
		rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs);
	if (isrs->isrs[0])
		rtw89_write32(rtwdev, R_AX_HAXI_HISR00, isrs->isrs[0]);
	if (isrs->isrs[1])
		rtw89_write32(rtwdev, R_AX_HISR1, isrs->isrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v1);

void rtw89_pci_recognize_intrs_v2(struct rtw89_dev *rtwdev,
				  struct rtw89_pci *rtwpci,
				  struct rtw89_pci_isrs *isrs)
{
	isrs->ind_isrs = rtw89_read32(rtwdev, R_BE_PCIE_HISR) & rtwpci->ind_intrs;
	isrs->halt_c2h_isrs = isrs->ind_isrs & B_BE_HS0ISR_IND_INT ?
			      rtw89_read32(rtwdev, R_BE_HISR0) & rtwpci->halt_c2h_intrs : 0;
	isrs->isrs[0] = isrs->ind_isrs & B_BE_HCI_AXIDMA_INT ?
			rtw89_read32(rtwdev, R_BE_HAXI_HISR00) & rtwpci->intrs[0] : 0;
	isrs->isrs[1] = rtw89_read32(rtwdev, R_BE_PCIE_DMA_ISR) & rtwpci->intrs[1];

	if (isrs->halt_c2h_isrs)
		rtw89_write32(rtwdev, R_BE_HISR0, isrs->halt_c2h_isrs);
	if (isrs->isrs[0])
		rtw89_write32(rtwdev, R_BE_HAXI_HISR00, isrs->isrs[0]);
	if (isrs->isrs[1])
		rtw89_write32(rtwdev, R_BE_PCIE_DMA_ISR, isrs->isrs[1]);
	rtw89_write32(rtwdev, R_BE_PCIE_HISR, isrs->ind_isrs);
}
EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v2);

void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, rtwpci->intrs[0]);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, rtwpci->intrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_enable_intr);

void rtw89_pci_disable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_HIMR0, 0);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, 0);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, 0);
}
EXPORT_SYMBOL(rtw89_pci_disable_intr);

void rtw89_pci_enable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, rtwpci->ind_intrs);
	rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
	rtw89_write32(rtwdev, R_AX_HAXI_HIMR00, rtwpci->intrs[0]);
	rtw89_write32(rtwdev, R_AX_HIMR1, rtwpci->intrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_enable_intr_v1);

void rtw89_pci_disable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, 0);
}
EXPORT_SYMBOL(rtw89_pci_disable_intr_v1);

void rtw89_pci_enable_intr_v2(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_BE_HIMR0, rtwpci->halt_c2h_intrs);
	rtw89_write32(rtwdev, R_BE_HAXI_HIMR00, rtwpci->intrs[0]);
	rtw89_write32(rtwdev, R_BE_PCIE_DMA_IMR_0_V1, rtwpci->intrs[1]);
	rtw89_write32(rtwdev, R_BE_PCIE_HIMR0, rtwpci->ind_intrs);
}
EXPORT_SYMBOL(rtw89_pci_enable_intr_v2);

void rtw89_pci_disable_intr_v2(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_BE_PCIE_HIMR0, 0);
	rtw89_write32(rtwdev, R_BE_PCIE_DMA_IMR_0_V1, 0);
}
EXPORT_SYMBOL(rtw89_pci_disable_intr_v2);
static void rtw89_pci_ops_recovery_start(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw89_chip_disable_intr(rtwdev, rtwpci);
	rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_START);
	rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static void rtw89_pci_ops_recovery_complete(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw89_chip_disable_intr(rtwdev, rtwpci);
	rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_COMPLETE);
	rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static void rtw89_pci_low_power_interrupt_handler(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	int budget = NAPI_POLL_WEIGHT;

	/* Prevent the RXQ from getting stuck by running out of budget. */
	rtwdev->napi_budget_countdown = budget;

	rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, budget);
	rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, budget);
}

static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev)
{
	struct rtw89_dev *rtwdev = dev;
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	const struct rtw89_pci_gen_def *gen_def = info->gen_def;
	struct rtw89_pci_isrs isrs;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw89_chip_recognize_intrs(rtwdev, rtwpci, &isrs);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);

	if (unlikely(isrs.isrs[0] & gen_def->isr_rdu))
		rtw89_pci_isr_rxd_unavail(rtwdev, rtwpci);

	if (unlikely(isrs.halt_c2h_isrs & gen_def->isr_halt_c2h))
		rtw89_ser_notify(rtwdev, rtw89_mac_get_err_status(rtwdev));

	if (unlikely(isrs.halt_c2h_isrs & gen_def->isr_wdt_timeout))
		rtw89_ser_notify(rtwdev, MAC_AX_ERR_L2_ERR_WDT_TIMEOUT_INT);

	if (unlikely(rtwpci->under_recovery))
		goto enable_intr;

	if (unlikely(rtwpci->low_power)) {
		rtw89_pci_low_power_interrupt_handler(rtwdev);
		goto enable_intr;
	}

	if (likely(rtwpci->running)) {
		local_bh_disable();
		napi_schedule(&rtwdev->napi);
		local_bh_enable();
	}

	return IRQ_HANDLED;

enable_intr:
	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	if (likely(rtwpci->running))
		rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
	return IRQ_HANDLED;
}

static irqreturn_t rtw89_pci_interrupt_handler(int irq, void *dev)
{
	struct rtw89_dev *rtwdev = dev;
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;
	irqreturn_t irqret = IRQ_WAKE_THREAD;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);

	/* An interrupt event already in flight can still trigger this handler
	 * even after pci_stop() has turned off the IMR.
	 */
	if (unlikely(!rtwpci->running)) {
		irqret = IRQ_HANDLED;
		goto exit;
	}

	rtw89_chip_disable_intr(rtwdev, rtwpci);
exit:
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);

	return irqret;
}
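/* Note: the DEF_*CHADDRS macros below build register-address tables by
 * token pasting. For example (expansion shown for illustration),
 * DEF_TXCHADDRS(info, ACH0, _V1) produces
 *   [RTW89_TXCH_ACH0] = { .num = R_AX_ACH0_TXBD_NUM,
 *                         .idx = R_AX_ACH0_TXBD_IDX,
 *                         .bdram = R_AX_ACH0_BDRAM_CTRL_V1, ... };
 * note that in the plain DEF_TXCHADDRS variant the version suffix applies
 * to all fields except .num and .idx.
 */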
#define DEF_TXCHADDRS_TYPE2(gen, ch_idx, txch, v...) \
	[RTW89_TXCH_##ch_idx] = { \
		.num = R_##gen##_##txch##_TXBD_NUM ##v, \
		.idx = R_##gen##_##txch##_TXBD_IDX ##v, \
		.bdram = 0, \
		.desa_l = R_##gen##_##txch##_TXBD_DESA_L ##v, \
		.desa_h = R_##gen##_##txch##_TXBD_DESA_H ##v, \
	}

#define DEF_TXCHADDRS_TYPE1(info, txch, v...) \
	[RTW89_TXCH_##txch] = { \
		.num = R_AX_##txch##_TXBD_NUM ##v, \
		.idx = R_AX_##txch##_TXBD_IDX ##v, \
		.bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
		.desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
		.desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
	}

#define DEF_TXCHADDRS(info, txch, v...) \
	[RTW89_TXCH_##txch] = { \
		.num = R_AX_##txch##_TXBD_NUM, \
		.idx = R_AX_##txch##_TXBD_IDX, \
		.bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
		.desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
		.desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
	}

#define DEF_RXCHADDRS(gen, ch_idx, rxch, v...) \
	[RTW89_RXCH_##ch_idx] = { \
		.num = R_##gen##_##rxch##_RXBD_NUM ##v, \
		.idx = R_##gen##_##rxch##_RXBD_IDX ##v, \
		.desa_l = R_##gen##_##rxch##_RXBD_DESA_L ##v, \
		.desa_h = R_##gen##_##rxch##_RXBD_DESA_H ##v, \
	}

const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set = {
	.tx = {
		DEF_TXCHADDRS(info, ACH0),
		DEF_TXCHADDRS(info, ACH1),
		DEF_TXCHADDRS(info, ACH2),
		DEF_TXCHADDRS(info, ACH3),
		DEF_TXCHADDRS(info, ACH4),
		DEF_TXCHADDRS(info, ACH5),
		DEF_TXCHADDRS(info, ACH6),
		DEF_TXCHADDRS(info, ACH7),
		DEF_TXCHADDRS(info, CH8),
		DEF_TXCHADDRS(info, CH9),
		DEF_TXCHADDRS_TYPE1(info, CH10),
		DEF_TXCHADDRS_TYPE1(info, CH11),
		DEF_TXCHADDRS(info, CH12),
	},
	.rx = {
		DEF_RXCHADDRS(AX, RXQ, RXQ),
		DEF_RXCHADDRS(AX, RPQ, RPQ),
	},
};
EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set);

const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1 = {
	.tx = {
		DEF_TXCHADDRS(info, ACH0, _V1),
		DEF_TXCHADDRS(info, ACH1, _V1),
		DEF_TXCHADDRS(info, ACH2, _V1),
		DEF_TXCHADDRS(info, ACH3, _V1),
		DEF_TXCHADDRS(info, ACH4, _V1),
		DEF_TXCHADDRS(info, ACH5, _V1),
		DEF_TXCHADDRS(info, ACH6, _V1),
		DEF_TXCHADDRS(info, ACH7, _V1),
		DEF_TXCHADDRS(info, CH8, _V1),
		DEF_TXCHADDRS(info, CH9, _V1),
		DEF_TXCHADDRS_TYPE1(info, CH10, _V1),
		DEF_TXCHADDRS_TYPE1(info, CH11, _V1),
		DEF_TXCHADDRS(info, CH12, _V1),
	},
	.rx = {
		DEF_RXCHADDRS(AX, RXQ, RXQ, _V1),
		DEF_RXCHADDRS(AX, RPQ, RPQ, _V1),
	},
};
EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_v1);

const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_be = {
	.tx = {
		DEF_TXCHADDRS_TYPE2(BE, ACH0, CH0, _V1),
		DEF_TXCHADDRS_TYPE2(BE, ACH1, CH1, _V1),
		DEF_TXCHADDRS_TYPE2(BE, ACH2, CH2, _V1),
		DEF_TXCHADDRS_TYPE2(BE, ACH3, CH3, _V1),
		DEF_TXCHADDRS_TYPE2(BE, ACH4, CH4, _V1),
		DEF_TXCHADDRS_TYPE2(BE, ACH5, CH5, _V1),
		DEF_TXCHADDRS_TYPE2(BE, ACH6, CH6, _V1),
		DEF_TXCHADDRS_TYPE2(BE, ACH7, CH7, _V1),
		DEF_TXCHADDRS_TYPE2(BE, CH8, CH8, _V1),
		DEF_TXCHADDRS_TYPE2(BE, CH9, CH9, _V1),
		DEF_TXCHADDRS_TYPE2(BE, CH10, CH10, _V1),
		DEF_TXCHADDRS_TYPE2(BE, CH11, CH11, _V1),
		DEF_TXCHADDRS_TYPE2(BE, CH12, CH12, _V1),
	},
	.rx = {
		DEF_RXCHADDRS(BE, RXQ, RXQ0, _V1),
		DEF_RXCHADDRS(BE, RPQ, RPQ0, _V1),
	},
};
EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_be);

#undef DEF_TXCHADDRS_TYPE1
#undef DEF_TXCHADDRS
#undef DEF_RXCHADDRS

static int rtw89_pci_get_txch_addrs(struct rtw89_dev *rtwdev,
				    enum rtw89_tx_channel txch,
				    const struct rtw89_pci_ch_dma_addr **addr)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;

	if (txch >= RTW89_TXCH_NUM)
		return -EINVAL;

	*addr = &info->dma_addr_set->tx[txch];

	return 0;
}

static int rtw89_pci_get_rxch_addrs(struct rtw89_dev *rtwdev,
				    enum rtw89_rx_channel rxch,
				    const struct rtw89_pci_ch_dma_addr **addr)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;

	if (rxch >= RTW89_RXCH_NUM)
		return -EINVAL;

	*addr = &info->dma_addr_set->rx[rxch];

	return 0;
}
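/* Note: one descriptor is deliberately kept unused below so that wp == rp
 * always means "empty" and never "full". Worked example (illustrative):
 * with len = 256, wp = 10, rp = 10 the ring is empty and 255 BDs are
 * available; with wp = 9, rp = 10 only rp - wp - 1 = 0 remain, i.e. the
 * ring is full.
 */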
static u32 rtw89_pci_get_avail_txbd_num(struct rtw89_pci_tx_ring *ring)
{
	struct rtw89_pci_dma_ring *bd_ring = &ring->bd_ring;

	/* one desc is reserved to tell a full ring from an empty one */
	if (bd_ring->rp > bd_ring->wp)
		return bd_ring->rp - bd_ring->wp - 1;

	return bd_ring->len - (bd_ring->wp - bd_ring->rp) - 1;
}

static
u32 __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];
	u32 cnt;

	spin_lock_bh(&rtwpci->trx_lock);
	rtw89_pci_reclaim_tx_fwcmd(rtwdev, rtwpci);
	cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	spin_unlock_bh(&rtwpci->trx_lock);

	return cnt;
}

static
u32 __rtw89_pci_check_and_reclaim_tx_resource_noio(struct rtw89_dev *rtwdev,
						   u8 txch)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
	u32 cnt;

	spin_lock_bh(&rtwpci->trx_lock);
	cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	if (txch != RTW89_TXCH_CH12)
		cnt = min(cnt, wd_ring->curr_num);
	spin_unlock_bh(&rtwpci->trx_lock);

	return cnt;
}
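/* Note: when either BD or WD resources run dry, the reclaim path below
 * first polls the RPQ (which releases completed WDs and their skbs) and
 * only then re-reads the BD ring; if BDs are still exhausted it reclaims
 * them directly. The final count is min(BD, WD) because one submission
 * needs one of each.
 */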
static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
						     u8 txch)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 bd_cnt, wd_cnt, min_cnt = 0;
	struct rtw89_pci_rx_ring *rx_ring;
	enum rtw89_debug_mask debug_mask;
	u32 cnt;

	rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];

	spin_lock_bh(&rtwpci->trx_lock);
	bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	wd_cnt = wd_ring->curr_num;

	if (wd_cnt == 0 || bd_cnt == 0) {
		cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
		if (cnt)
			rtw89_pci_release_tx(rtwdev, rx_ring, cnt);
		else if (wd_cnt == 0)
			goto out_unlock;

		bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
		if (bd_cnt == 0)
			rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
	}

	bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	wd_cnt = wd_ring->curr_num;
	min_cnt = min(bd_cnt, wd_cnt);
	if (min_cnt == 0) {
		/* This message can appear frequently in low power mode, or
		 * under high traffic on chips with a small FIFO; both are
		 * recognized as normal behavior, so print it with mask
		 * RTW89_DBG_TXRX in those situations.
		 */
		if (rtwpci->low_power || chip->small_fifo_size)
			debug_mask = RTW89_DBG_TXRX;
		else
			debug_mask = RTW89_DBG_UNEXP;

		rtw89_debug(rtwdev, debug_mask,
			    "still no tx resource after reclaim: wd_cnt=%d bd_cnt=%d\n",
			    wd_cnt, bd_cnt);
	}

out_unlock:
	spin_unlock_bh(&rtwpci->trx_lock);

	return min_cnt;
}

static u32 rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
						   u8 txch)
{
	if (rtwdev->hci.paused)
		return __rtw89_pci_check_and_reclaim_tx_resource_noio(rtwdev, txch);

	if (txch == RTW89_TXCH_CH12)
		return __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(rtwdev);

	return __rtw89_pci_check_and_reclaim_tx_resource(rtwdev, txch);
}

static void __rtw89_pci_tx_kick_off(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 host_idx, addr;

	spin_lock_bh(&rtwpci->trx_lock);

	addr = bd_ring->addr.idx;
	host_idx = bd_ring->wp;
	rtw89_write16(rtwdev, addr, host_idx);

	spin_unlock_bh(&rtwpci->trx_lock);
}

static void rtw89_pci_tx_bd_ring_update(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring,
					int n_txbd)
{
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 host_idx, len;

	len = bd_ring->len;
	host_idx = bd_ring->wp + n_txbd;
	host_idx = host_idx < len ? host_idx : host_idx - len;

	bd_ring->wp = host_idx;
}

static void rtw89_pci_ops_tx_kick_off(struct rtw89_dev *rtwdev, u8 txch)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];

	if (rtwdev->hci.paused) {
		set_bit(txch, rtwpci->kick_map);
		return;
	}

	__rtw89_pci_tx_kick_off(rtwdev, tx_ring);
}

static void rtw89_pci_tx_kick_off_pending(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring;
	int txch;

	for (txch = 0; txch < RTW89_TXCH_NUM; txch++) {
		if (!test_and_clear_bit(txch, rtwpci->kick_map))
			continue;

		tx_ring = &rtwpci->tx_rings[txch];
		__rtw89_pci_tx_kick_off(rtwdev, tx_ring);
	}
}

static void __pci_flush_txch(struct rtw89_dev *rtwdev, u8 txch, bool drop)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 cur_idx, cur_rp;
	u8 i;

	/* Because the time taken by the I/O varies, it is hard to define a
	 * reasonable fixed total timeout for the read_poll_timeout* helpers.
	 * Instead, bound the number of polls and use a plain for loop with
	 * udelay here.
	 */
	for (i = 0; i < 60; i++) {
		cur_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
		cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
		if (cur_rp == bd_ring->wp)
			return;

		udelay(1);
	}

	if (!drop)
		rtw89_info(rtwdev, "timed out to flush pci txch: %d\n", txch);
}

static void __rtw89_pci_ops_flush_txchs(struct rtw89_dev *rtwdev, u32 txchs,
					bool drop)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	u8 i;

	for (i = 0; i < RTW89_TXCH_NUM; i++) {
		/* It may be unnecessary to flush FWCMD queue. */
		if (i == RTW89_TXCH_CH12)
			continue;
		if (info->tx_dma_ch_mask & BIT(i))
			continue;

		if (txchs & BIT(i))
			__pci_flush_txch(rtwdev, i, drop);
	}
}

static void rtw89_pci_ops_flush_queues(struct rtw89_dev *rtwdev, u32 queues,
				       bool drop)
{
	__rtw89_pci_ops_flush_txchs(rtwdev, BIT(RTW89_TXCH_NUM) - 1, drop);
}
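/* Note: the two fill_txaddr_info variants below emit the address-info
 * block the DMA engine walks to fetch the frame. The base version always
 * describes the whole buffer with a single entry; the v1 version splits
 * the mapping into up to RTW89_TXADDR_INFO_NR_V1 segments of at most
 * TXADDR_INFO_LENTHG_V1_MAX bytes each, setting the LS bit on the final
 * segment.
 */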
u32 rtw89_pci_fill_txaddr_info(struct rtw89_dev *rtwdev,
			       void *txaddr_info_addr, u32 total_len,
			       dma_addr_t dma, u8 *add_info_nr)
{
	struct rtw89_pci_tx_addr_info_32 *txaddr_info = txaddr_info_addr;
	__le16 option;

	txaddr_info->length = cpu_to_le16(total_len);
	option = cpu_to_le16(RTW89_PCI_ADDR_MSDU_LS | RTW89_PCI_ADDR_NUM(1));
	option |= le16_encode_bits(upper_32_bits(dma), RTW89_PCI_ADDR_HIGH_MASK);
	txaddr_info->option = option;
	txaddr_info->dma = cpu_to_le32(dma);

	*add_info_nr = 1;

	return sizeof(*txaddr_info);
}
EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info);

u32 rtw89_pci_fill_txaddr_info_v1(struct rtw89_dev *rtwdev,
				  void *txaddr_info_addr, u32 total_len,
				  dma_addr_t dma, u8 *add_info_nr)
{
	struct rtw89_pci_tx_addr_info_32_v1 *txaddr_info = txaddr_info_addr;
	u32 remain = total_len;
	u32 len;
	u16 length_option;
	int n;

	for (n = 0; n < RTW89_TXADDR_INFO_NR_V1 && remain; n++) {
		len = remain >= TXADDR_INFO_LENTHG_V1_MAX ?
		      TXADDR_INFO_LENTHG_V1_MAX : remain;
		remain -= len;

		length_option = FIELD_PREP(B_PCIADDR_LEN_V1_MASK, len) |
				FIELD_PREP(B_PCIADDR_HIGH_SEL_V1_MASK, 0) |
				FIELD_PREP(B_PCIADDR_LS_V1_MASK, remain == 0);
		length_option |= u16_encode_bits(upper_32_bits(dma),
						 B_PCIADDR_HIGH_SEL_V1_MASK);
		txaddr_info->length_opt = cpu_to_le16(length_option);
		txaddr_info->dma_low_lsb = cpu_to_le16(FIELD_GET(GENMASK(15, 0), dma));
		txaddr_info->dma_low_msb = cpu_to_le16(FIELD_GET(GENMASK(31, 16), dma));

		dma += len;
		txaddr_info++;
	}

	WARN_ONCE(remain, "length overflow remain=%u total_len=%u",
		  remain, total_len);

	*add_info_nr = n;

	return n * sizeof(*txaddr_info);
}
EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info_v1);
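/* Note: rtw89_pci_txwd_submit() below lays the following out contiguously
 * in the WD page at txwd->vaddr:
 *   [TX WD body][optional WD info][WP info (seq0..seq3)][addr info]
 * The WD info part is present only when desc_info->en_wd_info is set, and
 * txwd->len ends up covering all of the above.
 */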
static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_tx_ring *tx_ring,
				 struct rtw89_pci_tx_wd *txwd,
				 struct rtw89_core_tx_request *tx_req)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
	struct rtw89_pci_tx_wp_info *txwp_info;
	void *txaddr_info_addr;
	struct pci_dev *pdev = rtwpci->pdev;
	struct sk_buff *skb = tx_req->skb;
	struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
	struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
	bool en_wd_info = desc_info->en_wd_info;
	u32 txwd_len;
	u32 txwp_len;
	u32 txaddr_info_len;
	dma_addr_t dma;
	int ret;

	dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma)) {
		rtw89_err(rtwdev, "failed to map skb dma data\n");
		ret = -EBUSY;
		goto err;
	}

	tx_data->dma = dma;
	rcu_assign_pointer(skb_data->wait, NULL);

	txwp_len = sizeof(*txwp_info);
	txwd_len = chip->txwd_body_size;
	txwd_len += en_wd_info ? chip->txwd_info_size : 0;

	txwp_info = txwd->vaddr + txwd_len;
	txwp_info->seq0 = cpu_to_le16(txwd->seq | RTW89_PCI_TXWP_VALID);
	txwp_info->seq1 = 0;
	txwp_info->seq2 = 0;
	txwp_info->seq3 = 0;

	tx_ring->tx_cnt++;
	txaddr_info_addr = txwd->vaddr + txwd_len + txwp_len;
	txaddr_info_len =
		rtw89_chip_fill_txaddr_info(rtwdev, txaddr_info_addr, skb->len,
					    dma, &desc_info->addr_info_nr);

	txwd->len = txwd_len + txwp_len + txaddr_info_len;

	rtw89_chip_fill_txdesc(rtwdev, desc_info, txwd->vaddr);

	skb_queue_tail(&txwd->queue, skb);

	return 0;

err:
	return ret;
}
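/* Note: firmware commands bypass WD pages entirely. In the submit path
 * below, the H2C descriptor is pushed into the skb head itself, the TX BD
 * points straight at the skb mapping, and the upper DMA address bits
 * travel in the BD option field, so CH12 consumes only BD-ring entries.
 */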
static int rtw89_pci_fwcmd_submit(struct rtw89_dev *rtwdev,
				  struct rtw89_pci_tx_ring *tx_ring,
				  struct rtw89_pci_tx_bd_32 *txbd,
				  struct rtw89_core_tx_request *tx_req)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
	void *txdesc;
	int txdesc_size = chip->h2c_desc_size;
	struct pci_dev *pdev = rtwpci->pdev;
	struct sk_buff *skb = tx_req->skb;
	struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
	dma_addr_t dma;
	__le16 opt;

	txdesc = skb_push(skb, txdesc_size);
	memset(txdesc, 0, txdesc_size);
	rtw89_chip_fill_txdesc_fwcmd(rtwdev, desc_info, txdesc);

	dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma)) {
		rtw89_err(rtwdev, "failed to map fwcmd dma data\n");
		return -EBUSY;
	}

	tx_data->dma = dma;
	opt = cpu_to_le16(RTW89_PCI_TXBD_OPT_LS);
	opt |= le16_encode_bits(upper_32_bits(dma), RTW89_PCI_TXBD_OPT_DMA_HI);
	txbd->opt = opt;
	txbd->length = cpu_to_le16(skb->len);
	txbd->dma = cpu_to_le32(tx_data->dma);
	skb_queue_tail(&rtwpci->h2c_queue, skb);

	rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1);

	return 0;
}

static int rtw89_pci_txbd_submit(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_tx_ring *tx_ring,
				 struct rtw89_pci_tx_bd_32 *txbd,
				 struct rtw89_core_tx_request *tx_req)
{
	struct rtw89_pci_tx_wd *txwd;
	__le16 opt;
	int ret;

	/* FWCMD queue doesn't have wd pages. Instead, it submits the CMD
	 * buffer with WD BODY only. So here we don't need to check the free
	 * pages of the wd ring.
	 */
	if (tx_ring->txch == RTW89_TXCH_CH12)
		return rtw89_pci_fwcmd_submit(rtwdev, tx_ring, txbd, tx_req);

	txwd = rtw89_pci_dequeue_txwd(tx_ring);
	if (!txwd) {
		rtw89_err(rtwdev, "no available TXWD\n");
		ret = -ENOSPC;
		goto err;
	}

	ret = rtw89_pci_txwd_submit(rtwdev, tx_ring, txwd, tx_req);
	if (ret) {
		rtw89_err(rtwdev, "failed to submit TXWD %d\n", txwd->seq);
		goto err_enqueue_wd;
	}

	list_add_tail(&txwd->list, &tx_ring->busy_pages);

	opt = cpu_to_le16(RTW89_PCI_TXBD_OPT_LS);
	opt |= le16_encode_bits(upper_32_bits(txwd->paddr), RTW89_PCI_TXBD_OPT_DMA_HI);
	txbd->opt = opt;
	txbd->length = cpu_to_le16(txwd->len);
	txbd->dma = cpu_to_le32(txwd->paddr);

	rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1);

	return 0;

err_enqueue_wd:
	rtw89_pci_enqueue_txwd(tx_ring, txwd);
err:
	return ret;
}
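/* Note: the condition guarding rtw89_pci_tx_write() below enforces an
 * if-and-only-if pairing: RTW89_TXCH_CH12 must carry firmware commands and
 * firmware commands must use CH12. Either a FWCMD on another channel or a
 * non-FWCMD frame on CH12 trips the same -EINVAL error.
 */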
static int rtw89_pci_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req,
			      u8 txch)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring;
	struct rtw89_pci_tx_bd_32 *txbd;
	u32 n_avail_txbd;
	int ret = 0;

	/* check the tx type and dma channel for fw cmd queue */
	if ((txch == RTW89_TXCH_CH12 ||
	     tx_req->tx_type == RTW89_CORE_TX_TYPE_FWCMD) &&
	    (txch != RTW89_TXCH_CH12 ||
	     tx_req->tx_type != RTW89_CORE_TX_TYPE_FWCMD)) {
		rtw89_err(rtwdev, "only fw cmd uses dma channel 12\n");
		return -EINVAL;
	}

	tx_ring = &rtwpci->tx_rings[txch];
	spin_lock_bh(&rtwpci->trx_lock);

	n_avail_txbd = rtw89_pci_get_avail_txbd_num(tx_ring);
	if (n_avail_txbd == 0) {
		rtw89_err(rtwdev, "no available TXBD\n");
		ret = -ENOSPC;
		goto err_unlock;
	}

	txbd = rtw89_pci_get_next_txbd(tx_ring);
	ret = rtw89_pci_txbd_submit(rtwdev, tx_ring, txbd, tx_req);
	if (ret) {
		rtw89_err(rtwdev, "failed to submit TXBD\n");
		goto err_unlock;
	}

	spin_unlock_bh(&rtwpci->trx_lock);
	return 0;

err_unlock:
	spin_unlock_bh(&rtwpci->trx_lock);
	return ret;
}

static int rtw89_pci_ops_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req)
{
	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
	int ret;

	ret = rtw89_pci_tx_write(rtwdev, tx_req, desc_info->ch_dma);
	if (ret) {
		rtw89_err(rtwdev, "failed to TX Queue %d\n", desc_info->ch_dma);
		return ret;
	}

	return 0;
}
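/* Note: each rtw89_pci_bd_ram entry carves a slice of the shared on-chip
 * BD RAM for one TX channel: start_idx is the slice offset, max_num its
 * size, and min_num the floor the hardware keeps reserved. In the dual
 * table the ACH0-ACH7 slices are 5 entries apart (0, 5, 10, ...), so the
 * layout packs the channels back to back.
 */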
const struct rtw89_pci_bd_ram rtw89_bd_ram_table_dual[RTW89_TXCH_NUM] = {
	[RTW89_TXCH_ACH0] = {.start_idx = 0,  .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH1] = {.start_idx = 5,  .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH4] = {.start_idx = 20, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH5] = {.start_idx = 25, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH6] = {.start_idx = 30, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH7] = {.start_idx = 35, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_CH8]  = {.start_idx = 40, .max_num = 5, .min_num = 1},
	[RTW89_TXCH_CH9]  = {.start_idx = 45, .max_num = 5, .min_num = 1},
	[RTW89_TXCH_CH10] = {.start_idx = 50, .max_num = 5, .min_num = 1},
	[RTW89_TXCH_CH11] = {.start_idx = 55, .max_num = 5, .min_num = 1},
	[RTW89_TXCH_CH12] = {.start_idx = 60, .max_num = 4, .min_num = 1},
};
EXPORT_SYMBOL(rtw89_bd_ram_table_dual);

const struct rtw89_pci_bd_ram rtw89_bd_ram_table_single[RTW89_TXCH_NUM] = {
	[RTW89_TXCH_ACH0] = {.start_idx = 0,  .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH1] = {.start_idx = 5,  .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_CH8]  = {.start_idx = 20, .max_num = 4, .min_num = 1},
	[RTW89_TXCH_CH9]  = {.start_idx = 24, .max_num = 4, .min_num = 1},
	[RTW89_TXCH_CH12] = {.start_idx = 28, .max_num = 4, .min_num = 1},
};
EXPORT_SYMBOL(rtw89_bd_ram_table_single);

static void rtw89_pci_init_wp_16sel(struct rtw89_dev *rtwdev)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	u32 addr = info->wp_sel_addr;
	u32 val;
	int i;

	if (!info->wp_sel_addr)
		return;

	for (i = 0; i < 16; i += 4) {
		val = u32_encode_bits(i + 0, MASKBYTE0) |
		      u32_encode_bits(i + 1, MASKBYTE1) |
		      u32_encode_bits(i + 2, MASKBYTE2) |
		      u32_encode_bits(i + 3, MASKBYTE3);
		rtw89_write32(rtwdev, addr + i, val);
	}
}
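/* Note: when resetting rings on chips with rx_ring_eq_is_full set, the RX
 * write pointer starts at len - 1 instead of 0: such rings run completely
 * full, and with every slot owned by hardware the "one behind" position is
 * what distinguishes the full state. The hardware-facing index register is
 * programmed to match.
 */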
static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	const struct rtw89_pci_bd_ram *bd_ram_table = *info->bd_ram_table;
	struct rtw89_pci_tx_ring *tx_ring;
	struct rtw89_pci_rx_ring *rx_ring;
	struct rtw89_pci_dma_ring *bd_ring;
	const struct rtw89_pci_bd_ram *bd_ram;
	u32 addr_num;
	u32 addr_idx;
	u32 addr_bdram;
	u32 addr_desa_l;
	u32 val32;
	int i;

	for (i = 0; i < RTW89_TXCH_NUM; i++) {
		if (info->tx_dma_ch_mask & BIT(i))
			continue;

		tx_ring = &rtwpci->tx_rings[i];
		bd_ring = &tx_ring->bd_ring;
		bd_ram = bd_ram_table ? &bd_ram_table[i] : NULL;
		addr_num = bd_ring->addr.num;
		addr_bdram = bd_ring->addr.bdram;
		addr_desa_l = bd_ring->addr.desa_l;
		bd_ring->wp = 0;
		bd_ring->rp = 0;

		rtw89_write16(rtwdev, addr_num, bd_ring->len);
		if (addr_bdram && bd_ram) {
			val32 = FIELD_PREP(BDRAM_SIDX_MASK, bd_ram->start_idx) |
				FIELD_PREP(BDRAM_MAX_MASK, bd_ram->max_num) |
				FIELD_PREP(BDRAM_MIN_MASK, bd_ram->min_num);

			rtw89_write32(rtwdev, addr_bdram, val32);
		}
		rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
		rtw89_write32(rtwdev, addr_desa_l + 4, upper_32_bits(bd_ring->dma));
	}

	for (i = 0; i < RTW89_RXCH_NUM; i++) {
		rx_ring = &rtwpci->rx_rings[i];
		bd_ring = &rx_ring->bd_ring;
		addr_num = bd_ring->addr.num;
		addr_idx = bd_ring->addr.idx;
		addr_desa_l = bd_ring->addr.desa_l;
		if (info->rx_ring_eq_is_full)
			bd_ring->wp = bd_ring->len - 1;
		else
			bd_ring->wp = 0;
		bd_ring->rp = 0;
		rx_ring->diliver_skb = NULL;
		rx_ring->diliver_desc.ready = false;
		rx_ring->target_rx_tag = 0;

		rtw89_write16(rtwdev, addr_num, bd_ring->len);
		rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
		rtw89_write32(rtwdev, addr_desa_l + 4, upper_32_bits(bd_ring->dma));

		if (info->rx_ring_eq_is_full)
			rtw89_write16(rtwdev, addr_idx, bd_ring->wp);
	}

	rtw89_pci_init_wp_16sel(rtwdev);
}

static void rtw89_pci_release_tx_ring(struct rtw89_dev *rtwdev,
				      struct rtw89_pci_tx_ring *tx_ring)
{
	rtw89_pci_release_busy_txwd(rtwdev, tx_ring);
	rtw89_pci_release_pending_txwd_skb(rtwdev, tx_ring);
}

void rtw89_pci_ops_reset(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	int txch;

	rtw89_pci_reset_trx_rings(rtwdev);

	spin_lock_bh(&rtwpci->trx_lock);
	for (txch = 0; txch < RTW89_TXCH_NUM; txch++) {
		if (info->tx_dma_ch_mask & BIT(txch))
			continue;
		if (txch == RTW89_TXCH_CH12) {
			rtw89_pci_release_fwcmd(rtwdev, rtwpci,
						skb_queue_len(&rtwpci->h2c_queue), true);
			continue;
		}
		rtw89_pci_release_tx_ring(rtwdev, &rtwpci->tx_rings[txch]);
	}
	spin_unlock_bh(&rtwpci->trx_lock);
}

static void rtw89_pci_enable_intr_lock(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtwpci->running = true;
	rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static void rtw89_pci_disable_intr_lock(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtwpci->running = false;
	rtw89_chip_disable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static int rtw89_pci_ops_start(struct rtw89_dev *rtwdev)
{
	rtw89_core_napi_start(rtwdev);
	rtw89_pci_enable_intr_lock(rtwdev);

	return 0;
}

static void rtw89_pci_ops_stop(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;

	rtw89_pci_disable_intr_lock(rtwdev);
	synchronize_irq(pdev->irq);
	rtw89_core_napi_stop(rtwdev);
}
static void rtw89_pci_ops_pause(struct rtw89_dev *rtwdev, bool pause)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;

	if (pause) {
		rtw89_pci_disable_intr_lock(rtwdev);
		synchronize_irq(pdev->irq);
		if (test_bit(RTW89_FLAG_NAPI_RUNNING, rtwdev->flags))
			napi_synchronize(&rtwdev->napi);
	} else {
		rtw89_pci_enable_intr_lock(rtwdev);
		rtw89_pci_tx_kick_off_pending(rtwdev);
	}
}

static
void rtw89_pci_switch_bd_idx_addr(struct rtw89_dev *rtwdev, bool low_power)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	const struct rtw89_pci_bd_idx_addr *bd_idx_addr = info->bd_idx_addr_low_power;
	const struct rtw89_pci_ch_dma_addr_set *dma_addr_set = info->dma_addr_set;
	struct rtw89_pci_tx_ring *tx_ring;
	struct rtw89_pci_rx_ring *rx_ring;
	int i;

	if (WARN(!bd_idx_addr, "only HCI with low power mode needs this\n"))
		return;

	for (i = 0; i < RTW89_TXCH_NUM; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		tx_ring->bd_ring.addr.idx = low_power ?
					    bd_idx_addr->tx_bd_addrs[i] :
					    dma_addr_set->tx[i].idx;
	}

	for (i = 0; i < RTW89_RXCH_NUM; i++) {
		rx_ring = &rtwpci->rx_rings[i];
		rx_ring->bd_ring.addr.idx = low_power ?
					    bd_idx_addr->rx_bd_addrs[i] :
					    dma_addr_set->rx[i].idx;
	}
}

static void rtw89_pci_ops_switch_mode(struct rtw89_dev *rtwdev, bool low_power)
{
	enum rtw89_pci_intr_mask_cfg cfg;

	WARN(!rtwdev->hci.paused, "HCI isn't paused\n");

	cfg = low_power ? RTW89_PCI_INTR_MASK_LOW_POWER : RTW89_PCI_INTR_MASK_NORMAL;
	rtw89_chip_config_intr_mask(rtwdev, cfg);
	rtw89_pci_switch_bd_idx_addr(rtwdev, low_power);
}

static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data);
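/* Note: a CMAC register read returning the poison value RTW89_R32_DEAD
 * most likely means the CMAC clock is gated rather than that the device
 * is gone. The helper below therefore re-enables all CMAC clocks via
 * R_AX_CK_EN and retries up to MAC_REG_POOL_COUNT times before giving up
 * and returning the dead value.
 */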
(!ACCESS_CMAC(addr)) 1851 return readl(rtwpci->mmap + addr); 1852 1853 return rtw89_pci_ops_read32_cmac(rtwdev, addr); 1854 } 1855 1856 static void rtw89_pci_ops_write8(struct rtw89_dev *rtwdev, u32 addr, u8 data) 1857 { 1858 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1859 1860 writeb(data, rtwpci->mmap + addr); 1861 } 1862 1863 static void rtw89_pci_ops_write16(struct rtw89_dev *rtwdev, u32 addr, u16 data) 1864 { 1865 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1866 1867 writew(data, rtwpci->mmap + addr); 1868 } 1869 1870 static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data) 1871 { 1872 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1873 1874 writel(data, rtwpci->mmap + addr); 1875 } 1876 1877 static void rtw89_pci_ctrl_dma_trx(struct rtw89_dev *rtwdev, bool enable) 1878 { 1879 const struct rtw89_pci_info *info = rtwdev->pci_info; 1880 1881 if (enable) 1882 rtw89_write32_set(rtwdev, info->init_cfg_reg, 1883 info->rxhci_en_bit | info->txhci_en_bit); 1884 else 1885 rtw89_write32_clr(rtwdev, info->init_cfg_reg, 1886 info->rxhci_en_bit | info->txhci_en_bit); 1887 } 1888 1889 static void rtw89_pci_ctrl_dma_io(struct rtw89_dev *rtwdev, bool enable) 1890 { 1891 const struct rtw89_pci_info *info = rtwdev->pci_info; 1892 const struct rtw89_reg_def *reg = &info->dma_io_stop; 1893 1894 if (enable) 1895 rtw89_write32_clr(rtwdev, reg->addr, reg->mask); 1896 else 1897 rtw89_write32_set(rtwdev, reg->addr, reg->mask); 1898 } 1899 1900 void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable) 1901 { 1902 rtw89_pci_ctrl_dma_io(rtwdev, enable); 1903 rtw89_pci_ctrl_dma_trx(rtwdev, enable); 1904 } 1905 1906 static int rtw89_pci_check_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 rw_bit) 1907 { 1908 u16 val; 1909 1910 rtw89_write8(rtwdev, R_AX_MDIO_CFG, addr & 0x1F); 1911 1912 val = rtw89_read16(rtwdev, R_AX_MDIO_CFG); 1913 switch (speed) { 1914 case PCIE_PHY_GEN1: 1915 if (addr < 0x20) 1916 val = u16_replace_bits(val, MDIO_PG0_G1, B_AX_MDIO_PHY_ADDR_MASK); 1917 else 1918 val = u16_replace_bits(val, MDIO_PG1_G1, B_AX_MDIO_PHY_ADDR_MASK); 1919 break; 1920 case PCIE_PHY_GEN2: 1921 if (addr < 0x20) 1922 val = u16_replace_bits(val, MDIO_PG0_G2, B_AX_MDIO_PHY_ADDR_MASK); 1923 else 1924 val = u16_replace_bits(val, MDIO_PG1_G2, B_AX_MDIO_PHY_ADDR_MASK); 1925 break; 1926 default: 1927 rtw89_err(rtwdev, "[ERR]Error Speed %d!\n", speed); 1928 return -EINVAL; 1929 } 1930 rtw89_write16(rtwdev, R_AX_MDIO_CFG, val); 1931 rtw89_write16_set(rtwdev, R_AX_MDIO_CFG, rw_bit); 1932 1933 return read_poll_timeout(rtw89_read16, val, !(val & rw_bit), 10, 2000, 1934 false, rtwdev, R_AX_MDIO_CFG); 1935 } 1936 1937 static int 1938 rtw89_read16_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 *val) 1939 { 1940 int ret; 1941 1942 ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_RFLAG); 1943 if (ret) { 1944 rtw89_err(rtwdev, "[ERR]MDIO R16 0x%X fail ret=%d!\n", addr, ret); 1945 return ret; 1946 } 1947 *val = rtw89_read16(rtwdev, R_AX_MDIO_RDATA); 1948 1949 return 0; 1950 } 1951 1952 static int 1953 rtw89_write16_mdio(struct rtw89_dev *rtwdev, u8 addr, u16 data, u8 speed) 1954 { 1955 int ret; 1956 1957 rtw89_write16(rtwdev, R_AX_MDIO_WDATA, data); 1958 ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_WFLAG); 1959 if (ret) { 1960 rtw89_err(rtwdev, "[ERR]MDIO W16 0x%X = %x fail ret=%d!\n", addr, data, ret); 1961 return ret; 1962 } 1963 1964 return 0; 1965 } 1966 1967 static int 1968 rtw89_write16_mdio_mask(struct rtw89_dev 
*rtwdev, u8 addr, u16 mask, u16 data, u8 speed) 1969 { 1970 u32 shift; 1971 int ret; 1972 u16 val; 1973 1974 ret = rtw89_read16_mdio(rtwdev, addr, speed, &val); 1975 if (ret) 1976 return ret; 1977 1978 shift = __ffs(mask); 1979 val &= ~mask; 1980 val |= ((data << shift) & mask); 1981 1982 ret = rtw89_write16_mdio(rtwdev, addr, val, speed); 1983 if (ret) 1984 return ret; 1985 1986 return 0; 1987 } 1988 1989 static int rtw89_write16_mdio_set(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed) 1990 { 1991 int ret; 1992 u16 val; 1993 1994 ret = rtw89_read16_mdio(rtwdev, addr, speed, &val); 1995 if (ret) 1996 return ret; 1997 ret = rtw89_write16_mdio(rtwdev, addr, val | mask, speed); 1998 if (ret) 1999 return ret; 2000 2001 return 0; 2002 } 2003 2004 static int rtw89_write16_mdio_clr(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed) 2005 { 2006 int ret; 2007 u16 val; 2008 2009 ret = rtw89_read16_mdio(rtwdev, addr, speed, &val); 2010 if (ret) 2011 return ret; 2012 ret = rtw89_write16_mdio(rtwdev, addr, val & ~mask, speed); 2013 if (ret) 2014 return ret; 2015 2016 return 0; 2017 } 2018 2019 static int rtw89_dbi_write8(struct rtw89_dev *rtwdev, u16 addr, u8 data) 2020 { 2021 u16 addr_2lsb = addr & B_AX_DBI_2LSB; 2022 u16 write_addr; 2023 u8 flag; 2024 int ret; 2025 2026 write_addr = addr & B_AX_DBI_ADDR_MSK; 2027 write_addr |= u16_encode_bits(BIT(addr_2lsb), B_AX_DBI_WREN_MSK); 2028 rtw89_write8(rtwdev, R_AX_DBI_WDATA + addr_2lsb, data); 2029 rtw89_write16(rtwdev, R_AX_DBI_FLAG, write_addr); 2030 rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_WFLAG >> 16); 2031 2032 ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10, 2033 10 * RTW89_PCI_WR_RETRY_CNT, false, 2034 rtwdev, R_AX_DBI_FLAG + 2); 2035 if (ret) 2036 rtw89_err(rtwdev, "failed to write DBI register, addr=0x%X\n", 2037 addr); 2038 2039 return ret; 2040 } 2041 2042 static int rtw89_dbi_read8(struct rtw89_dev *rtwdev, u16 addr, u8 *value) 2043 { 2044 u16 read_addr = addr & B_AX_DBI_ADDR_MSK; 2045 u8 flag; 2046 int ret; 2047 2048 rtw89_write16(rtwdev, R_AX_DBI_FLAG, read_addr); 2049 rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_RFLAG >> 16); 2050 2051 ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10, 2052 10 * RTW89_PCI_WR_RETRY_CNT, false, 2053 rtwdev, R_AX_DBI_FLAG + 2); 2054 if (ret) { 2055 rtw89_err(rtwdev, "failed to read DBI register, addr=0x%X\n", 2056 addr); 2057 return ret; 2058 } 2059 2060 read_addr = R_AX_DBI_RDATA + (addr & 3); 2061 *value = rtw89_read8(rtwdev, read_addr); 2062 2063 return 0; 2064 } 2065 2066 static int rtw89_pci_write_config_byte(struct rtw89_dev *rtwdev, u16 addr, 2067 u8 data) 2068 { 2069 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2070 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2071 struct pci_dev *pdev = rtwpci->pdev; 2072 int ret; 2073 2074 ret = pci_write_config_byte(pdev, addr, data); 2075 if (!ret) 2076 return 0; 2077 2078 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) 2079 ret = rtw89_dbi_write8(rtwdev, addr, data); 2080 2081 return ret; 2082 } 2083 2084 static int rtw89_pci_read_config_byte(struct rtw89_dev *rtwdev, u16 addr, 2085 u8 *value) 2086 { 2087 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2088 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2089 struct pci_dev *pdev = rtwpci->pdev; 2090 int ret; 2091 2092 ret = pci_read_config_byte(pdev, addr, value); 2093 if (!ret) 2094 return 0; 2095 2096 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) 2097 ret = rtw89_dbi_read8(rtwdev, addr, value); 2098 2099 
return ret; 2100 } 2101 2102 static int rtw89_pci_config_byte_set(struct rtw89_dev *rtwdev, u16 addr, 2103 u8 bit) 2104 { 2105 u8 value; 2106 int ret; 2107 2108 ret = rtw89_pci_read_config_byte(rtwdev, addr, &value); 2109 if (ret) 2110 return ret; 2111 2112 value |= bit; 2113 ret = rtw89_pci_write_config_byte(rtwdev, addr, value); 2114 2115 return ret; 2116 } 2117 2118 static int rtw89_pci_config_byte_clr(struct rtw89_dev *rtwdev, u16 addr, 2119 u8 bit) 2120 { 2121 u8 value; 2122 int ret; 2123 2124 ret = rtw89_pci_read_config_byte(rtwdev, addr, &value); 2125 if (ret) 2126 return ret; 2127 2128 value &= ~bit; 2129 ret = rtw89_pci_write_config_byte(rtwdev, addr, value); 2130 2131 return ret; 2132 } 2133 2134 static int 2135 __get_target(struct rtw89_dev *rtwdev, u16 *target, enum rtw89_pcie_phy phy_rate) 2136 { 2137 u16 val, tar; 2138 int ret; 2139 2140 /* Restart the calibration counter: clear the enable bit, then set it again */ 2141 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val); 2142 if (ret) 2143 return ret; 2144 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN, 2145 phy_rate); 2146 if (ret) 2147 return ret; 2148 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val | B_AX_CLK_CALIB_EN, 2149 phy_rate); 2150 if (ret) 2151 return ret; 2152 2153 fsleep(300); 2154 2155 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &tar); 2156 if (ret) 2157 return ret; 2158 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN, 2159 phy_rate); 2160 if (ret) 2161 return ret; 2162 2163 tar = tar & 0x0FFF; 2164 if (tar == 0 || tar == 0x0FFF) { 2165 rtw89_err(rtwdev, "[ERR]Get target failed.\n"); 2166 return -EINVAL; 2167 } 2168 2169 *target = tar; 2170 2171 return 0; 2172 } 2173 2174 static int rtw89_pci_autok_x(struct rtw89_dev *rtwdev) 2175 { 2176 int ret; 2177 2178 if (!rtw89_is_rtl885xb(rtwdev)) 2179 return 0; 2180 2181 ret = rtw89_write16_mdio_mask(rtwdev, RAC_REG_FLD_0, BAC_AUTOK_N_MASK, 2182 PCIE_AUTOK_4, PCIE_PHY_GEN1); 2183 return ret; 2184 } 2185 2186 static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en) 2187 { 2188 enum rtw89_pcie_phy phy_rate; 2189 u16 val16, mgn_set, div_set, tar; 2190 u8 val8, bdr_ori; 2191 bool l1_flag = false; 2192 int ret = 0; 2193 2194 if (!rtw89_is_rtl885xb(rtwdev)) 2195 return 0; 2196 2197 ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_PHY_RATE, &val8); 2198 if (ret) { 2199 rtw89_err(rtwdev, "[ERR]pci config read %X\n", 2200 RTW89_PCIE_PHY_RATE); 2201 return ret; 2202 } 2203 2204 if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x1) { 2205 phy_rate = PCIE_PHY_GEN1; 2206 } else if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x2) { 2207 phy_rate = PCIE_PHY_GEN2; 2208 } else { 2209 rtw89_err(rtwdev, "[ERR]PCIe PHY rate %#x not supported\n", val8); 2210 return -EOPNOTSUPP; 2211 } 2212 /* Disable L1BD */ 2213 ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, &bdr_ori); 2214 if (ret) { 2215 rtw89_err(rtwdev, "[ERR]pci config read %X\n", RTW89_PCIE_L1_CTRL); 2216 return ret; 2217 } 2218 2219 if (bdr_ori & RTW89_PCIE_BIT_L1) { 2220 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, 2221 bdr_ori & ~RTW89_PCIE_BIT_L1); 2222 if (ret) { 2223 rtw89_err(rtwdev, "[ERR]pci config write %X\n", 2224 RTW89_PCIE_L1_CTRL); 2225 return ret; 2226 } 2227 l1_flag = true; 2228 } 2229 2230 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16); 2231 if (ret) { 2232 rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1); 2233 goto end; 2234 } 2235 2236 if (val16 & B_AX_CALIB_EN) { 2237 ret =
rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, 2238 val16 & ~B_AX_CALIB_EN, phy_rate); 2239 if (ret) { 2240 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 2241 goto end; 2242 } 2243 } 2244 2245 if (!autook_en) 2246 goto end; 2247 /* Set div */ 2248 ret = rtw89_write16_mdio_clr(rtwdev, RAC_CTRL_PPR_V1, B_AX_DIV, phy_rate); 2249 if (ret) { 2250 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 2251 goto end; 2252 } 2253 2254 /* Obtain div and margin */ 2255 ret = __get_target(rtwdev, &tar, phy_rate); 2256 if (ret) { 2257 rtw89_err(rtwdev, "[ERR]1st get target fail %d\n", ret); 2258 goto end; 2259 } 2260 2261 mgn_set = tar * INTF_INTGRA_HOSTREF_V1 / INTF_INTGRA_MINREF_V1 - tar; 2262 2263 if (mgn_set >= 128) { 2264 div_set = 0x0003; 2265 mgn_set = 0x000F; 2266 } else if (mgn_set >= 64) { 2267 div_set = 0x0003; 2268 mgn_set >>= 3; 2269 } else if (mgn_set >= 32) { 2270 div_set = 0x0002; 2271 mgn_set >>= 2; 2272 } else if (mgn_set >= 16) { 2273 div_set = 0x0001; 2274 mgn_set >>= 1; 2275 } else if (mgn_set == 0) { 2276 rtw89_err(rtwdev, "[ERR]cal mgn is 0, tar = %d\n", tar); 2277 goto end; 2278 } else { 2279 div_set = 0x0000; 2280 } 2281 2282 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16); 2283 if (ret) { 2284 rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1); 2285 goto end; 2286 } 2287 2288 val16 |= u16_encode_bits(div_set, B_AX_DIV); 2289 2290 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val16, phy_rate); 2291 if (ret) { 2292 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 2293 goto end; 2294 } 2295 2296 ret = __get_target(rtwdev, &tar, phy_rate); 2297 if (ret) { 2298 rtw89_err(rtwdev, "[ERR]2nd get target fail %d\n", ret); 2299 goto end; 2300 } 2301 2302 rtw89_debug(rtwdev, RTW89_DBG_HCI, "[TRACE]target = 0x%X, div = 0x%X, margin = 0x%X\n", 2303 tar, div_set, mgn_set); 2304 ret = rtw89_write16_mdio(rtwdev, RAC_SET_PPR_V1, 2305 (tar & 0x0FFF) | (mgn_set << 12), phy_rate); 2306 if (ret) { 2307 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_SET_PPR_V1); 2308 goto end; 2309 } 2310 2311 /* Enable function */ 2312 ret = rtw89_write16_mdio_set(rtwdev, RAC_CTRL_PPR_V1, B_AX_CALIB_EN, phy_rate); 2313 if (ret) { 2314 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 2315 goto end; 2316 } 2317 2318 /* CLK delay = 0 */ 2319 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL, 2320 PCIE_CLKDLY_HW_0); 2321 2322 end: 2323 /* Restore the original L1BD setting */ 2324 if (l1_flag) { 2325 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, 2326 bdr_ori); 2327 if (ret) { 2328 rtw89_err(rtwdev, "[ERR]pci config write %X\n", 2329 RTW89_PCIE_L1_CTRL); 2330 return ret; 2331 } 2332 } 2333 2334 return ret; 2335 } 2336 2337 static int rtw89_pci_deglitch_setting(struct rtw89_dev *rtwdev) 2338 { 2339 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2340 int ret; 2341 2342 if (chip_id == RTL8852A) { 2343 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH, 2344 PCIE_PHY_GEN1); 2345 if (ret) 2346 return ret; 2347 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH, 2348 PCIE_PHY_GEN2); 2349 if (ret) 2350 return ret; 2351 } else if (chip_id == RTL8852C) { 2352 rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA24 * RAC_MULT, 2353 B_AX_DEGLITCH); 2354 rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA24 * RAC_MULT, 2355 B_AX_DEGLITCH); 2356 } 2357 2358 return 0; 2359 } 2360 2361 static void rtw89_pci_disable_eq_ax(struct rtw89_dev *rtwdev) 2362 { 2363 u16 g1_oobs, g2_oobs; 2364 u32 backup_aspm; 2365 u32
phy_offset; 2366 u16 offset_cal; 2367 u16 oobs_val; 2368 int ret; 2369 u8 gen; 2370 2371 if (rtwdev->chip->chip_id != RTL8852C) 2372 return; 2373 2374 g1_oobs = rtw89_read16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G1 + 2375 RAC_ANA09 * RAC_MULT, BAC_OOBS_SEL); 2376 g2_oobs = rtw89_read16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G2 + 2377 RAC_ANA09 * RAC_MULT, BAC_OOBS_SEL); 2378 if (g1_oobs && g2_oobs) 2379 return; 2380 2381 backup_aspm = rtw89_read32(rtwdev, R_AX_PCIE_MIX_CFG_V1); 2382 rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK); 2383 2384 ret = rtw89_pci_get_phy_offset_by_link_speed(rtwdev, &phy_offset); 2385 if (ret) 2386 goto out; 2387 2388 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0D * RAC_MULT, BAC_RX_TEST_EN); 2389 rtw89_write16(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT, ADDR_SEL_PINOUT_DIS_VAL); 2390 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT, B_PCIE_BIT_RD_SEL); 2391 2392 oobs_val = rtw89_read16_mask(rtwdev, phy_offset + RAC_ANA1F * RAC_MULT, 2393 OOBS_LEVEL_MASK); 2394 2395 rtw89_write16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA03 * RAC_MULT, 2396 OOBS_SEN_MASK, oobs_val); 2397 rtw89_write16_set(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA09 * RAC_MULT, 2398 BAC_OOBS_SEL); 2399 2400 rtw89_write16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA03 * RAC_MULT, 2401 OOBS_SEN_MASK, oobs_val); 2402 rtw89_write16_set(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA09 * RAC_MULT, 2403 BAC_OOBS_SEL); 2404 2405 /* offset K */ 2406 for (gen = 1; gen <= 2; gen++) { 2407 phy_offset = gen == 1 ? R_RAC_DIRECT_OFFSET_G1 : 2408 R_RAC_DIRECT_OFFSET_G2; 2409 2410 rtw89_write16_clr(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT, 2411 B_PCIE_BIT_RD_SEL); 2412 } 2413 2414 offset_cal = rtw89_read16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G1 + 2415 RAC_ANA1F * RAC_MULT, OFFSET_CAL_MASK); 2416 2417 for (gen = 1; gen <= 2; gen++) { 2418 phy_offset = gen == 1 ? 
R_RAC_DIRECT_OFFSET_G1 : 2419 R_RAC_DIRECT_OFFSET_G2; 2420 2421 rtw89_write16_mask(rtwdev, phy_offset + RAC_ANA0B * RAC_MULT, 2422 MANUAL_LVL_MASK, offset_cal); 2423 rtw89_write16_clr(rtwdev, phy_offset + RAC_ANA0D * RAC_MULT, 2424 OFFSET_CAL_MODE); 2425 } 2426 2427 out: 2428 rtw89_write32(rtwdev, R_AX_PCIE_MIX_CFG_V1, backup_aspm); 2429 } 2430 2431 static void rtw89_pci_ber(struct rtw89_dev *rtwdev) 2432 { 2433 u32 phy_offset; 2434 2435 if (!test_bit(RTW89_QUIRK_PCI_BER, rtwdev->quirks)) 2436 return; 2437 2438 phy_offset = R_RAC_DIRECT_OFFSET_G1; 2439 rtw89_write16(rtwdev, phy_offset + RAC_ANA1E * RAC_MULT, RAC_ANA1E_G1_VAL); 2440 rtw89_write16(rtwdev, phy_offset + RAC_ANA2E * RAC_MULT, RAC_ANA2E_VAL); 2441 2442 phy_offset = R_RAC_DIRECT_OFFSET_G2; 2443 rtw89_write16(rtwdev, phy_offset + RAC_ANA1E * RAC_MULT, RAC_ANA1E_G2_VAL); 2444 rtw89_write16(rtwdev, phy_offset + RAC_ANA2E * RAC_MULT, RAC_ANA2E_VAL); 2445 } 2446 2447 static void rtw89_pci_rxdma_prefth(struct rtw89_dev *rtwdev) 2448 { 2449 if (rtwdev->chip->chip_id != RTL8852A) 2450 return; 2451 2452 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_DIS_RXDMA_PRE); 2453 } 2454 2455 static void rtw89_pci_l1off_pwroff(struct rtw89_dev *rtwdev) 2456 { 2457 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2458 2459 if (chip_id != RTL8852A && !rtw89_is_rtl885xb(rtwdev)) 2460 return; 2461 2462 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL, B_AX_L1OFF_PWR_OFF_EN); 2463 } 2464 2465 static int rtw89_pci_l2_rxen_lat(struct rtw89_dev *rtwdev) 2466 { 2467 int ret; 2468 2469 if (rtwdev->chip->chip_id != RTL8852A) 2470 return 0; 2471 2472 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN, 2473 PCIE_PHY_GEN1); 2474 if (ret) 2475 return ret; 2476 2477 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN, 2478 PCIE_PHY_GEN2); 2479 if (ret) 2480 return ret; 2481 2482 return 0; 2483 } 2484 2485 static void rtw89_pci_aphy_pwrcut(struct rtw89_dev *rtwdev) 2486 { 2487 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2488 2489 if (chip_id != RTL8852A && !rtw89_is_rtl885xb(rtwdev)) 2490 return; 2491 2492 rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_PSUS_OFF_CAPC_EN); 2493 } 2494 2495 static void rtw89_pci_hci_ldo(struct rtw89_dev *rtwdev) 2496 { 2497 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2498 2499 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 2500 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, 2501 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 2502 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, 2503 B_AX_PCIE_DIS_WLSUS_AFT_PDN); 2504 } else if (chip_id == RTL8852C) { 2505 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, 2506 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 2507 } 2508 } 2509 2510 static int rtw89_pci_dphy_delay(struct rtw89_dev *rtwdev) 2511 { 2512 if (!rtw89_is_rtl885xb(rtwdev)) 2513 return 0; 2514 2515 return rtw89_write16_mdio_mask(rtwdev, RAC_REG_REV2, BAC_CMU_EN_DLY_MASK, 2516 PCIE_DPHY_DLY_25US, PCIE_PHY_GEN1); 2517 } 2518 2519 static void rtw89_pci_power_wake(struct rtw89_dev *rtwdev, bool pwr_up) 2520 { 2521 if (pwr_up) 2522 rtw89_write32_set(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL); 2523 else 2524 rtw89_write32_clr(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL); 2525 } 2526 2527 static void rtw89_pci_autoload_hang(struct rtw89_dev *rtwdev) 2528 { 2529 if (rtwdev->chip->chip_id != RTL8852C) 2530 return; 2531 2532 rtw89_write32_set(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3); 2533 rtw89_write32_clr(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3); 2534 } 2535 2536 static void
rtw89_pci_l12_vmain(struct rtw89_dev *rtwdev) 2537 { 2538 if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV)) 2539 return; 2540 2541 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_FORCE_PWR_NGAT); 2542 } 2543 2544 static void rtw89_pci_gen2_force_ib(struct rtw89_dev *rtwdev) 2545 { 2546 if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV)) 2547 return; 2548 2549 rtw89_write32_set(rtwdev, R_AX_PMC_DBG_CTRL2, 2550 B_AX_SYSON_DIS_PMCR_AX_WRMSK); 2551 rtw89_write32_set(rtwdev, R_AX_HCI_BG_CTRL, B_AX_BG_CLR_ASYNC_M3); 2552 rtw89_write32_clr(rtwdev, R_AX_PMC_DBG_CTRL2, 2553 B_AX_SYSON_DIS_PMCR_AX_WRMSK); 2554 } 2555 2556 static void rtw89_pci_l1_ent_lat(struct rtw89_dev *rtwdev) 2557 { 2558 if (rtwdev->chip->chip_id != RTL8852C) 2559 return; 2560 2561 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_SEL_REQ_ENTR_L1); 2562 } 2563 2564 static void rtw89_pci_wd_exit_l1(struct rtw89_dev *rtwdev) 2565 { 2566 if (rtwdev->chip->chip_id != RTL8852C) 2567 return; 2568 2569 rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_DMAC0_EXIT_L1_EN); 2570 } 2571 2572 static void rtw89_pci_set_sic(struct rtw89_dev *rtwdev) 2573 { 2574 if (rtwdev->chip->chip_id == RTL8852C) 2575 return; 2576 2577 rtw89_write32_clr(rtwdev, R_AX_PCIE_EXP_CTRL, 2578 B_AX_SIC_EN_FORCE_CLKREQ); 2579 } 2580 2581 static void rtw89_pci_set_lbc(struct rtw89_dev *rtwdev) 2582 { 2583 const struct rtw89_pci_info *info = rtwdev->pci_info; 2584 u32 lbc; 2585 2586 if (rtwdev->chip->chip_id == RTL8852C) 2587 return; 2588 2589 lbc = rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG); 2590 if (info->lbc_en == MAC_AX_PCIE_ENABLE) { 2591 lbc = u32_replace_bits(lbc, info->lbc_tmr, B_AX_LBC_TIMER); 2592 lbc |= B_AX_LBC_FLAG | B_AX_LBC_EN; 2594 } else { 2595 lbc &= ~B_AX_LBC_EN; 2596 } 2597 rtw89_write32(rtwdev, R_AX_LBC_WATCHDOG, lbc); 2598 } 2599 2600 static void rtw89_pci_set_io_rcy(struct rtw89_dev *rtwdev) 2601 { 2602 const struct rtw89_pci_info *info = rtwdev->pci_info; 2603 u32 val32; 2604 2605 if (rtwdev->chip->chip_id != RTL8852C) 2606 return; 2607 2608 if (info->io_rcy_en == MAC_AX_PCIE_ENABLE) { 2609 val32 = FIELD_PREP(B_AX_PCIE_WDT_TIMER_M1_MASK, 2610 info->io_rcy_tmr); 2611 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M1, val32); 2612 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M2, val32); 2613 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_E0, val32); 2614 2615 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1); 2616 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2); 2617 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0); 2618 } else { 2619 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1); 2620 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2); 2621 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0); 2622 } 2623 2624 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_S1, B_AX_PCIE_IO_RCY_WDT_MODE_S1); 2625 } 2626 2627 static void rtw89_pci_set_dbg(struct rtw89_dev *rtwdev) 2628 { 2629 if (rtwdev->chip->chip_id == RTL8852C) 2630 return; 2631 2632 rtw89_write32_set(rtwdev, R_AX_PCIE_DBG_CTRL, 2633 B_AX_ASFF_FULL_NO_STK | B_AX_EN_STUCK_DBG); 2634 2635 if (rtwdev->chip->chip_id == RTL8852A) 2636 rtw89_write32_set(rtwdev, R_AX_PCIE_EXP_CTRL, 2637 B_AX_EN_CHKDSC_NO_RX_STUCK); 2638 } 2639 2640 static void rtw89_pci_set_keep_reg(struct rtw89_dev *rtwdev) 2641 { 2642 if (rtwdev->chip->chip_id == RTL8852C) 2643
return; 2644 2645 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, 2646 B_AX_PCIE_TXRST_KEEP_REG | B_AX_PCIE_RXRST_KEEP_REG); 2647 } 2648 2649 static void rtw89_pci_clr_idx_all_ax(struct rtw89_dev *rtwdev) 2650 { 2651 const struct rtw89_pci_info *info = rtwdev->pci_info; 2652 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2653 u32 val = B_AX_CLR_ACH0_IDX | B_AX_CLR_ACH1_IDX | B_AX_CLR_ACH2_IDX | 2654 B_AX_CLR_ACH3_IDX | B_AX_CLR_CH8_IDX | B_AX_CLR_CH9_IDX | 2655 B_AX_CLR_CH12_IDX; 2656 u32 rxbd_rwptr_clr = info->rxbd_rwptr_clr_reg; 2657 u32 txbd_rwptr_clr2 = info->txbd_rwptr_clr2_reg; 2658 2659 if (chip_id == RTL8852A || chip_id == RTL8852C) 2660 val |= B_AX_CLR_ACH4_IDX | B_AX_CLR_ACH5_IDX | 2661 B_AX_CLR_ACH6_IDX | B_AX_CLR_ACH7_IDX; 2662 /* clear DMA indexes */ 2663 rtw89_write32_set(rtwdev, R_AX_TXBD_RWPTR_CLR1, val); 2664 if (chip_id == RTL8852A || chip_id == RTL8852C) 2665 rtw89_write32_set(rtwdev, txbd_rwptr_clr2, 2666 B_AX_CLR_CH10_IDX | B_AX_CLR_CH11_IDX); 2667 rtw89_write32_set(rtwdev, rxbd_rwptr_clr, 2668 B_AX_CLR_RXQ_IDX | B_AX_CLR_RPQ_IDX); 2669 } 2670 2671 static int rtw89_pci_poll_txdma_ch_idle_ax(struct rtw89_dev *rtwdev) 2672 { 2673 const struct rtw89_pci_info *info = rtwdev->pci_info; 2674 u32 dma_busy1 = info->dma_busy1.addr; 2675 u32 dma_busy2 = info->dma_busy2_reg; 2676 u32 check, dma_busy; 2677 int ret; 2678 2679 check = info->dma_busy1.mask; 2680 2681 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, 2682 10, 100, false, rtwdev, dma_busy1); 2683 if (ret) 2684 return ret; 2685 2686 if (!dma_busy2) 2687 return 0; 2688 2689 check = B_AX_CH10_BUSY | B_AX_CH11_BUSY; 2690 2691 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, 2692 10, 100, false, rtwdev, dma_busy2); 2693 if (ret) 2694 return ret; 2695 2696 return 0; 2697 } 2698 2699 static int rtw89_pci_poll_rxdma_ch_idle_ax(struct rtw89_dev *rtwdev) 2700 { 2701 const struct rtw89_pci_info *info = rtwdev->pci_info; 2702 u32 dma_busy3 = info->dma_busy3_reg; 2703 u32 check, dma_busy; 2704 int ret; 2705 2706 check = B_AX_RXQ_BUSY | B_AX_RPQ_BUSY; 2707 2708 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, 2709 10, 100, false, rtwdev, dma_busy3); 2710 if (ret) 2711 return ret; 2712 2713 return 0; 2714 } 2715 2716 static int rtw89_pci_poll_dma_all_idle(struct rtw89_dev *rtwdev) 2717 { 2718 int ret; 2719 2720 ret = rtw89_pci_poll_txdma_ch_idle_ax(rtwdev); 2721 if (ret) { 2722 rtw89_err(rtwdev, "txdma ch busy\n"); 2723 return ret; 2724 } 2725 2726 ret = rtw89_pci_poll_rxdma_ch_idle_ax(rtwdev); 2727 if (ret) { 2728 rtw89_err(rtwdev, "rxdma ch busy\n"); 2729 return ret; 2730 } 2731 2732 return 0; 2733 } 2734 2735 static int rtw89_pci_mode_op(struct rtw89_dev *rtwdev) 2736 { 2737 const struct rtw89_pci_info *info = rtwdev->pci_info; 2738 enum mac_ax_bd_trunc_mode txbd_trunc_mode = info->txbd_trunc_mode; 2739 enum mac_ax_bd_trunc_mode rxbd_trunc_mode = info->rxbd_trunc_mode; 2740 enum mac_ax_rxbd_mode rxbd_mode = info->rxbd_mode; 2741 enum mac_ax_tag_mode tag_mode = info->tag_mode; 2742 enum mac_ax_wd_dma_intvl wd_dma_idle_intvl = info->wd_dma_idle_intvl; 2743 enum mac_ax_wd_dma_intvl wd_dma_act_intvl = info->wd_dma_act_intvl; 2744 enum mac_ax_tx_burst tx_burst = info->tx_burst; 2745 enum mac_ax_rx_burst rx_burst = info->rx_burst; 2746 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2747 u8 cv = rtwdev->hal.cv; 2748 u32 val32; 2749 2750 if (txbd_trunc_mode == MAC_AX_BD_TRUNC) { 2751 if (chip_id == RTL8852A && cv == CHIP_CBV) 2752
rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE); 2753 } else if (txbd_trunc_mode == MAC_AX_BD_NORM) { 2754 if (chip_id == RTL8852A || chip_id == RTL8852B) 2755 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE); 2756 } 2757 2758 if (rxbd_trunc_mode == MAC_AX_BD_TRUNC) { 2759 if (chip_id == RTL8852A && cv == CHIP_CBV) 2760 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE); 2761 } else if (rxbd_trunc_mode == MAC_AX_BD_NORM) { 2762 if (chip_id == RTL8852A || chip_id == RTL8852B) 2763 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE); 2764 } 2765 2766 if (rxbd_mode == MAC_AX_RXBD_PKT) { 2767 rtw89_write32_clr(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit); 2768 } else if (rxbd_mode == MAC_AX_RXBD_SEP) { 2769 rtw89_write32_set(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit); 2770 2771 if (chip_id == RTL8852A || chip_id == RTL8852B) 2772 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, 2773 B_AX_PCIE_RX_APPLEN_MASK, 0); 2774 } 2775 2776 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 2777 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_TXDMA_MASK, tx_burst); 2778 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_RXDMA_MASK, rx_burst); 2779 } else if (chip_id == RTL8852C) { 2780 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_TXDMA_MASK, tx_burst); 2781 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_RXDMA_MASK, rx_burst); 2782 } 2783 2784 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 2785 if (tag_mode == MAC_AX_TAG_SGL) { 2786 val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) & 2787 ~B_AX_LATENCY_CONTROL; 2788 rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 2789 } else if (tag_mode == MAC_AX_TAG_MULTI) { 2790 val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) | 2791 B_AX_LATENCY_CONTROL; 2792 rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 2793 } 2794 } 2795 2796 rtw89_write32_mask(rtwdev, info->exp_ctrl_reg, info->max_tag_num_mask, 2797 info->multi_tag_num); 2798 2799 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 2800 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_IDLE, 2801 wd_dma_idle_intvl); 2802 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_ACT, 2803 wd_dma_act_intvl); 2804 } else if (chip_id == RTL8852C) { 2805 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_IDLE_V1_MASK, 2806 wd_dma_idle_intvl); 2807 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_ACT_V1_MASK, 2808 wd_dma_act_intvl); 2809 } 2810 2811 if (txbd_trunc_mode == MAC_AX_BD_TRUNC) { 2812 rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, 2813 B_AX_HOST_ADDR_INFO_8B_SEL); 2814 rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); 2815 } else if (txbd_trunc_mode == MAC_AX_BD_NORM) { 2816 rtw89_write32_clr(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, 2817 B_AX_HOST_ADDR_INFO_8B_SEL); 2818 rtw89_write32_set(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); 2819 } 2820 2821 return 0; 2822 } 2823 2824 static int rtw89_pci_ops_deinit(struct rtw89_dev *rtwdev) 2825 { 2826 const struct rtw89_pci_info *info = rtwdev->pci_info; 2827 2828 if (rtwdev->chip->chip_id == RTL8852A) { 2829 /* ltr sw trigger */ 2830 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_IDLE); 2831 } 2832 info->ltr_set(rtwdev, false); 2833 rtw89_pci_ctrl_dma_all(rtwdev, false); 2834 rtw89_pci_clr_idx_all(rtwdev); 2835 2836 return 0; 2837 } 2838 2839 static int rtw89_pci_ops_mac_pre_init_ax(struct rtw89_dev *rtwdev) 2840 { 
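/* Pre-init sequence: apply per-chip PHY/analog workarounds, stop and
 * drain all DMA, reset BD indexes and BDRAM, then re-enable DMA with
 * only the FW CMD channel active so firmware can be downloaded.
 */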
2841 const struct rtw89_pci_info *info = rtwdev->pci_info; 2842 int ret; 2843 2844 rtw89_pci_ber(rtwdev); 2845 rtw89_pci_rxdma_prefth(rtwdev); 2846 rtw89_pci_l1off_pwroff(rtwdev); 2847 rtw89_pci_deglitch_setting(rtwdev); 2848 ret = rtw89_pci_l2_rxen_lat(rtwdev); 2849 if (ret) { 2850 rtw89_err(rtwdev, "[ERR] pcie l2 rxen lat %d\n", ret); 2851 return ret; 2852 } 2853 2854 rtw89_pci_aphy_pwrcut(rtwdev); 2855 rtw89_pci_hci_ldo(rtwdev); 2856 rtw89_pci_dphy_delay(rtwdev); 2857 2858 ret = rtw89_pci_autok_x(rtwdev); 2859 if (ret) { 2860 rtw89_err(rtwdev, "[ERR] pcie autok_x fail %d\n", ret); 2861 return ret; 2862 } 2863 2864 ret = rtw89_pci_auto_refclk_cal(rtwdev, false); 2865 if (ret) { 2866 rtw89_err(rtwdev, "[ERR] pcie autok fail %d\n", ret); 2867 return ret; 2868 } 2869 2870 rtw89_pci_power_wake(rtwdev, true); 2871 rtw89_pci_autoload_hang(rtwdev); 2872 rtw89_pci_l12_vmain(rtwdev); 2873 rtw89_pci_gen2_force_ib(rtwdev); 2874 rtw89_pci_l1_ent_lat(rtwdev); 2875 rtw89_pci_wd_exit_l1(rtwdev); 2876 rtw89_pci_set_sic(rtwdev); 2877 rtw89_pci_set_lbc(rtwdev); 2878 rtw89_pci_set_io_rcy(rtwdev); 2879 rtw89_pci_set_dbg(rtwdev); 2880 rtw89_pci_set_keep_reg(rtwdev); 2881 2882 rtw89_write32_set(rtwdev, info->dma_stop1.addr, B_AX_STOP_WPDMA); 2883 2884 /* stop DMA activities */ 2885 rtw89_pci_ctrl_dma_all(rtwdev, false); 2886 2887 ret = rtw89_pci_poll_dma_all_idle(rtwdev); 2888 if (ret) { 2889 rtw89_err(rtwdev, "[ERR] poll pcie dma all idle\n"); 2890 return ret; 2891 } 2892 2893 rtw89_pci_clr_idx_all(rtwdev); 2894 rtw89_pci_mode_op(rtwdev); 2895 2896 /* fill TRX BD indexes */ 2897 rtw89_pci_ops_reset(rtwdev); 2898 2899 ret = rtw89_pci_rst_bdram_ax(rtwdev); 2900 if (ret) { 2901 rtw89_warn(rtwdev, "reset bdram busy\n"); 2902 return ret; 2903 } 2904 2905 /* disable all channels except the FW CMD channel to download firmware */ 2906 rtw89_pci_ctrl_txdma_ch_ax(rtwdev, false); 2907 rtw89_pci_ctrl_txdma_fw_ch_ax(rtwdev, true); 2908 2909 /* start DMA activities */ 2910 rtw89_pci_ctrl_dma_all(rtwdev, true); 2911 2912 return 0; 2913 } 2914 2915 int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev, bool en) 2916 { 2917 u32 val; 2918 2919 if (!en) 2920 return 0; 2921 2922 val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0); 2923 if (rtw89_pci_ltr_is_err_reg_val(val)) 2924 return -EINVAL; 2925 val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1); 2926 if (rtw89_pci_ltr_is_err_reg_val(val)) 2927 return -EINVAL; 2928 val = rtw89_read32(rtwdev, R_AX_LTR_IDLE_LATENCY); 2929 if (rtw89_pci_ltr_is_err_reg_val(val)) 2930 return -EINVAL; 2931 val = rtw89_read32(rtwdev, R_AX_LTR_ACTIVE_LATENCY); 2932 if (rtw89_pci_ltr_is_err_reg_val(val)) 2933 return -EINVAL; 2934 2935 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_HW_EN | B_AX_LTR_EN | 2936 B_AX_LTR_WD_NOEMP_CHK); 2937 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_SPACE_IDX_MASK, 2938 PCI_LTR_SPC_500US); 2939 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK, 2940 PCI_LTR_IDLE_TIMER_3_2MS); 2941 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28); 2942 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28); 2943 rtw89_write32(rtwdev, R_AX_LTR_IDLE_LATENCY, 0x90039003); 2944 rtw89_write32(rtwdev, R_AX_LTR_ACTIVE_LATENCY, 0x880b880b); 2945 2946 return 0; 2947 } 2948 EXPORT_SYMBOL(rtw89_pci_ltr_set); 2949 2950 int rtw89_pci_ltr_set_v1(struct rtw89_dev *rtwdev, bool en) 2951 { 2952 u32 dec_ctrl; 2953 u32 val32; 2954 2955 val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0); 2956 if (rtw89_pci_ltr_is_err_reg_val(val32)) 2957 return -EINVAL; 2958
val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1); 2959 if (rtw89_pci_ltr_is_err_reg_val(val32)) 2960 return -EINVAL; 2961 dec_ctrl = rtw89_read32(rtwdev, R_AX_LTR_DEC_CTRL); 2962 if (rtw89_pci_ltr_is_err_reg_val(dec_ctrl)) 2963 return -EINVAL; 2964 val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX3); 2965 if (rtw89_pci_ltr_is_err_reg_val(val32)) 2966 return -EINVAL; 2967 val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX0); 2968 if (rtw89_pci_ltr_is_err_reg_val(val32)) 2969 return -EINVAL; 2970 2971 if (!en) { 2972 dec_ctrl &= ~(LTR_EN_BITS | B_AX_LTR_IDX_DRV_MASK | B_AX_LTR_HW_DEC_EN); 2973 dec_ctrl |= FIELD_PREP(B_AX_LTR_IDX_DRV_MASK, PCIE_LTR_IDX_IDLE) | 2974 B_AX_LTR_REQ_DRV; 2975 } else { 2976 dec_ctrl |= B_AX_LTR_HW_DEC_EN; 2977 } 2978 2979 dec_ctrl &= ~B_AX_LTR_SPACE_IDX_V1_MASK; 2980 dec_ctrl |= FIELD_PREP(B_AX_LTR_SPACE_IDX_V1_MASK, PCI_LTR_SPC_500US); 2981 2982 if (en) 2983 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, 2984 B_AX_LTR_WD_NOEMP_CHK_V1 | B_AX_LTR_HW_EN); 2985 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK, 2986 PCI_LTR_IDLE_TIMER_3_2MS); 2987 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28); 2988 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28); 2989 rtw89_write32(rtwdev, R_AX_LTR_DEC_CTRL, dec_ctrl); 2990 rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX3, 0x90039003); 2991 rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX0, 0x880b880b); 2992 2993 return 0; 2994 } 2995 EXPORT_SYMBOL(rtw89_pci_ltr_set_v1); 2996 2997 static int rtw89_pci_ops_mac_post_init_ax(struct rtw89_dev *rtwdev) 2998 { 2999 const struct rtw89_pci_info *info = rtwdev->pci_info; 3000 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 3001 int ret; 3002 3003 ret = info->ltr_set(rtwdev, true); 3004 if (ret) { 3005 rtw89_err(rtwdev, "pci ltr set fail\n"); 3006 return ret; 3007 } 3008 if (chip_id == RTL8852A) { 3009 /* ltr sw trigger */ 3010 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_ACT); 3011 } 3012 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 3013 /* ADDR info 8-byte mode */ 3014 rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, 3015 B_AX_HOST_ADDR_INFO_8B_SEL); 3016 rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); 3017 } 3018 3019 /* enable DMA for all queues */ 3020 rtw89_pci_ctrl_txdma_ch_ax(rtwdev, true); 3021 3022 /* Release PCI IO */ 3023 rtw89_write32_clr(rtwdev, info->dma_stop1.addr, 3024 B_AX_STOP_WPDMA | B_AX_STOP_PCIEIO); 3025 3026 return 0; 3027 } 3028 3029 static int rtw89_pci_claim_device(struct rtw89_dev *rtwdev, 3030 struct pci_dev *pdev) 3031 { 3032 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3033 int ret; 3034 3035 ret = pci_enable_device(pdev); 3036 if (ret) { 3037 rtw89_err(rtwdev, "failed to enable pci device\n"); 3038 return ret; 3039 } 3040 3041 pci_set_master(pdev); 3042 pci_set_drvdata(pdev, rtwdev->hw); 3043 3044 rtwpci->pdev = pdev; 3045 3046 return 0; 3047 } 3048 3049 static void rtw89_pci_declaim_device(struct rtw89_dev *rtwdev, 3050 struct pci_dev *pdev) 3051 { 3052 pci_disable_device(pdev); 3053 } 3054 3055 static bool rtw89_pci_chip_is_manual_dac(struct rtw89_dev *rtwdev) 3056 { 3057 const struct rtw89_chip_info *chip = rtwdev->chip; 3058 3059 switch (chip->chip_id) { 3060 case RTL8852A: 3061 case RTL8852B: 3062 case RTL8851B: 3063 case RTL8852BT: 3064 return true; 3065 default: 3066 return false; 3067 } 3068 } 3069 3070 static bool rtw89_pci_is_dac_compatible_bridge(struct rtw89_dev *rtwdev) 3071 { 3072 struct rtw89_pci *rtwpci = 
(struct rtw89_pci *)rtwdev->priv; 3073 struct pci_dev *bridge = pci_upstream_bridge(rtwpci->pdev); 3074 3075 if (!rtw89_pci_chip_is_manual_dac(rtwdev)) 3076 return true; 3077 3078 if (!bridge) 3079 return false; 3080 3081 switch (bridge->vendor) { 3082 case PCI_VENDOR_ID_INTEL: 3083 return true; 3084 case PCI_VENDOR_ID_ASMEDIA: 3085 if (bridge->device == 0x2806) 3086 return true; 3087 break; 3088 } 3089 3090 return false; 3091 } 3092 3093 static void rtw89_pci_cfg_dac(struct rtw89_dev *rtwdev) 3094 { 3095 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3096 3097 if (!rtwpci->enable_dac) 3098 return; 3099 3100 if (!rtw89_pci_chip_is_manual_dac(rtwdev)) 3101 return; 3102 3103 rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_L1_CTRL, RTW89_PCIE_BIT_EN_64BITS); 3104 } 3105 3106 static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev, 3107 struct pci_dev *pdev) 3108 { 3109 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3110 unsigned long resource_len; 3111 u8 bar_id = 2; 3112 int ret; 3113 3114 ret = pci_request_regions(pdev, KBUILD_MODNAME); 3115 if (ret) { 3116 rtw89_err(rtwdev, "failed to request pci regions\n"); 3117 goto err; 3118 } 3119 3120 if (!rtw89_pci_is_dac_compatible_bridge(rtwdev)) 3121 goto no_dac; 3122 3123 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36)); 3124 if (!ret) { 3125 rtwpci->enable_dac = true; 3126 rtw89_pci_cfg_dac(rtwdev); 3127 } else { 3128 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 3129 if (ret) { 3130 rtw89_err(rtwdev, 3131 "failed to set dma and consistent mask to 32/36-bit\n"); 3132 goto err_release_regions; 3133 } 3134 } 3135 no_dac: 3136 3137 resource_len = pci_resource_len(pdev, bar_id); 3138 rtwpci->mmap = pci_iomap(pdev, bar_id, resource_len); 3139 if (!rtwpci->mmap) { 3140 rtw89_err(rtwdev, "failed to map pci io\n"); 3141 ret = -EIO; 3142 goto err_release_regions; 3143 } 3144 3145 return 0; 3146 3147 err_release_regions: 3148 pci_release_regions(pdev); 3149 err: 3150 return ret; 3151 } 3152 3153 static void rtw89_pci_clear_mapping(struct rtw89_dev *rtwdev, 3154 struct pci_dev *pdev) 3155 { 3156 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3157 3158 if (rtwpci->mmap) { 3159 pci_iounmap(pdev, rtwpci->mmap); 3160 pci_release_regions(pdev); 3161 } 3162 } 3163 3164 static void rtw89_pci_free_tx_wd_ring(struct rtw89_dev *rtwdev, 3165 struct pci_dev *pdev, 3166 struct rtw89_pci_tx_ring *tx_ring) 3167 { 3168 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 3169 u8 *head = wd_ring->head; 3170 dma_addr_t dma = wd_ring->dma; 3171 u32 page_size = wd_ring->page_size; 3172 u32 page_num = wd_ring->page_num; 3173 u32 ring_sz = page_size * page_num; 3174 3175 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 3176 wd_ring->head = NULL; 3177 } 3178 3179 static void rtw89_pci_free_tx_ring(struct rtw89_dev *rtwdev, 3180 struct pci_dev *pdev, 3181 struct rtw89_pci_tx_ring *tx_ring) 3182 { 3183 int ring_sz; 3184 u8 *head; 3185 dma_addr_t dma; 3186 3187 head = tx_ring->bd_ring.head; 3188 dma = tx_ring->bd_ring.dma; 3189 ring_sz = tx_ring->bd_ring.desc_size * tx_ring->bd_ring.len; 3190 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 3191 3192 tx_ring->bd_ring.head = NULL; 3193 } 3194 3195 static void rtw89_pci_free_tx_rings(struct rtw89_dev *rtwdev, 3196 struct pci_dev *pdev) 3197 { 3198 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3199 const struct rtw89_pci_info *info = rtwdev->pci_info; 3200 struct rtw89_pci_tx_ring *tx_ring; 3201 int i; 3202 3203 for (i = 0; i < 
RTW89_TXCH_NUM; i++) { 3204 if (info->tx_dma_ch_mask & BIT(i)) 3205 continue; 3206 tx_ring = &rtwpci->tx_rings[i]; 3207 rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring); 3208 rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring); 3209 } 3210 } 3211 3212 static void rtw89_pci_free_rx_ring(struct rtw89_dev *rtwdev, 3213 struct pci_dev *pdev, 3214 struct rtw89_pci_rx_ring *rx_ring) 3215 { 3216 struct rtw89_pci_rx_info *rx_info; 3217 struct sk_buff *skb; 3218 dma_addr_t dma; 3219 u32 buf_sz; 3220 u8 *head; 3221 int ring_sz = rx_ring->bd_ring.desc_size * rx_ring->bd_ring.len; 3222 int i; 3223 3224 buf_sz = rx_ring->buf_sz; 3225 for (i = 0; i < rx_ring->bd_ring.len; i++) { 3226 skb = rx_ring->buf[i]; 3227 if (!skb) 3228 continue; 3229 3230 rx_info = RTW89_PCI_RX_SKB_CB(skb); 3231 dma = rx_info->dma; 3232 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE); 3233 dev_kfree_skb(skb); 3234 rx_ring->buf[i] = NULL; 3235 } 3236 3237 head = rx_ring->bd_ring.head; 3238 dma = rx_ring->bd_ring.dma; 3239 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 3240 3241 rx_ring->bd_ring.head = NULL; 3242 } 3243 3244 static void rtw89_pci_free_rx_rings(struct rtw89_dev *rtwdev, 3245 struct pci_dev *pdev) 3246 { 3247 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3248 struct rtw89_pci_rx_ring *rx_ring; 3249 int i; 3250 3251 for (i = 0; i < RTW89_RXCH_NUM; i++) { 3252 rx_ring = &rtwpci->rx_rings[i]; 3253 rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring); 3254 } 3255 } 3256 3257 static void rtw89_pci_free_trx_rings(struct rtw89_dev *rtwdev, 3258 struct pci_dev *pdev) 3259 { 3260 rtw89_pci_free_rx_rings(rtwdev, pdev); 3261 rtw89_pci_free_tx_rings(rtwdev, pdev); 3262 } 3263 3264 static int rtw89_pci_init_rx_bd(struct rtw89_dev *rtwdev, struct pci_dev *pdev, 3265 struct rtw89_pci_rx_ring *rx_ring, 3266 struct sk_buff *skb, int buf_sz, u32 idx) 3267 { 3268 struct rtw89_pci_rx_info *rx_info; 3269 struct rtw89_pci_rx_bd_32 *rx_bd; 3270 dma_addr_t dma; 3271 3272 if (!skb) 3273 return -EINVAL; 3274 3275 dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE); 3276 if (dma_mapping_error(&pdev->dev, dma)) 3277 return -EBUSY; 3278 3279 rx_info = RTW89_PCI_RX_SKB_CB(skb); 3280 rx_bd = RTW89_PCI_RX_BD(rx_ring, idx); 3281 3282 memset(rx_bd, 0, sizeof(*rx_bd)); 3283 rx_bd->buf_size = cpu_to_le16(buf_sz); 3284 rx_bd->dma = cpu_to_le32(dma); 3285 rx_bd->opt = le16_encode_bits(upper_32_bits(dma), RTW89_PCI_RXBD_OPT_DMA_HI); 3286 rx_info->dma = dma; 3287 3288 return 0; 3289 } 3290 3291 static int rtw89_pci_alloc_tx_wd_ring(struct rtw89_dev *rtwdev, 3292 struct pci_dev *pdev, 3293 struct rtw89_pci_tx_ring *tx_ring, 3294 enum rtw89_tx_channel txch) 3295 { 3296 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 3297 struct rtw89_pci_tx_wd *txwd; 3298 dma_addr_t dma; 3299 dma_addr_t cur_paddr; 3300 u8 *head; 3301 u8 *cur_vaddr; 3302 u32 page_size = RTW89_PCI_TXWD_PAGE_SIZE; 3303 u32 page_num = RTW89_PCI_TXWD_NUM_MAX; 3304 u32 ring_sz = page_size * page_num; 3305 u32 page_offset; 3306 int i; 3307 3308 /* FWCMD queue doesn't use txwd as pages */ 3309 if (txch == RTW89_TXCH_CH12) 3310 return 0; 3311 3312 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 3313 if (!head) 3314 return -ENOMEM; 3315 3316 INIT_LIST_HEAD(&wd_ring->free_pages); 3317 wd_ring->head = head; 3318 wd_ring->dma = dma; 3319 wd_ring->page_size = page_size; 3320 wd_ring->page_num = page_num; 3321 3322 page_offset = 0; 3323 for (i = 0; i < page_num; i++) { 3324 txwd = &wd_ring->pages[i]; 3325 cur_paddr = dma + page_offset; 3326 
cur_vaddr = head + page_offset; 3327 3328 skb_queue_head_init(&txwd->queue); 3329 INIT_LIST_HEAD(&txwd->list); 3330 txwd->paddr = cur_paddr; 3331 txwd->vaddr = cur_vaddr; 3332 txwd->len = page_size; 3333 txwd->seq = i; 3334 rtw89_pci_enqueue_txwd(tx_ring, txwd); 3335 3336 page_offset += page_size; 3337 } 3338 3339 return 0; 3340 } 3341 3342 static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev, 3343 struct pci_dev *pdev, 3344 struct rtw89_pci_tx_ring *tx_ring, 3345 u32 desc_size, u32 len, 3346 enum rtw89_tx_channel txch) 3347 { 3348 const struct rtw89_pci_ch_dma_addr *txch_addr; 3349 int ring_sz = desc_size * len; 3350 u8 *head; 3351 dma_addr_t dma; 3352 int ret; 3353 3354 ret = rtw89_pci_alloc_tx_wd_ring(rtwdev, pdev, tx_ring, txch); 3355 if (ret) { 3356 rtw89_err(rtwdev, "failed to alloc txwd ring of txch %d\n", txch); 3357 goto err; 3358 } 3359 3360 ret = rtw89_pci_get_txch_addrs(rtwdev, txch, &txch_addr); 3361 if (ret) { 3362 rtw89_err(rtwdev, "failed to get address of txch %d\n", txch); 3363 goto err_free_wd_ring; 3364 } 3365 3366 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 3367 if (!head) { 3368 ret = -ENOMEM; 3369 goto err_free_wd_ring; 3370 } 3371 3372 INIT_LIST_HEAD(&tx_ring->busy_pages); 3373 tx_ring->bd_ring.head = head; 3374 tx_ring->bd_ring.dma = dma; 3375 tx_ring->bd_ring.len = len; 3376 tx_ring->bd_ring.desc_size = desc_size; 3377 tx_ring->bd_ring.addr = *txch_addr; 3378 tx_ring->bd_ring.wp = 0; 3379 tx_ring->bd_ring.rp = 0; 3380 tx_ring->txch = txch; 3381 3382 return 0; 3383 3384 err_free_wd_ring: 3385 rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring); 3386 err: 3387 return ret; 3388 } 3389 3390 static int rtw89_pci_alloc_tx_rings(struct rtw89_dev *rtwdev, 3391 struct pci_dev *pdev) 3392 { 3393 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3394 const struct rtw89_pci_info *info = rtwdev->pci_info; 3395 struct rtw89_pci_tx_ring *tx_ring; 3396 u32 desc_size; 3397 u32 len; 3398 u32 i, tx_allocated; 3399 int ret; 3400 3401 for (i = 0; i < RTW89_TXCH_NUM; i++) { 3402 if (info->tx_dma_ch_mask & BIT(i)) 3403 continue; 3404 tx_ring = &rtwpci->tx_rings[i]; 3405 desc_size = sizeof(struct rtw89_pci_tx_bd_32); 3406 len = RTW89_PCI_TXBD_NUM_MAX; 3407 ret = rtw89_pci_alloc_tx_ring(rtwdev, pdev, tx_ring, 3408 desc_size, len, i); 3409 if (ret) { 3410 rtw89_err(rtwdev, "failed to alloc tx ring %d\n", i); 3411 goto err_free; 3412 } 3413 } 3414 3415 return 0; 3416 3417 err_free: 3418 tx_allocated = i; 3419 for (i = 0; i < tx_allocated; i++) { 3420 tx_ring = &rtwpci->tx_rings[i]; 3421 rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring); 3422 } 3423 3424 return ret; 3425 } 3426 3427 static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev, 3428 struct pci_dev *pdev, 3429 struct rtw89_pci_rx_ring *rx_ring, 3430 u32 desc_size, u32 len, u32 rxch) 3431 { 3432 const struct rtw89_pci_info *info = rtwdev->pci_info; 3433 const struct rtw89_pci_ch_dma_addr *rxch_addr; 3434 struct sk_buff *skb; 3435 u8 *head; 3436 dma_addr_t dma; 3437 int ring_sz = desc_size * len; 3438 int buf_sz = RTW89_PCI_RX_BUF_SIZE; 3439 int i, allocated; 3440 int ret; 3441 3442 ret = rtw89_pci_get_rxch_addrs(rtwdev, rxch, &rxch_addr); 3443 if (ret) { 3444 rtw89_err(rtwdev, "failed to get address of rxch %d\n", rxch); 3445 return ret; 3446 } 3447 3448 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 3449 if (!head) { 3450 ret = -ENOMEM; 3451 goto err; 3452 } 3453 3454 rx_ring->bd_ring.head = head; 3455 rx_ring->bd_ring.dma = dma; 3456 rx_ring->bd_ring.len = len; 3457
rx_ring->bd_ring.desc_size = desc_size; 3458 rx_ring->bd_ring.addr = *rxch_addr; 3459 if (info->rx_ring_eq_is_full) 3460 rx_ring->bd_ring.wp = len - 1; 3461 else 3462 rx_ring->bd_ring.wp = 0; 3463 rx_ring->bd_ring.rp = 0; 3464 rx_ring->buf_sz = buf_sz; 3465 rx_ring->diliver_skb = NULL; 3466 rx_ring->diliver_desc.ready = false; 3467 rx_ring->target_rx_tag = 0; 3468 3469 for (i = 0; i < len; i++) { 3470 skb = dev_alloc_skb(buf_sz); 3471 if (!skb) { 3472 ret = -ENOMEM; 3473 goto err_free; 3474 } 3475 3476 memset(skb->data, 0, buf_sz); 3477 rx_ring->buf[i] = skb; 3478 ret = rtw89_pci_init_rx_bd(rtwdev, pdev, rx_ring, skb, 3479 buf_sz, i); 3480 if (ret) { 3481 rtw89_err(rtwdev, "failed to init rx buf %d\n", i); 3482 dev_kfree_skb_any(skb); 3483 rx_ring->buf[i] = NULL; 3484 goto err_free; 3485 } 3486 } 3487 3488 return 0; 3489 3490 err_free: 3491 allocated = i; 3492 for (i = 0; i < allocated; i++) { 3493 skb = rx_ring->buf[i]; 3494 if (!skb) 3495 continue; 3496 dma = *((dma_addr_t *)skb->cb); 3497 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE); 3498 dev_kfree_skb(skb); 3499 rx_ring->buf[i] = NULL; 3500 } 3501 3502 head = rx_ring->bd_ring.head; 3503 dma = rx_ring->bd_ring.dma; 3504 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 3505 3506 rx_ring->bd_ring.head = NULL; 3507 err: 3508 return ret; 3509 } 3510 3511 static int rtw89_pci_alloc_rx_rings(struct rtw89_dev *rtwdev, 3512 struct pci_dev *pdev) 3513 { 3514 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3515 struct rtw89_pci_rx_ring *rx_ring; 3516 u32 desc_size; 3517 u32 len; 3518 int i, rx_allocated; 3519 int ret; 3520 3521 for (i = 0; i < RTW89_RXCH_NUM; i++) { 3522 rx_ring = &rtwpci->rx_rings[i]; 3523 desc_size = sizeof(struct rtw89_pci_rx_bd_32); 3524 len = RTW89_PCI_RXBD_NUM_MAX; 3525 ret = rtw89_pci_alloc_rx_ring(rtwdev, pdev, rx_ring, 3526 desc_size, len, i); 3527 if (ret) { 3528 rtw89_err(rtwdev, "failed to alloc rx ring %d\n", i); 3529 goto err_free; 3530 } 3531 } 3532 3533 return 0; 3534 3535 err_free: 3536 rx_allocated = i; 3537 for (i = 0; i < rx_allocated; i++) { 3538 rx_ring = &rtwpci->rx_rings[i]; 3539 rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring); 3540 } 3541 3542 return ret; 3543 } 3544 3545 static int rtw89_pci_alloc_trx_rings(struct rtw89_dev *rtwdev, 3546 struct pci_dev *pdev) 3547 { 3548 int ret; 3549 3550 ret = rtw89_pci_alloc_tx_rings(rtwdev, pdev); 3551 if (ret) { 3552 rtw89_err(rtwdev, "failed to alloc dma tx rings\n"); 3553 goto err; 3554 } 3555 3556 ret = rtw89_pci_alloc_rx_rings(rtwdev, pdev); 3557 if (ret) { 3558 rtw89_err(rtwdev, "failed to alloc dma rx rings\n"); 3559 goto err_free_tx_rings; 3560 } 3561 3562 return 0; 3563 3564 err_free_tx_rings: 3565 rtw89_pci_free_tx_rings(rtwdev, pdev); 3566 err: 3567 return ret; 3568 } 3569 3570 static void rtw89_pci_h2c_init(struct rtw89_dev *rtwdev, 3571 struct rtw89_pci *rtwpci) 3572 { 3573 skb_queue_head_init(&rtwpci->h2c_queue); 3574 skb_queue_head_init(&rtwpci->h2c_release_queue); 3575 } 3576 3577 static int rtw89_pci_setup_resource(struct rtw89_dev *rtwdev, 3578 struct pci_dev *pdev) 3579 { 3580 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3581 int ret; 3582 3583 ret = rtw89_pci_setup_mapping(rtwdev, pdev); 3584 if (ret) { 3585 rtw89_err(rtwdev, "failed to setup pci mapping\n"); 3586 goto err; 3587 } 3588 3589 ret = rtw89_pci_alloc_trx_rings(rtwdev, pdev); 3590 if (ret) { 3591 rtw89_err(rtwdev, "failed to alloc pci trx rings\n"); 3592 goto err_pci_unmap; 3593 } 3594 3595 rtw89_pci_h2c_init(rtwdev, rtwpci); 3596 3597 
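/* irq_lock guards the interrupt enable state (see
 * rtw89_pci_enable_intr_lock()); trx_lock serializes TX/RX ring
 * manipulation (see rtw89_pci_ops_reset()).
 */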
spin_lock_init(&rtwpci->irq_lock); 3598 spin_lock_init(&rtwpci->trx_lock); 3599 3600 return 0; 3601 3602 err_pci_unmap: 3603 rtw89_pci_clear_mapping(rtwdev, pdev); 3604 err: 3605 return ret; 3606 } 3607 3608 static void rtw89_pci_clear_resource(struct rtw89_dev *rtwdev, 3609 struct pci_dev *pdev) 3610 { 3611 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3612 3613 rtw89_pci_free_trx_rings(rtwdev, pdev); 3614 rtw89_pci_clear_mapping(rtwdev, pdev); 3615 rtw89_pci_release_fwcmd(rtwdev, rtwpci, 3616 skb_queue_len(&rtwpci->h2c_queue), true); 3617 } 3618 3619 void rtw89_pci_config_intr_mask(struct rtw89_dev *rtwdev) 3620 { 3621 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3622 const struct rtw89_chip_info *chip = rtwdev->chip; 3623 u32 hs0isr_ind_int_en = B_AX_HS0ISR_IND_INT_EN; 3624 3625 if (chip->chip_id == RTL8851B) 3626 hs0isr_ind_int_en = B_AX_HS0ISR_IND_INT_EN_WKARND; 3627 3628 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN; 3629 3630 if (rtwpci->under_recovery) { 3631 rtwpci->intrs[0] = hs0isr_ind_int_en; 3632 rtwpci->intrs[1] = 0; 3633 } else { 3634 rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN | 3635 B_AX_RXDMA_INT_EN | 3636 B_AX_RXP1DMA_INT_EN | 3637 B_AX_RPQDMA_INT_EN | 3638 B_AX_RXDMA_STUCK_INT_EN | 3639 B_AX_RDU_INT_EN | 3640 B_AX_RPQBD_FULL_INT_EN | 3641 hs0isr_ind_int_en; 3642 3643 rtwpci->intrs[1] = B_AX_HC10ISR_IND_INT_EN; 3644 } 3645 } 3646 EXPORT_SYMBOL(rtw89_pci_config_intr_mask); 3647 3648 static void rtw89_pci_recovery_intr_mask_v1(struct rtw89_dev *rtwdev) 3649 { 3650 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3651 3652 rtwpci->ind_intrs = B_AX_HS0ISR_IND_INT_EN; 3653 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; 3654 rtwpci->intrs[0] = 0; 3655 rtwpci->intrs[1] = 0; 3656 } 3657 3658 static void rtw89_pci_default_intr_mask_v1(struct rtw89_dev *rtwdev) 3659 { 3660 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3661 3662 rtwpci->ind_intrs = B_AX_HCI_AXIDMA_INT_EN | 3663 B_AX_HS1ISR_IND_INT_EN | 3664 B_AX_HS0ISR_IND_INT_EN; 3665 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; 3666 rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN | 3667 B_AX_RXDMA_INT_EN | 3668 B_AX_RXP1DMA_INT_EN | 3669 B_AX_RPQDMA_INT_EN | 3670 B_AX_RXDMA_STUCK_INT_EN | 3671 B_AX_RDU_INT_EN | 3672 B_AX_RPQBD_FULL_INT_EN; 3673 rtwpci->intrs[1] = B_AX_GPIO18_INT_EN; 3674 } 3675 3676 static void rtw89_pci_low_power_intr_mask_v1(struct rtw89_dev *rtwdev) 3677 { 3678 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3679 3680 rtwpci->ind_intrs = B_AX_HS1ISR_IND_INT_EN | 3681 B_AX_HS0ISR_IND_INT_EN; 3682 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; 3683 rtwpci->intrs[0] = 0; 3684 rtwpci->intrs[1] = B_AX_GPIO18_INT_EN; 3685 } 3686 3687 void rtw89_pci_config_intr_mask_v1(struct rtw89_dev *rtwdev) 3688 { 3689 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3690 3691 if (rtwpci->under_recovery) 3692 rtw89_pci_recovery_intr_mask_v1(rtwdev); 3693 else if (rtwpci->low_power) 3694 rtw89_pci_low_power_intr_mask_v1(rtwdev); 3695 else 3696 rtw89_pci_default_intr_mask_v1(rtwdev); 3697 } 3698 EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v1); 3699 3700 static void rtw89_pci_recovery_intr_mask_v2(struct rtw89_dev *rtwdev) 3701 { 3702 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3703 3704 rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0; 3705 rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN; 3706 rtwpci->intrs[0] = 0; 3707 rtwpci->intrs[1] = 0;
3708 } 3709 3710 static void rtw89_pci_default_intr_mask_v2(struct rtw89_dev *rtwdev) 3711 { 3712 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3713 3714 rtwpci->ind_intrs = B_BE_HCI_AXIDMA_INT_EN0 | 3715 B_BE_HS0_IND_INT_EN0; 3716 rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN; 3717 rtwpci->intrs[0] = B_BE_RDU_CH1_INT_IMR_V1 | 3718 B_BE_RDU_CH0_INT_IMR_V1; 3719 rtwpci->intrs[1] = B_BE_PCIE_RX_RX0P2_IMR0_V1 | 3720 B_BE_PCIE_RX_RPQ0_IMR0_V1; 3721 } 3722 3723 static void rtw89_pci_low_power_intr_mask_v2(struct rtw89_dev *rtwdev) 3724 { 3725 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3726 3727 rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0 | 3728 B_BE_HS1_IND_INT_EN0; 3729 rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN; 3730 rtwpci->intrs[0] = 0; 3731 rtwpci->intrs[1] = B_BE_PCIE_RX_RX0P2_IMR0_V1 | 3732 B_BE_PCIE_RX_RPQ0_IMR0_V1; 3733 } 3734 3735 void rtw89_pci_config_intr_mask_v2(struct rtw89_dev *rtwdev) 3736 { 3737 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3738 3739 if (rtwpci->under_recovery) 3740 rtw89_pci_recovery_intr_mask_v2(rtwdev); 3741 else if (rtwpci->low_power) 3742 rtw89_pci_low_power_intr_mask_v2(rtwdev); 3743 else 3744 rtw89_pci_default_intr_mask_v2(rtwdev); 3745 } 3746 EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v2); 3747 3748 static int rtw89_pci_request_irq(struct rtw89_dev *rtwdev, 3749 struct pci_dev *pdev) 3750 { 3751 unsigned long flags = 0; 3752 int ret; 3753 3754 flags |= PCI_IRQ_INTX | PCI_IRQ_MSI; 3755 ret = pci_alloc_irq_vectors(pdev, 1, 1, flags); 3756 if (ret < 0) { 3757 rtw89_err(rtwdev, "failed to alloc irq vectors, ret %d\n", ret); 3758 goto err; 3759 } 3760 3761 ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq, 3762 rtw89_pci_interrupt_handler, 3763 rtw89_pci_interrupt_threadfn, 3764 IRQF_SHARED, KBUILD_MODNAME, rtwdev); 3765 if (ret) { 3766 rtw89_err(rtwdev, "failed to request threaded irq\n"); 3767 goto err_free_vector; 3768 } 3769 3770 rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RESET); 3771 3772 return 0; 3773 3774 err_free_vector: 3775 pci_free_irq_vectors(pdev); 3776 err: 3777 return ret; 3778 } 3779 3780 static void rtw89_pci_free_irq(struct rtw89_dev *rtwdev, 3781 struct pci_dev *pdev) 3782 { 3783 devm_free_irq(rtwdev->dev, pdev->irq, rtwdev); 3784 pci_free_irq_vectors(pdev); 3785 } 3786 /* Convert a reflected-binary Gray code to plain binary: each shift-XOR step folds the parity of the higher-order bits into the current bit, e.g. Gray 0b110 decodes to binary 0b100. */ 3787 static u16 gray_code_to_bin(u16 gray_code) 3788 { 3789 u16 binary = gray_code; 3790 3791 while (gray_code) { 3792 gray_code >>= 1; 3793 binary ^= gray_code; 3794 } 3795 3796 return binary; 3797 } 3798 3799 static int rtw89_pci_filter_out(struct rtw89_dev *rtwdev) 3800 { 3801 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3802 struct pci_dev *pdev = rtwpci->pdev; 3803 u16 val16, filter_out_val; 3804 u32 val, phy_offset; 3805 int ret; 3806 3807 if (rtwdev->chip->chip_id != RTL8852C) 3808 return 0; 3809 3810 val = rtw89_read32_mask(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK); 3811 if (val == B_AX_ASPM_CTRL_L1) 3812 return 0; 3813 3814 ret = pci_read_config_dword(pdev, RTW89_PCIE_L1_STS_V1, &val); 3815 if (ret) 3816 return ret; 3817 3818 val = FIELD_GET(RTW89_BCFG_LINK_SPEED_MASK, val); 3819 if (val == RTW89_PCIE_GEN1_SPEED) { 3820 phy_offset = R_RAC_DIRECT_OFFSET_G1; 3821 } else if (val == RTW89_PCIE_GEN2_SPEED) { 3822 phy_offset = R_RAC_DIRECT_OFFSET_G2; 3823 val16 = rtw89_read16(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT); 3824 rtw89_write16(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT, 3825 val16 | B_PCIE_BIT_PINOUT_DIS); 3826
		val16 = rtw89_read16(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT);
		rtw89_write16(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT,
			      val16 & ~B_PCIE_BIT_RD_SEL);

		val16 = rtw89_read16_mask(rtwdev,
					  phy_offset + RAC_ANA1F * RAC_MULT,
					  FILTER_OUT_EQ_MASK);
		val16 = gray_code_to_bin(val16);
		filter_out_val = rtw89_read16(rtwdev, phy_offset + RAC_ANA24 *
					      RAC_MULT);
		filter_out_val &= ~REG_FILTER_OUT_MASK;
		filter_out_val |= FIELD_PREP(REG_FILTER_OUT_MASK, val16);

		rtw89_write16(rtwdev, phy_offset + RAC_ANA24 * RAC_MULT,
			      filter_out_val);
		rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0A * RAC_MULT,
				  B_BAC_EQ_SEL);
		rtw89_write16_set(rtwdev,
				  R_RAC_DIRECT_OFFSET_G1 + RAC_ANA0C * RAC_MULT,
				  B_PCIE_BIT_PSAVE);
	} else {
		return -EOPNOTSUPP;
	}
	rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0C * RAC_MULT,
			  B_PCIE_BIT_PSAVE);

	return 0;
}

static void rtw89_pci_clkreq_set(struct rtw89_dev *rtwdev, bool enable)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	const struct rtw89_pci_gen_def *gen_def = info->gen_def;

	if (rtw89_pci_disable_clkreq)
		return;

	gen_def->clkreq_set(rtwdev, enable);
}

static void rtw89_pci_clkreq_set_ax(struct rtw89_dev *rtwdev, bool enable)
{
	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
	int ret;

	ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL,
					  PCIE_CLKDLY_HW_30US);
	if (ret)
		rtw89_err(rtwdev, "failed to set CLKREQ Delay\n");

	if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
		if (enable)
			ret = rtw89_pci_config_byte_set(rtwdev,
							RTW89_PCIE_L1_CTRL,
							RTW89_PCIE_BIT_CLK);
		else
			ret = rtw89_pci_config_byte_clr(rtwdev,
							RTW89_PCIE_L1_CTRL,
							RTW89_PCIE_BIT_CLK);
		if (ret)
			rtw89_err(rtwdev, "failed to %s CLKREQ_L1, ret=%d",
				  enable ? "set" : "unset", ret);
"set" : "unset", ret); 3887 } else if (chip_id == RTL8852C) { 3888 rtw89_write32_set(rtwdev, R_AX_PCIE_LAT_CTRL, 3889 B_AX_CLK_REQ_SEL_OPT | B_AX_CLK_REQ_SEL); 3890 if (enable) 3891 rtw89_write32_set(rtwdev, R_AX_L1_CLK_CTRL, 3892 B_AX_CLK_REQ_N); 3893 else 3894 rtw89_write32_clr(rtwdev, R_AX_L1_CLK_CTRL, 3895 B_AX_CLK_REQ_N); 3896 } 3897 } 3898 3899 static void rtw89_pci_aspm_set(struct rtw89_dev *rtwdev, bool enable) 3900 { 3901 const struct rtw89_pci_info *info = rtwdev->pci_info; 3902 const struct rtw89_pci_gen_def *gen_def = info->gen_def; 3903 3904 if (rtw89_pci_disable_aspm_l1) 3905 return; 3906 3907 gen_def->aspm_set(rtwdev, enable); 3908 } 3909 3910 static void rtw89_pci_aspm_set_ax(struct rtw89_dev *rtwdev, bool enable) 3911 { 3912 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 3913 u8 value = 0; 3914 int ret; 3915 3916 ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, &value); 3917 if (ret) 3918 rtw89_warn(rtwdev, "failed to read ASPM Delay\n"); 3919 3920 u8p_replace_bits(&value, PCIE_L1DLY_16US, RTW89_L1DLY_MASK); 3921 u8p_replace_bits(&value, PCIE_L0SDLY_4US, RTW89_L0DLY_MASK); 3922 3923 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, value); 3924 if (ret) 3925 rtw89_warn(rtwdev, "failed to read ASPM Delay\n"); 3926 3927 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 3928 if (enable) 3929 ret = rtw89_pci_config_byte_set(rtwdev, 3930 RTW89_PCIE_L1_CTRL, 3931 RTW89_PCIE_BIT_L1); 3932 else 3933 ret = rtw89_pci_config_byte_clr(rtwdev, 3934 RTW89_PCIE_L1_CTRL, 3935 RTW89_PCIE_BIT_L1); 3936 } else if (chip_id == RTL8852C) { 3937 if (enable) 3938 rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1, 3939 B_AX_ASPM_CTRL_L1); 3940 else 3941 rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1, 3942 B_AX_ASPM_CTRL_L1); 3943 } 3944 if (ret) 3945 rtw89_err(rtwdev, "failed to %s ASPM L1, ret=%d", 3946 enable ? "set" : "unset", ret); 3947 } 3948 3949 static void rtw89_pci_recalc_int_mit(struct rtw89_dev *rtwdev) 3950 { 3951 enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen; 3952 const struct rtw89_pci_info *info = rtwdev->pci_info; 3953 struct rtw89_traffic_stats *stats = &rtwdev->stats; 3954 enum rtw89_tfc_lv tx_tfc_lv = stats->tx_tfc_lv; 3955 enum rtw89_tfc_lv rx_tfc_lv = stats->rx_tfc_lv; 3956 u32 val = 0; 3957 3958 if (rtwdev->scanning || 3959 (tx_tfc_lv < RTW89_TFC_HIGH && rx_tfc_lv < RTW89_TFC_HIGH)) 3960 goto out; 3961 3962 if (chip_gen == RTW89_CHIP_BE) 3963 val = B_BE_PCIE_MIT_RX0P2_EN | B_BE_PCIE_MIT_RX0P1_EN; 3964 else 3965 val = B_AX_RXMIT_RXP2_SEL | B_AX_RXMIT_RXP1_SEL | 3966 FIELD_PREP(B_AX_RXCOUNTER_MATCH_MASK, RTW89_PCI_RXBD_NUM_MAX / 2) | 3967 FIELD_PREP(B_AX_RXTIMER_UNIT_MASK, AX_RXTIMER_UNIT_64US) | 3968 FIELD_PREP(B_AX_RXTIMER_MATCH_MASK, 2048 / 64); 3969 3970 out: 3971 rtw89_write32(rtwdev, info->mit_addr, val); 3972 } 3973 3974 static void rtw89_pci_link_cfg(struct rtw89_dev *rtwdev) 3975 { 3976 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3977 struct pci_dev *pdev = rtwpci->pdev; 3978 u16 link_ctrl; 3979 int ret; 3980 3981 /* Though there is standard PCIE configuration space to set the 3982 * link control register, but by Realtek's design, driver should 3983 * check if host supports CLKREQ/ASPM to enable the HW module. 3984 * 3985 * These functions are implemented by two HW modules associated, 3986 * one is responsible to access PCIE configuration space to 3987 * follow the host settings, and another is in charge of doing 3988 * CLKREQ/ASPM mechanisms, it is default disabled. 
	ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl);
	if (ret) {
		rtw89_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret);
		return;
	}

	if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN)
		rtw89_pci_clkreq_set(rtwdev, true);

	if (link_ctrl & PCI_EXP_LNKCTL_ASPM_L1)
		rtw89_pci_aspm_set(rtwdev, true);
}

static void rtw89_pci_l1ss_set(struct rtw89_dev *rtwdev, bool enable)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	const struct rtw89_pci_gen_def *gen_def = info->gen_def;

	if (rtw89_pci_disable_l1ss)
		return;

	gen_def->l1ss_set(rtwdev, enable);
}

static void rtw89_pci_l1ss_set_ax(struct rtw89_dev *rtwdev, bool enable)
{
	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
	int ret;

	if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
		if (enable)
			ret = rtw89_pci_config_byte_set(rtwdev,
							RTW89_PCIE_TIMER_CTRL,
							RTW89_PCIE_BIT_L1SUB);
		else
			ret = rtw89_pci_config_byte_clr(rtwdev,
							RTW89_PCIE_TIMER_CTRL,
							RTW89_PCIE_BIT_L1SUB);
		if (ret)
			rtw89_err(rtwdev, "failed to %s L1SS, ret=%d",
				  enable ? "set" : "unset", ret);
	} else if (chip_id == RTL8852C) {
		ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_L1SS_STS_V1,
						RTW89_PCIE_BIT_ASPM_L11 |
						RTW89_PCIE_BIT_PCI_L11);
		if (ret)
			rtw89_warn(rtwdev, "failed to unset ASPM L1.1, ret=%d", ret);
		if (enable)
			rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1,
					  B_AX_L1SUB_DISABLE);
		else
			rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1,
					  B_AX_L1SUB_DISABLE);
	}
}

static void rtw89_pci_l1ss_cfg(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;
	u32 l1ss_cap_ptr, l1ss_ctrl;

	if (rtw89_pci_disable_l1ss)
		return;

	l1ss_cap_ptr = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
	if (!l1ss_cap_ptr)
		return;

	pci_read_config_dword(pdev, l1ss_cap_ptr + PCI_L1SS_CTL1, &l1ss_ctrl);

	if (l1ss_ctrl & PCI_L1SS_CTL1_L1SS_MASK)
		rtw89_pci_l1ss_set(rtwdev, true);
}

static int rtw89_pci_poll_io_idle_ax(struct rtw89_dev *rtwdev)
{
	int ret = 0;
	u32 sts;
	u32 busy = B_AX_PCIEIO_BUSY | B_AX_PCIEIO_TX_BUSY | B_AX_PCIEIO_RX_BUSY;

	ret = read_poll_timeout_atomic(rtw89_read32, sts, (sts & busy) == 0x0,
				       10, 1000, false, rtwdev,
				       R_AX_PCIE_DMA_BUSY1);
	if (ret) {
		rtw89_err(rtwdev, "pci dmach busy1 0x%X\n",
			  rtw89_read32(rtwdev, R_AX_PCIE_DMA_BUSY1));
		return -EINVAL;
	}
	return ret;
}

static int rtw89_pci_lv1rst_stop_dma_ax(struct rtw89_dev *rtwdev)
{
	u32 val;
	int ret;

	if (rtwdev->chip->chip_id == RTL8852C)
		return 0;

	rtw89_pci_ctrl_dma_all(rtwdev, false);
	ret = rtw89_pci_poll_io_idle_ax(rtwdev);
	if (ret) {
		val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG);
		rtw89_debug(rtwdev, RTW89_DBG_HCI,
"[PCIe] poll_io_idle fail, before 0x%08x: 0x%08x\n", 4103 R_AX_DBG_ERR_FLAG, val); 4104 if (val & B_AX_TX_STUCK || val & B_AX_PCIE_TXBD_LEN0) 4105 rtw89_mac_ctrl_hci_dma_tx(rtwdev, false); 4106 if (val & B_AX_RX_STUCK) 4107 rtw89_mac_ctrl_hci_dma_rx(rtwdev, false); 4108 rtw89_mac_ctrl_hci_dma_trx(rtwdev, true); 4109 ret = rtw89_pci_poll_io_idle_ax(rtwdev); 4110 val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG); 4111 rtw89_debug(rtwdev, RTW89_DBG_HCI, 4112 "[PCIe] poll_io_idle fail, after 0x%08x: 0x%08x\n", 4113 R_AX_DBG_ERR_FLAG, val); 4114 } 4115 4116 return ret; 4117 } 4118 4119 static int rtw89_pci_lv1rst_start_dma_ax(struct rtw89_dev *rtwdev) 4120 { 4121 u32 ret; 4122 4123 if (rtwdev->chip->chip_id == RTL8852C) 4124 return 0; 4125 4126 rtw89_mac_ctrl_hci_dma_trx(rtwdev, false); 4127 rtw89_mac_ctrl_hci_dma_trx(rtwdev, true); 4128 rtw89_pci_clr_idx_all(rtwdev); 4129 4130 ret = rtw89_pci_rst_bdram_ax(rtwdev); 4131 if (ret) 4132 return ret; 4133 4134 rtw89_pci_ctrl_dma_all(rtwdev, true); 4135 return ret; 4136 } 4137 4138 static int rtw89_pci_ops_mac_lv1_recovery(struct rtw89_dev *rtwdev, 4139 enum rtw89_lv1_rcvy_step step) 4140 { 4141 const struct rtw89_pci_info *info = rtwdev->pci_info; 4142 const struct rtw89_pci_gen_def *gen_def = info->gen_def; 4143 int ret; 4144 4145 switch (step) { 4146 case RTW89_LV1_RCVY_STEP_1: 4147 ret = gen_def->lv1rst_stop_dma(rtwdev); 4148 if (ret) 4149 rtw89_err(rtwdev, "lv1 rcvy pci stop dma fail\n"); 4150 4151 break; 4152 4153 case RTW89_LV1_RCVY_STEP_2: 4154 ret = gen_def->lv1rst_start_dma(rtwdev); 4155 if (ret) 4156 rtw89_err(rtwdev, "lv1 rcvy pci start dma fail\n"); 4157 break; 4158 4159 default: 4160 return -EINVAL; 4161 } 4162 4163 return ret; 4164 } 4165 4166 static void rtw89_pci_ops_dump_err_status(struct rtw89_dev *rtwdev) 4167 { 4168 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) 4169 return; 4170 4171 if (rtwdev->chip->chip_id == RTL8852C) { 4172 rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n", 4173 rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG_V1)); 4174 rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n", 4175 rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG_V1)); 4176 } else { 4177 rtw89_info(rtwdev, "R_AX_RPQ_RXBD_IDX =0x%08x\n", 4178 rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX)); 4179 rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n", 4180 rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG)); 4181 rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n", 4182 rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG)); 4183 } 4184 } 4185 4186 static int rtw89_pci_napi_poll(struct napi_struct *napi, int budget) 4187 { 4188 struct rtw89_dev *rtwdev = container_of(napi, struct rtw89_dev, napi); 4189 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 4190 const struct rtw89_pci_info *info = rtwdev->pci_info; 4191 const struct rtw89_pci_gen_def *gen_def = info->gen_def; 4192 unsigned long flags; 4193 int work_done; 4194 4195 rtwdev->napi_budget_countdown = budget; 4196 4197 rtw89_write32(rtwdev, gen_def->isr_clear_rpq.addr, gen_def->isr_clear_rpq.data); 4198 work_done = rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown); 4199 if (work_done == budget) 4200 return budget; 4201 4202 rtw89_write32(rtwdev, gen_def->isr_clear_rxq.addr, gen_def->isr_clear_rxq.data); 4203 work_done += rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown); 4204 if (work_done < budget && napi_complete_done(napi, work_done)) { 4205 spin_lock_irqsave(&rtwpci->irq_lock, flags); 4206 if (likely(rtwpci->running)) 4207 rtw89_chip_enable_intr(rtwdev, rtwpci); 4208 spin_unlock_irqrestore(&rtwpci->irq_lock, 
	}

	return work_done;
}

static
void rtw89_check_pci_ssid_quirks(struct rtw89_dev *rtwdev,
				 struct pci_dev *pdev,
				 const struct rtw89_pci_ssid_quirk *ssid_quirks)
{
	int i;

	if (!ssid_quirks)
		return;

	for (i = 0; i < 200; i++, ssid_quirks++) {
		if (ssid_quirks->vendor == 0 && ssid_quirks->device == 0)
			break;

		if (ssid_quirks->vendor != pdev->vendor ||
		    ssid_quirks->device != pdev->device ||
		    ssid_quirks->subsystem_vendor != pdev->subsystem_vendor ||
		    ssid_quirks->subsystem_device != pdev->subsystem_device)
			continue;

		bitmap_or(rtwdev->quirks, rtwdev->quirks, &ssid_quirks->bitmap,
			  NUM_OF_RTW89_QUIRKS);
		rtwdev->custid = ssid_quirks->custid;
		break;
	}

	rtw89_debug(rtwdev, RTW89_DBG_HCI, "quirks=%*ph custid=%d\n",
		    (int)sizeof(rtwdev->quirks), rtwdev->quirks, rtwdev->custid);
}

static int __maybe_unused rtw89_pci_suspend(struct device *dev)
{
	struct ieee80211_hw *hw = dev_get_drvdata(dev);
	struct rtw89_dev *rtwdev = hw->priv;
	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;

	rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
	rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST);
	rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
	if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
		rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
				  B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
		rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
				  B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG);
	} else {
		rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1,
				  B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN);
	}

	return 0;
}

static void rtw89_pci_l2_hci_ldo(struct rtw89_dev *rtwdev)
{
	if (rtwdev->chip->chip_id == RTL8852C)
		return;

	/* The hardware requires writing the register twice for the setting
	 * to take effect.
	 */
	rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE,
				    RTW89_PCIE_BIT_CFG_RST_MSTATE);
	rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE,
				    RTW89_PCIE_BIT_CFG_RST_MSTATE);
}

void rtw89_pci_basic_cfg(struct rtw89_dev *rtwdev, bool resume)
{
	if (resume)
		rtw89_pci_cfg_dac(rtwdev);

	rtw89_pci_disable_eq(rtwdev);
	rtw89_pci_filter_out(rtwdev);
	rtw89_pci_link_cfg(rtwdev);
	rtw89_pci_l1ss_cfg(rtwdev);
}

static int __maybe_unused rtw89_pci_resume(struct device *dev)
{
	struct ieee80211_hw *hw = dev_get_drvdata(dev);
	struct rtw89_dev *rtwdev = hw->priv;
	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;

	rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
	rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST);
	rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
	if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
		rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL,
				  B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
		rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1,
				  B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG);
	} else {
		rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1,
				  B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN);
		rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1,
				  B_AX_SEL_REQ_ENTR_L1);
	}
	rtw89_pci_l2_hci_ldo(rtwdev);

	rtw89_pci_basic_cfg(rtwdev, true);

	return 0;
}

SIMPLE_DEV_PM_OPS(rtw89_pm_ops, rtw89_pci_suspend, rtw89_pci_resume);
EXPORT_SYMBOL(rtw89_pm_ops);

const struct rtw89_pci_gen_def rtw89_pci_gen_ax = {
	.isr_rdu = B_AX_RDU_INT,
	.isr_halt_c2h = B_AX_HALT_C2H_INT_EN,
	.isr_wdt_timeout = B_AX_WDT_TIMEOUT_INT_EN,
	.isr_clear_rpq = {R_AX_PCIE_HISR00, B_AX_RPQDMA_INT | B_AX_RPQBD_FULL_INT},
	.isr_clear_rxq = {R_AX_PCIE_HISR00, B_AX_RXP1DMA_INT | B_AX_RXDMA_INT |
					    B_AX_RDU_INT},

	.mac_pre_init = rtw89_pci_ops_mac_pre_init_ax,
	.mac_pre_deinit = NULL,
	.mac_post_init = rtw89_pci_ops_mac_post_init_ax,

	.clr_idx_all = rtw89_pci_clr_idx_all_ax,
	.rst_bdram = rtw89_pci_rst_bdram_ax,

	.lv1rst_stop_dma = rtw89_pci_lv1rst_stop_dma_ax,
	.lv1rst_start_dma = rtw89_pci_lv1rst_start_dma_ax,

	.ctrl_txdma_ch = rtw89_pci_ctrl_txdma_ch_ax,
	.ctrl_txdma_fw_ch = rtw89_pci_ctrl_txdma_fw_ch_ax,
	.poll_txdma_ch_idle = rtw89_pci_poll_txdma_ch_idle_ax,

	.aspm_set = rtw89_pci_aspm_set_ax,
	.clkreq_set = rtw89_pci_clkreq_set_ax,
	.l1ss_set = rtw89_pci_l1ss_set_ax,

	.disable_eq = rtw89_pci_disable_eq_ax,
};
EXPORT_SYMBOL(rtw89_pci_gen_ax);

static const struct rtw89_hci_ops rtw89_pci_ops = {
	.tx_write = rtw89_pci_ops_tx_write,
	.tx_kick_off = rtw89_pci_ops_tx_kick_off,
	.flush_queues = rtw89_pci_ops_flush_queues,
	.reset = rtw89_pci_ops_reset,
	.start = rtw89_pci_ops_start,
	.stop = rtw89_pci_ops_stop,
	.pause = rtw89_pci_ops_pause,
	.switch_mode = rtw89_pci_ops_switch_mode,
	.recalc_int_mit = rtw89_pci_recalc_int_mit,

	.read8 = rtw89_pci_ops_read8,
	.read16 = rtw89_pci_ops_read16,
	.read32 = rtw89_pci_ops_read32,
	.write8 = rtw89_pci_ops_write8,
	.write16 = rtw89_pci_ops_write16,
	.write32 = rtw89_pci_ops_write32,

	.mac_pre_init = rtw89_pci_ops_mac_pre_init,
	.mac_pre_deinit = rtw89_pci_ops_mac_pre_deinit,
	.mac_post_init = rtw89_pci_ops_mac_post_init,
	.deinit = rtw89_pci_ops_deinit,

	.check_and_reclaim_tx_resource = rtw89_pci_check_and_reclaim_tx_resource,
	.mac_lv1_rcvy = rtw89_pci_ops_mac_lv1_recovery,
	.dump_err_status = rtw89_pci_ops_dump_err_status,
	.napi_poll = rtw89_pci_napi_poll,

	.recovery_start = rtw89_pci_ops_recovery_start,
	.recovery_complete = rtw89_pci_ops_recovery_complete,

	.ctrl_txdma_ch = rtw89_pci_ctrl_txdma_ch,
	.ctrl_txdma_fw_ch = rtw89_pci_ctrl_txdma_fw_ch,
	.ctrl_trxhci = rtw89_pci_ctrl_dma_trx,
	.poll_txdma_ch_idle = rtw89_pci_poll_txdma_ch_idle,

	.clr_idx_all = rtw89_pci_clr_idx_all,
	.clear = rtw89_pci_clear_resource,
	.disable_intr = rtw89_pci_disable_intr_lock,
	.enable_intr = rtw89_pci_enable_intr_lock,
	.rst_bdram = rtw89_pci_reset_bdram,
};

int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct rtw89_dev *rtwdev;
	const struct rtw89_driver_info *info;
	const struct rtw89_pci_info *pci_info;
	int ret;

	info = (const struct rtw89_driver_info *)id->driver_data;

	rtwdev = rtw89_alloc_ieee80211_hw(&pdev->dev,
					  sizeof(struct rtw89_pci),
					  info->chip);
	if (!rtwdev) {
		dev_err(&pdev->dev, "failed to allocate hw\n");
		return -ENOMEM;
	}

	pci_info = info->bus.pci;

	rtwdev->pci_info = info->bus.pci;
	rtwdev->hci.ops = &rtw89_pci_ops;
	rtwdev->hci.type = RTW89_HCI_TYPE_PCIE;
	rtwdev->hci.rpwm_addr = pci_info->rpwm_addr;
	rtwdev->hci.cpwm_addr = pci_info->cpwm_addr;

	rtw89_check_quirks(rtwdev, info->quirks);
	rtw89_check_pci_ssid_quirks(rtwdev, pdev, pci_info->ssid_quirks);

	SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);

	ret = rtw89_core_init(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to initialise core\n");
		goto err_release_hw;
	}

	ret = rtw89_pci_claim_device(rtwdev, pdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to claim pci device\n");
		goto err_core_deinit;
	}

	ret = rtw89_pci_setup_resource(rtwdev, pdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to setup pci resource\n");
		goto err_declaim_pci;
	}

	ret = rtw89_chip_info_setup(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to setup chip information\n");
		goto err_clear_resource;
	}

	rtw89_pci_basic_cfg(rtwdev, false);

	ret = rtw89_core_napi_init(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to init napi\n");
		goto err_clear_resource;
	}

	ret = rtw89_pci_request_irq(rtwdev, pdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to request pci irq\n");
		goto err_deinit_napi;
	}

	ret = rtw89_core_register(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to register core\n");
		goto err_free_irq;
	}

	set_bit(RTW89_FLAG_PROBE_DONE, rtwdev->flags);

	return 0;

err_free_irq:
	rtw89_pci_free_irq(rtwdev, pdev);
err_deinit_napi:
	rtw89_core_napi_deinit(rtwdev);
err_clear_resource:
	rtw89_pci_clear_resource(rtwdev, pdev);
err_declaim_pci:
	rtw89_pci_declaim_device(rtwdev, pdev);
err_core_deinit:
	rtw89_core_deinit(rtwdev);
err_release_hw:
	rtw89_free_ieee80211_hw(rtwdev);

	return ret;
}
EXPORT_SYMBOL(rtw89_pci_probe);

void rtw89_pci_remove(struct pci_dev *pdev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
	struct rtw89_dev *rtwdev;

	rtwdev = hw->priv;

	rtw89_pci_free_irq(rtwdev, pdev);
	rtw89_core_napi_deinit(rtwdev);
	rtw89_core_unregister(rtwdev);
	rtw89_pci_clear_resource(rtwdev, pdev);
	rtw89_pci_declaim_device(rtwdev, pdev);
	rtw89_core_deinit(rtwdev);
	rtw89_free_ieee80211_hw(rtwdev);
}
EXPORT_SYMBOL(rtw89_pci_remove);

MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek PCI 802.11ax wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
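/*
 * For reference, a sketch (not compiled here) of how a per-chip glue module
 * consumes the symbols this file exports (rtw89_pci_probe, rtw89_pci_remove,
 * rtw89_pm_ops), modelled on glue drivers such as rtw8852ae.c. The device ID
 * and the rtw89_example_info symbol are illustrative only; each chip module
 * defines its own ID table and rtw89_driver_info.
 */
#if 0
static const struct pci_device_id rtw89_example_id_table[] = {
	{
		PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8852),
		/* points at the chip's struct rtw89_driver_info */
		.driver_data = (kernel_ulong_t)&rtw89_example_info,
	},
	{},
};
MODULE_DEVICE_TABLE(pci, rtw89_example_id_table);

static struct pci_driver rtw89_example_driver = {
	.name = "rtw89_example",
	.id_table = rtw89_example_id_table,
	.probe = rtw89_pci_probe,
	.remove = rtw89_pci_remove,
	.driver.pm = &rtw89_pm_ops,
};
module_pci_driver(rtw89_example_driver);
#endif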