1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* Copyright(c) 2020 Realtek Corporation 3 */ 4 5 #include <linux/pci.h> 6 7 #include "mac.h" 8 #include "pci.h" 9 #include "reg.h" 10 #include "ser.h" 11 12 static bool rtw89_pci_disable_clkreq; 13 static bool rtw89_pci_disable_aspm_l1; 14 static bool rtw89_pci_disable_l1ss; 15 module_param_named(disable_clkreq, rtw89_pci_disable_clkreq, bool, 0644); 16 module_param_named(disable_aspm_l1, rtw89_pci_disable_aspm_l1, bool, 0644); 17 module_param_named(disable_aspm_l1ss, rtw89_pci_disable_l1ss, bool, 0644); 18 MODULE_PARM_DESC(disable_clkreq, "Set Y to disable PCI clkreq support"); 19 MODULE_PARM_DESC(disable_aspm_l1, "Set Y to disable PCI ASPM L1 support"); 20 MODULE_PARM_DESC(disable_aspm_l1ss, "Set Y to disable PCI L1SS support"); 21 22 static int rtw89_pci_get_phy_offset_by_link_speed(struct rtw89_dev *rtwdev, 23 u32 *phy_offset) 24 { 25 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 26 struct pci_dev *pdev = rtwpci->pdev; 27 u32 val; 28 int ret; 29 30 ret = pci_read_config_dword(pdev, RTW89_PCIE_L1_STS_V1, &val); 31 if (ret) 32 return ret; 33 34 val = u32_get_bits(val, RTW89_BCFG_LINK_SPEED_MASK); 35 if (val == RTW89_PCIE_GEN1_SPEED) { 36 *phy_offset = R_RAC_DIRECT_OFFSET_G1; 37 } else if (val == RTW89_PCIE_GEN2_SPEED) { 38 *phy_offset = R_RAC_DIRECT_OFFSET_G2; 39 } else { 40 rtw89_warn(rtwdev, "Unknown PCI link speed %d\n", val); 41 return -EFAULT; 42 } 43 44 return 0; 45 } 46 47 static int rtw89_pci_rst_bdram_ax(struct rtw89_dev *rtwdev) 48 { 49 u32 val; 50 int ret; 51 52 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RST_BDRAM); 53 54 ret = read_poll_timeout_atomic(rtw89_read32, val, !(val & B_AX_RST_BDRAM), 55 1, RTW89_PCI_POLL_BDRAM_RST_CNT, false, 56 rtwdev, R_AX_PCIE_INIT_CFG1); 57 58 return ret; 59 } 60 61 static u32 rtw89_pci_dma_recalc(struct rtw89_dev *rtwdev, 62 struct rtw89_pci_dma_ring *bd_ring, 63 u32 cur_idx, bool tx) 64 { 65 const struct rtw89_pci_info *info = rtwdev->pci_info; 66 u32 cnt, cur_rp, wp, rp, len; 67 68 rp = bd_ring->rp; 69 wp = bd_ring->wp; 70 len = bd_ring->len; 71 72 cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx); 73 if (tx) { 74 cnt = cur_rp >= rp ? cur_rp - rp : len - (rp - cur_rp); 75 } else { 76 if (info->rx_ring_eq_is_full) 77 wp += 1; 78 79 cnt = cur_rp >= wp ? cur_rp - wp : len - (wp - cur_rp); 80 } 81 82 bd_ring->rp = cur_rp; 83 84 return cnt; 85 } 86 87 static u32 rtw89_pci_txbd_recalc(struct rtw89_dev *rtwdev, 88 struct rtw89_pci_tx_ring *tx_ring) 89 { 90 struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring; 91 u32 addr_idx = bd_ring->addr.idx; 92 u32 cnt, idx; 93 94 idx = rtw89_read32(rtwdev, addr_idx); 95 cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, true); 96 97 return cnt; 98 } 99 100 static void rtw89_pci_release_fwcmd(struct rtw89_dev *rtwdev, 101 struct rtw89_pci *rtwpci, 102 u32 cnt, bool release_all) 103 { 104 struct rtw89_pci_tx_data *tx_data; 105 struct sk_buff *skb; 106 u32 qlen; 107 108 while (cnt--) { 109 skb = skb_dequeue(&rtwpci->h2c_queue); 110 if (!skb) { 111 rtw89_err(rtwdev, "failed to pre-release fwcmd\n"); 112 return; 113 } 114 skb_queue_tail(&rtwpci->h2c_release_queue, skb); 115 } 116 117 qlen = skb_queue_len(&rtwpci->h2c_release_queue); 118 if (!release_all) 119 qlen = qlen > RTW89_PCI_MULTITAG ? qlen - RTW89_PCI_MULTITAG : 0; 120 121 while (qlen--) { 122 skb = skb_dequeue(&rtwpci->h2c_release_queue); 123 if (!skb) { 124 rtw89_err(rtwdev, "failed to release fwcmd\n"); 125 return; 126 } 127 tx_data = RTW89_PCI_TX_SKB_CB(skb); 128 dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len, 129 DMA_TO_DEVICE); 130 dev_kfree_skb_any(skb); 131 } 132 } 133 134 static void rtw89_pci_reclaim_tx_fwcmd(struct rtw89_dev *rtwdev, 135 struct rtw89_pci *rtwpci) 136 { 137 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12]; 138 u32 cnt; 139 140 cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring); 141 if (!cnt) 142 return; 143 rtw89_pci_release_fwcmd(rtwdev, rtwpci, cnt, false); 144 } 145 146 static u32 rtw89_pci_rxbd_recalc(struct rtw89_dev *rtwdev, 147 struct rtw89_pci_rx_ring *rx_ring) 148 { 149 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; 150 u32 addr_idx = bd_ring->addr.idx; 151 u32 cnt, idx; 152 153 idx = rtw89_read32(rtwdev, addr_idx); 154 cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, false); 155 156 return cnt; 157 } 158 159 static void rtw89_pci_sync_skb_for_cpu(struct rtw89_dev *rtwdev, 160 struct sk_buff *skb) 161 { 162 struct rtw89_pci_rx_info *rx_info; 163 dma_addr_t dma; 164 165 rx_info = RTW89_PCI_RX_SKB_CB(skb); 166 dma = rx_info->dma; 167 dma_sync_single_for_cpu(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE, 168 DMA_FROM_DEVICE); 169 } 170 171 static void rtw89_pci_sync_skb_for_device(struct rtw89_dev *rtwdev, 172 struct sk_buff *skb) 173 { 174 struct rtw89_pci_rx_info *rx_info; 175 dma_addr_t dma; 176 177 rx_info = RTW89_PCI_RX_SKB_CB(skb); 178 dma = rx_info->dma; 179 dma_sync_single_for_device(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE, 180 DMA_FROM_DEVICE); 181 } 182 183 static void rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev, 184 struct sk_buff *skb) 185 { 186 struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb); 187 struct rtw89_pci_rxbd_info *rxbd_info; 188 __le32 info; 189 190 rxbd_info = (struct rtw89_pci_rxbd_info *)skb->data; 191 info = rxbd_info->dword; 192 193 rx_info->fs = le32_get_bits(info, RTW89_PCI_RXBD_FS); 194 rx_info->ls = le32_get_bits(info, RTW89_PCI_RXBD_LS); 195 rx_info->len = le32_get_bits(info, RTW89_PCI_RXBD_WRITE_SIZE); 196 rx_info->tag = le32_get_bits(info, RTW89_PCI_RXBD_TAG); 197 } 198 199 static int rtw89_pci_validate_rx_tag(struct rtw89_dev *rtwdev, 200 struct rtw89_pci_rx_ring *rx_ring, 201 struct sk_buff *skb) 202 { 203 struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb); 204 const struct rtw89_pci_info *info = rtwdev->pci_info; 205 u32 target_rx_tag; 206 207 if (!info->check_rx_tag) 208 return 0; 209 210 /* valid range is 1 ~ 0x1FFF */ 211 if (rx_ring->target_rx_tag == 0) 212 target_rx_tag = 1; 213 else 214 target_rx_tag = rx_ring->target_rx_tag; 215 216 if (rx_info->tag != target_rx_tag) { 217 rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "mismatch RX tag 0x%x 0x%x\n", 218 rx_info->tag, target_rx_tag); 219 return -EAGAIN; 220 } 221 222 return 0; 223 } 224 225 static 226 int rtw89_pci_sync_skb_for_device_and_validate_rx_info(struct rtw89_dev *rtwdev, 227 struct rtw89_pci_rx_ring *rx_ring, 228 struct sk_buff *skb) 229 { 230 struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb); 231 int rx_tag_retry = 100; 232 int ret; 233 234 do { 235 rtw89_pci_sync_skb_for_cpu(rtwdev, skb); 236 rtw89_pci_rxbd_info_update(rtwdev, skb); 237 238 ret = rtw89_pci_validate_rx_tag(rtwdev, rx_ring, skb); 239 if (ret != -EAGAIN) 240 break; 241 } while (rx_tag_retry--); 242 243 /* update target rx_tag for next RX */ 244 rx_ring->target_rx_tag = rx_info->tag + 1; 245 246 return ret; 247 } 248 249 static void rtw89_pci_ctrl_txdma_ch_ax(struct rtw89_dev *rtwdev, bool enable) 250 { 251 const struct rtw89_pci_info *info = rtwdev->pci_info; 252 const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1; 253 const struct rtw89_reg_def *dma_stop2 = &info->dma_stop2; 254 255 if (enable) { 256 rtw89_write32_clr(rtwdev, dma_stop1->addr, dma_stop1->mask); 257 if (dma_stop2->addr) 258 rtw89_write32_clr(rtwdev, dma_stop2->addr, dma_stop2->mask); 259 } else { 260 rtw89_write32_set(rtwdev, dma_stop1->addr, dma_stop1->mask); 261 if (dma_stop2->addr) 262 rtw89_write32_set(rtwdev, dma_stop2->addr, dma_stop2->mask); 263 } 264 } 265 266 static void rtw89_pci_ctrl_txdma_fw_ch_ax(struct rtw89_dev *rtwdev, bool enable) 267 { 268 const struct rtw89_pci_info *info = rtwdev->pci_info; 269 const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1; 270 271 if (enable) 272 rtw89_write32_clr(rtwdev, dma_stop1->addr, B_AX_STOP_CH12); 273 else 274 rtw89_write32_set(rtwdev, dma_stop1->addr, B_AX_STOP_CH12); 275 } 276 277 static bool 278 rtw89_skb_put_rx_data(struct rtw89_dev *rtwdev, bool fs, bool ls, 279 struct sk_buff *new, 280 const struct sk_buff *skb, u32 offset, 281 const struct rtw89_pci_rx_info *rx_info, 282 const struct rtw89_rx_desc_info *desc_info) 283 { 284 u32 copy_len = rx_info->len - offset; 285 286 if (unlikely(skb_tailroom(new) < copy_len)) { 287 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 288 "invalid rx data length bd_len=%d desc_len=%d offset=%d (fs=%d ls=%d)\n", 289 rx_info->len, desc_info->pkt_size, offset, fs, ls); 290 rtw89_hex_dump(rtwdev, RTW89_DBG_TXRX, "rx_data: ", 291 skb->data, rx_info->len); 292 /* length of a single segment skb is desc_info->pkt_size */ 293 if (fs && ls) { 294 copy_len = desc_info->pkt_size; 295 } else { 296 rtw89_info(rtwdev, "drop rx data due to invalid length\n"); 297 return false; 298 } 299 } 300 301 skb_put_data(new, skb->data + offset, copy_len); 302 303 return true; 304 } 305 306 static u32 rtw89_pci_get_rx_skb_idx(struct rtw89_dev *rtwdev, 307 struct rtw89_pci_dma_ring *bd_ring) 308 { 309 const struct rtw89_pci_info *info = rtwdev->pci_info; 310 u32 wp = bd_ring->wp; 311 312 if (!info->rx_ring_eq_is_full) 313 return wp; 314 315 if (++wp >= bd_ring->len) 316 wp = 0; 317 318 return wp; 319 } 320 321 static u32 rtw89_pci_rxbd_deliver_skbs(struct rtw89_dev *rtwdev, 322 struct rtw89_pci_rx_ring *rx_ring) 323 { 324 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; 325 struct rtw89_pci_rx_info *rx_info; 326 struct rtw89_rx_desc_info *desc_info = &rx_ring->diliver_desc; 327 struct sk_buff *new = rx_ring->diliver_skb; 328 struct sk_buff *skb; 329 u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info); 330 u32 skb_idx; 331 u32 offset; 332 u32 cnt = 1; 333 bool fs, ls; 334 int ret; 335 336 skb_idx = rtw89_pci_get_rx_skb_idx(rtwdev, bd_ring); 337 skb = rx_ring->buf[skb_idx]; 338 339 ret = rtw89_pci_sync_skb_for_device_and_validate_rx_info(rtwdev, rx_ring, skb); 340 if (ret) { 341 rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n", 342 bd_ring->wp, ret); 343 goto err_sync_device; 344 } 345 346 rx_info = RTW89_PCI_RX_SKB_CB(skb); 347 fs = rx_info->fs; 348 ls = rx_info->ls; 349 350 if (fs) { 351 if (new) { 352 rtw89_debug(rtwdev, RTW89_DBG_UNEXP, 353 "skb should not be ready before first segment start\n"); 354 goto err_sync_device; 355 } 356 if (desc_info->ready) { 357 rtw89_warn(rtwdev, "desc info should not be ready before first segment start\n"); 358 goto err_sync_device; 359 } 360 361 rtw89_chip_query_rxdesc(rtwdev, desc_info, skb->data, rxinfo_size); 362 363 new = rtw89_alloc_skb_for_rx(rtwdev, desc_info->pkt_size); 364 if (!new) 365 goto err_sync_device; 366 367 rx_ring->diliver_skb = new; 368 369 /* first segment has RX desc */ 370 offset = desc_info->offset + desc_info->rxd_len; 371 } else { 372 offset = sizeof(struct rtw89_pci_rxbd_info); 373 if (!new) { 374 rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "no last skb\n"); 375 goto err_sync_device; 376 } 377 } 378 if (!rtw89_skb_put_rx_data(rtwdev, fs, ls, new, skb, offset, rx_info, desc_info)) 379 goto err_sync_device; 380 rtw89_pci_sync_skb_for_device(rtwdev, skb); 381 rtw89_pci_rxbd_increase(rx_ring, 1); 382 383 if (!desc_info->ready) { 384 rtw89_warn(rtwdev, "no rx desc information\n"); 385 goto err_free_resource; 386 } 387 if (ls) { 388 rtw89_core_rx(rtwdev, desc_info, new); 389 rx_ring->diliver_skb = NULL; 390 desc_info->ready = false; 391 } 392 393 return cnt; 394 395 err_sync_device: 396 rtw89_pci_sync_skb_for_device(rtwdev, skb); 397 rtw89_pci_rxbd_increase(rx_ring, 1); 398 err_free_resource: 399 if (new) 400 dev_kfree_skb_any(new); 401 rx_ring->diliver_skb = NULL; 402 desc_info->ready = false; 403 404 return cnt; 405 } 406 407 static void rtw89_pci_rxbd_deliver(struct rtw89_dev *rtwdev, 408 struct rtw89_pci_rx_ring *rx_ring, 409 u32 cnt) 410 { 411 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; 412 u32 rx_cnt; 413 414 while (cnt && rtwdev->napi_budget_countdown > 0) { 415 rx_cnt = rtw89_pci_rxbd_deliver_skbs(rtwdev, rx_ring); 416 if (!rx_cnt) { 417 rtw89_err(rtwdev, "failed to deliver RXBD skb\n"); 418 419 /* skip the rest RXBD bufs */ 420 rtw89_pci_rxbd_increase(rx_ring, cnt); 421 break; 422 } 423 424 cnt -= rx_cnt; 425 } 426 427 rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp); 428 } 429 430 static int rtw89_pci_poll_rxq_dma(struct rtw89_dev *rtwdev, 431 struct rtw89_pci *rtwpci, int budget) 432 { 433 struct rtw89_pci_rx_ring *rx_ring; 434 int countdown = rtwdev->napi_budget_countdown; 435 u32 cnt; 436 437 rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RXQ]; 438 439 cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring); 440 if (!cnt) 441 return 0; 442 443 cnt = min_t(u32, budget, cnt); 444 445 rtw89_pci_rxbd_deliver(rtwdev, rx_ring, cnt); 446 447 /* In case of flushing pending SKBs, the countdown may exceed. */ 448 if (rtwdev->napi_budget_countdown <= 0) 449 return budget; 450 451 return budget - countdown; 452 } 453 454 static void rtw89_pci_tx_status(struct rtw89_dev *rtwdev, 455 struct rtw89_pci_tx_ring *tx_ring, 456 struct sk_buff *skb, u8 tx_status) 457 { 458 struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb); 459 struct ieee80211_tx_info *info; 460 461 rtw89_core_tx_wait_complete(rtwdev, skb_data, tx_status == RTW89_TX_DONE); 462 463 info = IEEE80211_SKB_CB(skb); 464 ieee80211_tx_info_clear_status(info); 465 466 if (info->flags & IEEE80211_TX_CTL_NO_ACK) 467 info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; 468 if (tx_status == RTW89_TX_DONE) { 469 info->flags |= IEEE80211_TX_STAT_ACK; 470 tx_ring->tx_acked++; 471 } else { 472 if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) 473 rtw89_debug(rtwdev, RTW89_DBG_FW, 474 "failed to TX of status %x\n", tx_status); 475 switch (tx_status) { 476 case RTW89_TX_RETRY_LIMIT: 477 tx_ring->tx_retry_lmt++; 478 break; 479 case RTW89_TX_LIFE_TIME: 480 tx_ring->tx_life_time++; 481 break; 482 case RTW89_TX_MACID_DROP: 483 tx_ring->tx_mac_id_drop++; 484 break; 485 default: 486 rtw89_warn(rtwdev, "invalid TX status %x\n", tx_status); 487 break; 488 } 489 } 490 491 ieee80211_tx_status_ni(rtwdev->hw, skb); 492 } 493 494 static void rtw89_pci_reclaim_txbd(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring) 495 { 496 struct rtw89_pci_tx_wd *txwd; 497 u32 cnt; 498 499 cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring); 500 while (cnt--) { 501 txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list); 502 if (!txwd) { 503 rtw89_warn(rtwdev, "No busy txwd pages available\n"); 504 break; 505 } 506 507 list_del_init(&txwd->list); 508 509 /* this skb has been freed by RPP */ 510 if (skb_queue_len(&txwd->queue) == 0) 511 rtw89_pci_enqueue_txwd(tx_ring, txwd); 512 } 513 } 514 515 static void rtw89_pci_release_busy_txwd(struct rtw89_dev *rtwdev, 516 struct rtw89_pci_tx_ring *tx_ring) 517 { 518 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 519 struct rtw89_pci_tx_wd *txwd; 520 int i; 521 522 for (i = 0; i < wd_ring->page_num; i++) { 523 txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list); 524 if (!txwd) 525 break; 526 527 list_del_init(&txwd->list); 528 } 529 } 530 531 static void rtw89_pci_release_txwd_skb(struct rtw89_dev *rtwdev, 532 struct rtw89_pci_tx_ring *tx_ring, 533 struct rtw89_pci_tx_wd *txwd, u16 seq, 534 u8 tx_status) 535 { 536 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 537 struct rtw89_pci_tx_data *tx_data; 538 struct sk_buff *skb, *tmp; 539 u8 txch = tx_ring->txch; 540 541 if (!list_empty(&txwd->list)) { 542 rtw89_pci_reclaim_txbd(rtwdev, tx_ring); 543 /* In low power mode, RPP can receive before updating of TX BD. 544 * In normal mode, it should not happen so give it a warning. 545 */ 546 if (!rtwpci->low_power && !list_empty(&txwd->list)) 547 rtw89_warn(rtwdev, "queue %d txwd %d is not idle\n", 548 txch, seq); 549 } 550 551 skb_queue_walk_safe(&txwd->queue, skb, tmp) { 552 skb_unlink(skb, &txwd->queue); 553 554 tx_data = RTW89_PCI_TX_SKB_CB(skb); 555 dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len, 556 DMA_TO_DEVICE); 557 558 rtw89_pci_tx_status(rtwdev, tx_ring, skb, tx_status); 559 } 560 561 if (list_empty(&txwd->list)) 562 rtw89_pci_enqueue_txwd(tx_ring, txwd); 563 } 564 565 static void rtw89_pci_release_rpp(struct rtw89_dev *rtwdev, 566 struct rtw89_pci_rpp_fmt *rpp) 567 { 568 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 569 struct rtw89_pci_tx_ring *tx_ring; 570 struct rtw89_pci_tx_wd_ring *wd_ring; 571 struct rtw89_pci_tx_wd *txwd; 572 u16 seq; 573 u8 qsel, tx_status, txch; 574 575 seq = le32_get_bits(rpp->dword, RTW89_PCI_RPP_SEQ); 576 qsel = le32_get_bits(rpp->dword, RTW89_PCI_RPP_QSEL); 577 tx_status = le32_get_bits(rpp->dword, RTW89_PCI_RPP_TX_STATUS); 578 txch = rtw89_core_get_ch_dma(rtwdev, qsel); 579 580 if (txch == RTW89_TXCH_CH12) { 581 rtw89_warn(rtwdev, "should no fwcmd release report\n"); 582 return; 583 } 584 585 tx_ring = &rtwpci->tx_rings[txch]; 586 wd_ring = &tx_ring->wd_ring; 587 txwd = &wd_ring->pages[seq]; 588 589 rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, seq, tx_status); 590 } 591 592 static void rtw89_pci_release_pending_txwd_skb(struct rtw89_dev *rtwdev, 593 struct rtw89_pci_tx_ring *tx_ring) 594 { 595 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 596 struct rtw89_pci_tx_wd *txwd; 597 int i; 598 599 for (i = 0; i < wd_ring->page_num; i++) { 600 txwd = &wd_ring->pages[i]; 601 602 if (!list_empty(&txwd->list)) 603 continue; 604 605 rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, i, RTW89_TX_MACID_DROP); 606 } 607 } 608 609 static u32 rtw89_pci_release_tx_skbs(struct rtw89_dev *rtwdev, 610 struct rtw89_pci_rx_ring *rx_ring, 611 u32 max_cnt) 612 { 613 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; 614 struct rtw89_pci_rx_info *rx_info; 615 struct rtw89_pci_rpp_fmt *rpp; 616 struct rtw89_rx_desc_info desc_info = {}; 617 struct sk_buff *skb; 618 u32 cnt = 0; 619 u32 rpp_size = sizeof(struct rtw89_pci_rpp_fmt); 620 u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info); 621 u32 skb_idx; 622 u32 offset; 623 int ret; 624 625 skb_idx = rtw89_pci_get_rx_skb_idx(rtwdev, bd_ring); 626 skb = rx_ring->buf[skb_idx]; 627 628 ret = rtw89_pci_sync_skb_for_device_and_validate_rx_info(rtwdev, rx_ring, skb); 629 if (ret) { 630 rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n", 631 bd_ring->wp, ret); 632 goto err_sync_device; 633 } 634 635 rx_info = RTW89_PCI_RX_SKB_CB(skb); 636 if (!rx_info->fs || !rx_info->ls) { 637 rtw89_err(rtwdev, "cannot process RP frame not set FS/LS\n"); 638 return cnt; 639 } 640 641 rtw89_chip_query_rxdesc(rtwdev, &desc_info, skb->data, rxinfo_size); 642 643 /* first segment has RX desc */ 644 offset = desc_info.offset + desc_info.rxd_len; 645 for (; offset + rpp_size <= rx_info->len; offset += rpp_size) { 646 rpp = (struct rtw89_pci_rpp_fmt *)(skb->data + offset); 647 rtw89_pci_release_rpp(rtwdev, rpp); 648 } 649 650 rtw89_pci_sync_skb_for_device(rtwdev, skb); 651 rtw89_pci_rxbd_increase(rx_ring, 1); 652 cnt++; 653 654 return cnt; 655 656 err_sync_device: 657 rtw89_pci_sync_skb_for_device(rtwdev, skb); 658 return 0; 659 } 660 661 static void rtw89_pci_release_tx(struct rtw89_dev *rtwdev, 662 struct rtw89_pci_rx_ring *rx_ring, 663 u32 cnt) 664 { 665 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; 666 u32 release_cnt; 667 668 while (cnt) { 669 release_cnt = rtw89_pci_release_tx_skbs(rtwdev, rx_ring, cnt); 670 if (!release_cnt) { 671 rtw89_err(rtwdev, "failed to release TX skbs\n"); 672 673 /* skip the rest RXBD bufs */ 674 rtw89_pci_rxbd_increase(rx_ring, cnt); 675 break; 676 } 677 678 cnt -= release_cnt; 679 } 680 681 rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp); 682 } 683 684 static int rtw89_pci_poll_rpq_dma(struct rtw89_dev *rtwdev, 685 struct rtw89_pci *rtwpci, int budget) 686 { 687 struct rtw89_pci_rx_ring *rx_ring; 688 u32 cnt; 689 int work_done; 690 691 rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ]; 692 693 spin_lock_bh(&rtwpci->trx_lock); 694 695 cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring); 696 if (cnt == 0) 697 goto out_unlock; 698 699 rtw89_pci_release_tx(rtwdev, rx_ring, cnt); 700 701 out_unlock: 702 spin_unlock_bh(&rtwpci->trx_lock); 703 704 /* always release all RPQ */ 705 work_done = min_t(int, cnt, budget); 706 rtwdev->napi_budget_countdown -= work_done; 707 708 return work_done; 709 } 710 711 static void rtw89_pci_isr_rxd_unavail(struct rtw89_dev *rtwdev, 712 struct rtw89_pci *rtwpci) 713 { 714 struct rtw89_pci_rx_ring *rx_ring; 715 struct rtw89_pci_dma_ring *bd_ring; 716 u32 reg_idx; 717 u16 hw_idx, hw_idx_next, host_idx; 718 int i; 719 720 for (i = 0; i < RTW89_RXCH_NUM; i++) { 721 rx_ring = &rtwpci->rx_rings[i]; 722 bd_ring = &rx_ring->bd_ring; 723 724 reg_idx = rtw89_read32(rtwdev, bd_ring->addr.idx); 725 hw_idx = FIELD_GET(TXBD_HW_IDX_MASK, reg_idx); 726 host_idx = FIELD_GET(TXBD_HOST_IDX_MASK, reg_idx); 727 hw_idx_next = (hw_idx + 1) % bd_ring->len; 728 729 if (hw_idx_next == host_idx) 730 rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "%d RXD unavailable\n", i); 731 732 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 733 "%d RXD unavailable, idx=0x%08x, len=%d\n", 734 i, reg_idx, bd_ring->len); 735 } 736 } 737 738 void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev, 739 struct rtw89_pci *rtwpci, 740 struct rtw89_pci_isrs *isrs) 741 { 742 isrs->halt_c2h_isrs = rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs; 743 isrs->isrs[0] = rtw89_read32(rtwdev, R_AX_PCIE_HISR00) & rtwpci->intrs[0]; 744 isrs->isrs[1] = rtw89_read32(rtwdev, R_AX_PCIE_HISR10) & rtwpci->intrs[1]; 745 746 rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs); 747 rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isrs->isrs[0]); 748 rtw89_write32(rtwdev, R_AX_PCIE_HISR10, isrs->isrs[1]); 749 } 750 EXPORT_SYMBOL(rtw89_pci_recognize_intrs); 751 752 void rtw89_pci_recognize_intrs_v1(struct rtw89_dev *rtwdev, 753 struct rtw89_pci *rtwpci, 754 struct rtw89_pci_isrs *isrs) 755 { 756 isrs->ind_isrs = rtw89_read32(rtwdev, R_AX_PCIE_HISR00_V1) & rtwpci->ind_intrs; 757 isrs->halt_c2h_isrs = isrs->ind_isrs & B_AX_HS0ISR_IND_INT_EN ? 758 rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs : 0; 759 isrs->isrs[0] = isrs->ind_isrs & B_AX_HCI_AXIDMA_INT_EN ? 760 rtw89_read32(rtwdev, R_AX_HAXI_HISR00) & rtwpci->intrs[0] : 0; 761 isrs->isrs[1] = isrs->ind_isrs & B_AX_HS1ISR_IND_INT_EN ? 762 rtw89_read32(rtwdev, R_AX_HISR1) & rtwpci->intrs[1] : 0; 763 764 if (isrs->halt_c2h_isrs) 765 rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs); 766 if (isrs->isrs[0]) 767 rtw89_write32(rtwdev, R_AX_HAXI_HISR00, isrs->isrs[0]); 768 if (isrs->isrs[1]) 769 rtw89_write32(rtwdev, R_AX_HISR1, isrs->isrs[1]); 770 } 771 EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v1); 772 773 void rtw89_pci_recognize_intrs_v2(struct rtw89_dev *rtwdev, 774 struct rtw89_pci *rtwpci, 775 struct rtw89_pci_isrs *isrs) 776 { 777 isrs->ind_isrs = rtw89_read32(rtwdev, R_BE_PCIE_HISR) & rtwpci->ind_intrs; 778 isrs->halt_c2h_isrs = isrs->ind_isrs & B_BE_HS0ISR_IND_INT ? 779 rtw89_read32(rtwdev, R_BE_HISR0) & rtwpci->halt_c2h_intrs : 0; 780 isrs->isrs[0] = isrs->ind_isrs & B_BE_HCI_AXIDMA_INT ? 781 rtw89_read32(rtwdev, R_BE_HAXI_HISR00) & rtwpci->intrs[0] : 0; 782 isrs->isrs[1] = rtw89_read32(rtwdev, R_BE_PCIE_DMA_ISR) & rtwpci->intrs[1]; 783 784 if (isrs->halt_c2h_isrs) 785 rtw89_write32(rtwdev, R_BE_HISR0, isrs->halt_c2h_isrs); 786 if (isrs->isrs[0]) 787 rtw89_write32(rtwdev, R_BE_HAXI_HISR00, isrs->isrs[0]); 788 if (isrs->isrs[1]) 789 rtw89_write32(rtwdev, R_BE_PCIE_DMA_ISR, isrs->isrs[1]); 790 rtw89_write32(rtwdev, R_BE_PCIE_HISR, isrs->ind_isrs); 791 } 792 EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v2); 793 794 void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) 795 { 796 rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs); 797 rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, rtwpci->intrs[0]); 798 rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, rtwpci->intrs[1]); 799 } 800 EXPORT_SYMBOL(rtw89_pci_enable_intr); 801 802 void rtw89_pci_disable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) 803 { 804 rtw89_write32(rtwdev, R_AX_HIMR0, 0); 805 rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, 0); 806 rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, 0); 807 } 808 EXPORT_SYMBOL(rtw89_pci_disable_intr); 809 810 void rtw89_pci_enable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) 811 { 812 rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, rtwpci->ind_intrs); 813 rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs); 814 rtw89_write32(rtwdev, R_AX_HAXI_HIMR00, rtwpci->intrs[0]); 815 rtw89_write32(rtwdev, R_AX_HIMR1, rtwpci->intrs[1]); 816 } 817 EXPORT_SYMBOL(rtw89_pci_enable_intr_v1); 818 819 void rtw89_pci_disable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) 820 { 821 rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, 0); 822 } 823 EXPORT_SYMBOL(rtw89_pci_disable_intr_v1); 824 825 void rtw89_pci_enable_intr_v2(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) 826 { 827 rtw89_write32(rtwdev, R_BE_HIMR0, rtwpci->halt_c2h_intrs); 828 rtw89_write32(rtwdev, R_BE_HAXI_HIMR00, rtwpci->intrs[0]); 829 rtw89_write32(rtwdev, R_BE_PCIE_DMA_IMR_0_V1, rtwpci->intrs[1]); 830 rtw89_write32(rtwdev, R_BE_PCIE_HIMR0, rtwpci->ind_intrs); 831 } 832 EXPORT_SYMBOL(rtw89_pci_enable_intr_v2); 833 834 void rtw89_pci_disable_intr_v2(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) 835 { 836 rtw89_write32(rtwdev, R_BE_PCIE_HIMR0, 0); 837 rtw89_write32(rtwdev, R_BE_PCIE_DMA_IMR_0_V1, 0); 838 } 839 EXPORT_SYMBOL(rtw89_pci_disable_intr_v2); 840 841 static void rtw89_pci_ops_recovery_start(struct rtw89_dev *rtwdev) 842 { 843 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 844 unsigned long flags; 845 846 spin_lock_irqsave(&rtwpci->irq_lock, flags); 847 rtw89_chip_disable_intr(rtwdev, rtwpci); 848 rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_START); 849 rtw89_chip_enable_intr(rtwdev, rtwpci); 850 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 851 } 852 853 static void rtw89_pci_ops_recovery_complete(struct rtw89_dev *rtwdev) 854 { 855 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 856 unsigned long flags; 857 858 spin_lock_irqsave(&rtwpci->irq_lock, flags); 859 rtw89_chip_disable_intr(rtwdev, rtwpci); 860 rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_COMPLETE); 861 rtw89_chip_enable_intr(rtwdev, rtwpci); 862 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 863 } 864 865 static void rtw89_pci_low_power_interrupt_handler(struct rtw89_dev *rtwdev) 866 { 867 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 868 int budget = NAPI_POLL_WEIGHT; 869 870 /* To prevent RXQ get stuck due to run out of budget. */ 871 rtwdev->napi_budget_countdown = budget; 872 873 rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, budget); 874 rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, budget); 875 } 876 877 static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev) 878 { 879 struct rtw89_dev *rtwdev = dev; 880 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 881 const struct rtw89_pci_info *info = rtwdev->pci_info; 882 const struct rtw89_pci_gen_def *gen_def = info->gen_def; 883 struct rtw89_pci_isrs isrs; 884 unsigned long flags; 885 886 spin_lock_irqsave(&rtwpci->irq_lock, flags); 887 rtw89_chip_recognize_intrs(rtwdev, rtwpci, &isrs); 888 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 889 890 if (unlikely(isrs.isrs[0] & gen_def->isr_rdu)) 891 rtw89_pci_isr_rxd_unavail(rtwdev, rtwpci); 892 893 if (unlikely(isrs.halt_c2h_isrs & gen_def->isr_halt_c2h)) 894 rtw89_ser_notify(rtwdev, rtw89_mac_get_err_status(rtwdev)); 895 896 if (unlikely(isrs.halt_c2h_isrs & gen_def->isr_wdt_timeout)) 897 rtw89_ser_notify(rtwdev, MAC_AX_ERR_L2_ERR_WDT_TIMEOUT_INT); 898 899 if (unlikely(rtwpci->under_recovery)) 900 goto enable_intr; 901 902 if (unlikely(rtwpci->low_power)) { 903 rtw89_pci_low_power_interrupt_handler(rtwdev); 904 goto enable_intr; 905 } 906 907 if (likely(rtwpci->running)) { 908 local_bh_disable(); 909 napi_schedule(&rtwdev->napi); 910 local_bh_enable(); 911 } 912 913 return IRQ_HANDLED; 914 915 enable_intr: 916 spin_lock_irqsave(&rtwpci->irq_lock, flags); 917 if (likely(rtwpci->running)) 918 rtw89_chip_enable_intr(rtwdev, rtwpci); 919 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 920 return IRQ_HANDLED; 921 } 922 923 static irqreturn_t rtw89_pci_interrupt_handler(int irq, void *dev) 924 { 925 struct rtw89_dev *rtwdev = dev; 926 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 927 unsigned long flags; 928 irqreturn_t irqret = IRQ_WAKE_THREAD; 929 930 spin_lock_irqsave(&rtwpci->irq_lock, flags); 931 932 /* If interrupt event is on the road, it is still trigger interrupt 933 * even we have done pci_stop() to turn off IMR. 934 */ 935 if (unlikely(!rtwpci->running)) { 936 irqret = IRQ_HANDLED; 937 goto exit; 938 } 939 940 rtw89_chip_disable_intr(rtwdev, rtwpci); 941 exit: 942 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 943 944 return irqret; 945 } 946 947 #define DEF_TXCHADDRS_TYPE2(gen, ch_idx, txch, v...) \ 948 [RTW89_TXCH_##ch_idx] = { \ 949 .num = R_##gen##_##txch##_TXBD_NUM ##v, \ 950 .idx = R_##gen##_##txch##_TXBD_IDX ##v, \ 951 .bdram = 0, \ 952 .desa_l = R_##gen##_##txch##_TXBD_DESA_L ##v, \ 953 .desa_h = R_##gen##_##txch##_TXBD_DESA_H ##v, \ 954 } 955 956 #define DEF_TXCHADDRS_TYPE1(info, txch, v...) \ 957 [RTW89_TXCH_##txch] = { \ 958 .num = R_AX_##txch##_TXBD_NUM ##v, \ 959 .idx = R_AX_##txch##_TXBD_IDX ##v, \ 960 .bdram = R_AX_##txch##_BDRAM_CTRL ##v, \ 961 .desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \ 962 .desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \ 963 } 964 965 #define DEF_TXCHADDRS(info, txch, v...) \ 966 [RTW89_TXCH_##txch] = { \ 967 .num = R_AX_##txch##_TXBD_NUM, \ 968 .idx = R_AX_##txch##_TXBD_IDX, \ 969 .bdram = R_AX_##txch##_BDRAM_CTRL ##v, \ 970 .desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \ 971 .desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \ 972 } 973 974 #define DEF_RXCHADDRS(gen, ch_idx, rxch, v...) \ 975 [RTW89_RXCH_##ch_idx] = { \ 976 .num = R_##gen##_##rxch##_RXBD_NUM ##v, \ 977 .idx = R_##gen##_##rxch##_RXBD_IDX ##v, \ 978 .desa_l = R_##gen##_##rxch##_RXBD_DESA_L ##v, \ 979 .desa_h = R_##gen##_##rxch##_RXBD_DESA_H ##v, \ 980 } 981 982 const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set = { 983 .tx = { 984 DEF_TXCHADDRS(info, ACH0), 985 DEF_TXCHADDRS(info, ACH1), 986 DEF_TXCHADDRS(info, ACH2), 987 DEF_TXCHADDRS(info, ACH3), 988 DEF_TXCHADDRS(info, ACH4), 989 DEF_TXCHADDRS(info, ACH5), 990 DEF_TXCHADDRS(info, ACH6), 991 DEF_TXCHADDRS(info, ACH7), 992 DEF_TXCHADDRS(info, CH8), 993 DEF_TXCHADDRS(info, CH9), 994 DEF_TXCHADDRS_TYPE1(info, CH10), 995 DEF_TXCHADDRS_TYPE1(info, CH11), 996 DEF_TXCHADDRS(info, CH12), 997 }, 998 .rx = { 999 DEF_RXCHADDRS(AX, RXQ, RXQ), 1000 DEF_RXCHADDRS(AX, RPQ, RPQ), 1001 }, 1002 }; 1003 EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set); 1004 1005 const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1 = { 1006 .tx = { 1007 DEF_TXCHADDRS(info, ACH0, _V1), 1008 DEF_TXCHADDRS(info, ACH1, _V1), 1009 DEF_TXCHADDRS(info, ACH2, _V1), 1010 DEF_TXCHADDRS(info, ACH3, _V1), 1011 DEF_TXCHADDRS(info, ACH4, _V1), 1012 DEF_TXCHADDRS(info, ACH5, _V1), 1013 DEF_TXCHADDRS(info, ACH6, _V1), 1014 DEF_TXCHADDRS(info, ACH7, _V1), 1015 DEF_TXCHADDRS(info, CH8, _V1), 1016 DEF_TXCHADDRS(info, CH9, _V1), 1017 DEF_TXCHADDRS_TYPE1(info, CH10, _V1), 1018 DEF_TXCHADDRS_TYPE1(info, CH11, _V1), 1019 DEF_TXCHADDRS(info, CH12, _V1), 1020 }, 1021 .rx = { 1022 DEF_RXCHADDRS(AX, RXQ, RXQ, _V1), 1023 DEF_RXCHADDRS(AX, RPQ, RPQ, _V1), 1024 }, 1025 }; 1026 EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_v1); 1027 1028 const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_be = { 1029 .tx = { 1030 DEF_TXCHADDRS_TYPE2(BE, ACH0, CH0, _V1), 1031 DEF_TXCHADDRS_TYPE2(BE, ACH1, CH1, _V1), 1032 DEF_TXCHADDRS_TYPE2(BE, ACH2, CH2, _V1), 1033 DEF_TXCHADDRS_TYPE2(BE, ACH3, CH3, _V1), 1034 DEF_TXCHADDRS_TYPE2(BE, ACH4, CH4, _V1), 1035 DEF_TXCHADDRS_TYPE2(BE, ACH5, CH5, _V1), 1036 DEF_TXCHADDRS_TYPE2(BE, ACH6, CH6, _V1), 1037 DEF_TXCHADDRS_TYPE2(BE, ACH7, CH7, _V1), 1038 DEF_TXCHADDRS_TYPE2(BE, CH8, CH8, _V1), 1039 DEF_TXCHADDRS_TYPE2(BE, CH9, CH9, _V1), 1040 DEF_TXCHADDRS_TYPE2(BE, CH10, CH10, _V1), 1041 DEF_TXCHADDRS_TYPE2(BE, CH11, CH11, _V1), 1042 DEF_TXCHADDRS_TYPE2(BE, CH12, CH12, _V1), 1043 }, 1044 .rx = { 1045 DEF_RXCHADDRS(BE, RXQ, RXQ0, _V1), 1046 DEF_RXCHADDRS(BE, RPQ, RPQ0, _V1), 1047 }, 1048 }; 1049 EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_be); 1050 1051 #undef DEF_TXCHADDRS_TYPE1 1052 #undef DEF_TXCHADDRS 1053 #undef DEF_RXCHADDRS 1054 1055 static int rtw89_pci_get_txch_addrs(struct rtw89_dev *rtwdev, 1056 enum rtw89_tx_channel txch, 1057 const struct rtw89_pci_ch_dma_addr **addr) 1058 { 1059 const struct rtw89_pci_info *info = rtwdev->pci_info; 1060 1061 if (txch >= RTW89_TXCH_NUM) 1062 return -EINVAL; 1063 1064 *addr = &info->dma_addr_set->tx[txch]; 1065 1066 return 0; 1067 } 1068 1069 static int rtw89_pci_get_rxch_addrs(struct rtw89_dev *rtwdev, 1070 enum rtw89_rx_channel rxch, 1071 const struct rtw89_pci_ch_dma_addr **addr) 1072 { 1073 const struct rtw89_pci_info *info = rtwdev->pci_info; 1074 1075 if (rxch >= RTW89_RXCH_NUM) 1076 return -EINVAL; 1077 1078 *addr = &info->dma_addr_set->rx[rxch]; 1079 1080 return 0; 1081 } 1082 1083 static u32 rtw89_pci_get_avail_txbd_num(struct rtw89_pci_tx_ring *ring) 1084 { 1085 struct rtw89_pci_dma_ring *bd_ring = &ring->bd_ring; 1086 1087 /* reserved 1 desc check ring is full or not */ 1088 if (bd_ring->rp > bd_ring->wp) 1089 return bd_ring->rp - bd_ring->wp - 1; 1090 1091 return bd_ring->len - (bd_ring->wp - bd_ring->rp) - 1; 1092 } 1093 1094 static 1095 u32 __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(struct rtw89_dev *rtwdev) 1096 { 1097 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1098 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12]; 1099 u32 cnt; 1100 1101 spin_lock_bh(&rtwpci->trx_lock); 1102 rtw89_pci_reclaim_tx_fwcmd(rtwdev, rtwpci); 1103 cnt = rtw89_pci_get_avail_txbd_num(tx_ring); 1104 spin_unlock_bh(&rtwpci->trx_lock); 1105 1106 return cnt; 1107 } 1108 1109 static 1110 u32 __rtw89_pci_check_and_reclaim_tx_resource_noio(struct rtw89_dev *rtwdev, 1111 u8 txch) 1112 { 1113 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1114 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch]; 1115 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 1116 u32 cnt; 1117 1118 spin_lock_bh(&rtwpci->trx_lock); 1119 cnt = rtw89_pci_get_avail_txbd_num(tx_ring); 1120 if (txch != RTW89_TXCH_CH12) 1121 cnt = min(cnt, wd_ring->curr_num); 1122 spin_unlock_bh(&rtwpci->trx_lock); 1123 1124 return cnt; 1125 } 1126 1127 static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev, 1128 u8 txch) 1129 { 1130 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1131 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch]; 1132 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 1133 const struct rtw89_chip_info *chip = rtwdev->chip; 1134 u32 bd_cnt, wd_cnt, min_cnt = 0; 1135 struct rtw89_pci_rx_ring *rx_ring; 1136 enum rtw89_debug_mask debug_mask; 1137 u32 cnt; 1138 1139 rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ]; 1140 1141 spin_lock_bh(&rtwpci->trx_lock); 1142 bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring); 1143 wd_cnt = wd_ring->curr_num; 1144 1145 if (wd_cnt == 0 || bd_cnt == 0) { 1146 cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring); 1147 if (cnt) 1148 rtw89_pci_release_tx(rtwdev, rx_ring, cnt); 1149 else if (wd_cnt == 0) 1150 goto out_unlock; 1151 1152 bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring); 1153 if (bd_cnt == 0) 1154 rtw89_pci_reclaim_txbd(rtwdev, tx_ring); 1155 } 1156 1157 bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring); 1158 wd_cnt = wd_ring->curr_num; 1159 min_cnt = min(bd_cnt, wd_cnt); 1160 if (min_cnt == 0) { 1161 /* This message can be frequently shown in low power mode or 1162 * high traffic with small FIFO chips, and we have recognized it as normal 1163 * behavior, so print with mask RTW89_DBG_TXRX in these situations. 1164 */ 1165 if (rtwpci->low_power || chip->small_fifo_size) 1166 debug_mask = RTW89_DBG_TXRX; 1167 else 1168 debug_mask = RTW89_DBG_UNEXP; 1169 1170 rtw89_debug(rtwdev, debug_mask, 1171 "still no tx resource after reclaim: wd_cnt=%d bd_cnt=%d\n", 1172 wd_cnt, bd_cnt); 1173 } 1174 1175 out_unlock: 1176 spin_unlock_bh(&rtwpci->trx_lock); 1177 1178 return min_cnt; 1179 } 1180 1181 static u32 rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev, 1182 u8 txch) 1183 { 1184 if (rtwdev->hci.paused) 1185 return __rtw89_pci_check_and_reclaim_tx_resource_noio(rtwdev, txch); 1186 1187 if (txch == RTW89_TXCH_CH12) 1188 return __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(rtwdev); 1189 1190 return __rtw89_pci_check_and_reclaim_tx_resource(rtwdev, txch); 1191 } 1192 1193 static void __rtw89_pci_tx_kick_off(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring) 1194 { 1195 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1196 struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring; 1197 u32 host_idx, addr; 1198 1199 spin_lock_bh(&rtwpci->trx_lock); 1200 1201 addr = bd_ring->addr.idx; 1202 host_idx = bd_ring->wp; 1203 rtw89_write16(rtwdev, addr, host_idx); 1204 1205 spin_unlock_bh(&rtwpci->trx_lock); 1206 } 1207 1208 static void rtw89_pci_tx_bd_ring_update(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring, 1209 int n_txbd) 1210 { 1211 struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring; 1212 u32 host_idx, len; 1213 1214 len = bd_ring->len; 1215 host_idx = bd_ring->wp + n_txbd; 1216 host_idx = host_idx < len ? host_idx : host_idx - len; 1217 1218 bd_ring->wp = host_idx; 1219 } 1220 1221 static void rtw89_pci_ops_tx_kick_off(struct rtw89_dev *rtwdev, u8 txch) 1222 { 1223 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1224 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch]; 1225 1226 if (rtwdev->hci.paused) { 1227 set_bit(txch, rtwpci->kick_map); 1228 return; 1229 } 1230 1231 __rtw89_pci_tx_kick_off(rtwdev, tx_ring); 1232 } 1233 1234 static void rtw89_pci_tx_kick_off_pending(struct rtw89_dev *rtwdev) 1235 { 1236 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1237 struct rtw89_pci_tx_ring *tx_ring; 1238 int txch; 1239 1240 for (txch = 0; txch < RTW89_TXCH_NUM; txch++) { 1241 if (!test_and_clear_bit(txch, rtwpci->kick_map)) 1242 continue; 1243 1244 tx_ring = &rtwpci->tx_rings[txch]; 1245 __rtw89_pci_tx_kick_off(rtwdev, tx_ring); 1246 } 1247 } 1248 1249 static void __pci_flush_txch(struct rtw89_dev *rtwdev, u8 txch, bool drop) 1250 { 1251 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1252 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch]; 1253 struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring; 1254 u32 cur_idx, cur_rp; 1255 u8 i; 1256 1257 /* Because the time taked by the I/O is a bit dynamic, it's hard to 1258 * define a reasonable fixed total timeout to use read_poll_timeout* 1259 * helper. Instead, we can ensure a reasonable polling times, so we 1260 * just use for loop with udelay here. 1261 */ 1262 for (i = 0; i < 60; i++) { 1263 cur_idx = rtw89_read32(rtwdev, bd_ring->addr.idx); 1264 cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx); 1265 if (cur_rp == bd_ring->wp) 1266 return; 1267 1268 udelay(1); 1269 } 1270 1271 if (!drop) 1272 rtw89_info(rtwdev, "timed out to flush pci txch: %d\n", txch); 1273 } 1274 1275 static void __rtw89_pci_ops_flush_txchs(struct rtw89_dev *rtwdev, u32 txchs, 1276 bool drop) 1277 { 1278 const struct rtw89_pci_info *info = rtwdev->pci_info; 1279 u8 i; 1280 1281 for (i = 0; i < RTW89_TXCH_NUM; i++) { 1282 /* It may be unnecessary to flush FWCMD queue. */ 1283 if (i == RTW89_TXCH_CH12) 1284 continue; 1285 if (info->tx_dma_ch_mask & BIT(i)) 1286 continue; 1287 1288 if (txchs & BIT(i)) 1289 __pci_flush_txch(rtwdev, i, drop); 1290 } 1291 } 1292 1293 static void rtw89_pci_ops_flush_queues(struct rtw89_dev *rtwdev, u32 queues, 1294 bool drop) 1295 { 1296 __rtw89_pci_ops_flush_txchs(rtwdev, BIT(RTW89_TXCH_NUM) - 1, drop); 1297 } 1298 1299 u32 rtw89_pci_fill_txaddr_info(struct rtw89_dev *rtwdev, 1300 void *txaddr_info_addr, u32 total_len, 1301 dma_addr_t dma, u8 *add_info_nr) 1302 { 1303 struct rtw89_pci_tx_addr_info_32 *txaddr_info = txaddr_info_addr; 1304 __le16 option; 1305 1306 txaddr_info->length = cpu_to_le16(total_len); 1307 option = cpu_to_le16(RTW89_PCI_ADDR_MSDU_LS | RTW89_PCI_ADDR_NUM(1)); 1308 option |= le16_encode_bits(upper_32_bits(dma), RTW89_PCI_ADDR_HIGH_MASK); 1309 txaddr_info->option = option; 1310 txaddr_info->dma = cpu_to_le32(dma); 1311 1312 *add_info_nr = 1; 1313 1314 return sizeof(*txaddr_info); 1315 } 1316 EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info); 1317 1318 u32 rtw89_pci_fill_txaddr_info_v1(struct rtw89_dev *rtwdev, 1319 void *txaddr_info_addr, u32 total_len, 1320 dma_addr_t dma, u8 *add_info_nr) 1321 { 1322 struct rtw89_pci_tx_addr_info_32_v1 *txaddr_info = txaddr_info_addr; 1323 u32 remain = total_len; 1324 u32 len; 1325 u16 length_option; 1326 int n; 1327 1328 for (n = 0; n < RTW89_TXADDR_INFO_NR_V1 && remain; n++) { 1329 len = remain >= TXADDR_INFO_LENTHG_V1_MAX ? 1330 TXADDR_INFO_LENTHG_V1_MAX : remain; 1331 remain -= len; 1332 1333 length_option = FIELD_PREP(B_PCIADDR_LEN_V1_MASK, len) | 1334 FIELD_PREP(B_PCIADDR_HIGH_SEL_V1_MASK, 0) | 1335 FIELD_PREP(B_PCIADDR_LS_V1_MASK, remain == 0); 1336 length_option |= u16_encode_bits(upper_32_bits(dma), 1337 B_PCIADDR_HIGH_SEL_V1_MASK); 1338 txaddr_info->length_opt = cpu_to_le16(length_option); 1339 txaddr_info->dma_low_lsb = cpu_to_le16(FIELD_GET(GENMASK(15, 0), dma)); 1340 txaddr_info->dma_low_msb = cpu_to_le16(FIELD_GET(GENMASK(31, 16), dma)); 1341 1342 dma += len; 1343 txaddr_info++; 1344 } 1345 1346 WARN_ONCE(remain, "length overflow remain=%u total_len=%u", 1347 remain, total_len); 1348 1349 *add_info_nr = n; 1350 1351 return n * sizeof(*txaddr_info); 1352 } 1353 EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info_v1); 1354 1355 static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev, 1356 struct rtw89_pci_tx_ring *tx_ring, 1357 struct rtw89_pci_tx_wd *txwd, 1358 struct rtw89_core_tx_request *tx_req) 1359 { 1360 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1361 const struct rtw89_chip_info *chip = rtwdev->chip; 1362 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; 1363 struct rtw89_pci_tx_wp_info *txwp_info; 1364 void *txaddr_info_addr; 1365 struct pci_dev *pdev = rtwpci->pdev; 1366 struct sk_buff *skb = tx_req->skb; 1367 struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb); 1368 struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb); 1369 bool en_wd_info = desc_info->en_wd_info; 1370 u32 txwd_len; 1371 u32 txwp_len; 1372 u32 txaddr_info_len; 1373 dma_addr_t dma; 1374 int ret; 1375 1376 dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE); 1377 if (dma_mapping_error(&pdev->dev, dma)) { 1378 rtw89_err(rtwdev, "failed to map skb dma data\n"); 1379 ret = -EBUSY; 1380 goto err; 1381 } 1382 1383 tx_data->dma = dma; 1384 rcu_assign_pointer(skb_data->wait, NULL); 1385 1386 txwp_len = sizeof(*txwp_info); 1387 txwd_len = chip->txwd_body_size; 1388 txwd_len += en_wd_info ? chip->txwd_info_size : 0; 1389 1390 txwp_info = txwd->vaddr + txwd_len; 1391 txwp_info->seq0 = cpu_to_le16(txwd->seq | RTW89_PCI_TXWP_VALID); 1392 txwp_info->seq1 = 0; 1393 txwp_info->seq2 = 0; 1394 txwp_info->seq3 = 0; 1395 1396 tx_ring->tx_cnt++; 1397 txaddr_info_addr = txwd->vaddr + txwd_len + txwp_len; 1398 txaddr_info_len = 1399 rtw89_chip_fill_txaddr_info(rtwdev, txaddr_info_addr, skb->len, 1400 dma, &desc_info->addr_info_nr); 1401 1402 txwd->len = txwd_len + txwp_len + txaddr_info_len; 1403 1404 rtw89_chip_fill_txdesc(rtwdev, desc_info, txwd->vaddr); 1405 1406 skb_queue_tail(&txwd->queue, skb); 1407 1408 return 0; 1409 1410 err: 1411 return ret; 1412 } 1413 1414 static int rtw89_pci_fwcmd_submit(struct rtw89_dev *rtwdev, 1415 struct rtw89_pci_tx_ring *tx_ring, 1416 struct rtw89_pci_tx_bd_32 *txbd, 1417 struct rtw89_core_tx_request *tx_req) 1418 { 1419 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1420 const struct rtw89_chip_info *chip = rtwdev->chip; 1421 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; 1422 void *txdesc; 1423 int txdesc_size = chip->h2c_desc_size; 1424 struct pci_dev *pdev = rtwpci->pdev; 1425 struct sk_buff *skb = tx_req->skb; 1426 struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb); 1427 dma_addr_t dma; 1428 __le16 opt; 1429 1430 txdesc = skb_push(skb, txdesc_size); 1431 memset(txdesc, 0, txdesc_size); 1432 rtw89_chip_fill_txdesc_fwcmd(rtwdev, desc_info, txdesc); 1433 1434 dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE); 1435 if (dma_mapping_error(&pdev->dev, dma)) { 1436 rtw89_err(rtwdev, "failed to map fwcmd dma data\n"); 1437 return -EBUSY; 1438 } 1439 1440 tx_data->dma = dma; 1441 opt = cpu_to_le16(RTW89_PCI_TXBD_OPT_LS); 1442 opt |= le16_encode_bits(upper_32_bits(dma), RTW89_PCI_TXBD_OPT_DMA_HI); 1443 txbd->opt = opt; 1444 txbd->length = cpu_to_le16(skb->len); 1445 txbd->dma = cpu_to_le32(tx_data->dma); 1446 skb_queue_tail(&rtwpci->h2c_queue, skb); 1447 1448 rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1); 1449 1450 return 0; 1451 } 1452 1453 static int rtw89_pci_txbd_submit(struct rtw89_dev *rtwdev, 1454 struct rtw89_pci_tx_ring *tx_ring, 1455 struct rtw89_pci_tx_bd_32 *txbd, 1456 struct rtw89_core_tx_request *tx_req) 1457 { 1458 struct rtw89_pci_tx_wd *txwd; 1459 __le16 opt; 1460 int ret; 1461 1462 /* FWCMD queue doesn't have wd pages. Instead, it submits the CMD 1463 * buffer with WD BODY only. So here we don't need to check the free 1464 * pages of the wd ring. 1465 */ 1466 if (tx_ring->txch == RTW89_TXCH_CH12) 1467 return rtw89_pci_fwcmd_submit(rtwdev, tx_ring, txbd, tx_req); 1468 1469 txwd = rtw89_pci_dequeue_txwd(tx_ring); 1470 if (!txwd) { 1471 rtw89_err(rtwdev, "no available TXWD\n"); 1472 ret = -ENOSPC; 1473 goto err; 1474 } 1475 1476 ret = rtw89_pci_txwd_submit(rtwdev, tx_ring, txwd, tx_req); 1477 if (ret) { 1478 rtw89_err(rtwdev, "failed to submit TXWD %d\n", txwd->seq); 1479 goto err_enqueue_wd; 1480 } 1481 1482 list_add_tail(&txwd->list, &tx_ring->busy_pages); 1483 1484 opt = cpu_to_le16(RTW89_PCI_TXBD_OPT_LS); 1485 opt |= le16_encode_bits(upper_32_bits(txwd->paddr), RTW89_PCI_TXBD_OPT_DMA_HI); 1486 txbd->opt = opt; 1487 txbd->length = cpu_to_le16(txwd->len); 1488 txbd->dma = cpu_to_le32(txwd->paddr); 1489 1490 rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1); 1491 1492 return 0; 1493 1494 err_enqueue_wd: 1495 rtw89_pci_enqueue_txwd(tx_ring, txwd); 1496 err: 1497 return ret; 1498 } 1499 1500 static int rtw89_pci_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req, 1501 u8 txch) 1502 { 1503 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1504 struct rtw89_pci_tx_ring *tx_ring; 1505 struct rtw89_pci_tx_bd_32 *txbd; 1506 u32 n_avail_txbd; 1507 int ret = 0; 1508 1509 /* check the tx type and dma channel for fw cmd queue */ 1510 if ((txch == RTW89_TXCH_CH12 || 1511 tx_req->tx_type == RTW89_CORE_TX_TYPE_FWCMD) && 1512 (txch != RTW89_TXCH_CH12 || 1513 tx_req->tx_type != RTW89_CORE_TX_TYPE_FWCMD)) { 1514 rtw89_err(rtwdev, "only fw cmd uses dma channel 12\n"); 1515 return -EINVAL; 1516 } 1517 1518 tx_ring = &rtwpci->tx_rings[txch]; 1519 spin_lock_bh(&rtwpci->trx_lock); 1520 1521 n_avail_txbd = rtw89_pci_get_avail_txbd_num(tx_ring); 1522 if (n_avail_txbd == 0) { 1523 rtw89_err(rtwdev, "no available TXBD\n"); 1524 ret = -ENOSPC; 1525 goto err_unlock; 1526 } 1527 1528 txbd = rtw89_pci_get_next_txbd(tx_ring); 1529 ret = rtw89_pci_txbd_submit(rtwdev, tx_ring, txbd, tx_req); 1530 if (ret) { 1531 rtw89_err(rtwdev, "failed to submit TXBD\n"); 1532 goto err_unlock; 1533 } 1534 1535 spin_unlock_bh(&rtwpci->trx_lock); 1536 return 0; 1537 1538 err_unlock: 1539 spin_unlock_bh(&rtwpci->trx_lock); 1540 return ret; 1541 } 1542 1543 static int rtw89_pci_ops_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req) 1544 { 1545 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; 1546 int ret; 1547 1548 ret = rtw89_pci_tx_write(rtwdev, tx_req, desc_info->ch_dma); 1549 if (ret) { 1550 rtw89_err(rtwdev, "failed to TX Queue %d\n", desc_info->ch_dma); 1551 return ret; 1552 } 1553 1554 return 0; 1555 } 1556 1557 const struct rtw89_pci_bd_ram rtw89_bd_ram_table_dual[RTW89_TXCH_NUM] = { 1558 [RTW89_TXCH_ACH0] = {.start_idx = 0, .max_num = 5, .min_num = 2}, 1559 [RTW89_TXCH_ACH1] = {.start_idx = 5, .max_num = 5, .min_num = 2}, 1560 [RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2}, 1561 [RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2}, 1562 [RTW89_TXCH_ACH4] = {.start_idx = 20, .max_num = 5, .min_num = 2}, 1563 [RTW89_TXCH_ACH5] = {.start_idx = 25, .max_num = 5, .min_num = 2}, 1564 [RTW89_TXCH_ACH6] = {.start_idx = 30, .max_num = 5, .min_num = 2}, 1565 [RTW89_TXCH_ACH7] = {.start_idx = 35, .max_num = 5, .min_num = 2}, 1566 [RTW89_TXCH_CH8] = {.start_idx = 40, .max_num = 5, .min_num = 1}, 1567 [RTW89_TXCH_CH9] = {.start_idx = 45, .max_num = 5, .min_num = 1}, 1568 [RTW89_TXCH_CH10] = {.start_idx = 50, .max_num = 5, .min_num = 1}, 1569 [RTW89_TXCH_CH11] = {.start_idx = 55, .max_num = 5, .min_num = 1}, 1570 [RTW89_TXCH_CH12] = {.start_idx = 60, .max_num = 4, .min_num = 1}, 1571 }; 1572 EXPORT_SYMBOL(rtw89_bd_ram_table_dual); 1573 1574 const struct rtw89_pci_bd_ram rtw89_bd_ram_table_single[RTW89_TXCH_NUM] = { 1575 [RTW89_TXCH_ACH0] = {.start_idx = 0, .max_num = 5, .min_num = 2}, 1576 [RTW89_TXCH_ACH1] = {.start_idx = 5, .max_num = 5, .min_num = 2}, 1577 [RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2}, 1578 [RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2}, 1579 [RTW89_TXCH_CH8] = {.start_idx = 20, .max_num = 4, .min_num = 1}, 1580 [RTW89_TXCH_CH9] = {.start_idx = 24, .max_num = 4, .min_num = 1}, 1581 [RTW89_TXCH_CH12] = {.start_idx = 28, .max_num = 4, .min_num = 1}, 1582 }; 1583 EXPORT_SYMBOL(rtw89_bd_ram_table_single); 1584 1585 static void rtw89_pci_init_wp_16sel(struct rtw89_dev *rtwdev) 1586 { 1587 const struct rtw89_pci_info *info = rtwdev->pci_info; 1588 u32 addr = info->wp_sel_addr; 1589 u32 val; 1590 int i; 1591 1592 if (!info->wp_sel_addr) 1593 return; 1594 1595 for (i = 0; i < 16; i += 4) { 1596 val = u32_encode_bits(i + 0, MASKBYTE0) | 1597 u32_encode_bits(i + 1, MASKBYTE1) | 1598 u32_encode_bits(i + 2, MASKBYTE2) | 1599 u32_encode_bits(i + 3, MASKBYTE3); 1600 rtw89_write32(rtwdev, addr + i, val); 1601 } 1602 } 1603 1604 static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev) 1605 { 1606 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1607 const struct rtw89_pci_info *info = rtwdev->pci_info; 1608 const struct rtw89_pci_bd_ram *bd_ram_table = *info->bd_ram_table; 1609 struct rtw89_pci_tx_ring *tx_ring; 1610 struct rtw89_pci_rx_ring *rx_ring; 1611 struct rtw89_pci_dma_ring *bd_ring; 1612 const struct rtw89_pci_bd_ram *bd_ram; 1613 u32 addr_num; 1614 u32 addr_idx; 1615 u32 addr_bdram; 1616 u32 addr_desa_l; 1617 u32 val32; 1618 int i; 1619 1620 for (i = 0; i < RTW89_TXCH_NUM; i++) { 1621 if (info->tx_dma_ch_mask & BIT(i)) 1622 continue; 1623 1624 tx_ring = &rtwpci->tx_rings[i]; 1625 bd_ring = &tx_ring->bd_ring; 1626 bd_ram = bd_ram_table ? &bd_ram_table[i] : NULL; 1627 addr_num = bd_ring->addr.num; 1628 addr_bdram = bd_ring->addr.bdram; 1629 addr_desa_l = bd_ring->addr.desa_l; 1630 bd_ring->wp = 0; 1631 bd_ring->rp = 0; 1632 1633 rtw89_write16(rtwdev, addr_num, bd_ring->len); 1634 if (addr_bdram && bd_ram) { 1635 val32 = FIELD_PREP(BDRAM_SIDX_MASK, bd_ram->start_idx) | 1636 FIELD_PREP(BDRAM_MAX_MASK, bd_ram->max_num) | 1637 FIELD_PREP(BDRAM_MIN_MASK, bd_ram->min_num); 1638 1639 rtw89_write32(rtwdev, addr_bdram, val32); 1640 } 1641 rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma); 1642 rtw89_write32(rtwdev, addr_desa_l + 4, upper_32_bits(bd_ring->dma)); 1643 } 1644 1645 for (i = 0; i < RTW89_RXCH_NUM; i++) { 1646 rx_ring = &rtwpci->rx_rings[i]; 1647 bd_ring = &rx_ring->bd_ring; 1648 addr_num = bd_ring->addr.num; 1649 addr_idx = bd_ring->addr.idx; 1650 addr_desa_l = bd_ring->addr.desa_l; 1651 if (info->rx_ring_eq_is_full) 1652 bd_ring->wp = bd_ring->len - 1; 1653 else 1654 bd_ring->wp = 0; 1655 bd_ring->rp = 0; 1656 rx_ring->diliver_skb = NULL; 1657 rx_ring->diliver_desc.ready = false; 1658 rx_ring->target_rx_tag = 0; 1659 1660 rtw89_write16(rtwdev, addr_num, bd_ring->len); 1661 rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma); 1662 rtw89_write32(rtwdev, addr_desa_l + 4, upper_32_bits(bd_ring->dma)); 1663 1664 if (info->rx_ring_eq_is_full) 1665 rtw89_write16(rtwdev, addr_idx, bd_ring->wp); 1666 } 1667 1668 rtw89_pci_init_wp_16sel(rtwdev); 1669 } 1670 1671 static void rtw89_pci_release_tx_ring(struct rtw89_dev *rtwdev, 1672 struct rtw89_pci_tx_ring *tx_ring) 1673 { 1674 rtw89_pci_release_busy_txwd(rtwdev, tx_ring); 1675 rtw89_pci_release_pending_txwd_skb(rtwdev, tx_ring); 1676 } 1677 1678 void rtw89_pci_ops_reset(struct rtw89_dev *rtwdev) 1679 { 1680 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1681 const struct rtw89_pci_info *info = rtwdev->pci_info; 1682 int txch; 1683 1684 rtw89_pci_reset_trx_rings(rtwdev); 1685 1686 spin_lock_bh(&rtwpci->trx_lock); 1687 for (txch = 0; txch < RTW89_TXCH_NUM; txch++) { 1688 if (info->tx_dma_ch_mask & BIT(txch)) 1689 continue; 1690 if (txch == RTW89_TXCH_CH12) { 1691 rtw89_pci_release_fwcmd(rtwdev, rtwpci, 1692 skb_queue_len(&rtwpci->h2c_queue), true); 1693 continue; 1694 } 1695 rtw89_pci_release_tx_ring(rtwdev, &rtwpci->tx_rings[txch]); 1696 } 1697 spin_unlock_bh(&rtwpci->trx_lock); 1698 } 1699 1700 static void rtw89_pci_enable_intr_lock(struct rtw89_dev *rtwdev) 1701 { 1702 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1703 unsigned long flags; 1704 1705 spin_lock_irqsave(&rtwpci->irq_lock, flags); 1706 rtwpci->running = true; 1707 rtw89_chip_enable_intr(rtwdev, rtwpci); 1708 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 1709 } 1710 1711 static void rtw89_pci_disable_intr_lock(struct rtw89_dev *rtwdev) 1712 { 1713 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1714 unsigned long flags; 1715 1716 spin_lock_irqsave(&rtwpci->irq_lock, flags); 1717 rtwpci->running = false; 1718 rtw89_chip_disable_intr(rtwdev, rtwpci); 1719 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 1720 } 1721 1722 static int rtw89_pci_ops_start(struct rtw89_dev *rtwdev) 1723 { 1724 rtw89_core_napi_start(rtwdev); 1725 rtw89_pci_enable_intr_lock(rtwdev); 1726 1727 return 0; 1728 } 1729 1730 static void rtw89_pci_ops_stop(struct rtw89_dev *rtwdev) 1731 { 1732 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1733 struct pci_dev *pdev = rtwpci->pdev; 1734 1735 rtw89_pci_disable_intr_lock(rtwdev); 1736 synchronize_irq(pdev->irq); 1737 rtw89_core_napi_stop(rtwdev); 1738 } 1739 1740 static void rtw89_pci_ops_pause(struct rtw89_dev *rtwdev, bool pause) 1741 { 1742 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1743 struct pci_dev *pdev = rtwpci->pdev; 1744 1745 if (pause) { 1746 rtw89_pci_disable_intr_lock(rtwdev); 1747 synchronize_irq(pdev->irq); 1748 if (test_bit(RTW89_FLAG_NAPI_RUNNING, rtwdev->flags)) 1749 napi_synchronize(&rtwdev->napi); 1750 } else { 1751 rtw89_pci_enable_intr_lock(rtwdev); 1752 rtw89_pci_tx_kick_off_pending(rtwdev); 1753 } 1754 } 1755 1756 static 1757 void rtw89_pci_switch_bd_idx_addr(struct rtw89_dev *rtwdev, bool low_power) 1758 { 1759 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1760 const struct rtw89_pci_info *info = rtwdev->pci_info; 1761 const struct rtw89_pci_bd_idx_addr *bd_idx_addr = info->bd_idx_addr_low_power; 1762 const struct rtw89_pci_ch_dma_addr_set *dma_addr_set = info->dma_addr_set; 1763 struct rtw89_pci_tx_ring *tx_ring; 1764 struct rtw89_pci_rx_ring *rx_ring; 1765 int i; 1766 1767 if (WARN(!bd_idx_addr, "only HCI with low power mode needs this\n")) 1768 return; 1769 1770 for (i = 0; i < RTW89_TXCH_NUM; i++) { 1771 tx_ring = &rtwpci->tx_rings[i]; 1772 tx_ring->bd_ring.addr.idx = low_power ? 1773 bd_idx_addr->tx_bd_addrs[i] : 1774 dma_addr_set->tx[i].idx; 1775 } 1776 1777 for (i = 0; i < RTW89_RXCH_NUM; i++) { 1778 rx_ring = &rtwpci->rx_rings[i]; 1779 rx_ring->bd_ring.addr.idx = low_power ? 1780 bd_idx_addr->rx_bd_addrs[i] : 1781 dma_addr_set->rx[i].idx; 1782 } 1783 } 1784 1785 static void rtw89_pci_ops_switch_mode(struct rtw89_dev *rtwdev, bool low_power) 1786 { 1787 enum rtw89_pci_intr_mask_cfg cfg; 1788 1789 WARN(!rtwdev->hci.paused, "HCI isn't paused\n"); 1790 1791 cfg = low_power ? RTW89_PCI_INTR_MASK_LOW_POWER : RTW89_PCI_INTR_MASK_NORMAL; 1792 rtw89_chip_config_intr_mask(rtwdev, cfg); 1793 rtw89_pci_switch_bd_idx_addr(rtwdev, low_power); 1794 } 1795 1796 static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data); 1797 1798 static u32 rtw89_pci_ops_read32_cmac(struct rtw89_dev *rtwdev, u32 addr) 1799 { 1800 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1801 u32 val = readl(rtwpci->mmap + addr); 1802 int count; 1803 1804 for (count = 0; ; count++) { 1805 if (val != RTW89_R32_DEAD) 1806 return val; 1807 if (count >= MAC_REG_POOL_COUNT) { 1808 rtw89_warn(rtwdev, "addr %#x = %#x\n", addr, val); 1809 return RTW89_R32_DEAD; 1810 } 1811 rtw89_pci_ops_write32(rtwdev, R_AX_CK_EN, B_AX_CMAC_ALLCKEN); 1812 val = readl(rtwpci->mmap + addr); 1813 } 1814 1815 return val; 1816 } 1817 1818 static u8 rtw89_pci_ops_read8(struct rtw89_dev *rtwdev, u32 addr) 1819 { 1820 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1821 u32 addr32, val32, shift; 1822 1823 if (!ACCESS_CMAC(addr)) 1824 return readb(rtwpci->mmap + addr); 1825 1826 addr32 = addr & ~0x3; 1827 shift = (addr & 0x3) * 8; 1828 val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32); 1829 return val32 >> shift; 1830 } 1831 1832 static u16 rtw89_pci_ops_read16(struct rtw89_dev *rtwdev, u32 addr) 1833 { 1834 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1835 u32 addr32, val32, shift; 1836 1837 if (!ACCESS_CMAC(addr)) 1838 return readw(rtwpci->mmap + addr); 1839 1840 addr32 = addr & ~0x3; 1841 shift = (addr & 0x3) * 8; 1842 val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32); 1843 return val32 >> shift; 1844 } 1845 1846 static u32 rtw89_pci_ops_read32(struct rtw89_dev *rtwdev, u32 addr) 1847 { 1848 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1849 1850 if (!ACCESS_CMAC(addr)) 1851 return readl(rtwpci->mmap + addr); 1852 1853 return rtw89_pci_ops_read32_cmac(rtwdev, addr); 1854 } 1855 1856 static void rtw89_pci_ops_write8(struct rtw89_dev *rtwdev, u32 addr, u8 data) 1857 { 1858 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1859 1860 writeb(data, rtwpci->mmap + addr); 1861 } 1862 1863 static void rtw89_pci_ops_write16(struct rtw89_dev *rtwdev, u32 addr, u16 data) 1864 { 1865 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1866 1867 writew(data, rtwpci->mmap + addr); 1868 } 1869 1870 static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data) 1871 { 1872 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1873 1874 writel(data, rtwpci->mmap + addr); 1875 } 1876 1877 static void rtw89_pci_ctrl_dma_trx(struct rtw89_dev *rtwdev, bool enable) 1878 { 1879 const struct rtw89_pci_info *info = rtwdev->pci_info; 1880 1881 if (enable) 1882 rtw89_write32_set(rtwdev, info->init_cfg_reg, 1883 info->rxhci_en_bit | info->txhci_en_bit); 1884 else 1885 rtw89_write32_clr(rtwdev, info->init_cfg_reg, 1886 info->rxhci_en_bit | info->txhci_en_bit); 1887 } 1888 1889 static void rtw89_pci_ctrl_dma_io(struct rtw89_dev *rtwdev, bool enable) 1890 { 1891 const struct rtw89_pci_info *info = rtwdev->pci_info; 1892 const struct rtw89_reg_def *reg = &info->dma_io_stop; 1893 1894 if (enable) 1895 rtw89_write32_clr(rtwdev, reg->addr, reg->mask); 1896 else 1897 rtw89_write32_set(rtwdev, reg->addr, reg->mask); 1898 } 1899 1900 void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable) 1901 { 1902 rtw89_pci_ctrl_dma_io(rtwdev, enable); 1903 rtw89_pci_ctrl_dma_trx(rtwdev, enable); 1904 } 1905 1906 static int rtw89_pci_check_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 rw_bit) 1907 { 1908 u16 val; 1909 1910 rtw89_write8(rtwdev, R_AX_MDIO_CFG, addr & 0x1F); 1911 1912 val = rtw89_read16(rtwdev, R_AX_MDIO_CFG); 1913 switch (speed) { 1914 case PCIE_PHY_GEN1: 1915 if (addr < 0x20) 1916 val = u16_replace_bits(val, MDIO_PG0_G1, B_AX_MDIO_PHY_ADDR_MASK); 1917 else 1918 val = u16_replace_bits(val, MDIO_PG1_G1, B_AX_MDIO_PHY_ADDR_MASK); 1919 break; 1920 case PCIE_PHY_GEN2: 1921 if (addr < 0x20) 1922 val = u16_replace_bits(val, MDIO_PG0_G2, B_AX_MDIO_PHY_ADDR_MASK); 1923 else 1924 val = u16_replace_bits(val, MDIO_PG1_G2, B_AX_MDIO_PHY_ADDR_MASK); 1925 break; 1926 default: 1927 rtw89_err(rtwdev, "[ERR]Error Speed %d!\n", speed); 1928 return -EINVAL; 1929 } 1930 rtw89_write16(rtwdev, R_AX_MDIO_CFG, val); 1931 rtw89_write16_set(rtwdev, R_AX_MDIO_CFG, rw_bit); 1932 1933 return read_poll_timeout(rtw89_read16, val, !(val & rw_bit), 10, 2000, 1934 false, rtwdev, R_AX_MDIO_CFG); 1935 } 1936 1937 static int 1938 rtw89_read16_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 *val) 1939 { 1940 int ret; 1941 1942 ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_RFLAG); 1943 if (ret) { 1944 rtw89_err(rtwdev, "[ERR]MDIO R16 0x%X fail ret=%d!\n", addr, ret); 1945 return ret; 1946 } 1947 *val = rtw89_read16(rtwdev, R_AX_MDIO_RDATA); 1948 1949 return 0; 1950 } 1951 1952 static int 1953 rtw89_write16_mdio(struct rtw89_dev *rtwdev, u8 addr, u16 data, u8 speed) 1954 { 1955 int ret; 1956 1957 rtw89_write16(rtwdev, R_AX_MDIO_WDATA, data); 1958 ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_WFLAG); 1959 if (ret) { 1960 rtw89_err(rtwdev, "[ERR]MDIO W16 0x%X = %x fail ret=%d!\n", addr, data, ret); 1961 return ret; 1962 } 1963 1964 return 0; 1965 } 1966 1967 static int 1968 rtw89_write16_mdio_mask(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u16 data, u8 speed) 1969 { 1970 u32 shift; 1971 int ret; 1972 u16 val; 1973 1974 ret = rtw89_read16_mdio(rtwdev, addr, speed, &val); 1975 if (ret) 1976 return ret; 1977 1978 shift = __ffs(mask); 1979 val &= ~mask; 1980 val |= ((data << shift) & mask); 1981 1982 ret = rtw89_write16_mdio(rtwdev, addr, val, speed); 1983 if (ret) 1984 return ret; 1985 1986 return 0; 1987 } 1988 1989 static int rtw89_write16_mdio_set(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed) 1990 { 1991 int ret; 1992 u16 val; 1993 1994 ret = rtw89_read16_mdio(rtwdev, addr, speed, &val); 1995 if (ret) 1996 return ret; 1997 ret = rtw89_write16_mdio(rtwdev, addr, val | mask, speed); 1998 if (ret) 1999 return ret; 2000 2001 return 0; 2002 } 2003 2004 static int rtw89_write16_mdio_clr(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed) 2005 { 2006 int ret; 2007 u16 val; 2008 2009 ret = rtw89_read16_mdio(rtwdev, addr, speed, &val); 2010 if (ret) 2011 return ret; 2012 ret = rtw89_write16_mdio(rtwdev, addr, val & ~mask, speed); 2013 if (ret) 2014 return ret; 2015 2016 return 0; 2017 } 2018 2019 static int rtw89_dbi_write8(struct rtw89_dev *rtwdev, u16 addr, u8 data) 2020 { 2021 u16 addr_2lsb = addr & B_AX_DBI_2LSB; 2022 u16 write_addr; 2023 u8 flag; 2024 int ret; 2025 2026 write_addr = addr & B_AX_DBI_ADDR_MSK; 2027 write_addr |= u16_encode_bits(BIT(addr_2lsb), B_AX_DBI_WREN_MSK); 2028 rtw89_write8(rtwdev, R_AX_DBI_WDATA + addr_2lsb, data); 2029 rtw89_write16(rtwdev, R_AX_DBI_FLAG, write_addr); 2030 rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_WFLAG >> 16); 2031 2032 ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10, 2033 10 * RTW89_PCI_WR_RETRY_CNT, false, 2034 rtwdev, R_AX_DBI_FLAG + 2); 2035 if (ret) 2036 rtw89_err(rtwdev, "failed to write DBI register, addr=0x%X\n", 2037 addr); 2038 2039 return ret; 2040 } 2041 2042 static int rtw89_dbi_read8(struct rtw89_dev *rtwdev, u16 addr, u8 *value) 2043 { 2044 u16 read_addr = addr & B_AX_DBI_ADDR_MSK; 2045 u8 flag; 2046 int ret; 2047 2048 rtw89_write16(rtwdev, R_AX_DBI_FLAG, read_addr); 2049 rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_RFLAG >> 16); 2050 2051 ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10, 2052 10 * RTW89_PCI_WR_RETRY_CNT, false, 2053 rtwdev, R_AX_DBI_FLAG + 2); 2054 if (ret) { 2055 rtw89_err(rtwdev, "failed to read DBI register, addr=0x%X\n", 2056 addr); 2057 return ret; 2058 } 2059 2060 read_addr = R_AX_DBI_RDATA + (addr & 3); 2061 *value = rtw89_read8(rtwdev, read_addr); 2062 2063 return 0; 2064 } 2065 2066 static int rtw89_pci_write_config_byte(struct rtw89_dev *rtwdev, u16 addr, 2067 u8 data) 2068 { 2069 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2070 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2071 struct pci_dev *pdev = rtwpci->pdev; 2072 int ret; 2073 2074 ret = pci_write_config_byte(pdev, addr, data); 2075 if (!ret) 2076 return 0; 2077 2078 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) 2079 ret = rtw89_dbi_write8(rtwdev, addr, data); 2080 2081 return ret; 2082 } 2083 2084 static int rtw89_pci_read_config_byte(struct rtw89_dev *rtwdev, u16 addr, 2085 u8 *value) 2086 { 2087 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2088 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2089 struct pci_dev *pdev = rtwpci->pdev; 2090 int ret; 2091 2092 ret = pci_read_config_byte(pdev, addr, value); 2093 if (!ret) 2094 return 0; 2095 2096 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) 2097 ret = rtw89_dbi_read8(rtwdev, addr, value); 2098 2099 return ret; 2100 } 2101 2102 static int rtw89_pci_config_byte_set(struct rtw89_dev *rtwdev, u16 addr, 2103 u8 bit) 2104 { 2105 u8 value; 2106 int ret; 2107 2108 ret = rtw89_pci_read_config_byte(rtwdev, addr, &value); 2109 if (ret) 2110 return ret; 2111 2112 value |= bit; 2113 ret = rtw89_pci_write_config_byte(rtwdev, addr, value); 2114 2115 return ret; 2116 } 2117 2118 static int rtw89_pci_config_byte_clr(struct rtw89_dev *rtwdev, u16 addr, 2119 u8 bit) 2120 { 2121 u8 value; 2122 int ret; 2123 2124 ret = rtw89_pci_read_config_byte(rtwdev, addr, &value); 2125 if (ret) 2126 return ret; 2127 2128 value &= ~bit; 2129 ret = rtw89_pci_write_config_byte(rtwdev, addr, value); 2130 2131 return ret; 2132 } 2133 2134 static int 2135 __get_target(struct rtw89_dev *rtwdev, u16 *target, enum rtw89_pcie_phy phy_rate) 2136 { 2137 u16 val, tar; 2138 int ret; 2139 2140 /* Enable counter */ 2141 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val); 2142 if (ret) 2143 return ret; 2144 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN, 2145 phy_rate); 2146 if (ret) 2147 return ret; 2148 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val | B_AX_CLK_CALIB_EN, 2149 phy_rate); 2150 if (ret) 2151 return ret; 2152 2153 fsleep(300); 2154 2155 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &tar); 2156 if (ret) 2157 return ret; 2158 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN, 2159 phy_rate); 2160 if (ret) 2161 return ret; 2162 2163 tar = tar & 0x0FFF; 2164 if (tar == 0 || tar == 0x0FFF) { 2165 rtw89_err(rtwdev, "[ERR]Get target failed.\n"); 2166 return -EINVAL; 2167 } 2168 2169 *target = tar; 2170 2171 return 0; 2172 } 2173 2174 static int rtw89_pci_autok_x(struct rtw89_dev *rtwdev) 2175 { 2176 int ret; 2177 2178 if (!rtw89_is_rtl885xb(rtwdev)) 2179 return 0; 2180 2181 ret = rtw89_write16_mdio_mask(rtwdev, RAC_REG_FLD_0, BAC_AUTOK_N_MASK, 2182 PCIE_AUTOK_4, PCIE_PHY_GEN1); 2183 return ret; 2184 } 2185 2186 static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en) 2187 { 2188 enum rtw89_pcie_phy phy_rate; 2189 u16 val16, mgn_set, div_set, tar; 2190 u8 val8, bdr_ori; 2191 bool l1_flag = false; 2192 int ret = 0; 2193 2194 if (!rtw89_is_rtl885xb(rtwdev)) 2195 return 0; 2196 2197 ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_PHY_RATE, &val8); 2198 if (ret) { 2199 rtw89_err(rtwdev, "[ERR]pci config read %X\n", 2200 RTW89_PCIE_PHY_RATE); 2201 return ret; 2202 } 2203 2204 if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x1) { 2205 phy_rate = PCIE_PHY_GEN1; 2206 } else if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x2) { 2207 phy_rate = PCIE_PHY_GEN2; 2208 } else { 2209 rtw89_err(rtwdev, "[ERR]PCIe PHY rate %#x not support\n", val8); 2210 return -EOPNOTSUPP; 2211 } 2212 /* Disable L1BD */ 2213 ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, &bdr_ori); 2214 if (ret) { 2215 rtw89_err(rtwdev, "[ERR]pci config read %X\n", RTW89_PCIE_L1_CTRL); 2216 return ret; 2217 } 2218 2219 if (bdr_ori & RTW89_PCIE_BIT_L1) { 2220 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, 2221 bdr_ori & ~RTW89_PCIE_BIT_L1); 2222 if (ret) { 2223 rtw89_err(rtwdev, "[ERR]pci config write %X\n", 2224 RTW89_PCIE_L1_CTRL); 2225 return ret; 2226 } 2227 l1_flag = true; 2228 } 2229 2230 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16); 2231 if (ret) { 2232 rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1); 2233 goto end; 2234 } 2235 2236 if (val16 & B_AX_CALIB_EN) { 2237 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, 2238 val16 & ~B_AX_CALIB_EN, phy_rate); 2239 if (ret) { 2240 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 2241 goto end; 2242 } 2243 } 2244 2245 if (!autook_en) 2246 goto end; 2247 /* Set div */ 2248 ret = rtw89_write16_mdio_clr(rtwdev, RAC_CTRL_PPR_V1, B_AX_DIV, phy_rate); 2249 if (ret) { 2250 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 2251 goto end; 2252 } 2253 2254 /* Obtain div and margin */ 2255 ret = __get_target(rtwdev, &tar, phy_rate); 2256 if (ret) { 2257 rtw89_err(rtwdev, "[ERR]1st get target fail %d\n", ret); 2258 goto end; 2259 } 2260 2261 mgn_set = tar * INTF_INTGRA_HOSTREF_V1 / INTF_INTGRA_MINREF_V1 - tar; 2262 2263 if (mgn_set >= 128) { 2264 div_set = 0x0003; 2265 mgn_set = 0x000F; 2266 } else if (mgn_set >= 64) { 2267 div_set = 0x0003; 2268 mgn_set >>= 3; 2269 } else if (mgn_set >= 32) { 2270 div_set = 0x0002; 2271 mgn_set >>= 2; 2272 } else if (mgn_set >= 16) { 2273 div_set = 0x0001; 2274 mgn_set >>= 1; 2275 } else if (mgn_set == 0) { 2276 rtw89_err(rtwdev, "[ERR]cal mgn is 0,tar = %d\n", tar); 2277 goto end; 2278 } else { 2279 div_set = 0x0000; 2280 } 2281 2282 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16); 2283 if (ret) { 2284 rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1); 2285 goto end; 2286 } 2287 2288 val16 |= u16_encode_bits(div_set, B_AX_DIV); 2289 2290 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val16, phy_rate); 2291 if (ret) { 2292 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 2293 goto end; 2294 } 2295 2296 ret = __get_target(rtwdev, &tar, phy_rate); 2297 if (ret) { 2298 rtw89_err(rtwdev, "[ERR]2nd get target fail %d\n", ret); 2299 goto end; 2300 } 2301 2302 rtw89_debug(rtwdev, RTW89_DBG_HCI, "[TRACE]target = 0x%X, div = 0x%X, margin = 0x%X\n", 2303 tar, div_set, mgn_set); 2304 ret = rtw89_write16_mdio(rtwdev, RAC_SET_PPR_V1, 2305 (tar & 0x0FFF) | (mgn_set << 12), phy_rate); 2306 if (ret) { 2307 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_SET_PPR_V1); 2308 goto end; 2309 } 2310 2311 /* Enable function */ 2312 ret = rtw89_write16_mdio_set(rtwdev, RAC_CTRL_PPR_V1, B_AX_CALIB_EN, phy_rate); 2313 if (ret) { 2314 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 2315 goto end; 2316 } 2317 2318 /* CLK delay = 0 */ 2319 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL, 2320 PCIE_CLKDLY_HW_0); 2321 2322 end: 2323 /* Set L1BD to ori */ 2324 if (l1_flag) { 2325 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, 2326 bdr_ori); 2327 if (ret) { 2328 rtw89_err(rtwdev, "[ERR]pci config write %X\n", 2329 RTW89_PCIE_L1_CTRL); 2330 return ret; 2331 } 2332 } 2333 2334 return ret; 2335 } 2336 2337 static int rtw89_pci_deglitch_setting(struct rtw89_dev *rtwdev) 2338 { 2339 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2340 int ret; 2341 2342 if (chip_id == RTL8852A) { 2343 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH, 2344 PCIE_PHY_GEN1); 2345 if (ret) 2346 return ret; 2347 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH, 2348 PCIE_PHY_GEN2); 2349 if (ret) 2350 return ret; 2351 } else if (chip_id == RTL8852C) { 2352 rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA24 * 2, 2353 B_AX_DEGLITCH); 2354 rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA24 * 2, 2355 B_AX_DEGLITCH); 2356 } 2357 2358 return 0; 2359 } 2360 2361 static void rtw89_pci_disable_eq(struct rtw89_dev *rtwdev) 2362 { 2363 u16 g1_oobs, g2_oobs; 2364 u32 backup_aspm; 2365 u32 phy_offset; 2366 u16 oobs_val; 2367 int ret; 2368 2369 if (rtwdev->chip->chip_id != RTL8852C) 2370 return; 2371 2372 g1_oobs = rtw89_read16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G1 + 2373 RAC_ANA09 * RAC_MULT, BAC_OOBS_SEL); 2374 g2_oobs = rtw89_read16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G2 + 2375 RAC_ANA09 * RAC_MULT, BAC_OOBS_SEL); 2376 if (g1_oobs && g2_oobs) 2377 return; 2378 2379 backup_aspm = rtw89_read32(rtwdev, R_AX_PCIE_MIX_CFG_V1); 2380 rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK); 2381 2382 ret = rtw89_pci_get_phy_offset_by_link_speed(rtwdev, &phy_offset); 2383 if (ret) 2384 goto out; 2385 2386 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0D * RAC_MULT, BAC_RX_TEST_EN); 2387 rtw89_write16(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT, ADDR_SEL_PINOUT_DIS_VAL); 2388 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT, B_PCIE_BIT_RD_SEL); 2389 2390 oobs_val = rtw89_read16_mask(rtwdev, phy_offset + RAC_ANA1F * RAC_MULT, 2391 OOBS_LEVEL_MASK); 2392 2393 rtw89_write16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA03 * RAC_MULT, 2394 OOBS_SEN_MASK, oobs_val); 2395 rtw89_write16_set(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA09 * RAC_MULT, 2396 BAC_OOBS_SEL); 2397 2398 rtw89_write16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA03 * RAC_MULT, 2399 OOBS_SEN_MASK, oobs_val); 2400 rtw89_write16_set(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA09 * RAC_MULT, 2401 BAC_OOBS_SEL); 2402 2403 out: 2404 rtw89_write32(rtwdev, R_AX_PCIE_MIX_CFG_V1, backup_aspm); 2405 } 2406 2407 static void rtw89_pci_ber(struct rtw89_dev *rtwdev) 2408 { 2409 u32 phy_offset; 2410 2411 if (!test_bit(RTW89_QUIRK_PCI_BER, rtwdev->quirks)) 2412 return; 2413 2414 phy_offset = R_RAC_DIRECT_OFFSET_G1; 2415 rtw89_write16(rtwdev, phy_offset + RAC_ANA1E * RAC_MULT, RAC_ANA1E_G1_VAL); 2416 rtw89_write16(rtwdev, phy_offset + RAC_ANA2E * RAC_MULT, RAC_ANA2E_VAL); 2417 2418 phy_offset = R_RAC_DIRECT_OFFSET_G2; 2419 rtw89_write16(rtwdev, phy_offset + RAC_ANA1E * RAC_MULT, RAC_ANA1E_G2_VAL); 2420 rtw89_write16(rtwdev, phy_offset + RAC_ANA2E * RAC_MULT, RAC_ANA2E_VAL); 2421 } 2422 2423 static void rtw89_pci_rxdma_prefth(struct rtw89_dev *rtwdev) 2424 { 2425 if (rtwdev->chip->chip_id != RTL8852A) 2426 return; 2427 2428 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_DIS_RXDMA_PRE); 2429 } 2430 2431 static void rtw89_pci_l1off_pwroff(struct rtw89_dev *rtwdev) 2432 { 2433 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2434 2435 if (chip_id != RTL8852A && !rtw89_is_rtl885xb(rtwdev)) 2436 return; 2437 2438 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL, B_AX_L1OFF_PWR_OFF_EN); 2439 } 2440 2441 static u32 rtw89_pci_l2_rxen_lat(struct rtw89_dev *rtwdev) 2442 { 2443 int ret; 2444 2445 if (rtwdev->chip->chip_id != RTL8852A) 2446 return 0; 2447 2448 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN, 2449 PCIE_PHY_GEN1); 2450 if (ret) 2451 return ret; 2452 2453 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN, 2454 PCIE_PHY_GEN2); 2455 if (ret) 2456 return ret; 2457 2458 return 0; 2459 } 2460 2461 static void rtw89_pci_aphy_pwrcut(struct rtw89_dev *rtwdev) 2462 { 2463 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2464 2465 if (chip_id != RTL8852A && !rtw89_is_rtl885xb(rtwdev)) 2466 return; 2467 2468 rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_PSUS_OFF_CAPC_EN); 2469 } 2470 2471 static void rtw89_pci_hci_ldo(struct rtw89_dev *rtwdev) 2472 { 2473 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2474 2475 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 2476 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, 2477 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 2478 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, 2479 B_AX_PCIE_DIS_WLSUS_AFT_PDN); 2480 } else if (rtwdev->chip->chip_id == RTL8852C) { 2481 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, 2482 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 2483 } 2484 } 2485 2486 static int rtw89_pci_dphy_delay(struct rtw89_dev *rtwdev) 2487 { 2488 if (!rtw89_is_rtl885xb(rtwdev)) 2489 return 0; 2490 2491 return rtw89_write16_mdio_mask(rtwdev, RAC_REG_REV2, BAC_CMU_EN_DLY_MASK, 2492 PCIE_DPHY_DLY_25US, PCIE_PHY_GEN1); 2493 } 2494 2495 static void rtw89_pci_power_wake(struct rtw89_dev *rtwdev, bool pwr_up) 2496 { 2497 if (pwr_up) 2498 rtw89_write32_set(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL); 2499 else 2500 rtw89_write32_clr(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL); 2501 } 2502 2503 static void rtw89_pci_autoload_hang(struct rtw89_dev *rtwdev) 2504 { 2505 if (rtwdev->chip->chip_id != RTL8852C) 2506 return; 2507 2508 rtw89_write32_set(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3); 2509 rtw89_write32_clr(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3); 2510 } 2511 2512 static void rtw89_pci_l12_vmain(struct rtw89_dev *rtwdev) 2513 { 2514 if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV)) 2515 return; 2516 2517 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_FORCE_PWR_NGAT); 2518 } 2519 2520 static void rtw89_pci_gen2_force_ib(struct rtw89_dev *rtwdev) 2521 { 2522 if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV)) 2523 return; 2524 2525 rtw89_write32_set(rtwdev, R_AX_PMC_DBG_CTRL2, 2526 B_AX_SYSON_DIS_PMCR_AX_WRMSK); 2527 rtw89_write32_set(rtwdev, R_AX_HCI_BG_CTRL, B_AX_BG_CLR_ASYNC_M3); 2528 rtw89_write32_clr(rtwdev, R_AX_PMC_DBG_CTRL2, 2529 B_AX_SYSON_DIS_PMCR_AX_WRMSK); 2530 } 2531 2532 static void rtw89_pci_l1_ent_lat(struct rtw89_dev *rtwdev) 2533 { 2534 if (rtwdev->chip->chip_id != RTL8852C) 2535 return; 2536 2537 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_SEL_REQ_ENTR_L1); 2538 } 2539 2540 static void rtw89_pci_wd_exit_l1(struct rtw89_dev *rtwdev) 2541 { 2542 if (rtwdev->chip->chip_id != RTL8852C) 2543 return; 2544 2545 rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_DMAC0_EXIT_L1_EN); 2546 } 2547 2548 static void rtw89_pci_set_sic(struct rtw89_dev *rtwdev) 2549 { 2550 if (rtwdev->chip->chip_id == RTL8852C) 2551 return; 2552 2553 rtw89_write32_clr(rtwdev, R_AX_PCIE_EXP_CTRL, 2554 B_AX_SIC_EN_FORCE_CLKREQ); 2555 } 2556 2557 static void rtw89_pci_set_lbc(struct rtw89_dev *rtwdev) 2558 { 2559 const struct rtw89_pci_info *info = rtwdev->pci_info; 2560 u32 lbc; 2561 2562 if (rtwdev->chip->chip_id == RTL8852C) 2563 return; 2564 2565 lbc = rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG); 2566 if (info->lbc_en == MAC_AX_PCIE_ENABLE) { 2567 lbc = u32_replace_bits(lbc, info->lbc_tmr, B_AX_LBC_TIMER); 2568 lbc |= B_AX_LBC_FLAG | B_AX_LBC_EN; 2569 rtw89_write32(rtwdev, R_AX_LBC_WATCHDOG, lbc); 2570 } else { 2571 lbc &= ~B_AX_LBC_EN; 2572 } 2573 rtw89_write32_set(rtwdev, R_AX_LBC_WATCHDOG, lbc); 2574 } 2575 2576 static void rtw89_pci_set_io_rcy(struct rtw89_dev *rtwdev) 2577 { 2578 const struct rtw89_pci_info *info = rtwdev->pci_info; 2579 u32 val32; 2580 2581 if (rtwdev->chip->chip_id != RTL8852C) 2582 return; 2583 2584 if (info->io_rcy_en == MAC_AX_PCIE_ENABLE) { 2585 val32 = FIELD_PREP(B_AX_PCIE_WDT_TIMER_M1_MASK, 2586 info->io_rcy_tmr); 2587 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M1, val32); 2588 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M2, val32); 2589 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_E0, val32); 2590 2591 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1); 2592 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2); 2593 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0); 2594 } else { 2595 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1); 2596 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2); 2597 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0); 2598 } 2599 2600 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_S1, B_AX_PCIE_IO_RCY_WDT_MODE_S1); 2601 } 2602 2603 static void rtw89_pci_set_dbg(struct rtw89_dev *rtwdev) 2604 { 2605 if (rtwdev->chip->chip_id == RTL8852C) 2606 return; 2607 2608 rtw89_write32_set(rtwdev, R_AX_PCIE_DBG_CTRL, 2609 B_AX_ASFF_FULL_NO_STK | B_AX_EN_STUCK_DBG); 2610 2611 if (rtwdev->chip->chip_id == RTL8852A) 2612 rtw89_write32_set(rtwdev, R_AX_PCIE_EXP_CTRL, 2613 B_AX_EN_CHKDSC_NO_RX_STUCK); 2614 } 2615 2616 static void rtw89_pci_set_keep_reg(struct rtw89_dev *rtwdev) 2617 { 2618 if (rtwdev->chip->chip_id == RTL8852C) 2619 return; 2620 2621 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, 2622 B_AX_PCIE_TXRST_KEEP_REG | B_AX_PCIE_RXRST_KEEP_REG); 2623 } 2624 2625 static void rtw89_pci_clr_idx_all_ax(struct rtw89_dev *rtwdev) 2626 { 2627 const struct rtw89_pci_info *info = rtwdev->pci_info; 2628 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2629 u32 val = B_AX_CLR_ACH0_IDX | B_AX_CLR_ACH1_IDX | B_AX_CLR_ACH2_IDX | 2630 B_AX_CLR_ACH3_IDX | B_AX_CLR_CH8_IDX | B_AX_CLR_CH9_IDX | 2631 B_AX_CLR_CH12_IDX; 2632 u32 rxbd_rwptr_clr = info->rxbd_rwptr_clr_reg; 2633 u32 txbd_rwptr_clr2 = info->txbd_rwptr_clr2_reg; 2634 2635 if (chip_id == RTL8852A || chip_id == RTL8852C) 2636 val |= B_AX_CLR_ACH4_IDX | B_AX_CLR_ACH5_IDX | 2637 B_AX_CLR_ACH6_IDX | B_AX_CLR_ACH7_IDX; 2638 /* clear DMA indexes */ 2639 rtw89_write32_set(rtwdev, R_AX_TXBD_RWPTR_CLR1, val); 2640 if (chip_id == RTL8852A || chip_id == RTL8852C) 2641 rtw89_write32_set(rtwdev, txbd_rwptr_clr2, 2642 B_AX_CLR_CH10_IDX | B_AX_CLR_CH11_IDX); 2643 rtw89_write32_set(rtwdev, rxbd_rwptr_clr, 2644 B_AX_CLR_RXQ_IDX | B_AX_CLR_RPQ_IDX); 2645 } 2646 2647 static int rtw89_pci_poll_txdma_ch_idle_ax(struct rtw89_dev *rtwdev) 2648 { 2649 const struct rtw89_pci_info *info = rtwdev->pci_info; 2650 u32 ret, check, dma_busy; 2651 u32 dma_busy1 = info->dma_busy1.addr; 2652 u32 dma_busy2 = info->dma_busy2_reg; 2653 2654 check = info->dma_busy1.mask; 2655 2656 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, 2657 10, 100, false, rtwdev, dma_busy1); 2658 if (ret) 2659 return ret; 2660 2661 if (!dma_busy2) 2662 return 0; 2663 2664 check = B_AX_CH10_BUSY | B_AX_CH11_BUSY; 2665 2666 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, 2667 10, 100, false, rtwdev, dma_busy2); 2668 if (ret) 2669 return ret; 2670 2671 return 0; 2672 } 2673 2674 static int rtw89_pci_poll_rxdma_ch_idle_ax(struct rtw89_dev *rtwdev) 2675 { 2676 const struct rtw89_pci_info *info = rtwdev->pci_info; 2677 u32 ret, check, dma_busy; 2678 u32 dma_busy3 = info->dma_busy3_reg; 2679 2680 check = B_AX_RXQ_BUSY | B_AX_RPQ_BUSY; 2681 2682 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, 2683 10, 100, false, rtwdev, dma_busy3); 2684 if (ret) 2685 return ret; 2686 2687 return 0; 2688 } 2689 2690 static int rtw89_pci_poll_dma_all_idle(struct rtw89_dev *rtwdev) 2691 { 2692 u32 ret; 2693 2694 ret = rtw89_pci_poll_txdma_ch_idle_ax(rtwdev); 2695 if (ret) { 2696 rtw89_err(rtwdev, "txdma ch busy\n"); 2697 return ret; 2698 } 2699 2700 ret = rtw89_pci_poll_rxdma_ch_idle_ax(rtwdev); 2701 if (ret) { 2702 rtw89_err(rtwdev, "rxdma ch busy\n"); 2703 return ret; 2704 } 2705 2706 return 0; 2707 } 2708 2709 static int rtw89_pci_mode_op(struct rtw89_dev *rtwdev) 2710 { 2711 const struct rtw89_pci_info *info = rtwdev->pci_info; 2712 enum mac_ax_bd_trunc_mode txbd_trunc_mode = info->txbd_trunc_mode; 2713 enum mac_ax_bd_trunc_mode rxbd_trunc_mode = info->rxbd_trunc_mode; 2714 enum mac_ax_rxbd_mode rxbd_mode = info->rxbd_mode; 2715 enum mac_ax_tag_mode tag_mode = info->tag_mode; 2716 enum mac_ax_wd_dma_intvl wd_dma_idle_intvl = info->wd_dma_idle_intvl; 2717 enum mac_ax_wd_dma_intvl wd_dma_act_intvl = info->wd_dma_act_intvl; 2718 enum mac_ax_tx_burst tx_burst = info->tx_burst; 2719 enum mac_ax_rx_burst rx_burst = info->rx_burst; 2720 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2721 u8 cv = rtwdev->hal.cv; 2722 u32 val32; 2723 2724 if (txbd_trunc_mode == MAC_AX_BD_TRUNC) { 2725 if (chip_id == RTL8852A && cv == CHIP_CBV) 2726 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE); 2727 } else if (txbd_trunc_mode == MAC_AX_BD_NORM) { 2728 if (chip_id == RTL8852A || chip_id == RTL8852B) 2729 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE); 2730 } 2731 2732 if (rxbd_trunc_mode == MAC_AX_BD_TRUNC) { 2733 if (chip_id == RTL8852A && cv == CHIP_CBV) 2734 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE); 2735 } else if (rxbd_trunc_mode == MAC_AX_BD_NORM) { 2736 if (chip_id == RTL8852A || chip_id == RTL8852B) 2737 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE); 2738 } 2739 2740 if (rxbd_mode == MAC_AX_RXBD_PKT) { 2741 rtw89_write32_clr(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit); 2742 } else if (rxbd_mode == MAC_AX_RXBD_SEP) { 2743 rtw89_write32_set(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit); 2744 2745 if (chip_id == RTL8852A || chip_id == RTL8852B) 2746 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, 2747 B_AX_PCIE_RX_APPLEN_MASK, 0); 2748 } 2749 2750 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 2751 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_TXDMA_MASK, tx_burst); 2752 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_RXDMA_MASK, rx_burst); 2753 } else if (chip_id == RTL8852C) { 2754 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_TXDMA_MASK, tx_burst); 2755 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_RXDMA_MASK, rx_burst); 2756 } 2757 2758 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 2759 if (tag_mode == MAC_AX_TAG_SGL) { 2760 val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) & 2761 ~B_AX_LATENCY_CONTROL; 2762 rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 2763 } else if (tag_mode == MAC_AX_TAG_MULTI) { 2764 val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) | 2765 B_AX_LATENCY_CONTROL; 2766 rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 2767 } 2768 } 2769 2770 rtw89_write32_mask(rtwdev, info->exp_ctrl_reg, info->max_tag_num_mask, 2771 info->multi_tag_num); 2772 2773 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 2774 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_IDLE, 2775 wd_dma_idle_intvl); 2776 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_ACT, 2777 wd_dma_act_intvl); 2778 } else if (chip_id == RTL8852C) { 2779 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_IDLE_V1_MASK, 2780 wd_dma_idle_intvl); 2781 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_ACT_V1_MASK, 2782 wd_dma_act_intvl); 2783 } 2784 2785 if (txbd_trunc_mode == MAC_AX_BD_TRUNC) { 2786 rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, 2787 B_AX_HOST_ADDR_INFO_8B_SEL); 2788 rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); 2789 } else if (txbd_trunc_mode == MAC_AX_BD_NORM) { 2790 rtw89_write32_clr(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, 2791 B_AX_HOST_ADDR_INFO_8B_SEL); 2792 rtw89_write32_set(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); 2793 } 2794 2795 return 0; 2796 } 2797 2798 static int rtw89_pci_ops_deinit(struct rtw89_dev *rtwdev) 2799 { 2800 const struct rtw89_pci_info *info = rtwdev->pci_info; 2801 2802 if (rtwdev->chip->chip_id == RTL8852A) { 2803 /* ltr sw trigger */ 2804 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_IDLE); 2805 } 2806 info->ltr_set(rtwdev, false); 2807 rtw89_pci_ctrl_dma_all(rtwdev, false); 2808 rtw89_pci_clr_idx_all(rtwdev); 2809 2810 return 0; 2811 } 2812 2813 static int rtw89_pci_ops_mac_pre_init_ax(struct rtw89_dev *rtwdev) 2814 { 2815 const struct rtw89_pci_info *info = rtwdev->pci_info; 2816 int ret; 2817 2818 rtw89_pci_ber(rtwdev); 2819 rtw89_pci_rxdma_prefth(rtwdev); 2820 rtw89_pci_l1off_pwroff(rtwdev); 2821 rtw89_pci_deglitch_setting(rtwdev); 2822 ret = rtw89_pci_l2_rxen_lat(rtwdev); 2823 if (ret) { 2824 rtw89_err(rtwdev, "[ERR] pcie l2 rxen lat %d\n", ret); 2825 return ret; 2826 } 2827 2828 rtw89_pci_aphy_pwrcut(rtwdev); 2829 rtw89_pci_hci_ldo(rtwdev); 2830 rtw89_pci_dphy_delay(rtwdev); 2831 2832 ret = rtw89_pci_autok_x(rtwdev); 2833 if (ret) { 2834 rtw89_err(rtwdev, "[ERR] pcie autok_x fail %d\n", ret); 2835 return ret; 2836 } 2837 2838 ret = rtw89_pci_auto_refclk_cal(rtwdev, false); 2839 if (ret) { 2840 rtw89_err(rtwdev, "[ERR] pcie autok fail %d\n", ret); 2841 return ret; 2842 } 2843 2844 rtw89_pci_power_wake(rtwdev, true); 2845 rtw89_pci_autoload_hang(rtwdev); 2846 rtw89_pci_l12_vmain(rtwdev); 2847 rtw89_pci_gen2_force_ib(rtwdev); 2848 rtw89_pci_l1_ent_lat(rtwdev); 2849 rtw89_pci_wd_exit_l1(rtwdev); 2850 rtw89_pci_set_sic(rtwdev); 2851 rtw89_pci_set_lbc(rtwdev); 2852 rtw89_pci_set_io_rcy(rtwdev); 2853 rtw89_pci_set_dbg(rtwdev); 2854 rtw89_pci_set_keep_reg(rtwdev); 2855 2856 rtw89_write32_set(rtwdev, info->dma_stop1.addr, B_AX_STOP_WPDMA); 2857 2858 /* stop DMA activities */ 2859 rtw89_pci_ctrl_dma_all(rtwdev, false); 2860 2861 ret = rtw89_pci_poll_dma_all_idle(rtwdev); 2862 if (ret) { 2863 rtw89_err(rtwdev, "[ERR] poll pcie dma all idle\n"); 2864 return ret; 2865 } 2866 2867 rtw89_pci_clr_idx_all(rtwdev); 2868 rtw89_pci_mode_op(rtwdev); 2869 2870 /* fill TRX BD indexes */ 2871 rtw89_pci_ops_reset(rtwdev); 2872 2873 ret = rtw89_pci_rst_bdram_ax(rtwdev); 2874 if (ret) { 2875 rtw89_warn(rtwdev, "reset bdram busy\n"); 2876 return ret; 2877 } 2878 2879 /* disable all channels except to FW CMD channel to download firmware */ 2880 rtw89_pci_ctrl_txdma_ch_ax(rtwdev, false); 2881 rtw89_pci_ctrl_txdma_fw_ch_ax(rtwdev, true); 2882 2883 /* start DMA activities */ 2884 rtw89_pci_ctrl_dma_all(rtwdev, true); 2885 2886 return 0; 2887 } 2888 2889 int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev, bool en) 2890 { 2891 u32 val; 2892 2893 if (!en) 2894 return 0; 2895 2896 val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0); 2897 if (rtw89_pci_ltr_is_err_reg_val(val)) 2898 return -EINVAL; 2899 val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1); 2900 if (rtw89_pci_ltr_is_err_reg_val(val)) 2901 return -EINVAL; 2902 val = rtw89_read32(rtwdev, R_AX_LTR_IDLE_LATENCY); 2903 if (rtw89_pci_ltr_is_err_reg_val(val)) 2904 return -EINVAL; 2905 val = rtw89_read32(rtwdev, R_AX_LTR_ACTIVE_LATENCY); 2906 if (rtw89_pci_ltr_is_err_reg_val(val)) 2907 return -EINVAL; 2908 2909 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_HW_EN | B_AX_LTR_EN | 2910 B_AX_LTR_WD_NOEMP_CHK); 2911 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_SPACE_IDX_MASK, 2912 PCI_LTR_SPC_500US); 2913 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK, 2914 PCI_LTR_IDLE_TIMER_3_2MS); 2915 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28); 2916 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28); 2917 rtw89_write32(rtwdev, R_AX_LTR_IDLE_LATENCY, 0x90039003); 2918 rtw89_write32(rtwdev, R_AX_LTR_ACTIVE_LATENCY, 0x880b880b); 2919 2920 return 0; 2921 } 2922 EXPORT_SYMBOL(rtw89_pci_ltr_set); 2923 2924 int rtw89_pci_ltr_set_v1(struct rtw89_dev *rtwdev, bool en) 2925 { 2926 u32 dec_ctrl; 2927 u32 val32; 2928 2929 val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0); 2930 if (rtw89_pci_ltr_is_err_reg_val(val32)) 2931 return -EINVAL; 2932 val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1); 2933 if (rtw89_pci_ltr_is_err_reg_val(val32)) 2934 return -EINVAL; 2935 dec_ctrl = rtw89_read32(rtwdev, R_AX_LTR_DEC_CTRL); 2936 if (rtw89_pci_ltr_is_err_reg_val(dec_ctrl)) 2937 return -EINVAL; 2938 val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX3); 2939 if (rtw89_pci_ltr_is_err_reg_val(val32)) 2940 return -EINVAL; 2941 val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX0); 2942 if (rtw89_pci_ltr_is_err_reg_val(val32)) 2943 return -EINVAL; 2944 2945 if (!en) { 2946 dec_ctrl &= ~(LTR_EN_BITS | B_AX_LTR_IDX_DRV_MASK | B_AX_LTR_HW_DEC_EN); 2947 dec_ctrl |= FIELD_PREP(B_AX_LTR_IDX_DRV_MASK, PCIE_LTR_IDX_IDLE) | 2948 B_AX_LTR_REQ_DRV; 2949 } else { 2950 dec_ctrl |= B_AX_LTR_HW_DEC_EN; 2951 } 2952 2953 dec_ctrl &= ~B_AX_LTR_SPACE_IDX_V1_MASK; 2954 dec_ctrl |= FIELD_PREP(B_AX_LTR_SPACE_IDX_V1_MASK, PCI_LTR_SPC_500US); 2955 2956 if (en) 2957 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, 2958 B_AX_LTR_WD_NOEMP_CHK_V1 | B_AX_LTR_HW_EN); 2959 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK, 2960 PCI_LTR_IDLE_TIMER_3_2MS); 2961 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28); 2962 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28); 2963 rtw89_write32(rtwdev, R_AX_LTR_DEC_CTRL, dec_ctrl); 2964 rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX3, 0x90039003); 2965 rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX0, 0x880b880b); 2966 2967 return 0; 2968 } 2969 EXPORT_SYMBOL(rtw89_pci_ltr_set_v1); 2970 2971 static int rtw89_pci_ops_mac_post_init_ax(struct rtw89_dev *rtwdev) 2972 { 2973 const struct rtw89_pci_info *info = rtwdev->pci_info; 2974 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2975 int ret; 2976 2977 ret = info->ltr_set(rtwdev, true); 2978 if (ret) { 2979 rtw89_err(rtwdev, "pci ltr set fail\n"); 2980 return ret; 2981 } 2982 if (chip_id == RTL8852A) { 2983 /* ltr sw trigger */ 2984 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_ACT); 2985 } 2986 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 2987 /* ADDR info 8-byte mode */ 2988 rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, 2989 B_AX_HOST_ADDR_INFO_8B_SEL); 2990 rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); 2991 } 2992 2993 /* enable DMA for all queues */ 2994 rtw89_pci_ctrl_txdma_ch_ax(rtwdev, true); 2995 2996 /* Release PCI IO */ 2997 rtw89_write32_clr(rtwdev, info->dma_stop1.addr, 2998 B_AX_STOP_WPDMA | B_AX_STOP_PCIEIO); 2999 3000 return 0; 3001 } 3002 3003 static int rtw89_pci_claim_device(struct rtw89_dev *rtwdev, 3004 struct pci_dev *pdev) 3005 { 3006 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3007 int ret; 3008 3009 ret = pci_enable_device(pdev); 3010 if (ret) { 3011 rtw89_err(rtwdev, "failed to enable pci device\n"); 3012 return ret; 3013 } 3014 3015 pci_set_master(pdev); 3016 pci_set_drvdata(pdev, rtwdev->hw); 3017 3018 rtwpci->pdev = pdev; 3019 3020 return 0; 3021 } 3022 3023 static void rtw89_pci_declaim_device(struct rtw89_dev *rtwdev, 3024 struct pci_dev *pdev) 3025 { 3026 pci_disable_device(pdev); 3027 } 3028 3029 static bool rtw89_pci_chip_is_manual_dac(struct rtw89_dev *rtwdev) 3030 { 3031 const struct rtw89_chip_info *chip = rtwdev->chip; 3032 3033 switch (chip->chip_id) { 3034 case RTL8852A: 3035 case RTL8852B: 3036 case RTL8851B: 3037 case RTL8852BT: 3038 return true; 3039 default: 3040 return false; 3041 } 3042 } 3043 3044 static bool rtw89_pci_is_dac_compatible_bridge(struct rtw89_dev *rtwdev) 3045 { 3046 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3047 struct pci_dev *bridge = pci_upstream_bridge(rtwpci->pdev); 3048 3049 if (!rtw89_pci_chip_is_manual_dac(rtwdev)) 3050 return true; 3051 3052 if (!bridge) 3053 return false; 3054 3055 switch (bridge->vendor) { 3056 case PCI_VENDOR_ID_INTEL: 3057 return true; 3058 case PCI_VENDOR_ID_ASMEDIA: 3059 if (bridge->device == 0x2806) 3060 return true; 3061 break; 3062 } 3063 3064 return false; 3065 } 3066 3067 static void rtw89_pci_cfg_dac(struct rtw89_dev *rtwdev) 3068 { 3069 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3070 3071 if (!rtwpci->enable_dac) 3072 return; 3073 3074 if (!rtw89_pci_chip_is_manual_dac(rtwdev)) 3075 return; 3076 3077 rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_L1_CTRL, RTW89_PCIE_BIT_EN_64BITS); 3078 } 3079 3080 static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev, 3081 struct pci_dev *pdev) 3082 { 3083 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3084 unsigned long resource_len; 3085 u8 bar_id = 2; 3086 int ret; 3087 3088 ret = pci_request_regions(pdev, KBUILD_MODNAME); 3089 if (ret) { 3090 rtw89_err(rtwdev, "failed to request pci regions\n"); 3091 goto err; 3092 } 3093 3094 if (!rtw89_pci_is_dac_compatible_bridge(rtwdev)) 3095 goto no_dac; 3096 3097 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36)); 3098 if (!ret) { 3099 rtwpci->enable_dac = true; 3100 rtw89_pci_cfg_dac(rtwdev); 3101 } else { 3102 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 3103 if (ret) { 3104 rtw89_err(rtwdev, 3105 "failed to set dma and consistent mask to 32/36-bit\n"); 3106 goto err_release_regions; 3107 } 3108 } 3109 no_dac: 3110 3111 resource_len = pci_resource_len(pdev, bar_id); 3112 rtwpci->mmap = pci_iomap(pdev, bar_id, resource_len); 3113 if (!rtwpci->mmap) { 3114 rtw89_err(rtwdev, "failed to map pci io\n"); 3115 ret = -EIO; 3116 goto err_release_regions; 3117 } 3118 3119 return 0; 3120 3121 err_release_regions: 3122 pci_release_regions(pdev); 3123 err: 3124 return ret; 3125 } 3126 3127 static void rtw89_pci_clear_mapping(struct rtw89_dev *rtwdev, 3128 struct pci_dev *pdev) 3129 { 3130 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3131 3132 if (rtwpci->mmap) { 3133 pci_iounmap(pdev, rtwpci->mmap); 3134 pci_release_regions(pdev); 3135 } 3136 } 3137 3138 static void rtw89_pci_free_tx_wd_ring(struct rtw89_dev *rtwdev, 3139 struct pci_dev *pdev, 3140 struct rtw89_pci_tx_ring *tx_ring) 3141 { 3142 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 3143 u8 *head = wd_ring->head; 3144 dma_addr_t dma = wd_ring->dma; 3145 u32 page_size = wd_ring->page_size; 3146 u32 page_num = wd_ring->page_num; 3147 u32 ring_sz = page_size * page_num; 3148 3149 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 3150 wd_ring->head = NULL; 3151 } 3152 3153 static void rtw89_pci_free_tx_ring(struct rtw89_dev *rtwdev, 3154 struct pci_dev *pdev, 3155 struct rtw89_pci_tx_ring *tx_ring) 3156 { 3157 int ring_sz; 3158 u8 *head; 3159 dma_addr_t dma; 3160 3161 head = tx_ring->bd_ring.head; 3162 dma = tx_ring->bd_ring.dma; 3163 ring_sz = tx_ring->bd_ring.desc_size * tx_ring->bd_ring.len; 3164 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 3165 3166 tx_ring->bd_ring.head = NULL; 3167 } 3168 3169 static void rtw89_pci_free_tx_rings(struct rtw89_dev *rtwdev, 3170 struct pci_dev *pdev) 3171 { 3172 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3173 const struct rtw89_pci_info *info = rtwdev->pci_info; 3174 struct rtw89_pci_tx_ring *tx_ring; 3175 int i; 3176 3177 for (i = 0; i < RTW89_TXCH_NUM; i++) { 3178 if (info->tx_dma_ch_mask & BIT(i)) 3179 continue; 3180 tx_ring = &rtwpci->tx_rings[i]; 3181 rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring); 3182 rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring); 3183 } 3184 } 3185 3186 static void rtw89_pci_free_rx_ring(struct rtw89_dev *rtwdev, 3187 struct pci_dev *pdev, 3188 struct rtw89_pci_rx_ring *rx_ring) 3189 { 3190 struct rtw89_pci_rx_info *rx_info; 3191 struct sk_buff *skb; 3192 dma_addr_t dma; 3193 u32 buf_sz; 3194 u8 *head; 3195 int ring_sz = rx_ring->bd_ring.desc_size * rx_ring->bd_ring.len; 3196 int i; 3197 3198 buf_sz = rx_ring->buf_sz; 3199 for (i = 0; i < rx_ring->bd_ring.len; i++) { 3200 skb = rx_ring->buf[i]; 3201 if (!skb) 3202 continue; 3203 3204 rx_info = RTW89_PCI_RX_SKB_CB(skb); 3205 dma = rx_info->dma; 3206 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE); 3207 dev_kfree_skb(skb); 3208 rx_ring->buf[i] = NULL; 3209 } 3210 3211 head = rx_ring->bd_ring.head; 3212 dma = rx_ring->bd_ring.dma; 3213 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 3214 3215 rx_ring->bd_ring.head = NULL; 3216 } 3217 3218 static void rtw89_pci_free_rx_rings(struct rtw89_dev *rtwdev, 3219 struct pci_dev *pdev) 3220 { 3221 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3222 struct rtw89_pci_rx_ring *rx_ring; 3223 int i; 3224 3225 for (i = 0; i < RTW89_RXCH_NUM; i++) { 3226 rx_ring = &rtwpci->rx_rings[i]; 3227 rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring); 3228 } 3229 } 3230 3231 static void rtw89_pci_free_trx_rings(struct rtw89_dev *rtwdev, 3232 struct pci_dev *pdev) 3233 { 3234 rtw89_pci_free_rx_rings(rtwdev, pdev); 3235 rtw89_pci_free_tx_rings(rtwdev, pdev); 3236 } 3237 3238 static int rtw89_pci_init_rx_bd(struct rtw89_dev *rtwdev, struct pci_dev *pdev, 3239 struct rtw89_pci_rx_ring *rx_ring, 3240 struct sk_buff *skb, int buf_sz, u32 idx) 3241 { 3242 struct rtw89_pci_rx_info *rx_info; 3243 struct rtw89_pci_rx_bd_32 *rx_bd; 3244 dma_addr_t dma; 3245 3246 if (!skb) 3247 return -EINVAL; 3248 3249 dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE); 3250 if (dma_mapping_error(&pdev->dev, dma)) 3251 return -EBUSY; 3252 3253 rx_info = RTW89_PCI_RX_SKB_CB(skb); 3254 rx_bd = RTW89_PCI_RX_BD(rx_ring, idx); 3255 3256 memset(rx_bd, 0, sizeof(*rx_bd)); 3257 rx_bd->buf_size = cpu_to_le16(buf_sz); 3258 rx_bd->dma = cpu_to_le32(dma); 3259 rx_bd->opt = le16_encode_bits(upper_32_bits(dma), RTW89_PCI_RXBD_OPT_DMA_HI); 3260 rx_info->dma = dma; 3261 3262 return 0; 3263 } 3264 3265 static int rtw89_pci_alloc_tx_wd_ring(struct rtw89_dev *rtwdev, 3266 struct pci_dev *pdev, 3267 struct rtw89_pci_tx_ring *tx_ring, 3268 enum rtw89_tx_channel txch) 3269 { 3270 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 3271 struct rtw89_pci_tx_wd *txwd; 3272 dma_addr_t dma; 3273 dma_addr_t cur_paddr; 3274 u8 *head; 3275 u8 *cur_vaddr; 3276 u32 page_size = RTW89_PCI_TXWD_PAGE_SIZE; 3277 u32 page_num = RTW89_PCI_TXWD_NUM_MAX; 3278 u32 ring_sz = page_size * page_num; 3279 u32 page_offset; 3280 int i; 3281 3282 /* FWCMD queue doesn't use txwd as pages */ 3283 if (txch == RTW89_TXCH_CH12) 3284 return 0; 3285 3286 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 3287 if (!head) 3288 return -ENOMEM; 3289 3290 INIT_LIST_HEAD(&wd_ring->free_pages); 3291 wd_ring->head = head; 3292 wd_ring->dma = dma; 3293 wd_ring->page_size = page_size; 3294 wd_ring->page_num = page_num; 3295 3296 page_offset = 0; 3297 for (i = 0; i < page_num; i++) { 3298 txwd = &wd_ring->pages[i]; 3299 cur_paddr = dma + page_offset; 3300 cur_vaddr = head + page_offset; 3301 3302 skb_queue_head_init(&txwd->queue); 3303 INIT_LIST_HEAD(&txwd->list); 3304 txwd->paddr = cur_paddr; 3305 txwd->vaddr = cur_vaddr; 3306 txwd->len = page_size; 3307 txwd->seq = i; 3308 rtw89_pci_enqueue_txwd(tx_ring, txwd); 3309 3310 page_offset += page_size; 3311 } 3312 3313 return 0; 3314 } 3315 3316 static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev, 3317 struct pci_dev *pdev, 3318 struct rtw89_pci_tx_ring *tx_ring, 3319 u32 desc_size, u32 len, 3320 enum rtw89_tx_channel txch) 3321 { 3322 const struct rtw89_pci_ch_dma_addr *txch_addr; 3323 int ring_sz = desc_size * len; 3324 u8 *head; 3325 dma_addr_t dma; 3326 int ret; 3327 3328 ret = rtw89_pci_alloc_tx_wd_ring(rtwdev, pdev, tx_ring, txch); 3329 if (ret) { 3330 rtw89_err(rtwdev, "failed to alloc txwd ring of txch %d\n", txch); 3331 goto err; 3332 } 3333 3334 ret = rtw89_pci_get_txch_addrs(rtwdev, txch, &txch_addr); 3335 if (ret) { 3336 rtw89_err(rtwdev, "failed to get address of txch %d", txch); 3337 goto err_free_wd_ring; 3338 } 3339 3340 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 3341 if (!head) { 3342 ret = -ENOMEM; 3343 goto err_free_wd_ring; 3344 } 3345 3346 INIT_LIST_HEAD(&tx_ring->busy_pages); 3347 tx_ring->bd_ring.head = head; 3348 tx_ring->bd_ring.dma = dma; 3349 tx_ring->bd_ring.len = len; 3350 tx_ring->bd_ring.desc_size = desc_size; 3351 tx_ring->bd_ring.addr = *txch_addr; 3352 tx_ring->bd_ring.wp = 0; 3353 tx_ring->bd_ring.rp = 0; 3354 tx_ring->txch = txch; 3355 3356 return 0; 3357 3358 err_free_wd_ring: 3359 rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring); 3360 err: 3361 return ret; 3362 } 3363 3364 static int rtw89_pci_alloc_tx_rings(struct rtw89_dev *rtwdev, 3365 struct pci_dev *pdev) 3366 { 3367 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3368 const struct rtw89_pci_info *info = rtwdev->pci_info; 3369 struct rtw89_pci_tx_ring *tx_ring; 3370 u32 desc_size; 3371 u32 len; 3372 u32 i, tx_allocated; 3373 int ret; 3374 3375 for (i = 0; i < RTW89_TXCH_NUM; i++) { 3376 if (info->tx_dma_ch_mask & BIT(i)) 3377 continue; 3378 tx_ring = &rtwpci->tx_rings[i]; 3379 desc_size = sizeof(struct rtw89_pci_tx_bd_32); 3380 len = RTW89_PCI_TXBD_NUM_MAX; 3381 ret = rtw89_pci_alloc_tx_ring(rtwdev, pdev, tx_ring, 3382 desc_size, len, i); 3383 if (ret) { 3384 rtw89_err(rtwdev, "failed to alloc tx ring %d\n", i); 3385 goto err_free; 3386 } 3387 } 3388 3389 return 0; 3390 3391 err_free: 3392 tx_allocated = i; 3393 for (i = 0; i < tx_allocated; i++) { 3394 tx_ring = &rtwpci->tx_rings[i]; 3395 rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring); 3396 } 3397 3398 return ret; 3399 } 3400 3401 static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev, 3402 struct pci_dev *pdev, 3403 struct rtw89_pci_rx_ring *rx_ring, 3404 u32 desc_size, u32 len, u32 rxch) 3405 { 3406 const struct rtw89_pci_info *info = rtwdev->pci_info; 3407 const struct rtw89_pci_ch_dma_addr *rxch_addr; 3408 struct sk_buff *skb; 3409 u8 *head; 3410 dma_addr_t dma; 3411 int ring_sz = desc_size * len; 3412 int buf_sz = RTW89_PCI_RX_BUF_SIZE; 3413 int i, allocated; 3414 int ret; 3415 3416 ret = rtw89_pci_get_rxch_addrs(rtwdev, rxch, &rxch_addr); 3417 if (ret) { 3418 rtw89_err(rtwdev, "failed to get address of rxch %d", rxch); 3419 return ret; 3420 } 3421 3422 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 3423 if (!head) { 3424 ret = -ENOMEM; 3425 goto err; 3426 } 3427 3428 rx_ring->bd_ring.head = head; 3429 rx_ring->bd_ring.dma = dma; 3430 rx_ring->bd_ring.len = len; 3431 rx_ring->bd_ring.desc_size = desc_size; 3432 rx_ring->bd_ring.addr = *rxch_addr; 3433 if (info->rx_ring_eq_is_full) 3434 rx_ring->bd_ring.wp = len - 1; 3435 else 3436 rx_ring->bd_ring.wp = 0; 3437 rx_ring->bd_ring.rp = 0; 3438 rx_ring->buf_sz = buf_sz; 3439 rx_ring->diliver_skb = NULL; 3440 rx_ring->diliver_desc.ready = false; 3441 rx_ring->target_rx_tag = 0; 3442 3443 for (i = 0; i < len; i++) { 3444 skb = dev_alloc_skb(buf_sz); 3445 if (!skb) { 3446 ret = -ENOMEM; 3447 goto err_free; 3448 } 3449 3450 memset(skb->data, 0, buf_sz); 3451 rx_ring->buf[i] = skb; 3452 ret = rtw89_pci_init_rx_bd(rtwdev, pdev, rx_ring, skb, 3453 buf_sz, i); 3454 if (ret) { 3455 rtw89_err(rtwdev, "failed to init rx buf %d\n", i); 3456 dev_kfree_skb_any(skb); 3457 rx_ring->buf[i] = NULL; 3458 goto err_free; 3459 } 3460 } 3461 3462 return 0; 3463 3464 err_free: 3465 allocated = i; 3466 for (i = 0; i < allocated; i++) { 3467 skb = rx_ring->buf[i]; 3468 if (!skb) 3469 continue; 3470 dma = *((dma_addr_t *)skb->cb); 3471 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE); 3472 dev_kfree_skb(skb); 3473 rx_ring->buf[i] = NULL; 3474 } 3475 3476 head = rx_ring->bd_ring.head; 3477 dma = rx_ring->bd_ring.dma; 3478 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 3479 3480 rx_ring->bd_ring.head = NULL; 3481 err: 3482 return ret; 3483 } 3484 3485 static int rtw89_pci_alloc_rx_rings(struct rtw89_dev *rtwdev, 3486 struct pci_dev *pdev) 3487 { 3488 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3489 struct rtw89_pci_rx_ring *rx_ring; 3490 u32 desc_size; 3491 u32 len; 3492 int i, rx_allocated; 3493 int ret; 3494 3495 for (i = 0; i < RTW89_RXCH_NUM; i++) { 3496 rx_ring = &rtwpci->rx_rings[i]; 3497 desc_size = sizeof(struct rtw89_pci_rx_bd_32); 3498 len = RTW89_PCI_RXBD_NUM_MAX; 3499 ret = rtw89_pci_alloc_rx_ring(rtwdev, pdev, rx_ring, 3500 desc_size, len, i); 3501 if (ret) { 3502 rtw89_err(rtwdev, "failed to alloc rx ring %d\n", i); 3503 goto err_free; 3504 } 3505 } 3506 3507 return 0; 3508 3509 err_free: 3510 rx_allocated = i; 3511 for (i = 0; i < rx_allocated; i++) { 3512 rx_ring = &rtwpci->rx_rings[i]; 3513 rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring); 3514 } 3515 3516 return ret; 3517 } 3518 3519 static int rtw89_pci_alloc_trx_rings(struct rtw89_dev *rtwdev, 3520 struct pci_dev *pdev) 3521 { 3522 int ret; 3523 3524 ret = rtw89_pci_alloc_tx_rings(rtwdev, pdev); 3525 if (ret) { 3526 rtw89_err(rtwdev, "failed to alloc dma tx rings\n"); 3527 goto err; 3528 } 3529 3530 ret = rtw89_pci_alloc_rx_rings(rtwdev, pdev); 3531 if (ret) { 3532 rtw89_err(rtwdev, "failed to alloc dma rx rings\n"); 3533 goto err_free_tx_rings; 3534 } 3535 3536 return 0; 3537 3538 err_free_tx_rings: 3539 rtw89_pci_free_tx_rings(rtwdev, pdev); 3540 err: 3541 return ret; 3542 } 3543 3544 static void rtw89_pci_h2c_init(struct rtw89_dev *rtwdev, 3545 struct rtw89_pci *rtwpci) 3546 { 3547 skb_queue_head_init(&rtwpci->h2c_queue); 3548 skb_queue_head_init(&rtwpci->h2c_release_queue); 3549 } 3550 3551 static int rtw89_pci_setup_resource(struct rtw89_dev *rtwdev, 3552 struct pci_dev *pdev) 3553 { 3554 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3555 int ret; 3556 3557 ret = rtw89_pci_setup_mapping(rtwdev, pdev); 3558 if (ret) { 3559 rtw89_err(rtwdev, "failed to setup pci mapping\n"); 3560 goto err; 3561 } 3562 3563 ret = rtw89_pci_alloc_trx_rings(rtwdev, pdev); 3564 if (ret) { 3565 rtw89_err(rtwdev, "failed to alloc pci trx rings\n"); 3566 goto err_pci_unmap; 3567 } 3568 3569 rtw89_pci_h2c_init(rtwdev, rtwpci); 3570 3571 spin_lock_init(&rtwpci->irq_lock); 3572 spin_lock_init(&rtwpci->trx_lock); 3573 3574 return 0; 3575 3576 err_pci_unmap: 3577 rtw89_pci_clear_mapping(rtwdev, pdev); 3578 err: 3579 return ret; 3580 } 3581 3582 static void rtw89_pci_clear_resource(struct rtw89_dev *rtwdev, 3583 struct pci_dev *pdev) 3584 { 3585 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3586 3587 rtw89_pci_free_trx_rings(rtwdev, pdev); 3588 rtw89_pci_clear_mapping(rtwdev, pdev); 3589 rtw89_pci_release_fwcmd(rtwdev, rtwpci, 3590 skb_queue_len(&rtwpci->h2c_queue), true); 3591 } 3592 3593 void rtw89_pci_config_intr_mask(struct rtw89_dev *rtwdev) 3594 { 3595 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3596 const struct rtw89_chip_info *chip = rtwdev->chip; 3597 u32 hs0isr_ind_int_en = B_AX_HS0ISR_IND_INT_EN; 3598 3599 if (chip->chip_id == RTL8851B) 3600 hs0isr_ind_int_en = B_AX_HS0ISR_IND_INT_EN_WKARND; 3601 3602 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | 0; 3603 3604 if (rtwpci->under_recovery) { 3605 rtwpci->intrs[0] = hs0isr_ind_int_en; 3606 rtwpci->intrs[1] = 0; 3607 } else { 3608 rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN | 3609 B_AX_RXDMA_INT_EN | 3610 B_AX_RXP1DMA_INT_EN | 3611 B_AX_RPQDMA_INT_EN | 3612 B_AX_RXDMA_STUCK_INT_EN | 3613 B_AX_RDU_INT_EN | 3614 B_AX_RPQBD_FULL_INT_EN | 3615 hs0isr_ind_int_en; 3616 3617 rtwpci->intrs[1] = B_AX_HC10ISR_IND_INT_EN; 3618 } 3619 } 3620 EXPORT_SYMBOL(rtw89_pci_config_intr_mask); 3621 3622 static void rtw89_pci_recovery_intr_mask_v1(struct rtw89_dev *rtwdev) 3623 { 3624 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3625 3626 rtwpci->ind_intrs = B_AX_HS0ISR_IND_INT_EN; 3627 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; 3628 rtwpci->intrs[0] = 0; 3629 rtwpci->intrs[1] = 0; 3630 } 3631 3632 static void rtw89_pci_default_intr_mask_v1(struct rtw89_dev *rtwdev) 3633 { 3634 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3635 3636 rtwpci->ind_intrs = B_AX_HCI_AXIDMA_INT_EN | 3637 B_AX_HS1ISR_IND_INT_EN | 3638 B_AX_HS0ISR_IND_INT_EN; 3639 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; 3640 rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN | 3641 B_AX_RXDMA_INT_EN | 3642 B_AX_RXP1DMA_INT_EN | 3643 B_AX_RPQDMA_INT_EN | 3644 B_AX_RXDMA_STUCK_INT_EN | 3645 B_AX_RDU_INT_EN | 3646 B_AX_RPQBD_FULL_INT_EN; 3647 rtwpci->intrs[1] = B_AX_GPIO18_INT_EN; 3648 } 3649 3650 static void rtw89_pci_low_power_intr_mask_v1(struct rtw89_dev *rtwdev) 3651 { 3652 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3653 3654 rtwpci->ind_intrs = B_AX_HS1ISR_IND_INT_EN | 3655 B_AX_HS0ISR_IND_INT_EN; 3656 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; 3657 rtwpci->intrs[0] = 0; 3658 rtwpci->intrs[1] = B_AX_GPIO18_INT_EN; 3659 } 3660 3661 void rtw89_pci_config_intr_mask_v1(struct rtw89_dev *rtwdev) 3662 { 3663 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3664 3665 if (rtwpci->under_recovery) 3666 rtw89_pci_recovery_intr_mask_v1(rtwdev); 3667 else if (rtwpci->low_power) 3668 rtw89_pci_low_power_intr_mask_v1(rtwdev); 3669 else 3670 rtw89_pci_default_intr_mask_v1(rtwdev); 3671 } 3672 EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v1); 3673 3674 static void rtw89_pci_recovery_intr_mask_v2(struct rtw89_dev *rtwdev) 3675 { 3676 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3677 3678 rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0; 3679 rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN; 3680 rtwpci->intrs[0] = 0; 3681 rtwpci->intrs[1] = 0; 3682 } 3683 3684 static void rtw89_pci_default_intr_mask_v2(struct rtw89_dev *rtwdev) 3685 { 3686 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3687 3688 rtwpci->ind_intrs = B_BE_HCI_AXIDMA_INT_EN0 | 3689 B_BE_HS0_IND_INT_EN0; 3690 rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN; 3691 rtwpci->intrs[0] = B_BE_RDU_CH1_INT_IMR_V1 | 3692 B_BE_RDU_CH0_INT_IMR_V1; 3693 rtwpci->intrs[1] = B_BE_PCIE_RX_RX0P2_IMR0_V1 | 3694 B_BE_PCIE_RX_RPQ0_IMR0_V1; 3695 } 3696 3697 static void rtw89_pci_low_power_intr_mask_v2(struct rtw89_dev *rtwdev) 3698 { 3699 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3700 3701 rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0 | 3702 B_BE_HS1_IND_INT_EN0; 3703 rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN; 3704 rtwpci->intrs[0] = 0; 3705 rtwpci->intrs[1] = B_BE_PCIE_RX_RX0P2_IMR0_V1 | 3706 B_BE_PCIE_RX_RPQ0_IMR0_V1; 3707 } 3708 3709 void rtw89_pci_config_intr_mask_v2(struct rtw89_dev *rtwdev) 3710 { 3711 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3712 3713 if (rtwpci->under_recovery) 3714 rtw89_pci_recovery_intr_mask_v2(rtwdev); 3715 else if (rtwpci->low_power) 3716 rtw89_pci_low_power_intr_mask_v2(rtwdev); 3717 else 3718 rtw89_pci_default_intr_mask_v2(rtwdev); 3719 } 3720 EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v2); 3721 3722 static int rtw89_pci_request_irq(struct rtw89_dev *rtwdev, 3723 struct pci_dev *pdev) 3724 { 3725 unsigned long flags = 0; 3726 int ret; 3727 3728 flags |= PCI_IRQ_INTX | PCI_IRQ_MSI; 3729 ret = pci_alloc_irq_vectors(pdev, 1, 1, flags); 3730 if (ret < 0) { 3731 rtw89_err(rtwdev, "failed to alloc irq vectors, ret %d\n", ret); 3732 goto err; 3733 } 3734 3735 ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq, 3736 rtw89_pci_interrupt_handler, 3737 rtw89_pci_interrupt_threadfn, 3738 IRQF_SHARED, KBUILD_MODNAME, rtwdev); 3739 if (ret) { 3740 rtw89_err(rtwdev, "failed to request threaded irq\n"); 3741 goto err_free_vector; 3742 } 3743 3744 rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RESET); 3745 3746 return 0; 3747 3748 err_free_vector: 3749 pci_free_irq_vectors(pdev); 3750 err: 3751 return ret; 3752 } 3753 3754 static void rtw89_pci_free_irq(struct rtw89_dev *rtwdev, 3755 struct pci_dev *pdev) 3756 { 3757 devm_free_irq(rtwdev->dev, pdev->irq, rtwdev); 3758 pci_free_irq_vectors(pdev); 3759 } 3760 3761 static u16 gray_code_to_bin(u16 gray_code, u32 bit_num) 3762 { 3763 u16 bin = 0, gray_bit; 3764 u32 bit_idx; 3765 3766 for (bit_idx = 0; bit_idx < bit_num; bit_idx++) { 3767 gray_bit = (gray_code >> bit_idx) & 0x1; 3768 if (bit_num - bit_idx > 1) 3769 gray_bit ^= (gray_code >> (bit_idx + 1)) & 0x1; 3770 bin |= (gray_bit << bit_idx); 3771 } 3772 3773 return bin; 3774 } 3775 3776 static int rtw89_pci_filter_out(struct rtw89_dev *rtwdev) 3777 { 3778 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3779 struct pci_dev *pdev = rtwpci->pdev; 3780 u16 val16, filter_out_val; 3781 u32 val, phy_offset; 3782 int ret; 3783 3784 if (rtwdev->chip->chip_id != RTL8852C) 3785 return 0; 3786 3787 val = rtw89_read32_mask(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK); 3788 if (val == B_AX_ASPM_CTRL_L1) 3789 return 0; 3790 3791 ret = pci_read_config_dword(pdev, RTW89_PCIE_L1_STS_V1, &val); 3792 if (ret) 3793 return ret; 3794 3795 val = FIELD_GET(RTW89_BCFG_LINK_SPEED_MASK, val); 3796 if (val == RTW89_PCIE_GEN1_SPEED) { 3797 phy_offset = R_RAC_DIRECT_OFFSET_G1; 3798 } else if (val == RTW89_PCIE_GEN2_SPEED) { 3799 phy_offset = R_RAC_DIRECT_OFFSET_G2; 3800 val16 = rtw89_read16(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT); 3801 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT, 3802 val16 | B_PCIE_BIT_PINOUT_DIS); 3803 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT, 3804 val16 & ~B_PCIE_BIT_RD_SEL); 3805 3806 val16 = rtw89_read16_mask(rtwdev, 3807 phy_offset + RAC_ANA1F * RAC_MULT, 3808 FILTER_OUT_EQ_MASK); 3809 val16 = gray_code_to_bin(val16, hweight16(val16)); 3810 filter_out_val = rtw89_read16(rtwdev, phy_offset + RAC_ANA24 * 3811 RAC_MULT); 3812 filter_out_val &= ~REG_FILTER_OUT_MASK; 3813 filter_out_val |= FIELD_PREP(REG_FILTER_OUT_MASK, val16); 3814 3815 rtw89_write16(rtwdev, phy_offset + RAC_ANA24 * RAC_MULT, 3816 filter_out_val); 3817 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0A * RAC_MULT, 3818 B_BAC_EQ_SEL); 3819 rtw89_write16_set(rtwdev, 3820 R_RAC_DIRECT_OFFSET_G1 + RAC_ANA0C * RAC_MULT, 3821 B_PCIE_BIT_PSAVE); 3822 } else { 3823 return -EOPNOTSUPP; 3824 } 3825 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0C * RAC_MULT, 3826 B_PCIE_BIT_PSAVE); 3827 3828 return 0; 3829 } 3830 3831 static void rtw89_pci_clkreq_set(struct rtw89_dev *rtwdev, bool enable) 3832 { 3833 const struct rtw89_pci_info *info = rtwdev->pci_info; 3834 const struct rtw89_pci_gen_def *gen_def = info->gen_def; 3835 3836 if (rtw89_pci_disable_clkreq) 3837 return; 3838 3839 gen_def->clkreq_set(rtwdev, enable); 3840 } 3841 3842 static void rtw89_pci_clkreq_set_ax(struct rtw89_dev *rtwdev, bool enable) 3843 { 3844 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 3845 int ret; 3846 3847 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL, 3848 PCIE_CLKDLY_HW_30US); 3849 if (ret) 3850 rtw89_err(rtwdev, "failed to set CLKREQ Delay\n"); 3851 3852 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 3853 if (enable) 3854 ret = rtw89_pci_config_byte_set(rtwdev, 3855 RTW89_PCIE_L1_CTRL, 3856 RTW89_PCIE_BIT_CLK); 3857 else 3858 ret = rtw89_pci_config_byte_clr(rtwdev, 3859 RTW89_PCIE_L1_CTRL, 3860 RTW89_PCIE_BIT_CLK); 3861 if (ret) 3862 rtw89_err(rtwdev, "failed to %s CLKREQ_L1, ret=%d", 3863 enable ? "set" : "unset", ret); 3864 } else if (chip_id == RTL8852C) { 3865 rtw89_write32_set(rtwdev, R_AX_PCIE_LAT_CTRL, 3866 B_AX_CLK_REQ_SEL_OPT | B_AX_CLK_REQ_SEL); 3867 if (enable) 3868 rtw89_write32_set(rtwdev, R_AX_L1_CLK_CTRL, 3869 B_AX_CLK_REQ_N); 3870 else 3871 rtw89_write32_clr(rtwdev, R_AX_L1_CLK_CTRL, 3872 B_AX_CLK_REQ_N); 3873 } 3874 } 3875 3876 static void rtw89_pci_aspm_set(struct rtw89_dev *rtwdev, bool enable) 3877 { 3878 const struct rtw89_pci_info *info = rtwdev->pci_info; 3879 const struct rtw89_pci_gen_def *gen_def = info->gen_def; 3880 3881 if (rtw89_pci_disable_aspm_l1) 3882 return; 3883 3884 gen_def->aspm_set(rtwdev, enable); 3885 } 3886 3887 static void rtw89_pci_aspm_set_ax(struct rtw89_dev *rtwdev, bool enable) 3888 { 3889 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 3890 u8 value = 0; 3891 int ret; 3892 3893 ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, &value); 3894 if (ret) 3895 rtw89_warn(rtwdev, "failed to read ASPM Delay\n"); 3896 3897 u8p_replace_bits(&value, PCIE_L1DLY_16US, RTW89_L1DLY_MASK); 3898 u8p_replace_bits(&value, PCIE_L0SDLY_4US, RTW89_L0DLY_MASK); 3899 3900 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, value); 3901 if (ret) 3902 rtw89_warn(rtwdev, "failed to read ASPM Delay\n"); 3903 3904 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 3905 if (enable) 3906 ret = rtw89_pci_config_byte_set(rtwdev, 3907 RTW89_PCIE_L1_CTRL, 3908 RTW89_PCIE_BIT_L1); 3909 else 3910 ret = rtw89_pci_config_byte_clr(rtwdev, 3911 RTW89_PCIE_L1_CTRL, 3912 RTW89_PCIE_BIT_L1); 3913 } else if (chip_id == RTL8852C) { 3914 if (enable) 3915 rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1, 3916 B_AX_ASPM_CTRL_L1); 3917 else 3918 rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1, 3919 B_AX_ASPM_CTRL_L1); 3920 } 3921 if (ret) 3922 rtw89_err(rtwdev, "failed to %s ASPM L1, ret=%d", 3923 enable ? "set" : "unset", ret); 3924 } 3925 3926 static void rtw89_pci_recalc_int_mit(struct rtw89_dev *rtwdev) 3927 { 3928 enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen; 3929 const struct rtw89_pci_info *info = rtwdev->pci_info; 3930 struct rtw89_traffic_stats *stats = &rtwdev->stats; 3931 enum rtw89_tfc_lv tx_tfc_lv = stats->tx_tfc_lv; 3932 enum rtw89_tfc_lv rx_tfc_lv = stats->rx_tfc_lv; 3933 u32 val = 0; 3934 3935 if (rtwdev->scanning || 3936 (tx_tfc_lv < RTW89_TFC_HIGH && rx_tfc_lv < RTW89_TFC_HIGH)) 3937 goto out; 3938 3939 if (chip_gen == RTW89_CHIP_BE) 3940 val = B_BE_PCIE_MIT_RX0P2_EN | B_BE_PCIE_MIT_RX0P1_EN; 3941 else 3942 val = B_AX_RXMIT_RXP2_SEL | B_AX_RXMIT_RXP1_SEL | 3943 FIELD_PREP(B_AX_RXCOUNTER_MATCH_MASK, RTW89_PCI_RXBD_NUM_MAX / 2) | 3944 FIELD_PREP(B_AX_RXTIMER_UNIT_MASK, AX_RXTIMER_UNIT_64US) | 3945 FIELD_PREP(B_AX_RXTIMER_MATCH_MASK, 2048 / 64); 3946 3947 out: 3948 rtw89_write32(rtwdev, info->mit_addr, val); 3949 } 3950 3951 static void rtw89_pci_link_cfg(struct rtw89_dev *rtwdev) 3952 { 3953 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3954 struct pci_dev *pdev = rtwpci->pdev; 3955 u16 link_ctrl; 3956 int ret; 3957 3958 /* Though there is standard PCIE configuration space to set the 3959 * link control register, but by Realtek's design, driver should 3960 * check if host supports CLKREQ/ASPM to enable the HW module. 3961 * 3962 * These functions are implemented by two HW modules associated, 3963 * one is responsible to access PCIE configuration space to 3964 * follow the host settings, and another is in charge of doing 3965 * CLKREQ/ASPM mechanisms, it is default disabled. Because sometimes 3966 * the host does not support it, and due to some reasons or wrong 3967 * settings (ex. CLKREQ# not Bi-Direction), it could lead to device 3968 * loss if HW misbehaves on the link. 3969 * 3970 * Hence it's designed that driver should first check the PCIE 3971 * configuration space is sync'ed and enabled, then driver can turn 3972 * on the other module that is actually working on the mechanism. 3973 */ 3974 ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl); 3975 if (ret) { 3976 rtw89_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret); 3977 return; 3978 } 3979 3980 if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN) 3981 rtw89_pci_clkreq_set(rtwdev, true); 3982 3983 if (link_ctrl & PCI_EXP_LNKCTL_ASPM_L1) 3984 rtw89_pci_aspm_set(rtwdev, true); 3985 } 3986 3987 static void rtw89_pci_l1ss_set(struct rtw89_dev *rtwdev, bool enable) 3988 { 3989 const struct rtw89_pci_info *info = rtwdev->pci_info; 3990 const struct rtw89_pci_gen_def *gen_def = info->gen_def; 3991 3992 if (rtw89_pci_disable_l1ss) 3993 return; 3994 3995 gen_def->l1ss_set(rtwdev, enable); 3996 } 3997 3998 static void rtw89_pci_l1ss_set_ax(struct rtw89_dev *rtwdev, bool enable) 3999 { 4000 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 4001 int ret; 4002 4003 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 4004 if (enable) 4005 ret = rtw89_pci_config_byte_set(rtwdev, 4006 RTW89_PCIE_TIMER_CTRL, 4007 RTW89_PCIE_BIT_L1SUB); 4008 else 4009 ret = rtw89_pci_config_byte_clr(rtwdev, 4010 RTW89_PCIE_TIMER_CTRL, 4011 RTW89_PCIE_BIT_L1SUB); 4012 if (ret) 4013 rtw89_err(rtwdev, "failed to %s L1SS, ret=%d", 4014 enable ? "set" : "unset", ret); 4015 } else if (chip_id == RTL8852C) { 4016 ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_L1SS_STS_V1, 4017 RTW89_PCIE_BIT_ASPM_L11 | 4018 RTW89_PCIE_BIT_PCI_L11); 4019 if (ret) 4020 rtw89_warn(rtwdev, "failed to unset ASPM L1.1, ret=%d", ret); 4021 if (enable) 4022 rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1, 4023 B_AX_L1SUB_DISABLE); 4024 else 4025 rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1, 4026 B_AX_L1SUB_DISABLE); 4027 } 4028 } 4029 4030 static void rtw89_pci_l1ss_cfg(struct rtw89_dev *rtwdev) 4031 { 4032 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 4033 struct pci_dev *pdev = rtwpci->pdev; 4034 u32 l1ss_cap_ptr, l1ss_ctrl; 4035 4036 if (rtw89_pci_disable_l1ss) 4037 return; 4038 4039 l1ss_cap_ptr = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS); 4040 if (!l1ss_cap_ptr) 4041 return; 4042 4043 pci_read_config_dword(pdev, l1ss_cap_ptr + PCI_L1SS_CTL1, &l1ss_ctrl); 4044 4045 if (l1ss_ctrl & PCI_L1SS_CTL1_L1SS_MASK) 4046 rtw89_pci_l1ss_set(rtwdev, true); 4047 } 4048 4049 static int rtw89_pci_poll_io_idle_ax(struct rtw89_dev *rtwdev) 4050 { 4051 int ret = 0; 4052 u32 sts; 4053 u32 busy = B_AX_PCIEIO_BUSY | B_AX_PCIEIO_TX_BUSY | B_AX_PCIEIO_RX_BUSY; 4054 4055 ret = read_poll_timeout_atomic(rtw89_read32, sts, (sts & busy) == 0x0, 4056 10, 1000, false, rtwdev, 4057 R_AX_PCIE_DMA_BUSY1); 4058 if (ret) { 4059 rtw89_err(rtwdev, "pci dmach busy1 0x%X\n", 4060 rtw89_read32(rtwdev, R_AX_PCIE_DMA_BUSY1)); 4061 return -EINVAL; 4062 } 4063 return ret; 4064 } 4065 4066 static int rtw89_pci_lv1rst_stop_dma_ax(struct rtw89_dev *rtwdev) 4067 { 4068 u32 val; 4069 int ret; 4070 4071 if (rtwdev->chip->chip_id == RTL8852C) 4072 return 0; 4073 4074 rtw89_pci_ctrl_dma_all(rtwdev, false); 4075 ret = rtw89_pci_poll_io_idle_ax(rtwdev); 4076 if (ret) { 4077 val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG); 4078 rtw89_debug(rtwdev, RTW89_DBG_HCI, 4079 "[PCIe] poll_io_idle fail, before 0x%08x: 0x%08x\n", 4080 R_AX_DBG_ERR_FLAG, val); 4081 if (val & B_AX_TX_STUCK || val & B_AX_PCIE_TXBD_LEN0) 4082 rtw89_mac_ctrl_hci_dma_tx(rtwdev, false); 4083 if (val & B_AX_RX_STUCK) 4084 rtw89_mac_ctrl_hci_dma_rx(rtwdev, false); 4085 rtw89_mac_ctrl_hci_dma_trx(rtwdev, true); 4086 ret = rtw89_pci_poll_io_idle_ax(rtwdev); 4087 val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG); 4088 rtw89_debug(rtwdev, RTW89_DBG_HCI, 4089 "[PCIe] poll_io_idle fail, after 0x%08x: 0x%08x\n", 4090 R_AX_DBG_ERR_FLAG, val); 4091 } 4092 4093 return ret; 4094 } 4095 4096 static int rtw89_pci_lv1rst_start_dma_ax(struct rtw89_dev *rtwdev) 4097 { 4098 u32 ret; 4099 4100 if (rtwdev->chip->chip_id == RTL8852C) 4101 return 0; 4102 4103 rtw89_mac_ctrl_hci_dma_trx(rtwdev, false); 4104 rtw89_mac_ctrl_hci_dma_trx(rtwdev, true); 4105 rtw89_pci_clr_idx_all(rtwdev); 4106 4107 ret = rtw89_pci_rst_bdram_ax(rtwdev); 4108 if (ret) 4109 return ret; 4110 4111 rtw89_pci_ctrl_dma_all(rtwdev, true); 4112 return ret; 4113 } 4114 4115 static int rtw89_pci_ops_mac_lv1_recovery(struct rtw89_dev *rtwdev, 4116 enum rtw89_lv1_rcvy_step step) 4117 { 4118 const struct rtw89_pci_info *info = rtwdev->pci_info; 4119 const struct rtw89_pci_gen_def *gen_def = info->gen_def; 4120 int ret; 4121 4122 switch (step) { 4123 case RTW89_LV1_RCVY_STEP_1: 4124 ret = gen_def->lv1rst_stop_dma(rtwdev); 4125 if (ret) 4126 rtw89_err(rtwdev, "lv1 rcvy pci stop dma fail\n"); 4127 4128 break; 4129 4130 case RTW89_LV1_RCVY_STEP_2: 4131 ret = gen_def->lv1rst_start_dma(rtwdev); 4132 if (ret) 4133 rtw89_err(rtwdev, "lv1 rcvy pci start dma fail\n"); 4134 break; 4135 4136 default: 4137 return -EINVAL; 4138 } 4139 4140 return ret; 4141 } 4142 4143 static void rtw89_pci_ops_dump_err_status(struct rtw89_dev *rtwdev) 4144 { 4145 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) 4146 return; 4147 4148 if (rtwdev->chip->chip_id == RTL8852C) { 4149 rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n", 4150 rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG_V1)); 4151 rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n", 4152 rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG_V1)); 4153 } else { 4154 rtw89_info(rtwdev, "R_AX_RPQ_RXBD_IDX =0x%08x\n", 4155 rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX)); 4156 rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n", 4157 rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG)); 4158 rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n", 4159 rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG)); 4160 } 4161 } 4162 4163 static int rtw89_pci_napi_poll(struct napi_struct *napi, int budget) 4164 { 4165 struct rtw89_dev *rtwdev = container_of(napi, struct rtw89_dev, napi); 4166 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 4167 const struct rtw89_pci_info *info = rtwdev->pci_info; 4168 const struct rtw89_pci_gen_def *gen_def = info->gen_def; 4169 unsigned long flags; 4170 int work_done; 4171 4172 rtwdev->napi_budget_countdown = budget; 4173 4174 rtw89_write32(rtwdev, gen_def->isr_clear_rpq.addr, gen_def->isr_clear_rpq.data); 4175 work_done = rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown); 4176 if (work_done == budget) 4177 return budget; 4178 4179 rtw89_write32(rtwdev, gen_def->isr_clear_rxq.addr, gen_def->isr_clear_rxq.data); 4180 work_done += rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown); 4181 if (work_done < budget && napi_complete_done(napi, work_done)) { 4182 spin_lock_irqsave(&rtwpci->irq_lock, flags); 4183 if (likely(rtwpci->running)) 4184 rtw89_chip_enable_intr(rtwdev, rtwpci); 4185 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 4186 } 4187 4188 return work_done; 4189 } 4190 4191 static int __maybe_unused rtw89_pci_suspend(struct device *dev) 4192 { 4193 struct ieee80211_hw *hw = dev_get_drvdata(dev); 4194 struct rtw89_dev *rtwdev = hw->priv; 4195 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 4196 4197 rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); 4198 rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST); 4199 rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); 4200 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 4201 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, 4202 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 4203 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, 4204 B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG); 4205 } else { 4206 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, 4207 B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN); 4208 } 4209 4210 return 0; 4211 } 4212 4213 static void rtw89_pci_l2_hci_ldo(struct rtw89_dev *rtwdev) 4214 { 4215 if (rtwdev->chip->chip_id == RTL8852C) 4216 return; 4217 4218 /* Hardware need write the reg twice to ensure the setting work */ 4219 rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE, 4220 RTW89_PCIE_BIT_CFG_RST_MSTATE); 4221 rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE, 4222 RTW89_PCIE_BIT_CFG_RST_MSTATE); 4223 } 4224 4225 static int __maybe_unused rtw89_pci_resume(struct device *dev) 4226 { 4227 struct ieee80211_hw *hw = dev_get_drvdata(dev); 4228 struct rtw89_dev *rtwdev = hw->priv; 4229 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 4230 4231 rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); 4232 rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST); 4233 rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); 4234 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 4235 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, 4236 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 4237 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, 4238 B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG); 4239 } else { 4240 rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1, 4241 B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN); 4242 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, 4243 B_AX_SEL_REQ_ENTR_L1); 4244 } 4245 rtw89_pci_l2_hci_ldo(rtwdev); 4246 rtw89_pci_disable_eq(rtwdev); 4247 rtw89_pci_cfg_dac(rtwdev); 4248 rtw89_pci_filter_out(rtwdev); 4249 rtw89_pci_link_cfg(rtwdev); 4250 rtw89_pci_l1ss_cfg(rtwdev); 4251 4252 return 0; 4253 } 4254 4255 SIMPLE_DEV_PM_OPS(rtw89_pm_ops, rtw89_pci_suspend, rtw89_pci_resume); 4256 EXPORT_SYMBOL(rtw89_pm_ops); 4257 4258 const struct rtw89_pci_gen_def rtw89_pci_gen_ax = { 4259 .isr_rdu = B_AX_RDU_INT, 4260 .isr_halt_c2h = B_AX_HALT_C2H_INT_EN, 4261 .isr_wdt_timeout = B_AX_WDT_TIMEOUT_INT_EN, 4262 .isr_clear_rpq = {R_AX_PCIE_HISR00, B_AX_RPQDMA_INT | B_AX_RPQBD_FULL_INT}, 4263 .isr_clear_rxq = {R_AX_PCIE_HISR00, B_AX_RXP1DMA_INT | B_AX_RXDMA_INT | 4264 B_AX_RDU_INT}, 4265 4266 .mac_pre_init = rtw89_pci_ops_mac_pre_init_ax, 4267 .mac_pre_deinit = NULL, 4268 .mac_post_init = rtw89_pci_ops_mac_post_init_ax, 4269 4270 .clr_idx_all = rtw89_pci_clr_idx_all_ax, 4271 .rst_bdram = rtw89_pci_rst_bdram_ax, 4272 4273 .lv1rst_stop_dma = rtw89_pci_lv1rst_stop_dma_ax, 4274 .lv1rst_start_dma = rtw89_pci_lv1rst_start_dma_ax, 4275 4276 .ctrl_txdma_ch = rtw89_pci_ctrl_txdma_ch_ax, 4277 .ctrl_txdma_fw_ch = rtw89_pci_ctrl_txdma_fw_ch_ax, 4278 .poll_txdma_ch_idle = rtw89_pci_poll_txdma_ch_idle_ax, 4279 4280 .aspm_set = rtw89_pci_aspm_set_ax, 4281 .clkreq_set = rtw89_pci_clkreq_set_ax, 4282 .l1ss_set = rtw89_pci_l1ss_set_ax, 4283 }; 4284 EXPORT_SYMBOL(rtw89_pci_gen_ax); 4285 4286 static const struct rtw89_hci_ops rtw89_pci_ops = { 4287 .tx_write = rtw89_pci_ops_tx_write, 4288 .tx_kick_off = rtw89_pci_ops_tx_kick_off, 4289 .flush_queues = rtw89_pci_ops_flush_queues, 4290 .reset = rtw89_pci_ops_reset, 4291 .start = rtw89_pci_ops_start, 4292 .stop = rtw89_pci_ops_stop, 4293 .pause = rtw89_pci_ops_pause, 4294 .switch_mode = rtw89_pci_ops_switch_mode, 4295 .recalc_int_mit = rtw89_pci_recalc_int_mit, 4296 4297 .read8 = rtw89_pci_ops_read8, 4298 .read16 = rtw89_pci_ops_read16, 4299 .read32 = rtw89_pci_ops_read32, 4300 .write8 = rtw89_pci_ops_write8, 4301 .write16 = rtw89_pci_ops_write16, 4302 .write32 = rtw89_pci_ops_write32, 4303 4304 .mac_pre_init = rtw89_pci_ops_mac_pre_init, 4305 .mac_pre_deinit = rtw89_pci_ops_mac_pre_deinit, 4306 .mac_post_init = rtw89_pci_ops_mac_post_init, 4307 .deinit = rtw89_pci_ops_deinit, 4308 4309 .check_and_reclaim_tx_resource = rtw89_pci_check_and_reclaim_tx_resource, 4310 .mac_lv1_rcvy = rtw89_pci_ops_mac_lv1_recovery, 4311 .dump_err_status = rtw89_pci_ops_dump_err_status, 4312 .napi_poll = rtw89_pci_napi_poll, 4313 4314 .recovery_start = rtw89_pci_ops_recovery_start, 4315 .recovery_complete = rtw89_pci_ops_recovery_complete, 4316 4317 .ctrl_txdma_ch = rtw89_pci_ctrl_txdma_ch, 4318 .ctrl_txdma_fw_ch = rtw89_pci_ctrl_txdma_fw_ch, 4319 .ctrl_trxhci = rtw89_pci_ctrl_dma_trx, 4320 .poll_txdma_ch_idle = rtw89_pci_poll_txdma_ch_idle, 4321 4322 .clr_idx_all = rtw89_pci_clr_idx_all, 4323 .clear = rtw89_pci_clear_resource, 4324 .disable_intr = rtw89_pci_disable_intr_lock, 4325 .enable_intr = rtw89_pci_enable_intr_lock, 4326 .rst_bdram = rtw89_pci_reset_bdram, 4327 }; 4328 4329 int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) 4330 { 4331 struct rtw89_dev *rtwdev; 4332 const struct rtw89_driver_info *info; 4333 const struct rtw89_pci_info *pci_info; 4334 int ret; 4335 4336 info = (const struct rtw89_driver_info *)id->driver_data; 4337 4338 rtwdev = rtw89_alloc_ieee80211_hw(&pdev->dev, 4339 sizeof(struct rtw89_pci), 4340 info->chip); 4341 if (!rtwdev) { 4342 dev_err(&pdev->dev, "failed to allocate hw\n"); 4343 return -ENOMEM; 4344 } 4345 4346 pci_info = info->bus.pci; 4347 4348 rtwdev->pci_info = info->bus.pci; 4349 rtwdev->hci.ops = &rtw89_pci_ops; 4350 rtwdev->hci.type = RTW89_HCI_TYPE_PCIE; 4351 rtwdev->hci.rpwm_addr = pci_info->rpwm_addr; 4352 rtwdev->hci.cpwm_addr = pci_info->cpwm_addr; 4353 4354 rtw89_check_quirks(rtwdev, info->quirks); 4355 4356 SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev); 4357 4358 ret = rtw89_core_init(rtwdev); 4359 if (ret) { 4360 rtw89_err(rtwdev, "failed to initialise core\n"); 4361 goto err_release_hw; 4362 } 4363 4364 ret = rtw89_pci_claim_device(rtwdev, pdev); 4365 if (ret) { 4366 rtw89_err(rtwdev, "failed to claim pci device\n"); 4367 goto err_core_deinit; 4368 } 4369 4370 ret = rtw89_pci_setup_resource(rtwdev, pdev); 4371 if (ret) { 4372 rtw89_err(rtwdev, "failed to setup pci resource\n"); 4373 goto err_declaim_pci; 4374 } 4375 4376 ret = rtw89_chip_info_setup(rtwdev); 4377 if (ret) { 4378 rtw89_err(rtwdev, "failed to setup chip information\n"); 4379 goto err_clear_resource; 4380 } 4381 4382 rtw89_pci_disable_eq(rtwdev); 4383 rtw89_pci_filter_out(rtwdev); 4384 rtw89_pci_link_cfg(rtwdev); 4385 rtw89_pci_l1ss_cfg(rtwdev); 4386 4387 ret = rtw89_core_napi_init(rtwdev); 4388 if (ret) { 4389 rtw89_err(rtwdev, "failed to init napi\n"); 4390 goto err_clear_resource; 4391 } 4392 4393 ret = rtw89_pci_request_irq(rtwdev, pdev); 4394 if (ret) { 4395 rtw89_err(rtwdev, "failed to request pci irq\n"); 4396 goto err_deinit_napi; 4397 } 4398 4399 ret = rtw89_core_register(rtwdev); 4400 if (ret) { 4401 rtw89_err(rtwdev, "failed to register core\n"); 4402 goto err_free_irq; 4403 } 4404 4405 set_bit(RTW89_FLAG_PROBE_DONE, rtwdev->flags); 4406 4407 return 0; 4408 4409 err_free_irq: 4410 rtw89_pci_free_irq(rtwdev, pdev); 4411 err_deinit_napi: 4412 rtw89_core_napi_deinit(rtwdev); 4413 err_clear_resource: 4414 rtw89_pci_clear_resource(rtwdev, pdev); 4415 err_declaim_pci: 4416 rtw89_pci_declaim_device(rtwdev, pdev); 4417 err_core_deinit: 4418 rtw89_core_deinit(rtwdev); 4419 err_release_hw: 4420 rtw89_free_ieee80211_hw(rtwdev); 4421 4422 return ret; 4423 } 4424 EXPORT_SYMBOL(rtw89_pci_probe); 4425 4426 void rtw89_pci_remove(struct pci_dev *pdev) 4427 { 4428 struct ieee80211_hw *hw = pci_get_drvdata(pdev); 4429 struct rtw89_dev *rtwdev; 4430 4431 rtwdev = hw->priv; 4432 4433 rtw89_pci_free_irq(rtwdev, pdev); 4434 rtw89_core_napi_deinit(rtwdev); 4435 rtw89_core_unregister(rtwdev); 4436 rtw89_pci_clear_resource(rtwdev, pdev); 4437 rtw89_pci_declaim_device(rtwdev, pdev); 4438 rtw89_core_deinit(rtwdev); 4439 rtw89_free_ieee80211_hw(rtwdev); 4440 } 4441 EXPORT_SYMBOL(rtw89_pci_remove); 4442 4443 MODULE_AUTHOR("Realtek Corporation"); 4444 MODULE_DESCRIPTION("Realtek PCI 802.11ax wireless driver"); 4445 MODULE_LICENSE("Dual BSD/GPL"); 4446