// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2020 Realtek Corporation
 */

#include <linux/pci.h>

#include "mac.h"
#include "pci.h"
#include "reg.h"
#include "ser.h"

static bool rtw89_pci_disable_clkreq;
static bool rtw89_pci_disable_aspm_l1;
static bool rtw89_pci_disable_l1ss;
module_param_named(disable_clkreq, rtw89_pci_disable_clkreq, bool, 0644);
module_param_named(disable_aspm_l1, rtw89_pci_disable_aspm_l1, bool, 0644);
module_param_named(disable_aspm_l1ss, rtw89_pci_disable_l1ss, bool, 0644);
MODULE_PARM_DESC(disable_clkreq, "Set Y to disable PCI clkreq support");
MODULE_PARM_DESC(disable_aspm_l1, "Set Y to disable PCI ASPM L1 support");
MODULE_PARM_DESC(disable_aspm_l1ss, "Set Y to disable PCI L1SS support");

static int rtw89_pci_rst_bdram_pcie(struct rtw89_dev *rtwdev)
{
	u32 val;
	int ret;

	rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1,
		      rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) | B_AX_RST_BDRAM);

	ret = read_poll_timeout_atomic(rtw89_read32, val, !(val & B_AX_RST_BDRAM),
				       1, RTW89_PCI_POLL_BDRAM_RST_CNT, false,
				       rtwdev, R_AX_PCIE_INIT_CFG1);

	if (ret)
		return -EBUSY;

	return 0;
}

static u32 rtw89_pci_dma_recalc(struct rtw89_dev *rtwdev,
				struct rtw89_pci_dma_ring *bd_ring,
				u32 cur_idx, bool tx)
{
	u32 cnt, cur_rp, wp, rp, len;

	rp = bd_ring->rp;
	wp = bd_ring->wp;
	len = bd_ring->len;

	cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
	if (tx)
		cnt = cur_rp >= rp ? cur_rp - rp : len - (rp - cur_rp);
	else
		cnt = cur_rp >= wp ? cur_rp - wp : len - (wp - cur_rp);

	bd_ring->rp = cur_rp;

	return cnt;
}

static u32 rtw89_pci_txbd_recalc(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 addr_idx = bd_ring->addr.idx;
	u32 cnt, idx;

	idx = rtw89_read32(rtwdev, addr_idx);
	cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, true);

	return cnt;
}

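/* fwcmd (H2C) completion handling: completed skbs are first moved from
 * h2c_queue to h2c_release_queue, and only entries beyond the newest
 * RTW89_PCI_MULTITAG are actually unmapped and freed.  With release_all
 * set (used on reset) the whole release queue is drained.
 */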
static void rtw89_pci_release_fwcmd(struct rtw89_dev *rtwdev,
				    struct rtw89_pci *rtwpci,
				    u32 cnt, bool release_all)
{
	struct rtw89_pci_tx_data *tx_data;
	struct sk_buff *skb;
	u32 qlen;

	while (cnt--) {
		skb = skb_dequeue(&rtwpci->h2c_queue);
		if (!skb) {
			rtw89_err(rtwdev, "failed to pre-release fwcmd\n");
			return;
		}
		skb_queue_tail(&rtwpci->h2c_release_queue, skb);
	}

	qlen = skb_queue_len(&rtwpci->h2c_release_queue);
	if (!release_all)
		qlen = qlen > RTW89_PCI_MULTITAG ?
		       qlen - RTW89_PCI_MULTITAG : 0;

	while (qlen--) {
		skb = skb_dequeue(&rtwpci->h2c_release_queue);
		if (!skb) {
			rtw89_err(rtwdev, "failed to release fwcmd\n");
			return;
		}
		tx_data = RTW89_PCI_TX_SKB_CB(skb);
		dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

static void rtw89_pci_reclaim_tx_fwcmd(struct rtw89_dev *rtwdev,
				       struct rtw89_pci *rtwpci)
{
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];
	u32 cnt;

	cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring);
	if (!cnt)
		return;
	rtw89_pci_release_fwcmd(rtwdev, rtwpci, cnt, false);
}

static u32 rtw89_pci_rxbd_recalc(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_rx_ring *rx_ring)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	u32 addr_idx = bd_ring->addr.idx;
	u32 cnt, idx;

	idx = rtw89_read32(rtwdev, addr_idx);
	cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, false);

	return cnt;
}

static void rtw89_pci_sync_skb_for_cpu(struct rtw89_dev *rtwdev,
				       struct sk_buff *skb)
{
	struct rtw89_pci_rx_info *rx_info;
	dma_addr_t dma;

	rx_info = RTW89_PCI_RX_SKB_CB(skb);
	dma = rx_info->dma;
	dma_sync_single_for_cpu(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE,
				DMA_FROM_DEVICE);
}

static void rtw89_pci_sync_skb_for_device(struct rtw89_dev *rtwdev,
					  struct sk_buff *skb)
{
	struct rtw89_pci_rx_info *rx_info;
	dma_addr_t dma;

	rx_info = RTW89_PCI_RX_SKB_CB(skb);
	dma = rx_info->dma;
	dma_sync_single_for_device(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE,
				   DMA_FROM_DEVICE);
}

static int rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev,
				      struct sk_buff *skb)
{
	struct rtw89_pci_rxbd_info *rxbd_info;
	struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);

	rxbd_info = (struct rtw89_pci_rxbd_info *)skb->data;
	rx_info->fs = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_FS);
	rx_info->ls = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_LS);
	rx_info->len = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_WRITE_SIZE);
	rx_info->tag = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_TAG);

	return 0;
}

static bool
rtw89_skb_put_rx_data(struct rtw89_dev *rtwdev, bool fs, bool ls,
		      struct sk_buff *new,
		      const struct sk_buff *skb, u32 offset,
		      const struct rtw89_pci_rx_info *rx_info,
		      const struct rtw89_rx_desc_info *desc_info)
{
	u32 copy_len = rx_info->len - offset;

	if (unlikely(skb_tailroom(new) < copy_len)) {
		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
			    "invalid rx data length bd_len=%d desc_len=%d offset=%d (fs=%d ls=%d)\n",
			    rx_info->len, desc_info->pkt_size, offset, fs, ls);
		rtw89_hex_dump(rtwdev, RTW89_DBG_TXRX, "rx_data: ",
			       skb->data, rx_info->len);
		/* length of a single segment skb is desc_info->pkt_size */
		if (fs && ls) {
			copy_len = desc_info->pkt_size;
		} else {
			rtw89_info(rtwdev, "drop rx data due to invalid length\n");
			return false;
		}
	}

	skb_put_data(new, skb->data + offset, copy_len);

	return true;
}

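/* Consume one RX buffer descriptor from the RXQ ring.  A received frame may
 * span several RX BDs: the segment flagged FS (first segment) carries the RX
 * descriptor and allocates the delivery skb, later segments only append
 * payload, and the LS (last segment) flag hands the assembled skb to
 * rtw89_core_rx().  Returns the number of BDs consumed (always 1 here).
 */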
static u32 rtw89_pci_rxbd_deliver_skbs(struct rtw89_dev *rtwdev,
				       struct rtw89_pci_rx_ring *rx_ring)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	struct rtw89_pci_rx_info *rx_info;
	struct rtw89_rx_desc_info *desc_info = &rx_ring->diliver_desc;
	struct sk_buff *new = rx_ring->diliver_skb;
	struct sk_buff *skb;
	u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
	u32 offset;
	u32 cnt = 1;
	bool fs, ls;
	int ret;

	skb = rx_ring->buf[bd_ring->wp];
	rtw89_pci_sync_skb_for_cpu(rtwdev, skb);

	ret = rtw89_pci_rxbd_info_update(rtwdev, skb);
	if (ret) {
		rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
			  bd_ring->wp, ret);
		goto err_sync_device;
	}

	rx_info = RTW89_PCI_RX_SKB_CB(skb);
	fs = rx_info->fs;
	ls = rx_info->ls;

	if (fs) {
		if (new) {
			rtw89_debug(rtwdev, RTW89_DBG_UNEXP,
				    "skb should not be ready before first segment start\n");
			goto err_sync_device;
		}
		if (desc_info->ready) {
			rtw89_warn(rtwdev, "desc info should not be ready before first segment start\n");
			goto err_sync_device;
		}

		rtw89_core_query_rxdesc(rtwdev, desc_info, skb->data, rxinfo_size);

		new = dev_alloc_skb(desc_info->pkt_size);
		if (!new)
			goto err_sync_device;

		rx_ring->diliver_skb = new;

		/* first segment has RX desc */
		offset = desc_info->offset;
		offset += desc_info->long_rxdesc ? sizeof(struct rtw89_rxdesc_long) :
						   sizeof(struct rtw89_rxdesc_short);
	} else {
		offset = sizeof(struct rtw89_pci_rxbd_info);
		if (!new) {
			rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "no last skb\n");
			goto err_sync_device;
		}
	}
	if (!rtw89_skb_put_rx_data(rtwdev, fs, ls, new, skb, offset, rx_info, desc_info))
		goto err_sync_device;
	rtw89_pci_sync_skb_for_device(rtwdev, skb);
	rtw89_pci_rxbd_increase(rx_ring, 1);

	if (!desc_info->ready) {
		rtw89_warn(rtwdev, "no rx desc information\n");
		goto err_free_resource;
	}
	if (ls) {
		rtw89_core_rx(rtwdev, desc_info, new);
		rx_ring->diliver_skb = NULL;
		desc_info->ready = false;
	}

	return cnt;

err_sync_device:
	rtw89_pci_sync_skb_for_device(rtwdev, skb);
	rtw89_pci_rxbd_increase(rx_ring, 1);
err_free_resource:
	if (new)
		dev_kfree_skb_any(new);
	rx_ring->diliver_skb = NULL;
	desc_info->ready = false;

	return cnt;
}

static void rtw89_pci_rxbd_deliver(struct rtw89_dev *rtwdev,
				   struct rtw89_pci_rx_ring *rx_ring,
				   u32 cnt)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	u32 rx_cnt;

	while (cnt && rtwdev->napi_budget_countdown > 0) {
		rx_cnt = rtw89_pci_rxbd_deliver_skbs(rtwdev, rx_ring);
		if (!rx_cnt) {
			rtw89_err(rtwdev, "failed to deliver RXBD skb\n");

			/* skip the rest RXBD bufs */
			rtw89_pci_rxbd_increase(rx_ring, cnt);
			break;
		}

		cnt -= rx_cnt;
	}

	rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
}

static int rtw89_pci_poll_rxq_dma(struct rtw89_dev *rtwdev,
				  struct rtw89_pci *rtwpci, int budget)
{
	struct rtw89_pci_rx_ring *rx_ring;
	int countdown = rtwdev->napi_budget_countdown;
	u32 cnt;

	rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RXQ];

	cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
	if (!cnt)
		return 0;

	cnt = min_t(u32, budget, cnt);

	rtw89_pci_rxbd_deliver(rtwdev, rx_ring, cnt);

	/* In case of flushing pending SKBs, the countdown may exceed. */
	if (rtwdev->napi_budget_countdown <= 0)
		return budget;

	return budget - countdown;
}

static void rtw89_pci_tx_status(struct rtw89_dev *rtwdev,
				struct rtw89_pci_tx_ring *tx_ring,
				struct sk_buff *skb, u8 tx_status)
{
	struct ieee80211_tx_info *info;

	info = IEEE80211_SKB_CB(skb);
	ieee80211_tx_info_clear_status(info);

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
	if (tx_status == RTW89_TX_DONE) {
		info->flags |= IEEE80211_TX_STAT_ACK;
		tx_ring->tx_acked++;
	} else {
		if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)
			rtw89_debug(rtwdev, RTW89_DBG_FW,
				    "failed to TX of status %x\n", tx_status);
		switch (tx_status) {
		case RTW89_TX_RETRY_LIMIT:
			tx_ring->tx_retry_lmt++;
			break;
		case RTW89_TX_LIFE_TIME:
			tx_ring->tx_life_time++;
			break;
		case RTW89_TX_MACID_DROP:
			tx_ring->tx_mac_id_drop++;
			break;
		default:
			rtw89_warn(rtwdev, "invalid TX status %x\n", tx_status);
			break;
		}
	}

	ieee80211_tx_status_ni(rtwdev->hw, skb);
}

static void rtw89_pci_reclaim_txbd(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_tx_wd *txwd;
	u32 cnt;

	cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring);
	while (cnt--) {
		txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list);
		if (!txwd) {
			rtw89_warn(rtwdev, "No busy txwd pages available\n");
			break;
		}

		list_del_init(&txwd->list);

		/* this skb has been freed by RPP */
		if (skb_queue_len(&txwd->queue) == 0)
			rtw89_pci_enqueue_txwd(tx_ring, txwd);
	}
}

static void rtw89_pci_release_busy_txwd(struct rtw89_dev *rtwdev,
					struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
	struct rtw89_pci_tx_wd *txwd;
	int i;

	for (i = 0; i < wd_ring->page_num; i++) {
		txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list);
		if (!txwd)
			break;

		list_del_init(&txwd->list);
	}
}

static void rtw89_pci_release_txwd_skb(struct rtw89_dev *rtwdev,
				       struct rtw89_pci_tx_ring *tx_ring,
				       struct rtw89_pci_tx_wd *txwd, u16 seq,
				       u8 tx_status)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_data *tx_data;
	struct sk_buff *skb, *tmp;
	u8 txch = tx_ring->txch;

	if (!list_empty(&txwd->list)) {
		rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
		/* In low power mode, RPP can receive before updating of TX BD.
		 * In normal mode, it should not happen so give it a warning.
		 */
		if (!rtwpci->low_power && !list_empty(&txwd->list))
			rtw89_warn(rtwdev, "queue %d txwd %d is not idle\n",
				   txch, seq);
	}

	skb_queue_walk_safe(&txwd->queue, skb, tmp) {
		skb_unlink(skb, &txwd->queue);

		tx_data = RTW89_PCI_TX_SKB_CB(skb);
		dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
				 DMA_TO_DEVICE);

		rtw89_pci_tx_status(rtwdev, tx_ring, skb, tx_status);
	}

	if (list_empty(&txwd->list))
		rtw89_pci_enqueue_txwd(tx_ring, txwd);
}

static void rtw89_pci_release_rpp(struct rtw89_dev *rtwdev,
				  struct rtw89_pci_rpp_fmt *rpp)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring;
	struct rtw89_pci_tx_wd_ring *wd_ring;
	struct rtw89_pci_tx_wd *txwd;
	u16 seq;
	u8 qsel, tx_status, txch;

	seq = le32_get_bits(rpp->dword, RTW89_PCI_RPP_SEQ);
	qsel = le32_get_bits(rpp->dword, RTW89_PCI_RPP_QSEL);
	tx_status = le32_get_bits(rpp->dword, RTW89_PCI_RPP_TX_STATUS);
	txch = rtw89_core_get_ch_dma(rtwdev, qsel);

	if (txch == RTW89_TXCH_CH12) {
		rtw89_warn(rtwdev, "should no fwcmd release report\n");
		return;
	}

	tx_ring = &rtwpci->tx_rings[txch];
	wd_ring = &tx_ring->wd_ring;
	txwd = &wd_ring->pages[seq];

	rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, seq, tx_status);
}

static void rtw89_pci_release_pending_txwd_skb(struct rtw89_dev *rtwdev,
						struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
	struct rtw89_pci_tx_wd *txwd;
	int i;

	for (i = 0; i < wd_ring->page_num; i++) {
		txwd = &wd_ring->pages[i];

		if (!list_empty(&txwd->list))
			continue;

		rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, i, RTW89_TX_MACID_DROP);
	}
}

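/* Parse one RPQ (release report) buffer: after the RX descriptor it holds an
 * array of rtw89_pci_rpp_fmt entries, each identifying the TX queue and
 * sequence of a TX work descriptor whose skbs can now be released.
 */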
static u32 rtw89_pci_release_tx_skbs(struct rtw89_dev *rtwdev,
				     struct rtw89_pci_rx_ring *rx_ring,
				     u32 max_cnt)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	struct rtw89_pci_rx_info *rx_info;
	struct rtw89_pci_rpp_fmt *rpp;
	struct rtw89_rx_desc_info desc_info = {};
	struct sk_buff *skb;
	u32 cnt = 0;
	u32 rpp_size = sizeof(struct rtw89_pci_rpp_fmt);
	u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
	u32 offset;
	int ret;

	skb = rx_ring->buf[bd_ring->wp];
	rtw89_pci_sync_skb_for_cpu(rtwdev, skb);

	ret = rtw89_pci_rxbd_info_update(rtwdev, skb);
	if (ret) {
		rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
			  bd_ring->wp, ret);
		goto err_sync_device;
	}

	rx_info = RTW89_PCI_RX_SKB_CB(skb);
	if (!rx_info->fs || !rx_info->ls) {
		rtw89_err(rtwdev, "cannot process RP frame not set FS/LS\n");
		return cnt;
	}

	rtw89_core_query_rxdesc(rtwdev, &desc_info, skb->data, rxinfo_size);

	/* first segment has RX desc */
	offset = desc_info.offset;
	offset += desc_info.long_rxdesc ? sizeof(struct rtw89_rxdesc_long) :
					  sizeof(struct rtw89_rxdesc_short);
	for (; offset + rpp_size <= rx_info->len; offset += rpp_size) {
		rpp = (struct rtw89_pci_rpp_fmt *)(skb->data + offset);
		rtw89_pci_release_rpp(rtwdev, rpp);
	}

	rtw89_pci_sync_skb_for_device(rtwdev, skb);
	rtw89_pci_rxbd_increase(rx_ring, 1);
	cnt++;

	return cnt;

err_sync_device:
	rtw89_pci_sync_skb_for_device(rtwdev, skb);
	return 0;
}

static void rtw89_pci_release_tx(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_rx_ring *rx_ring,
				 u32 cnt)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	u32 release_cnt;

	while (cnt) {
		release_cnt = rtw89_pci_release_tx_skbs(rtwdev, rx_ring, cnt);
		if (!release_cnt) {
			rtw89_err(rtwdev, "failed to release TX skbs\n");

			/* skip the rest RXBD bufs */
			rtw89_pci_rxbd_increase(rx_ring, cnt);
			break;
		}

		cnt -= release_cnt;
	}

	rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
}

static int rtw89_pci_poll_rpq_dma(struct rtw89_dev *rtwdev,
				  struct rtw89_pci *rtwpci, int budget)
{
	struct rtw89_pci_rx_ring *rx_ring;
	u32 cnt;
	int work_done;

	rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];

	spin_lock_bh(&rtwpci->trx_lock);

	cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
	if (cnt == 0)
		goto out_unlock;

	rtw89_pci_release_tx(rtwdev, rx_ring, cnt);

out_unlock:
	spin_unlock_bh(&rtwpci->trx_lock);

	/* always release all RPQ */
	work_done = min_t(int, cnt, budget);
	rtwdev->napi_budget_countdown -= work_done;

	return work_done;
}

static void rtw89_pci_isr_rxd_unavail(struct rtw89_dev *rtwdev,
				      struct rtw89_pci *rtwpci)
{
	struct rtw89_pci_rx_ring *rx_ring;
	struct rtw89_pci_dma_ring *bd_ring;
	u32 reg_idx;
	u16 hw_idx, hw_idx_next, host_idx;
	int i;

	for (i = 0; i < RTW89_RXCH_NUM; i++) {
		rx_ring = &rtwpci->rx_rings[i];
		bd_ring = &rx_ring->bd_ring;

		reg_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
		hw_idx = FIELD_GET(TXBD_HW_IDX_MASK, reg_idx);
		host_idx = FIELD_GET(TXBD_HOST_IDX_MASK, reg_idx);
		hw_idx_next = (hw_idx + 1) % bd_ring->len;

		if (hw_idx_next == host_idx)
			rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "%d RXD unavailable\n", i);

		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
			    "%d RXD unavailable, idx=0x%08x, len=%d\n",
			    i, reg_idx, bd_ring->len);
	}
}

void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev,
			       struct rtw89_pci *rtwpci,
			       struct rtw89_pci_isrs *isrs)
{
	isrs->halt_c2h_isrs = rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs;
	isrs->isrs[0] = rtw89_read32(rtwdev, R_AX_PCIE_HISR00) & rtwpci->intrs[0];
	isrs->isrs[1] = rtw89_read32(rtwdev, R_AX_PCIE_HISR10) & rtwpci->intrs[1];

	rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs);
	rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isrs->isrs[0]);
	rtw89_write32(rtwdev, R_AX_PCIE_HISR10, isrs->isrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_recognize_intrs);

void rtw89_pci_recognize_intrs_v1(struct rtw89_dev *rtwdev,
				  struct rtw89_pci *rtwpci,
				  struct rtw89_pci_isrs *isrs)
{
	isrs->ind_isrs = rtw89_read32(rtwdev, R_AX_PCIE_HISR00_V1) & rtwpci->ind_intrs;
	isrs->halt_c2h_isrs = isrs->ind_isrs & B_AX_HS0ISR_IND_INT_EN ?
			      rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs : 0;
	isrs->isrs[0] = isrs->ind_isrs & B_AX_HCI_AXIDMA_INT_EN ?
			rtw89_read32(rtwdev, R_AX_HAXI_HISR00) & rtwpci->intrs[0] : 0;
	isrs->isrs[1] = isrs->ind_isrs & B_AX_HS1ISR_IND_INT_EN ?
			rtw89_read32(rtwdev, R_AX_HISR1) & rtwpci->intrs[1] : 0;

	if (isrs->halt_c2h_isrs)
		rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs);
	if (isrs->isrs[0])
		rtw89_write32(rtwdev, R_AX_HAXI_HISR00, isrs->isrs[0]);
	if (isrs->isrs[1])
		rtw89_write32(rtwdev, R_AX_HISR1, isrs->isrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v1);

static void rtw89_pci_clear_isr0(struct rtw89_dev *rtwdev, u32 isr00)
{
	/* write 1 clear */
	rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isr00);
}

void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, rtwpci->intrs[0]);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, rtwpci->intrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_enable_intr);

void rtw89_pci_disable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_HIMR0, 0);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, 0);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, 0);
}
EXPORT_SYMBOL(rtw89_pci_disable_intr);

void rtw89_pci_enable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, rtwpci->ind_intrs);
	rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
	rtw89_write32(rtwdev, R_AX_HAXI_HIMR00, rtwpci->intrs[0]);
	rtw89_write32(rtwdev, R_AX_HIMR1, rtwpci->intrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_enable_intr_v1);

void rtw89_pci_disable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, 0);
}
EXPORT_SYMBOL(rtw89_pci_disable_intr_v1);

static void rtw89_pci_ops_recovery_start(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw89_chip_disable_intr(rtwdev, rtwpci);
	rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_START);
	rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static void rtw89_pci_ops_recovery_complete(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw89_chip_disable_intr(rtwdev, rtwpci);
	rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_COMPLETE);
	rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static void rtw89_pci_low_power_interrupt_handler(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	int budget = NAPI_POLL_WEIGHT;

	/* To prevent the RXQ from getting stuck when the budget runs out. */
	rtwdev->napi_budget_countdown = budget;

	rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, budget);
	rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, budget);
}

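/* Threaded IRQ bottom half: the hard handler below only masks further
 * interrupts and returns IRQ_WAKE_THREAD; this function then reads and
 * acknowledges the ISR bits, handles SER/watchdog events, and either
 * processes RX directly (low power mode) or schedules NAPI.
 */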
static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev)
{
	struct rtw89_dev *rtwdev = dev;
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_isrs isrs;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw89_chip_recognize_intrs(rtwdev, rtwpci, &isrs);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);

	if (unlikely(isrs.isrs[0] & B_AX_RDU_INT))
		rtw89_pci_isr_rxd_unavail(rtwdev, rtwpci);

	if (unlikely(isrs.halt_c2h_isrs & B_AX_HALT_C2H_INT_EN))
		rtw89_ser_notify(rtwdev, rtw89_mac_get_err_status(rtwdev));

	if (unlikely(isrs.halt_c2h_isrs & B_AX_WDT_TIMEOUT_INT_EN))
		rtw89_ser_notify(rtwdev, MAC_AX_ERR_L2_ERR_WDT_TIMEOUT_INT);

	if (unlikely(rtwpci->under_recovery))
		goto enable_intr;

	if (unlikely(rtwpci->low_power)) {
		rtw89_pci_low_power_interrupt_handler(rtwdev);
		goto enable_intr;
	}

	if (likely(rtwpci->running)) {
		local_bh_disable();
		napi_schedule(&rtwdev->napi);
		local_bh_enable();
	}

	return IRQ_HANDLED;

enable_intr:
	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	if (likely(rtwpci->running))
		rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
	return IRQ_HANDLED;
}

static irqreturn_t rtw89_pci_interrupt_handler(int irq, void *dev)
{
	struct rtw89_dev *rtwdev = dev;
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;
	irqreturn_t irqret = IRQ_WAKE_THREAD;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);

	/* An interrupt event that is already in flight can still trigger this
	 * handler even after pci_stop() has turned off the IMR.
	 */
	if (unlikely(!rtwpci->running)) {
		irqret = IRQ_HANDLED;
		goto exit;
	}

	rtw89_chip_disable_intr(rtwdev, rtwpci);
exit:
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);

	return irqret;
}

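/* Per-channel DMA register address tables.  DEF_TXCHADDRS_TYPE1 applies the
 * optional version suffix to every register, while DEF_TXCHADDRS leaves the
 * BD num/idx registers unsuffixed; rtw89_pci_ch_dma_addr_set_v1 instantiates
 * the same tables with the _V1 register names.
 */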
#define DEF_TXCHADDRS_TYPE1(info, txch, v...) \
	[RTW89_TXCH_##txch] = { \
		.num = R_AX_##txch##_TXBD_NUM ##v, \
		.idx = R_AX_##txch##_TXBD_IDX ##v, \
		.bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
		.desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
		.desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
	}

#define DEF_TXCHADDRS(info, txch, v...) \
	[RTW89_TXCH_##txch] = { \
		.num = R_AX_##txch##_TXBD_NUM, \
		.idx = R_AX_##txch##_TXBD_IDX, \
		.bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
		.desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
		.desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
	}

#define DEF_RXCHADDRS(info, rxch, v...) \
	[RTW89_RXCH_##rxch] = { \
		.num = R_AX_##rxch##_RXBD_NUM ##v, \
		.idx = R_AX_##rxch##_RXBD_IDX ##v, \
		.desa_l = R_AX_##rxch##_RXBD_DESA_L ##v, \
		.desa_h = R_AX_##rxch##_RXBD_DESA_H ##v, \
	}

const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set = {
	.tx = {
		DEF_TXCHADDRS(info, ACH0),
		DEF_TXCHADDRS(info, ACH1),
		DEF_TXCHADDRS(info, ACH2),
		DEF_TXCHADDRS(info, ACH3),
		DEF_TXCHADDRS(info, ACH4),
		DEF_TXCHADDRS(info, ACH5),
		DEF_TXCHADDRS(info, ACH6),
		DEF_TXCHADDRS(info, ACH7),
		DEF_TXCHADDRS(info, CH8),
		DEF_TXCHADDRS(info, CH9),
		DEF_TXCHADDRS_TYPE1(info, CH10),
		DEF_TXCHADDRS_TYPE1(info, CH11),
		DEF_TXCHADDRS(info, CH12),
	},
	.rx = {
		DEF_RXCHADDRS(info, RXQ),
		DEF_RXCHADDRS(info, RPQ),
	},
};
EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set);

const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1 = {
	.tx = {
		DEF_TXCHADDRS(info, ACH0, _V1),
		DEF_TXCHADDRS(info, ACH1, _V1),
		DEF_TXCHADDRS(info, ACH2, _V1),
		DEF_TXCHADDRS(info, ACH3, _V1),
		DEF_TXCHADDRS(info, ACH4, _V1),
		DEF_TXCHADDRS(info, ACH5, _V1),
		DEF_TXCHADDRS(info, ACH6, _V1),
		DEF_TXCHADDRS(info, ACH7, _V1),
		DEF_TXCHADDRS(info, CH8, _V1),
		DEF_TXCHADDRS(info, CH9, _V1),
		DEF_TXCHADDRS_TYPE1(info, CH10, _V1),
		DEF_TXCHADDRS_TYPE1(info, CH11, _V1),
		DEF_TXCHADDRS(info, CH12, _V1),
	},
	.rx = {
		DEF_RXCHADDRS(info, RXQ, _V1),
		DEF_RXCHADDRS(info, RPQ, _V1),
	},
};
EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_v1);

#undef DEF_TXCHADDRS_TYPE1
#undef DEF_TXCHADDRS
#undef DEF_RXCHADDRS

static int rtw89_pci_get_txch_addrs(struct rtw89_dev *rtwdev,
				    enum rtw89_tx_channel txch,
				    const struct rtw89_pci_ch_dma_addr **addr)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;

	if (txch >= RTW89_TXCH_NUM)
		return -EINVAL;

	*addr = &info->dma_addr_set->tx[txch];

	return 0;
}

static int rtw89_pci_get_rxch_addrs(struct rtw89_dev *rtwdev,
				    enum rtw89_rx_channel rxch,
				    const struct rtw89_pci_ch_dma_addr **addr)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;

	if (rxch >= RTW89_RXCH_NUM)
		return -EINVAL;

	*addr = &info->dma_addr_set->rx[rxch];

	return 0;
}

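/* Number of free TX buffer descriptors.  One slot is always kept unused so
 * that a full ring (wp one behind rp) can be told apart from an empty one
 * (wp == rp).
 */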
static u32 rtw89_pci_get_avail_txbd_num(struct rtw89_pci_tx_ring *ring)
{
	struct rtw89_pci_dma_ring *bd_ring = &ring->bd_ring;

	/* reserved 1 desc check ring is full or not */
	if (bd_ring->rp > bd_ring->wp)
		return bd_ring->rp - bd_ring->wp - 1;

	return bd_ring->len - (bd_ring->wp - bd_ring->rp) - 1;
}

static
u32 __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];
	u32 cnt;

	spin_lock_bh(&rtwpci->trx_lock);
	rtw89_pci_reclaim_tx_fwcmd(rtwdev, rtwpci);
	cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	spin_unlock_bh(&rtwpci->trx_lock);

	return cnt;
}

static
u32 __rtw89_pci_check_and_reclaim_tx_resource_noio(struct rtw89_dev *rtwdev,
						   u8 txch)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
	u32 cnt;

	spin_lock_bh(&rtwpci->trx_lock);
	cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	cnt = min(cnt, wd_ring->curr_num);
	spin_unlock_bh(&rtwpci->trx_lock);

	return cnt;
}

static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
						     u8 txch)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
	u32 bd_cnt, wd_cnt, min_cnt = 0;
	struct rtw89_pci_rx_ring *rx_ring;
	u32 cnt;

	rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];

	spin_lock_bh(&rtwpci->trx_lock);
	bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	wd_cnt = wd_ring->curr_num;

	if (wd_cnt == 0 || bd_cnt == 0) {
		cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
		if (cnt)
			rtw89_pci_release_tx(rtwdev, rx_ring, cnt);
		else if (wd_cnt == 0)
			goto out_unlock;

		bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
		if (bd_cnt == 0)
			rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
	}

	bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	wd_cnt = wd_ring->curr_num;
	min_cnt = min(bd_cnt, wd_cnt);
	if (min_cnt == 0)
		rtw89_debug(rtwdev, rtwpci->low_power ? RTW89_DBG_TXRX : RTW89_DBG_UNEXP,
			    "still no tx resource after reclaim: wd_cnt=%d bd_cnt=%d\n",
			    wd_cnt, bd_cnt);

out_unlock:
	spin_unlock_bh(&rtwpci->trx_lock);

	return min_cnt;
}

static u32 rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
						   u8 txch)
{
	if (rtwdev->hci.paused)
		return __rtw89_pci_check_and_reclaim_tx_resource_noio(rtwdev, txch);

	if (txch == RTW89_TXCH_CH12)
		return __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(rtwdev);

	return __rtw89_pci_check_and_reclaim_tx_resource(rtwdev, txch);
}

static void __rtw89_pci_tx_kick_off(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 host_idx, addr;

	spin_lock_bh(&rtwpci->trx_lock);

	addr = bd_ring->addr.idx;
	host_idx = bd_ring->wp;
	rtw89_write16(rtwdev, addr, host_idx);

	spin_unlock_bh(&rtwpci->trx_lock);
}

static void rtw89_pci_tx_bd_ring_update(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring,
					int n_txbd)
{
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 host_idx, len;

	len = bd_ring->len;
	host_idx = bd_ring->wp + n_txbd;
	host_idx = host_idx < len ? host_idx : host_idx - len;

	bd_ring->wp = host_idx;
}

static void rtw89_pci_ops_tx_kick_off(struct rtw89_dev *rtwdev, u8 txch)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];

	if (rtwdev->hci.paused) {
		set_bit(txch, rtwpci->kick_map);
		return;
	}

	__rtw89_pci_tx_kick_off(rtwdev, tx_ring);
}

static void rtw89_pci_tx_kick_off_pending(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring;
	int txch;

	for (txch = 0; txch < RTW89_TXCH_NUM; txch++) {
		if (!test_and_clear_bit(txch, rtwpci->kick_map))
			continue;

		tx_ring = &rtwpci->tx_rings[txch];
		__rtw89_pci_tx_kick_off(rtwdev, tx_ring);
	}
}

static void __pci_flush_txch(struct rtw89_dev *rtwdev, u8 txch, bool drop)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 cur_idx, cur_rp;
	u8 i;

	/* Because the time taken by the I/O is a bit dynamic, it's hard to
	 * define a reasonable fixed total timeout to use read_poll_timeout*
	 * helper. Instead, we can ensure a reasonable number of polls, so we
	 * just use a for loop with udelay here.
	 */
	for (i = 0; i < 60; i++) {
		cur_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
		cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
		if (cur_rp == bd_ring->wp)
			return;

		udelay(1);
	}

	if (!drop)
		rtw89_info(rtwdev, "timed out to flush pci txch: %d\n", txch);
}

static void __rtw89_pci_ops_flush_txchs(struct rtw89_dev *rtwdev, u32 txchs,
					bool drop)
{
	u8 i;

	for (i = 0; i < RTW89_TXCH_NUM; i++) {
		/* It may be unnecessary to flush FWCMD queue. */
		if (i == RTW89_TXCH_CH12)
			continue;

		if (txchs & BIT(i))
			__pci_flush_txch(rtwdev, i, drop);
	}
}

static void rtw89_pci_ops_flush_queues(struct rtw89_dev *rtwdev, u32 queues,
				       bool drop)
{
	__rtw89_pci_ops_flush_txchs(rtwdev, BIT(RTW89_TXCH_NUM) - 1, drop);
}

u32 rtw89_pci_fill_txaddr_info(struct rtw89_dev *rtwdev,
			       void *txaddr_info_addr, u32 total_len,
			       dma_addr_t dma, u8 *add_info_nr)
{
	struct rtw89_pci_tx_addr_info_32 *txaddr_info = txaddr_info_addr;

	txaddr_info->length = cpu_to_le16(total_len);
	txaddr_info->option = cpu_to_le16(RTW89_PCI_ADDR_MSDU_LS |
					  RTW89_PCI_ADDR_NUM(1));
	txaddr_info->dma = cpu_to_le32(dma);

	*add_info_nr = 1;

	return sizeof(*txaddr_info);
}
EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info);

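/* v1 address info: instead of a single length/address entry, the buffer is
 * described by up to RTW89_TXADDR_INFO_NR_V1 entries of at most
 * TXADDR_INFO_LENTHG_V1_MAX bytes each, with the LS bit set on the entry
 * that covers the tail of the frame.
 */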
u32 rtw89_pci_fill_txaddr_info_v1(struct rtw89_dev *rtwdev,
				  void *txaddr_info_addr, u32 total_len,
				  dma_addr_t dma, u8 *add_info_nr)
{
	struct rtw89_pci_tx_addr_info_32_v1 *txaddr_info = txaddr_info_addr;
	u32 remain = total_len;
	u32 len;
	u16 length_option;
	int n;

	for (n = 0; n < RTW89_TXADDR_INFO_NR_V1 && remain; n++) {
		len = remain >= TXADDR_INFO_LENTHG_V1_MAX ?
		      TXADDR_INFO_LENTHG_V1_MAX : remain;
		remain -= len;

		length_option = FIELD_PREP(B_PCIADDR_LEN_V1_MASK, len) |
				FIELD_PREP(B_PCIADDR_HIGH_SEL_V1_MASK, 0) |
				FIELD_PREP(B_PCIADDR_LS_V1_MASK, remain == 0);
		txaddr_info->length_opt = cpu_to_le16(length_option);
		txaddr_info->dma_low_lsb = cpu_to_le16(FIELD_GET(GENMASK(15, 0), dma));
		txaddr_info->dma_low_msb = cpu_to_le16(FIELD_GET(GENMASK(31, 16), dma));

		dma += len;
		txaddr_info++;
	}

	WARN_ONCE(remain, "length overflow remain=%u total_len=%u",
		  remain, total_len);

	*add_info_nr = n;

	return n * sizeof(*txaddr_info);
}
EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info_v1);

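/* Build one TX work descriptor (TXWD) page for a data frame: map the skb,
 * then lay out the WD body (plus optional WD info), the WP info carrying the
 * sequence tag, and the address info entries in the pre-allocated txwd
 * buffer, and finally fill the TX descriptor itself.
 */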
static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_tx_ring *tx_ring,
				 struct rtw89_pci_tx_wd *txwd,
				 struct rtw89_core_tx_request *tx_req)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
	struct rtw89_txwd_info *txwd_info;
	struct rtw89_pci_tx_wp_info *txwp_info;
	void *txaddr_info_addr;
	struct pci_dev *pdev = rtwpci->pdev;
	struct sk_buff *skb = tx_req->skb;
	struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
	bool en_wd_info = desc_info->en_wd_info;
	u32 txwd_len;
	u32 txwp_len;
	u32 txaddr_info_len;
	dma_addr_t dma;
	int ret;

	dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma)) {
		rtw89_err(rtwdev, "failed to map skb dma data\n");
		ret = -EBUSY;
		goto err;
	}

	tx_data->dma = dma;

	txwp_len = sizeof(*txwp_info);
	txwd_len = chip->txwd_body_size;
	txwd_len += en_wd_info ? sizeof(*txwd_info) : 0;

	txwp_info = txwd->vaddr + txwd_len;
	txwp_info->seq0 = cpu_to_le16(txwd->seq | RTW89_PCI_TXWP_VALID);
	txwp_info->seq1 = 0;
	txwp_info->seq2 = 0;
	txwp_info->seq3 = 0;

	tx_ring->tx_cnt++;
	txaddr_info_addr = txwd->vaddr + txwd_len + txwp_len;
	txaddr_info_len =
		rtw89_chip_fill_txaddr_info(rtwdev, txaddr_info_addr, skb->len,
					    dma, &desc_info->addr_info_nr);

	txwd->len = txwd_len + txwp_len + txaddr_info_len;

	rtw89_chip_fill_txdesc(rtwdev, desc_info, txwd->vaddr);

	skb_queue_tail(&txwd->queue, skb);

	return 0;

err:
	return ret;
}

static int rtw89_pci_fwcmd_submit(struct rtw89_dev *rtwdev,
				  struct rtw89_pci_tx_ring *tx_ring,
				  struct rtw89_pci_tx_bd_32 *txbd,
				  struct rtw89_core_tx_request *tx_req)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
	void *txdesc;
	int txdesc_size = chip->h2c_desc_size;
	struct pci_dev *pdev = rtwpci->pdev;
	struct sk_buff *skb = tx_req->skb;
	struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
	dma_addr_t dma;

	txdesc = skb_push(skb, txdesc_size);
	memset(txdesc, 0, txdesc_size);
	rtw89_chip_fill_txdesc_fwcmd(rtwdev, desc_info, txdesc);

	dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma)) {
		rtw89_err(rtwdev, "failed to map fwcmd dma data\n");
		return -EBUSY;
	}

	tx_data->dma = dma;
	txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS);
	txbd->length = cpu_to_le16(skb->len);
	txbd->dma = cpu_to_le32(tx_data->dma);
	skb_queue_tail(&rtwpci->h2c_queue, skb);

	rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1);

	return 0;
}

static int rtw89_pci_txbd_submit(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_tx_ring *tx_ring,
				 struct rtw89_pci_tx_bd_32 *txbd,
				 struct rtw89_core_tx_request *tx_req)
{
	struct rtw89_pci_tx_wd *txwd;
	int ret;

	/* FWCMD queue doesn't have wd pages. Instead, it submits the CMD
	 * buffer with WD BODY only. So here we don't need to check the free
	 * pages of the wd ring.
	 */
	if (tx_ring->txch == RTW89_TXCH_CH12)
		return rtw89_pci_fwcmd_submit(rtwdev, tx_ring, txbd, tx_req);

	txwd = rtw89_pci_dequeue_txwd(tx_ring);
	if (!txwd) {
		rtw89_err(rtwdev, "no available TXWD\n");
		ret = -ENOSPC;
		goto err;
	}

	ret = rtw89_pci_txwd_submit(rtwdev, tx_ring, txwd, tx_req);
	if (ret) {
		rtw89_err(rtwdev, "failed to submit TXWD %d\n", txwd->seq);
		goto err_enqueue_wd;
	}

	list_add_tail(&txwd->list, &tx_ring->busy_pages);

	txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS);
	txbd->length = cpu_to_le16(txwd->len);
	txbd->dma = cpu_to_le32(txwd->paddr);

	rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1);

	return 0;

err_enqueue_wd:
	rtw89_pci_enqueue_txwd(tx_ring, txwd);
err:
	return ret;
}

static int rtw89_pci_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req,
			      u8 txch)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring;
	struct rtw89_pci_tx_bd_32 *txbd;
	u32 n_avail_txbd;
	int ret = 0;

	/* check the tx type and dma channel for fw cmd queue */
	if ((txch == RTW89_TXCH_CH12 ||
	     tx_req->tx_type == RTW89_CORE_TX_TYPE_FWCMD) &&
	    (txch != RTW89_TXCH_CH12 ||
	     tx_req->tx_type != RTW89_CORE_TX_TYPE_FWCMD)) {
		rtw89_err(rtwdev, "only fw cmd uses dma channel 12\n");
		return -EINVAL;
	}

	tx_ring = &rtwpci->tx_rings[txch];
	spin_lock_bh(&rtwpci->trx_lock);

	n_avail_txbd = rtw89_pci_get_avail_txbd_num(tx_ring);
	if (n_avail_txbd == 0) {
		rtw89_err(rtwdev, "no available TXBD\n");
		ret = -ENOSPC;
		goto err_unlock;
	}

	txbd = rtw89_pci_get_next_txbd(tx_ring);
	ret = rtw89_pci_txbd_submit(rtwdev, tx_ring, txbd, tx_req);
	if (ret) {
		rtw89_err(rtwdev, "failed to submit TXBD\n");
		goto err_unlock;
	}

	spin_unlock_bh(&rtwpci->trx_lock);
	return 0;

err_unlock:
	spin_unlock_bh(&rtwpci->trx_lock);
	return ret;
}

static int rtw89_pci_ops_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req)
{
	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
	int ret;

	ret = rtw89_pci_tx_write(rtwdev, tx_req, desc_info->ch_dma);
	if (ret) {
		rtw89_err(rtwdev, "failed to TX Queue %d\n", desc_info->ch_dma);
		return ret;
	}

	return 0;
}

static const struct rtw89_pci_bd_ram bd_ram_table[RTW89_TXCH_NUM] = {
	[RTW89_TXCH_ACH0] = {.start_idx = 0,  .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH1] = {.start_idx = 5,  .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH4] = {.start_idx = 20, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH5] = {.start_idx = 25, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH6] = {.start_idx = 30, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH7] = {.start_idx = 35, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_CH8]  = {.start_idx = 40, .max_num = 5, .min_num = 1},
	[RTW89_TXCH_CH9]  = {.start_idx = 45, .max_num = 5, .min_num = 1},
	[RTW89_TXCH_CH10] = {.start_idx = 50, .max_num = 5, .min_num = 1},
	[RTW89_TXCH_CH11] = {.start_idx = 55, .max_num = 5, .min_num = 1},
	[RTW89_TXCH_CH12] = {.start_idx = 60, .max_num = 4, .min_num = 1},
};

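/* Reset all TX/RX rings: rewind the software read/write pointers, program
 * each ring's length and descriptor base address, and carve up the on-chip
 * BDRAM between the TX channels according to bd_ram_table (start index plus
 * max/min number of entries per channel).
 */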
static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring;
	struct rtw89_pci_rx_ring *rx_ring;
	struct rtw89_pci_dma_ring *bd_ring;
	const struct rtw89_pci_bd_ram *bd_ram;
	u32 addr_num;
	u32 addr_bdram;
	u32 addr_desa_l;
	u32 val32;
	int i;

	for (i = 0; i < RTW89_TXCH_NUM; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		bd_ring = &tx_ring->bd_ring;
		bd_ram = &bd_ram_table[i];
		addr_num = bd_ring->addr.num;
		addr_bdram = bd_ring->addr.bdram;
		addr_desa_l = bd_ring->addr.desa_l;
		bd_ring->wp = 0;
		bd_ring->rp = 0;

		val32 = FIELD_PREP(BDRAM_SIDX_MASK, bd_ram->start_idx) |
			FIELD_PREP(BDRAM_MAX_MASK, bd_ram->max_num) |
			FIELD_PREP(BDRAM_MIN_MASK, bd_ram->min_num);

		rtw89_write16(rtwdev, addr_num, bd_ring->len);
		rtw89_write32(rtwdev, addr_bdram, val32);
		rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
	}

	for (i = 0; i < RTW89_RXCH_NUM; i++) {
		rx_ring = &rtwpci->rx_rings[i];
		bd_ring = &rx_ring->bd_ring;
		addr_num = bd_ring->addr.num;
		addr_desa_l = bd_ring->addr.desa_l;
		bd_ring->wp = 0;
		bd_ring->rp = 0;
		rx_ring->diliver_skb = NULL;
		rx_ring->diliver_desc.ready = false;

		rtw89_write16(rtwdev, addr_num, bd_ring->len);
		rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
	}
}

static void rtw89_pci_release_tx_ring(struct rtw89_dev *rtwdev,
				      struct rtw89_pci_tx_ring *tx_ring)
{
	rtw89_pci_release_busy_txwd(rtwdev, tx_ring);
	rtw89_pci_release_pending_txwd_skb(rtwdev, tx_ring);
}

static void rtw89_pci_ops_reset(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	int txch;

	rtw89_pci_reset_trx_rings(rtwdev);

	spin_lock_bh(&rtwpci->trx_lock);
	for (txch = 0; txch < RTW89_TXCH_NUM; txch++) {
		if (txch == RTW89_TXCH_CH12) {
			rtw89_pci_release_fwcmd(rtwdev, rtwpci,
						skb_queue_len(&rtwpci->h2c_queue), true);
			continue;
		}
		rtw89_pci_release_tx_ring(rtwdev, &rtwpci->tx_rings[txch]);
	}
	spin_unlock_bh(&rtwpci->trx_lock);
}

static void rtw89_pci_enable_intr_lock(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtwpci->running = true;
	rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static void rtw89_pci_disable_intr_lock(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtwpci->running = false;
	rtw89_chip_disable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static int rtw89_pci_ops_start(struct rtw89_dev *rtwdev)
{
	rtw89_core_napi_start(rtwdev);
	rtw89_pci_enable_intr_lock(rtwdev);

	return 0;
}

static void rtw89_pci_ops_stop(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;

	rtw89_pci_disable_intr_lock(rtwdev);
	synchronize_irq(pdev->irq);
	rtw89_core_napi_stop(rtwdev);
}

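/* Pause/unpause the HCI: pausing disables interrupts and waits for the IRQ
 * and NAPI contexts to drain; unpausing re-enables interrupts and issues any
 * TX kick-offs that were deferred via kick_map while paused.
 */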
static void rtw89_pci_ops_pause(struct rtw89_dev *rtwdev, bool pause)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;

	if (pause) {
		rtw89_pci_disable_intr_lock(rtwdev);
		synchronize_irq(pdev->irq);
		if (test_bit(RTW89_FLAG_NAPI_RUNNING, rtwdev->flags))
			napi_synchronize(&rtwdev->napi);
	} else {
		rtw89_pci_enable_intr_lock(rtwdev);
		rtw89_pci_tx_kick_off_pending(rtwdev);
	}
}

static
void rtw89_pci_switch_bd_idx_addr(struct rtw89_dev *rtwdev, bool low_power)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	const struct rtw89_pci_bd_idx_addr *bd_idx_addr = info->bd_idx_addr_low_power;
	const struct rtw89_pci_ch_dma_addr_set *dma_addr_set = info->dma_addr_set;
	struct rtw89_pci_tx_ring *tx_ring;
	struct rtw89_pci_rx_ring *rx_ring;
	int i;

	if (WARN(!bd_idx_addr, "only HCI with low power mode needs this\n"))
		return;

	for (i = 0; i < RTW89_TXCH_NUM; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		tx_ring->bd_ring.addr.idx = low_power ?
					    bd_idx_addr->tx_bd_addrs[i] :
					    dma_addr_set->tx[i].idx;
	}

	for (i = 0; i < RTW89_RXCH_NUM; i++) {
		rx_ring = &rtwpci->rx_rings[i];
		rx_ring->bd_ring.addr.idx = low_power ?
					    bd_idx_addr->rx_bd_addrs[i] :
					    dma_addr_set->rx[i].idx;
	}
}

static void rtw89_pci_ops_switch_mode(struct rtw89_dev *rtwdev, bool low_power)
{
	enum rtw89_pci_intr_mask_cfg cfg;

	WARN(!rtwdev->hci.paused, "HCI isn't paused\n");

	cfg = low_power ? RTW89_PCI_INTR_MASK_LOW_POWER : RTW89_PCI_INTR_MASK_NORMAL;
	rtw89_chip_config_intr_mask(rtwdev, cfg);
	rtw89_pci_switch_bd_idx_addr(rtwdev, low_power);
}

static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data);

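/* CMAC register reads can return RTW89_R32_DEAD while the CMAC clocks are
 * gated; when that happens, force all CMAC clocks on via R_AX_CK_EN and
 * retry, giving up after MAC_REG_POOL_COUNT attempts.
 */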
static u32 rtw89_pci_ops_read32_cmac(struct rtw89_dev *rtwdev, u32 addr)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	u32 val = readl(rtwpci->mmap + addr);
	int count;

	for (count = 0; ; count++) {
		if (val != RTW89_R32_DEAD)
			return val;
		if (count >= MAC_REG_POOL_COUNT) {
			rtw89_warn(rtwdev, "addr %#x = %#x\n", addr, val);
			return RTW89_R32_DEAD;
		}
		rtw89_pci_ops_write32(rtwdev, R_AX_CK_EN, B_AX_CMAC_ALLCKEN);
		val = readl(rtwpci->mmap + addr);
	}

	return val;
}

static u8 rtw89_pci_ops_read8(struct rtw89_dev *rtwdev, u32 addr)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	u32 addr32, val32, shift;

	if (!ACCESS_CMAC(addr))
		return readb(rtwpci->mmap + addr);

	addr32 = addr & ~0x3;
	shift = (addr & 0x3) * 8;
	val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32);
	return val32 >> shift;
}

static u16 rtw89_pci_ops_read16(struct rtw89_dev *rtwdev, u32 addr)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	u32 addr32, val32, shift;

	if (!ACCESS_CMAC(addr))
		return readw(rtwpci->mmap + addr);

	addr32 = addr & ~0x3;
	shift = (addr & 0x3) * 8;
	val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32);
	return val32 >> shift;
}

static u32 rtw89_pci_ops_read32(struct rtw89_dev *rtwdev, u32 addr)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;

	if (!ACCESS_CMAC(addr))
		return readl(rtwpci->mmap + addr);

	return rtw89_pci_ops_read32_cmac(rtwdev, addr);
}

static void rtw89_pci_ops_write8(struct rtw89_dev *rtwdev, u32 addr, u8 data)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;

	writeb(data, rtwpci->mmap + addr);
}

static void rtw89_pci_ops_write16(struct rtw89_dev *rtwdev, u32 addr, u16 data)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;

	writew(data, rtwpci->mmap + addr);
}

static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;

	writel(data, rtwpci->mmap + addr);
}

static void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable)
{
	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	u32 txhci_en = info->txhci_en_bit;
	u32 rxhci_en = info->rxhci_en_bit;

	if (enable) {
		if (chip_id != RTL8852C)
			rtw89_write32_clr(rtwdev, info->dma_stop1_reg,
					  B_AX_STOP_PCIEIO);
		rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
				  txhci_en | rxhci_en);
		if (chip_id == RTL8852C)
			rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1,
					  B_AX_STOP_AXI_MST);
	} else {
		if (chip_id != RTL8852C)
			rtw89_write32_set(rtwdev, info->dma_stop1_reg,
					  B_AX_STOP_PCIEIO);
		else
			rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1,
					  B_AX_STOP_AXI_MST);
		rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1,
				  txhci_en | rxhci_en);
		if (chip_id == RTL8852C)
			rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
					  B_AX_STOP_AXI_MST);
	}
}

static int rtw89_pci_check_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 rw_bit)
{
	u16 val;

	rtw89_write8(rtwdev, R_AX_MDIO_CFG, addr & 0x1F);

	val = rtw89_read16(rtwdev, R_AX_MDIO_CFG);
	switch (speed) {
	case PCIE_PHY_GEN1:
		if (addr < 0x20)
			val = u16_replace_bits(val, MDIO_PG0_G1, B_AX_MDIO_PHY_ADDR_MASK);
		else
			val = u16_replace_bits(val, MDIO_PG1_G1, B_AX_MDIO_PHY_ADDR_MASK);
		break;
	case PCIE_PHY_GEN2:
		if (addr < 0x20)
			val = u16_replace_bits(val, MDIO_PG0_G2, B_AX_MDIO_PHY_ADDR_MASK);
		else
			val = u16_replace_bits(val, MDIO_PG1_G2, B_AX_MDIO_PHY_ADDR_MASK);
		break;
	default:
		rtw89_err(rtwdev, "[ERR]Error Speed %d!\n", speed);
		return -EINVAL;
	}
	rtw89_write16(rtwdev, R_AX_MDIO_CFG, val);
	rtw89_write16_set(rtwdev, R_AX_MDIO_CFG, rw_bit);

	return read_poll_timeout(rtw89_read16, val, !(val & rw_bit), 10, 2000,
				 false, rtwdev, R_AX_MDIO_CFG);
}

static int
rtw89_read16_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 *val)
{
	int ret;

	ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_RFLAG);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]MDIO R16 0x%X fail ret=%d!\n", addr, ret);
		return ret;
	}
	*val = rtw89_read16(rtwdev, R_AX_MDIO_RDATA);

	return 0;
}

static int
rtw89_write16_mdio(struct rtw89_dev *rtwdev, u8 addr, u16 data, u8 speed)
{
	int ret;

	rtw89_write16(rtwdev, R_AX_MDIO_WDATA, data);
	ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_WFLAG);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]MDIO W16 0x%X = %x fail ret=%d!\n", addr, data, ret);
		return ret;
	}

	return 0;
}

static int
rtw89_write16_mdio_mask(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u16 data, u8 speed)
{
	u32 shift;
	int ret;
	u16 val;

	ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
	if (ret)
		return ret;

	shift = __ffs(mask);
	val &= ~mask;
	val |= ((data << shift) & mask);

	ret = rtw89_write16_mdio(rtwdev, addr, val, speed);
	if (ret)
		return ret;

	return 0;
}

static int rtw89_write16_mdio_set(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed)
{
	int ret;
	u16 val;

	ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
	if (ret)
		return ret;
	ret = rtw89_write16_mdio(rtwdev, addr, val | mask, speed);
	if (ret)
		return ret;

	return 0;
}

static int rtw89_write16_mdio_clr(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed)
{
	int ret;
	u16 val;

	ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
	if (ret)
		return ret;
	ret = rtw89_write16_mdio(rtwdev, addr, val & ~mask, speed);
	if (ret)
		return ret;

	return 0;
}

static int rtw89_pci_write_config_byte(struct rtw89_dev *rtwdev, u16 addr,
					u8 data)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;

	return pci_write_config_byte(pdev, addr, data);
}

static int rtw89_pci_read_config_byte(struct rtw89_dev *rtwdev, u16 addr,
				       u8 *value)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;

	return pci_read_config_byte(pdev, addr, value);
}

static int rtw89_pci_config_byte_set(struct rtw89_dev *rtwdev, u16 addr,
				     u8 bit)
{
	u8 value;
	int ret;

	ret = rtw89_pci_read_config_byte(rtwdev, addr, &value);
	if (ret)
		return ret;

	value |= bit;
	ret = rtw89_pci_write_config_byte(rtwdev, addr, value);

	return ret;
}

static int rtw89_pci_config_byte_clr(struct rtw89_dev *rtwdev, u16 addr,
				     u8 bit)
{
	u8 value;
	int ret;

	ret = rtw89_pci_read_config_byte(rtwdev, addr, &value);
	if (ret)
		return ret;

	value &= ~bit;
	ret = rtw89_pci_write_config_byte(rtwdev, addr, value);

	return ret;
}

static int
__get_target(struct rtw89_dev *rtwdev, u16 *target, enum rtw89_pcie_phy phy_rate)
{
	u16 val, tar;
	int ret;

	/* Enable counter */
	ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val);
	if (ret)
		return ret;
	ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN,
				 phy_rate);
	if (ret)
		return ret;
	ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val | B_AX_CLK_CALIB_EN,
				 phy_rate);
	if (ret)
		return ret;

	fsleep(300);

	ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &tar);
	if (ret)
		return ret;
	ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN,
				 phy_rate);
	if (ret)
		return ret;

	tar = tar & 0x0FFF;
	if (tar == 0 || tar == 0x0FFF) {
		rtw89_err(rtwdev, "[ERR]Get target failed.\n");
		return -EINVAL;
	}

	*target = tar;

	return 0;
}

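/* Reference clock auto-calibration (8852B only): read the calibration
 * counter through the PCIe PHY MDIO interface, derive a divider and margin
 * from the measured target, program them into RAC_SET_PPR_V1, and re-enable
 * the calibration function.  ASPM L1 is temporarily disabled around the
 * measurement and restored at the end.
 */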
static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en)
{
	enum rtw89_pcie_phy phy_rate;
	u16 val16, mgn_set, div_set, tar;
	u8 val8, bdr_ori;
	bool l1_flag = false;
	int ret = 0;

	if (rtwdev->chip->chip_id != RTL8852B)
		return 0;

	ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_PHY_RATE, &val8);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]pci config read %X\n",
			  RTW89_PCIE_PHY_RATE);
		return ret;
	}

	if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x1) {
		phy_rate = PCIE_PHY_GEN1;
	} else if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x2) {
		phy_rate = PCIE_PHY_GEN2;
	} else {
		rtw89_err(rtwdev, "[ERR]PCIe PHY rate %#x not support\n", val8);
		return -EOPNOTSUPP;
	}
	/* Disable L1BD */
	ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, &bdr_ori);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]pci config read %X\n", RTW89_PCIE_L1_CTRL);
		return ret;
	}

	if (bdr_ori & RTW89_PCIE_BIT_L1) {
		ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL,
						  bdr_ori & ~RTW89_PCIE_BIT_L1);
		if (ret) {
			rtw89_err(rtwdev, "[ERR]pci config write %X\n",
				  RTW89_PCIE_L1_CTRL);
			return ret;
		}
		l1_flag = true;
	}

	ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1);
		goto end;
	}

	if (val16 & B_AX_CALIB_EN) {
		ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1,
					 val16 & ~B_AX_CALIB_EN, phy_rate);
		if (ret) {
			rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
			goto end;
		}
	}

	if (!autook_en)
		goto end;
	/* Set div */
	ret = rtw89_write16_mdio_clr(rtwdev, RAC_CTRL_PPR_V1, B_AX_DIV, phy_rate);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
		goto end;
	}

	/* Obtain div and margin */
	ret = __get_target(rtwdev, &tar, phy_rate);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]1st get target fail %d\n", ret);
		goto end;
	}

	mgn_set = tar * INTF_INTGRA_HOSTREF_V1 / INTF_INTGRA_MINREF_V1 - tar;

	if (mgn_set >= 128) {
		div_set = 0x0003;
		mgn_set = 0x000F;
	} else if (mgn_set >= 64) {
		div_set = 0x0003;
		mgn_set >>= 3;
	} else if (mgn_set >= 32) {
		div_set = 0x0002;
		mgn_set >>= 2;
	} else if (mgn_set >= 16) {
		div_set = 0x0001;
		mgn_set >>= 1;
	} else if (mgn_set == 0) {
		rtw89_err(rtwdev, "[ERR]cal mgn is 0,tar = %d\n", tar);
		goto end;
	} else {
		div_set = 0x0000;
	}

	ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1);
		goto end;
	}

	val16 |= u16_encode_bits(div_set, B_AX_DIV);

	ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val16, phy_rate);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
		goto end;
	}

	ret = __get_target(rtwdev, &tar, phy_rate);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]2nd get target fail %d\n", ret);
		goto end;
	}

	rtw89_debug(rtwdev, RTW89_DBG_HCI, "[TRACE]target = 0x%X, div = 0x%X, margin = 0x%X\n",
		    tar, div_set, mgn_set);
	ret = rtw89_write16_mdio(rtwdev, RAC_SET_PPR_V1,
				 (tar & 0x0FFF) | (mgn_set << 12), phy_rate);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_SET_PPR_V1);
		goto end;
	}

	/* Enable function */
	ret = rtw89_write16_mdio_set(rtwdev, RAC_CTRL_PPR_V1, B_AX_CALIB_EN, phy_rate);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
		goto end;
	}

	/* CLK delay = 0 */
	ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL,
					  PCIE_CLKDLY_HW_0);

end:
	/* Set L1BD to ori */
	if (l1_flag) {
		ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL,
						  bdr_ori);
		if (ret) {
			rtw89_err(rtwdev, "[ERR]pci config write %X\n",
				  RTW89_PCIE_L1_CTRL);
			return ret;
		}
	}

	return ret;
}

RAC_CTRL_PPR_V1, B_AX_CALIB_EN, phy_rate); 1971 if (ret) { 1972 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 1973 goto end; 1974 } 1975 1976 /* CLK delay = 0 */ 1977 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL, 1978 PCIE_CLKDLY_HW_0); 1979 1980 end: 1981 /* Set L1BD to ori */ 1982 if (l1_flag) { 1983 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, 1984 bdr_ori); 1985 if (ret) { 1986 rtw89_err(rtwdev, "[ERR]pci config write %X\n", 1987 RTW89_PCIE_L1_CTRL); 1988 return ret; 1989 } 1990 } 1991 1992 return ret; 1993 } 1994 1995 static int rtw89_pci_deglitch_setting(struct rtw89_dev *rtwdev) 1996 { 1997 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 1998 int ret; 1999 2000 if (chip_id == RTL8852A) { 2001 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH, 2002 PCIE_PHY_GEN1); 2003 if (ret) 2004 return ret; 2005 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH, 2006 PCIE_PHY_GEN2); 2007 if (ret) 2008 return ret; 2009 } else if (chip_id == RTL8852C) { 2010 rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA24 * 2, 2011 B_AX_DEGLITCH); 2012 rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA24 * 2, 2013 B_AX_DEGLITCH); 2014 } 2015 2016 return 0; 2017 } 2018 2019 static void rtw89_pci_rxdma_prefth(struct rtw89_dev *rtwdev) 2020 { 2021 if (rtwdev->chip->chip_id != RTL8852A) 2022 return; 2023 2024 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_DIS_RXDMA_PRE); 2025 } 2026 2027 static void rtw89_pci_l1off_pwroff(struct rtw89_dev *rtwdev) 2028 { 2029 if (rtwdev->chip->chip_id != RTL8852A && rtwdev->chip->chip_id != RTL8852B) 2030 return; 2031 2032 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL, B_AX_L1OFF_PWR_OFF_EN); 2033 } 2034 2035 static u32 rtw89_pci_l2_rxen_lat(struct rtw89_dev *rtwdev) 2036 { 2037 int ret; 2038 2039 if (rtwdev->chip->chip_id != RTL8852A) 2040 return 0; 2041 2042 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN, 2043 PCIE_PHY_GEN1); 2044 if (ret) 2045 return ret; 2046 2047 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN, 2048 PCIE_PHY_GEN2); 2049 if (ret) 2050 return ret; 2051 2052 return 0; 2053 } 2054 2055 static void rtw89_pci_aphy_pwrcut(struct rtw89_dev *rtwdev) 2056 { 2057 if (rtwdev->chip->chip_id != RTL8852A) 2058 return; 2059 2060 rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_PSUS_OFF_CAPC_EN); 2061 } 2062 2063 static void rtw89_pci_hci_ldo(struct rtw89_dev *rtwdev) 2064 { 2065 if (rtwdev->chip->chip_id == RTL8852A || 2066 rtwdev->chip->chip_id == RTL8852B) { 2067 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, 2068 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 2069 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, 2070 B_AX_PCIE_DIS_WLSUS_AFT_PDN); 2071 } else if (rtwdev->chip->chip_id == RTL8852C) { 2072 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, 2073 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 2074 } 2075 } 2076 2077 static int rtw89_pci_dphy_delay(struct rtw89_dev *rtwdev) 2078 { 2079 if (rtwdev->chip->chip_id != RTL8852B) 2080 return 0; 2081 2082 return rtw89_write16_mdio_mask(rtwdev, RAC_REG_REV2, BAC_CMU_EN_DLY_MASK, 2083 PCIE_DPHY_DLY_25US, PCIE_PHY_GEN1); 2084 } 2085 2086 static void rtw89_pci_power_wake(struct rtw89_dev *rtwdev, bool pwr_up) 2087 { 2088 if (pwr_up) 2089 rtw89_write32_set(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL); 2090 else 2091 rtw89_write32_clr(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL); 2092 } 2093 2094 static void rtw89_pci_autoload_hang(struct rtw89_dev *rtwdev) 2095 { 2096 if (rtwdev->chip->chip_id != RTL8852C) 2097 return; 2098 2099 
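	/* Toggle B_AX_BG_CLR_ASYNC_M3 (set, then immediately clear); this
	 * short pulse is the RTL8852C workaround for the autoload hang that
	 * the function name refers to.
	 */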
rtw89_write32_set(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3); 2100 rtw89_write32_clr(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3); 2101 } 2102 2103 static void rtw89_pci_l12_vmain(struct rtw89_dev *rtwdev) 2104 { 2105 if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV)) 2106 return; 2107 2108 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_FORCE_PWR_NGAT); 2109 } 2110 2111 static void rtw89_pci_gen2_force_ib(struct rtw89_dev *rtwdev) 2112 { 2113 if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV)) 2114 return; 2115 2116 rtw89_write32_set(rtwdev, R_AX_PMC_DBG_CTRL2, 2117 B_AX_SYSON_DIS_PMCR_AX_WRMSK); 2118 rtw89_write32_set(rtwdev, R_AX_HCI_BG_CTRL, B_AX_BG_CLR_ASYNC_M3); 2119 rtw89_write32_clr(rtwdev, R_AX_PMC_DBG_CTRL2, 2120 B_AX_SYSON_DIS_PMCR_AX_WRMSK); 2121 } 2122 2123 static void rtw89_pci_l1_ent_lat(struct rtw89_dev *rtwdev) 2124 { 2125 if (rtwdev->chip->chip_id != RTL8852C) 2126 return; 2127 2128 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_SEL_REQ_ENTR_L1); 2129 } 2130 2131 static void rtw89_pci_wd_exit_l1(struct rtw89_dev *rtwdev) 2132 { 2133 if (rtwdev->chip->chip_id != RTL8852C) 2134 return; 2135 2136 rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_DMAC0_EXIT_L1_EN); 2137 } 2138 2139 static void rtw89_pci_set_sic(struct rtw89_dev *rtwdev) 2140 { 2141 if (rtwdev->chip->chip_id == RTL8852C) 2142 return; 2143 2144 rtw89_write32_clr(rtwdev, R_AX_PCIE_EXP_CTRL, 2145 B_AX_SIC_EN_FORCE_CLKREQ); 2146 } 2147 2148 static void rtw89_pci_set_lbc(struct rtw89_dev *rtwdev) 2149 { 2150 const struct rtw89_pci_info *info = rtwdev->pci_info; 2151 u32 lbc; 2152 2153 if (rtwdev->chip->chip_id == RTL8852C) 2154 return; 2155 2156 lbc = rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG); 2157 if (info->lbc_en == MAC_AX_PCIE_ENABLE) { 2158 lbc = u32_replace_bits(lbc, info->lbc_tmr, B_AX_LBC_TIMER); 2159 lbc |= B_AX_LBC_FLAG | B_AX_LBC_EN; 2160 rtw89_write32(rtwdev, R_AX_LBC_WATCHDOG, lbc); 2161 } else { 2162 lbc &= ~B_AX_LBC_EN; 2163 } 2164 rtw89_write32_set(rtwdev, R_AX_LBC_WATCHDOG, lbc); 2165 } 2166 2167 static void rtw89_pci_set_io_rcy(struct rtw89_dev *rtwdev) 2168 { 2169 const struct rtw89_pci_info *info = rtwdev->pci_info; 2170 u32 val32; 2171 2172 if (rtwdev->chip->chip_id != RTL8852C) 2173 return; 2174 2175 if (info->io_rcy_en == MAC_AX_PCIE_ENABLE) { 2176 val32 = FIELD_PREP(B_AX_PCIE_WDT_TIMER_M1_MASK, 2177 info->io_rcy_tmr); 2178 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M1, val32); 2179 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M2, val32); 2180 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_E0, val32); 2181 2182 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1); 2183 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2); 2184 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0); 2185 } else { 2186 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1); 2187 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2); 2188 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0); 2189 } 2190 2191 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_S1, B_AX_PCIE_IO_RCY_WDT_MODE_S1); 2192 } 2193 2194 static void rtw89_pci_set_dbg(struct rtw89_dev *rtwdev) 2195 { 2196 if (rtwdev->chip->chip_id == RTL8852C) 2197 return; 2198 2199 rtw89_write32_set(rtwdev, R_AX_PCIE_DBG_CTRL, 2200 B_AX_ASFF_FULL_NO_STK | B_AX_EN_STUCK_DBG); 2201 2202 if (rtwdev->chip->chip_id == RTL8852A) 2203 rtw89_write32_set(rtwdev, R_AX_PCIE_EXP_CTRL, 
2204 B_AX_EN_CHKDSC_NO_RX_STUCK); 2205 } 2206 2207 static void rtw89_pci_set_keep_reg(struct rtw89_dev *rtwdev) 2208 { 2209 if (rtwdev->chip->chip_id == RTL8852C) 2210 return; 2211 2212 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, 2213 B_AX_PCIE_TXRST_KEEP_REG | B_AX_PCIE_RXRST_KEEP_REG); 2214 } 2215 2216 static void rtw89_pci_clr_idx_all(struct rtw89_dev *rtwdev) 2217 { 2218 const struct rtw89_pci_info *info = rtwdev->pci_info; 2219 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2220 u32 val = B_AX_CLR_ACH0_IDX | B_AX_CLR_ACH1_IDX | B_AX_CLR_ACH2_IDX | 2221 B_AX_CLR_ACH3_IDX | B_AX_CLR_CH8_IDX | B_AX_CLR_CH9_IDX | 2222 B_AX_CLR_CH12_IDX; 2223 u32 rxbd_rwptr_clr = info->rxbd_rwptr_clr_reg; 2224 u32 txbd_rwptr_clr2 = info->txbd_rwptr_clr2_reg; 2225 2226 if (chip_id == RTL8852A || chip_id == RTL8852C) 2227 val |= B_AX_CLR_ACH4_IDX | B_AX_CLR_ACH5_IDX | 2228 B_AX_CLR_ACH6_IDX | B_AX_CLR_ACH7_IDX; 2229 /* clear DMA indexes */ 2230 rtw89_write32_set(rtwdev, R_AX_TXBD_RWPTR_CLR1, val); 2231 if (chip_id == RTL8852A || chip_id == RTL8852C) 2232 rtw89_write32_set(rtwdev, txbd_rwptr_clr2, 2233 B_AX_CLR_CH10_IDX | B_AX_CLR_CH11_IDX); 2234 rtw89_write32_set(rtwdev, rxbd_rwptr_clr, 2235 B_AX_CLR_RXQ_IDX | B_AX_CLR_RPQ_IDX); 2236 } 2237 2238 static int rtw89_poll_txdma_ch_idle_pcie(struct rtw89_dev *rtwdev) 2239 { 2240 const struct rtw89_pci_info *info = rtwdev->pci_info; 2241 u32 ret, check, dma_busy; 2242 u32 dma_busy1 = info->dma_busy1_reg; 2243 u32 dma_busy2 = info->dma_busy2_reg; 2244 2245 check = B_AX_ACH0_BUSY | B_AX_ACH1_BUSY | B_AX_ACH2_BUSY | 2246 B_AX_ACH3_BUSY | B_AX_ACH4_BUSY | B_AX_ACH5_BUSY | 2247 B_AX_ACH6_BUSY | B_AX_ACH7_BUSY | B_AX_CH8_BUSY | 2248 B_AX_CH9_BUSY | B_AX_CH12_BUSY; 2249 2250 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, 2251 10, 100, false, rtwdev, dma_busy1); 2252 if (ret) 2253 return ret; 2254 2255 check = B_AX_CH10_BUSY | B_AX_CH11_BUSY; 2256 2257 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, 2258 10, 100, false, rtwdev, dma_busy2); 2259 if (ret) 2260 return ret; 2261 2262 return 0; 2263 } 2264 2265 static int rtw89_poll_rxdma_ch_idle_pcie(struct rtw89_dev *rtwdev) 2266 { 2267 const struct rtw89_pci_info *info = rtwdev->pci_info; 2268 u32 ret, check, dma_busy; 2269 u32 dma_busy3 = info->dma_busy3_reg; 2270 2271 check = B_AX_RXQ_BUSY | B_AX_RPQ_BUSY; 2272 2273 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, 2274 10, 100, false, rtwdev, dma_busy3); 2275 if (ret) 2276 return ret; 2277 2278 return 0; 2279 } 2280 2281 static int rtw89_pci_poll_dma_all_idle(struct rtw89_dev *rtwdev) 2282 { 2283 u32 ret; 2284 2285 ret = rtw89_poll_txdma_ch_idle_pcie(rtwdev); 2286 if (ret) { 2287 rtw89_err(rtwdev, "txdma ch busy\n"); 2288 return ret; 2289 } 2290 2291 ret = rtw89_poll_rxdma_ch_idle_pcie(rtwdev); 2292 if (ret) { 2293 rtw89_err(rtwdev, "rxdma ch busy\n"); 2294 return ret; 2295 } 2296 2297 return 0; 2298 } 2299 2300 static int rtw89_pci_mode_op(struct rtw89_dev *rtwdev) 2301 { 2302 const struct rtw89_pci_info *info = rtwdev->pci_info; 2303 enum mac_ax_bd_trunc_mode txbd_trunc_mode = info->txbd_trunc_mode; 2304 enum mac_ax_bd_trunc_mode rxbd_trunc_mode = info->rxbd_trunc_mode; 2305 enum mac_ax_rxbd_mode rxbd_mode = info->rxbd_mode; 2306 enum mac_ax_tag_mode tag_mode = info->tag_mode; 2307 enum mac_ax_wd_dma_intvl wd_dma_idle_intvl = info->wd_dma_idle_intvl; 2308 enum mac_ax_wd_dma_intvl wd_dma_act_intvl = info->wd_dma_act_intvl; 2309 enum mac_ax_tx_burst tx_burst = info->tx_burst; 2310 enum 
mac_ax_rx_burst rx_burst = info->rx_burst; 2311 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2312 u8 cv = rtwdev->hal.cv; 2313 u32 val32; 2314 2315 if (txbd_trunc_mode == MAC_AX_BD_TRUNC) { 2316 if (chip_id == RTL8852A && cv == CHIP_CBV) 2317 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE); 2318 } else if (txbd_trunc_mode == MAC_AX_BD_NORM) { 2319 if (chip_id == RTL8852A || chip_id == RTL8852B) 2320 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE); 2321 } 2322 2323 if (rxbd_trunc_mode == MAC_AX_BD_TRUNC) { 2324 if (chip_id == RTL8852A && cv == CHIP_CBV) 2325 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE); 2326 } else if (rxbd_trunc_mode == MAC_AX_BD_NORM) { 2327 if (chip_id == RTL8852A || chip_id == RTL8852B) 2328 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE); 2329 } 2330 2331 if (rxbd_mode == MAC_AX_RXBD_PKT) { 2332 rtw89_write32_clr(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit); 2333 } else if (rxbd_mode == MAC_AX_RXBD_SEP) { 2334 rtw89_write32_set(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit); 2335 2336 if (chip_id == RTL8852A || chip_id == RTL8852B) 2337 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, 2338 B_AX_PCIE_RX_APPLEN_MASK, 0); 2339 } 2340 2341 if (chip_id == RTL8852A || chip_id == RTL8852B) { 2342 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_TXDMA_MASK, tx_burst); 2343 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_RXDMA_MASK, rx_burst); 2344 } else if (chip_id == RTL8852C) { 2345 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_TXDMA_MASK, tx_burst); 2346 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_RXDMA_MASK, rx_burst); 2347 } 2348 2349 if (chip_id == RTL8852A || chip_id == RTL8852B) { 2350 if (tag_mode == MAC_AX_TAG_SGL) { 2351 val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) & 2352 ~B_AX_LATENCY_CONTROL; 2353 rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 2354 } else if (tag_mode == MAC_AX_TAG_MULTI) { 2355 val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) | 2356 B_AX_LATENCY_CONTROL; 2357 rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 2358 } 2359 } 2360 2361 rtw89_write32_mask(rtwdev, info->exp_ctrl_reg, info->max_tag_num_mask, 2362 info->multi_tag_num); 2363 2364 if (chip_id == RTL8852A || chip_id == RTL8852B) { 2365 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_IDLE, 2366 wd_dma_idle_intvl); 2367 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_ACT, 2368 wd_dma_act_intvl); 2369 } else if (chip_id == RTL8852C) { 2370 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_IDLE_V1_MASK, 2371 wd_dma_idle_intvl); 2372 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_ACT_V1_MASK, 2373 wd_dma_act_intvl); 2374 } 2375 2376 if (txbd_trunc_mode == MAC_AX_BD_TRUNC) { 2377 rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, 2378 B_AX_HOST_ADDR_INFO_8B_SEL); 2379 rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); 2380 } else if (txbd_trunc_mode == MAC_AX_BD_NORM) { 2381 rtw89_write32_clr(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, 2382 B_AX_HOST_ADDR_INFO_8B_SEL); 2383 rtw89_write32_set(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); 2384 } 2385 2386 return 0; 2387 } 2388 2389 static int rtw89_pci_ops_deinit(struct rtw89_dev *rtwdev) 2390 { 2391 const struct rtw89_pci_info *info = rtwdev->pci_info; 2392 2393 if (rtwdev->chip->chip_id == RTL8852A) { 2394 /* ltr sw trigger */ 2395 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, 
B_AX_APP_LTR_IDLE); 2396 } 2397 info->ltr_set(rtwdev, false); 2398 rtw89_pci_ctrl_dma_all(rtwdev, false); 2399 rtw89_pci_clr_idx_all(rtwdev); 2400 2401 return 0; 2402 } 2403 2404 static int rtw89_pci_ops_mac_pre_init(struct rtw89_dev *rtwdev) 2405 { 2406 const struct rtw89_pci_info *info = rtwdev->pci_info; 2407 int ret; 2408 2409 rtw89_pci_rxdma_prefth(rtwdev); 2410 rtw89_pci_l1off_pwroff(rtwdev); 2411 rtw89_pci_deglitch_setting(rtwdev); 2412 ret = rtw89_pci_l2_rxen_lat(rtwdev); 2413 if (ret) { 2414 rtw89_err(rtwdev, "[ERR] pcie l2 rxen lat %d\n", ret); 2415 return ret; 2416 } 2417 2418 rtw89_pci_aphy_pwrcut(rtwdev); 2419 rtw89_pci_hci_ldo(rtwdev); 2420 rtw89_pci_dphy_delay(rtwdev); 2421 2422 ret = rtw89_pci_auto_refclk_cal(rtwdev, false); 2423 if (ret) { 2424 rtw89_err(rtwdev, "[ERR] pcie autok fail %d\n", ret); 2425 return ret; 2426 } 2427 2428 rtw89_pci_power_wake(rtwdev, true); 2429 rtw89_pci_autoload_hang(rtwdev); 2430 rtw89_pci_l12_vmain(rtwdev); 2431 rtw89_pci_gen2_force_ib(rtwdev); 2432 rtw89_pci_l1_ent_lat(rtwdev); 2433 rtw89_pci_wd_exit_l1(rtwdev); 2434 rtw89_pci_set_sic(rtwdev); 2435 rtw89_pci_set_lbc(rtwdev); 2436 rtw89_pci_set_io_rcy(rtwdev); 2437 rtw89_pci_set_dbg(rtwdev); 2438 rtw89_pci_set_keep_reg(rtwdev); 2439 2440 rtw89_write32_set(rtwdev, info->dma_stop1_reg, B_AX_STOP_WPDMA); 2441 2442 /* stop DMA activities */ 2443 rtw89_pci_ctrl_dma_all(rtwdev, false); 2444 2445 ret = rtw89_pci_poll_dma_all_idle(rtwdev); 2446 if (ret) { 2447 rtw89_err(rtwdev, "[ERR] poll pcie dma all idle\n"); 2448 return ret; 2449 } 2450 2451 rtw89_pci_clr_idx_all(rtwdev); 2452 rtw89_pci_mode_op(rtwdev); 2453 2454 /* fill TRX BD indexes */ 2455 rtw89_pci_ops_reset(rtwdev); 2456 2457 ret = rtw89_pci_rst_bdram_pcie(rtwdev); 2458 if (ret) { 2459 rtw89_warn(rtwdev, "reset bdram busy\n"); 2460 return ret; 2461 } 2462 2463 /* enable FW CMD queue to download firmware */ 2464 rtw89_write32_set(rtwdev, info->dma_stop1_reg, B_AX_TX_STOP1_ALL); 2465 rtw89_write32_clr(rtwdev, info->dma_stop1_reg, B_AX_STOP_CH12); 2466 rtw89_write32_set(rtwdev, info->dma_stop2_reg, B_AX_TX_STOP2_ALL); 2467 2468 /* start DMA activities */ 2469 rtw89_pci_ctrl_dma_all(rtwdev, true); 2470 2471 return 0; 2472 } 2473 2474 int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev, bool en) 2475 { 2476 u32 val; 2477 2478 if (!en) 2479 return 0; 2480 2481 val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0); 2482 if (rtw89_pci_ltr_is_err_reg_val(val)) 2483 return -EINVAL; 2484 val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1); 2485 if (rtw89_pci_ltr_is_err_reg_val(val)) 2486 return -EINVAL; 2487 val = rtw89_read32(rtwdev, R_AX_LTR_IDLE_LATENCY); 2488 if (rtw89_pci_ltr_is_err_reg_val(val)) 2489 return -EINVAL; 2490 val = rtw89_read32(rtwdev, R_AX_LTR_ACTIVE_LATENCY); 2491 if (rtw89_pci_ltr_is_err_reg_val(val)) 2492 return -EINVAL; 2493 2494 rtw89_write32_clr(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_HW_EN); 2495 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_EN); 2496 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_SPACE_IDX_MASK, 2497 PCI_LTR_SPC_500US); 2498 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK, 2499 PCI_LTR_IDLE_TIMER_800US); 2500 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28); 2501 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28); 2502 rtw89_write32(rtwdev, R_AX_LTR_IDLE_LATENCY, 0x88e088e0); 2503 rtw89_write32(rtwdev, R_AX_LTR_ACTIVE_LATENCY, 0x880b880b); 2504 2505 return 0; 2506 } 2507 EXPORT_SYMBOL(rtw89_pci_ltr_set); 2508 2509 int rtw89_pci_ltr_set_v1(struct rtw89_dev 
*rtwdev, bool en) 2510 { 2511 u32 dec_ctrl; 2512 u32 val32; 2513 2514 val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0); 2515 if (rtw89_pci_ltr_is_err_reg_val(val32)) 2516 return -EINVAL; 2517 val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1); 2518 if (rtw89_pci_ltr_is_err_reg_val(val32)) 2519 return -EINVAL; 2520 dec_ctrl = rtw89_read32(rtwdev, R_AX_LTR_DEC_CTRL); 2521 if (rtw89_pci_ltr_is_err_reg_val(dec_ctrl)) 2522 return -EINVAL; 2523 val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX3); 2524 if (rtw89_pci_ltr_is_err_reg_val(val32)) 2525 return -EINVAL; 2526 val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX0); 2527 if (rtw89_pci_ltr_is_err_reg_val(val32)) 2528 return -EINVAL; 2529 2530 if (!en) { 2531 dec_ctrl &= ~(LTR_EN_BITS | B_AX_LTR_IDX_DRV_MASK | B_AX_LTR_HW_DEC_EN); 2532 dec_ctrl |= FIELD_PREP(B_AX_LTR_IDX_DRV_MASK, PCIE_LTR_IDX_IDLE) | 2533 B_AX_LTR_REQ_DRV; 2534 } else { 2535 dec_ctrl |= B_AX_LTR_HW_DEC_EN; 2536 } 2537 2538 dec_ctrl &= ~B_AX_LTR_SPACE_IDX_V1_MASK; 2539 dec_ctrl |= FIELD_PREP(B_AX_LTR_SPACE_IDX_V1_MASK, PCI_LTR_SPC_500US); 2540 2541 if (en) 2542 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, 2543 B_AX_LTR_WD_NOEMP_CHK_V1 | B_AX_LTR_HW_EN); 2544 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK, 2545 PCI_LTR_IDLE_TIMER_3_2MS); 2546 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28); 2547 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28); 2548 rtw89_write32(rtwdev, R_AX_LTR_DEC_CTRL, dec_ctrl); 2549 rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX3, 0x90039003); 2550 rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX0, 0x880b880b); 2551 2552 return 0; 2553 } 2554 EXPORT_SYMBOL(rtw89_pci_ltr_set_v1); 2555 2556 static int rtw89_pci_ops_mac_post_init(struct rtw89_dev *rtwdev) 2557 { 2558 const struct rtw89_pci_info *info = rtwdev->pci_info; 2559 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2560 int ret; 2561 2562 ret = info->ltr_set(rtwdev, true); 2563 if (ret) { 2564 rtw89_err(rtwdev, "pci ltr set fail\n"); 2565 return ret; 2566 } 2567 if (chip_id == RTL8852A) { 2568 /* ltr sw trigger */ 2569 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_ACT); 2570 } 2571 if (chip_id == RTL8852A || chip_id == RTL8852B) { 2572 /* ADDR info 8-byte mode */ 2573 rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, 2574 B_AX_HOST_ADDR_INFO_8B_SEL); 2575 rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); 2576 } 2577 2578 /* enable DMA for all queues */ 2579 rtw89_write32_clr(rtwdev, info->dma_stop1_reg, B_AX_TX_STOP1_ALL); 2580 rtw89_write32_clr(rtwdev, info->dma_stop2_reg, B_AX_TX_STOP2_ALL); 2581 2582 /* Release PCI IO */ 2583 rtw89_write32_clr(rtwdev, info->dma_stop1_reg, 2584 B_AX_STOP_WPDMA | B_AX_STOP_PCIEIO); 2585 2586 return 0; 2587 } 2588 2589 static int rtw89_pci_claim_device(struct rtw89_dev *rtwdev, 2590 struct pci_dev *pdev) 2591 { 2592 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2593 int ret; 2594 2595 ret = pci_enable_device(pdev); 2596 if (ret) { 2597 rtw89_err(rtwdev, "failed to enable pci device\n"); 2598 return ret; 2599 } 2600 2601 pci_set_master(pdev); 2602 pci_set_drvdata(pdev, rtwdev->hw); 2603 2604 rtwpci->pdev = pdev; 2605 2606 return 0; 2607 } 2608 2609 static void rtw89_pci_declaim_device(struct rtw89_dev *rtwdev, 2610 struct pci_dev *pdev) 2611 { 2612 pci_clear_master(pdev); 2613 pci_disable_device(pdev); 2614 } 2615 2616 static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev, 2617 struct pci_dev *pdev) 2618 { 2619 struct rtw89_pci *rtwpci = (struct 
rtw89_pci *)rtwdev->priv; 2620 unsigned long resource_len; 2621 u8 bar_id = 2; 2622 int ret; 2623 2624 ret = pci_request_regions(pdev, KBUILD_MODNAME); 2625 if (ret) { 2626 rtw89_err(rtwdev, "failed to request pci regions\n"); 2627 goto err; 2628 } 2629 2630 ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 2631 if (ret) { 2632 rtw89_err(rtwdev, "failed to set dma mask to 32-bit\n"); 2633 goto err_release_regions; 2634 } 2635 2636 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 2637 if (ret) { 2638 rtw89_err(rtwdev, "failed to set consistent dma mask to 32-bit\n"); 2639 goto err_release_regions; 2640 } 2641 2642 resource_len = pci_resource_len(pdev, bar_id); 2643 rtwpci->mmap = pci_iomap(pdev, bar_id, resource_len); 2644 if (!rtwpci->mmap) { 2645 rtw89_err(rtwdev, "failed to map pci io\n"); 2646 ret = -EIO; 2647 goto err_release_regions; 2648 } 2649 2650 return 0; 2651 2652 err_release_regions: 2653 pci_release_regions(pdev); 2654 err: 2655 return ret; 2656 } 2657 2658 static void rtw89_pci_clear_mapping(struct rtw89_dev *rtwdev, 2659 struct pci_dev *pdev) 2660 { 2661 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2662 2663 if (rtwpci->mmap) { 2664 pci_iounmap(pdev, rtwpci->mmap); 2665 pci_release_regions(pdev); 2666 } 2667 } 2668 2669 static void rtw89_pci_free_tx_wd_ring(struct rtw89_dev *rtwdev, 2670 struct pci_dev *pdev, 2671 struct rtw89_pci_tx_ring *tx_ring) 2672 { 2673 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 2674 u8 *head = wd_ring->head; 2675 dma_addr_t dma = wd_ring->dma; 2676 u32 page_size = wd_ring->page_size; 2677 u32 page_num = wd_ring->page_num; 2678 u32 ring_sz = page_size * page_num; 2679 2680 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 2681 wd_ring->head = NULL; 2682 } 2683 2684 static void rtw89_pci_free_tx_ring(struct rtw89_dev *rtwdev, 2685 struct pci_dev *pdev, 2686 struct rtw89_pci_tx_ring *tx_ring) 2687 { 2688 int ring_sz; 2689 u8 *head; 2690 dma_addr_t dma; 2691 2692 head = tx_ring->bd_ring.head; 2693 dma = tx_ring->bd_ring.dma; 2694 ring_sz = tx_ring->bd_ring.desc_size * tx_ring->bd_ring.len; 2695 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 2696 2697 tx_ring->bd_ring.head = NULL; 2698 } 2699 2700 static void rtw89_pci_free_tx_rings(struct rtw89_dev *rtwdev, 2701 struct pci_dev *pdev) 2702 { 2703 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2704 struct rtw89_pci_tx_ring *tx_ring; 2705 int i; 2706 2707 for (i = 0; i < RTW89_TXCH_NUM; i++) { 2708 tx_ring = &rtwpci->tx_rings[i]; 2709 rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring); 2710 rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring); 2711 } 2712 } 2713 2714 static void rtw89_pci_free_rx_ring(struct rtw89_dev *rtwdev, 2715 struct pci_dev *pdev, 2716 struct rtw89_pci_rx_ring *rx_ring) 2717 { 2718 struct rtw89_pci_rx_info *rx_info; 2719 struct sk_buff *skb; 2720 dma_addr_t dma; 2721 u32 buf_sz; 2722 u8 *head; 2723 int ring_sz = rx_ring->bd_ring.desc_size * rx_ring->bd_ring.len; 2724 int i; 2725 2726 buf_sz = rx_ring->buf_sz; 2727 for (i = 0; i < rx_ring->bd_ring.len; i++) { 2728 skb = rx_ring->buf[i]; 2729 if (!skb) 2730 continue; 2731 2732 rx_info = RTW89_PCI_RX_SKB_CB(skb); 2733 dma = rx_info->dma; 2734 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE); 2735 dev_kfree_skb(skb); 2736 rx_ring->buf[i] = NULL; 2737 } 2738 2739 head = rx_ring->bd_ring.head; 2740 dma = rx_ring->bd_ring.dma; 2741 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 2742 2743 rx_ring->bd_ring.head = NULL; 2744 } 2745 2746 static void rtw89_pci_free_rx_rings(struct 
rtw89_dev *rtwdev, 2747 struct pci_dev *pdev) 2748 { 2749 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2750 struct rtw89_pci_rx_ring *rx_ring; 2751 int i; 2752 2753 for (i = 0; i < RTW89_RXCH_NUM; i++) { 2754 rx_ring = &rtwpci->rx_rings[i]; 2755 rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring); 2756 } 2757 } 2758 2759 static void rtw89_pci_free_trx_rings(struct rtw89_dev *rtwdev, 2760 struct pci_dev *pdev) 2761 { 2762 rtw89_pci_free_rx_rings(rtwdev, pdev); 2763 rtw89_pci_free_tx_rings(rtwdev, pdev); 2764 } 2765 2766 static int rtw89_pci_init_rx_bd(struct rtw89_dev *rtwdev, struct pci_dev *pdev, 2767 struct rtw89_pci_rx_ring *rx_ring, 2768 struct sk_buff *skb, int buf_sz, u32 idx) 2769 { 2770 struct rtw89_pci_rx_info *rx_info; 2771 struct rtw89_pci_rx_bd_32 *rx_bd; 2772 dma_addr_t dma; 2773 2774 if (!skb) 2775 return -EINVAL; 2776 2777 dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE); 2778 if (dma_mapping_error(&pdev->dev, dma)) 2779 return -EBUSY; 2780 2781 rx_info = RTW89_PCI_RX_SKB_CB(skb); 2782 rx_bd = RTW89_PCI_RX_BD(rx_ring, idx); 2783 2784 memset(rx_bd, 0, sizeof(*rx_bd)); 2785 rx_bd->buf_size = cpu_to_le16(buf_sz); 2786 rx_bd->dma = cpu_to_le32(dma); 2787 rx_info->dma = dma; 2788 2789 return 0; 2790 } 2791 2792 static int rtw89_pci_alloc_tx_wd_ring(struct rtw89_dev *rtwdev, 2793 struct pci_dev *pdev, 2794 struct rtw89_pci_tx_ring *tx_ring, 2795 enum rtw89_tx_channel txch) 2796 { 2797 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 2798 struct rtw89_pci_tx_wd *txwd; 2799 dma_addr_t dma; 2800 dma_addr_t cur_paddr; 2801 u8 *head; 2802 u8 *cur_vaddr; 2803 u32 page_size = RTW89_PCI_TXWD_PAGE_SIZE; 2804 u32 page_num = RTW89_PCI_TXWD_NUM_MAX; 2805 u32 ring_sz = page_size * page_num; 2806 u32 page_offset; 2807 int i; 2808 2809 /* FWCMD queue doesn't use txwd as pages */ 2810 if (txch == RTW89_TXCH_CH12) 2811 return 0; 2812 2813 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 2814 if (!head) 2815 return -ENOMEM; 2816 2817 INIT_LIST_HEAD(&wd_ring->free_pages); 2818 wd_ring->head = head; 2819 wd_ring->dma = dma; 2820 wd_ring->page_size = page_size; 2821 wd_ring->page_num = page_num; 2822 2823 page_offset = 0; 2824 for (i = 0; i < page_num; i++) { 2825 txwd = &wd_ring->pages[i]; 2826 cur_paddr = dma + page_offset; 2827 cur_vaddr = head + page_offset; 2828 2829 skb_queue_head_init(&txwd->queue); 2830 INIT_LIST_HEAD(&txwd->list); 2831 txwd->paddr = cur_paddr; 2832 txwd->vaddr = cur_vaddr; 2833 txwd->len = page_size; 2834 txwd->seq = i; 2835 rtw89_pci_enqueue_txwd(tx_ring, txwd); 2836 2837 page_offset += page_size; 2838 } 2839 2840 return 0; 2841 } 2842 2843 static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev, 2844 struct pci_dev *pdev, 2845 struct rtw89_pci_tx_ring *tx_ring, 2846 u32 desc_size, u32 len, 2847 enum rtw89_tx_channel txch) 2848 { 2849 const struct rtw89_pci_ch_dma_addr *txch_addr; 2850 int ring_sz = desc_size * len; 2851 u8 *head; 2852 dma_addr_t dma; 2853 int ret; 2854 2855 ret = rtw89_pci_alloc_tx_wd_ring(rtwdev, pdev, tx_ring, txch); 2856 if (ret) { 2857 rtw89_err(rtwdev, "failed to alloc txwd ring of txch %d\n", txch); 2858 goto err; 2859 } 2860 2861 ret = rtw89_pci_get_txch_addrs(rtwdev, txch, &txch_addr); 2862 if (ret) { 2863 rtw89_err(rtwdev, "failed to get address of txch %d", txch); 2864 goto err_free_wd_ring; 2865 } 2866 2867 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 2868 if (!head) { 2869 ret = -ENOMEM; 2870 goto err_free_wd_ring; 2871 } 2872 2873 
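	/* The BD ring backing memory is allocated; the assignments below
	 * record its address, length and descriptor size, and reset the
	 * read/write pointers before the ring is used.
	 */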
INIT_LIST_HEAD(&tx_ring->busy_pages); 2874 tx_ring->bd_ring.head = head; 2875 tx_ring->bd_ring.dma = dma; 2876 tx_ring->bd_ring.len = len; 2877 tx_ring->bd_ring.desc_size = desc_size; 2878 tx_ring->bd_ring.addr = *txch_addr; 2879 tx_ring->bd_ring.wp = 0; 2880 tx_ring->bd_ring.rp = 0; 2881 tx_ring->txch = txch; 2882 2883 return 0; 2884 2885 err_free_wd_ring: 2886 rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring); 2887 err: 2888 return ret; 2889 } 2890 2891 static int rtw89_pci_alloc_tx_rings(struct rtw89_dev *rtwdev, 2892 struct pci_dev *pdev) 2893 { 2894 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2895 struct rtw89_pci_tx_ring *tx_ring; 2896 u32 desc_size; 2897 u32 len; 2898 u32 i, tx_allocated; 2899 int ret; 2900 2901 for (i = 0; i < RTW89_TXCH_NUM; i++) { 2902 tx_ring = &rtwpci->tx_rings[i]; 2903 desc_size = sizeof(struct rtw89_pci_tx_bd_32); 2904 len = RTW89_PCI_TXBD_NUM_MAX; 2905 ret = rtw89_pci_alloc_tx_ring(rtwdev, pdev, tx_ring, 2906 desc_size, len, i); 2907 if (ret) { 2908 rtw89_err(rtwdev, "failed to alloc tx ring %d\n", i); 2909 goto err_free; 2910 } 2911 } 2912 2913 return 0; 2914 2915 err_free: 2916 tx_allocated = i; 2917 for (i = 0; i < tx_allocated; i++) { 2918 tx_ring = &rtwpci->tx_rings[i]; 2919 rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring); 2920 } 2921 2922 return ret; 2923 } 2924 2925 static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev, 2926 struct pci_dev *pdev, 2927 struct rtw89_pci_rx_ring *rx_ring, 2928 u32 desc_size, u32 len, u32 rxch) 2929 { 2930 const struct rtw89_pci_ch_dma_addr *rxch_addr; 2931 struct sk_buff *skb; 2932 u8 *head; 2933 dma_addr_t dma; 2934 int ring_sz = desc_size * len; 2935 int buf_sz = RTW89_PCI_RX_BUF_SIZE; 2936 int i, allocated; 2937 int ret; 2938 2939 ret = rtw89_pci_get_rxch_addrs(rtwdev, rxch, &rxch_addr); 2940 if (ret) { 2941 rtw89_err(rtwdev, "failed to get address of rxch %d", rxch); 2942 return ret; 2943 } 2944 2945 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 2946 if (!head) { 2947 ret = -ENOMEM; 2948 goto err; 2949 } 2950 2951 rx_ring->bd_ring.head = head; 2952 rx_ring->bd_ring.dma = dma; 2953 rx_ring->bd_ring.len = len; 2954 rx_ring->bd_ring.desc_size = desc_size; 2955 rx_ring->bd_ring.addr = *rxch_addr; 2956 rx_ring->bd_ring.wp = 0; 2957 rx_ring->bd_ring.rp = 0; 2958 rx_ring->buf_sz = buf_sz; 2959 rx_ring->diliver_skb = NULL; 2960 rx_ring->diliver_desc.ready = false; 2961 2962 for (i = 0; i < len; i++) { 2963 skb = dev_alloc_skb(buf_sz); 2964 if (!skb) { 2965 ret = -ENOMEM; 2966 goto err_free; 2967 } 2968 2969 memset(skb->data, 0, buf_sz); 2970 rx_ring->buf[i] = skb; 2971 ret = rtw89_pci_init_rx_bd(rtwdev, pdev, rx_ring, skb, 2972 buf_sz, i); 2973 if (ret) { 2974 rtw89_err(rtwdev, "failed to init rx buf %d\n", i); 2975 dev_kfree_skb_any(skb); 2976 rx_ring->buf[i] = NULL; 2977 goto err_free; 2978 } 2979 } 2980 2981 return 0; 2982 2983 err_free: 2984 allocated = i; 2985 for (i = 0; i < allocated; i++) { 2986 skb = rx_ring->buf[i]; 2987 if (!skb) 2988 continue; 2989 dma = *((dma_addr_t *)skb->cb); 2990 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE); 2991 dev_kfree_skb(skb); 2992 rx_ring->buf[i] = NULL; 2993 } 2994 2995 head = rx_ring->bd_ring.head; 2996 dma = rx_ring->bd_ring.dma; 2997 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 2998 2999 rx_ring->bd_ring.head = NULL; 3000 err: 3001 return ret; 3002 } 3003 3004 static int rtw89_pci_alloc_rx_rings(struct rtw89_dev *rtwdev, 3005 struct pci_dev *pdev) 3006 { 3007 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 
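	/* Each RX DMA channel (RXQ and RPQ) gets its own BD ring plus a set
	 * of preallocated, DMA-mapped RX skbs, set up in the loop below.
	 */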
3008 struct rtw89_pci_rx_ring *rx_ring; 3009 u32 desc_size; 3010 u32 len; 3011 int i, rx_allocated; 3012 int ret; 3013 3014 for (i = 0; i < RTW89_RXCH_NUM; i++) { 3015 rx_ring = &rtwpci->rx_rings[i]; 3016 desc_size = sizeof(struct rtw89_pci_rx_bd_32); 3017 len = RTW89_PCI_RXBD_NUM_MAX; 3018 ret = rtw89_pci_alloc_rx_ring(rtwdev, pdev, rx_ring, 3019 desc_size, len, i); 3020 if (ret) { 3021 rtw89_err(rtwdev, "failed to alloc rx ring %d\n", i); 3022 goto err_free; 3023 } 3024 } 3025 3026 return 0; 3027 3028 err_free: 3029 rx_allocated = i; 3030 for (i = 0; i < rx_allocated; i++) { 3031 rx_ring = &rtwpci->rx_rings[i]; 3032 rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring); 3033 } 3034 3035 return ret; 3036 } 3037 3038 static int rtw89_pci_alloc_trx_rings(struct rtw89_dev *rtwdev, 3039 struct pci_dev *pdev) 3040 { 3041 int ret; 3042 3043 ret = rtw89_pci_alloc_tx_rings(rtwdev, pdev); 3044 if (ret) { 3045 rtw89_err(rtwdev, "failed to alloc dma tx rings\n"); 3046 goto err; 3047 } 3048 3049 ret = rtw89_pci_alloc_rx_rings(rtwdev, pdev); 3050 if (ret) { 3051 rtw89_err(rtwdev, "failed to alloc dma rx rings\n"); 3052 goto err_free_tx_rings; 3053 } 3054 3055 return 0; 3056 3057 err_free_tx_rings: 3058 rtw89_pci_free_tx_rings(rtwdev, pdev); 3059 err: 3060 return ret; 3061 } 3062 3063 static void rtw89_pci_h2c_init(struct rtw89_dev *rtwdev, 3064 struct rtw89_pci *rtwpci) 3065 { 3066 skb_queue_head_init(&rtwpci->h2c_queue); 3067 skb_queue_head_init(&rtwpci->h2c_release_queue); 3068 } 3069 3070 static int rtw89_pci_setup_resource(struct rtw89_dev *rtwdev, 3071 struct pci_dev *pdev) 3072 { 3073 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3074 int ret; 3075 3076 ret = rtw89_pci_setup_mapping(rtwdev, pdev); 3077 if (ret) { 3078 rtw89_err(rtwdev, "failed to setup pci mapping\n"); 3079 goto err; 3080 } 3081 3082 ret = rtw89_pci_alloc_trx_rings(rtwdev, pdev); 3083 if (ret) { 3084 rtw89_err(rtwdev, "failed to alloc pci trx rings\n"); 3085 goto err_pci_unmap; 3086 } 3087 3088 rtw89_pci_h2c_init(rtwdev, rtwpci); 3089 3090 spin_lock_init(&rtwpci->irq_lock); 3091 spin_lock_init(&rtwpci->trx_lock); 3092 3093 return 0; 3094 3095 err_pci_unmap: 3096 rtw89_pci_clear_mapping(rtwdev, pdev); 3097 err: 3098 return ret; 3099 } 3100 3101 static void rtw89_pci_clear_resource(struct rtw89_dev *rtwdev, 3102 struct pci_dev *pdev) 3103 { 3104 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3105 3106 rtw89_pci_free_trx_rings(rtwdev, pdev); 3107 rtw89_pci_clear_mapping(rtwdev, pdev); 3108 rtw89_pci_release_fwcmd(rtwdev, rtwpci, 3109 skb_queue_len(&rtwpci->h2c_queue), true); 3110 } 3111 3112 void rtw89_pci_config_intr_mask(struct rtw89_dev *rtwdev) 3113 { 3114 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3115 3116 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | 0; 3117 3118 if (rtwpci->under_recovery) { 3119 rtwpci->intrs[0] = B_AX_HS0ISR_IND_INT_EN; 3120 rtwpci->intrs[1] = 0; 3121 } else { 3122 rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN | 3123 B_AX_RXDMA_INT_EN | 3124 B_AX_RXP1DMA_INT_EN | 3125 B_AX_RPQDMA_INT_EN | 3126 B_AX_RXDMA_STUCK_INT_EN | 3127 B_AX_RDU_INT_EN | 3128 B_AX_RPQBD_FULL_INT_EN | 3129 B_AX_HS0ISR_IND_INT_EN; 3130 3131 rtwpci->intrs[1] = B_AX_HC10ISR_IND_INT_EN; 3132 } 3133 } 3134 EXPORT_SYMBOL(rtw89_pci_config_intr_mask); 3135 3136 static void rtw89_pci_recovery_intr_mask_v1(struct rtw89_dev *rtwdev) 3137 { 3138 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3139 3140 rtwpci->ind_intrs = B_AX_HS0ISR_IND_INT_EN; 3141 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | 
B_AX_WDT_TIMEOUT_INT_EN; 3142 rtwpci->intrs[0] = 0; 3143 rtwpci->intrs[1] = 0; 3144 } 3145 3146 static void rtw89_pci_default_intr_mask_v1(struct rtw89_dev *rtwdev) 3147 { 3148 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3149 3150 rtwpci->ind_intrs = B_AX_HCI_AXIDMA_INT_EN | 3151 B_AX_HS1ISR_IND_INT_EN | 3152 B_AX_HS0ISR_IND_INT_EN; 3153 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; 3154 rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN | 3155 B_AX_RXDMA_INT_EN | 3156 B_AX_RXP1DMA_INT_EN | 3157 B_AX_RPQDMA_INT_EN | 3158 B_AX_RXDMA_STUCK_INT_EN | 3159 B_AX_RDU_INT_EN | 3160 B_AX_RPQBD_FULL_INT_EN; 3161 rtwpci->intrs[1] = B_AX_GPIO18_INT_EN; 3162 } 3163 3164 static void rtw89_pci_low_power_intr_mask_v1(struct rtw89_dev *rtwdev) 3165 { 3166 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3167 3168 rtwpci->ind_intrs = B_AX_HS1ISR_IND_INT_EN | 3169 B_AX_HS0ISR_IND_INT_EN; 3170 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; 3171 rtwpci->intrs[0] = 0; 3172 rtwpci->intrs[1] = B_AX_GPIO18_INT_EN; 3173 } 3174 3175 void rtw89_pci_config_intr_mask_v1(struct rtw89_dev *rtwdev) 3176 { 3177 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3178 3179 if (rtwpci->under_recovery) 3180 rtw89_pci_recovery_intr_mask_v1(rtwdev); 3181 else if (rtwpci->low_power) 3182 rtw89_pci_low_power_intr_mask_v1(rtwdev); 3183 else 3184 rtw89_pci_default_intr_mask_v1(rtwdev); 3185 } 3186 EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v1); 3187 3188 static int rtw89_pci_request_irq(struct rtw89_dev *rtwdev, 3189 struct pci_dev *pdev) 3190 { 3191 unsigned long flags = 0; 3192 int ret; 3193 3194 flags |= PCI_IRQ_LEGACY | PCI_IRQ_MSI; 3195 ret = pci_alloc_irq_vectors(pdev, 1, 1, flags); 3196 if (ret < 0) { 3197 rtw89_err(rtwdev, "failed to alloc irq vectors, ret %d\n", ret); 3198 goto err; 3199 } 3200 3201 ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq, 3202 rtw89_pci_interrupt_handler, 3203 rtw89_pci_interrupt_threadfn, 3204 IRQF_SHARED, KBUILD_MODNAME, rtwdev); 3205 if (ret) { 3206 rtw89_err(rtwdev, "failed to request threaded irq\n"); 3207 goto err_free_vector; 3208 } 3209 3210 rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RESET); 3211 3212 return 0; 3213 3214 err_free_vector: 3215 pci_free_irq_vectors(pdev); 3216 err: 3217 return ret; 3218 } 3219 3220 static void rtw89_pci_free_irq(struct rtw89_dev *rtwdev, 3221 struct pci_dev *pdev) 3222 { 3223 devm_free_irq(rtwdev->dev, pdev->irq, rtwdev); 3224 pci_free_irq_vectors(pdev); 3225 } 3226 3227 static u16 gray_code_to_bin(u16 gray_code, u32 bit_num) 3228 { 3229 u16 bin = 0, gray_bit; 3230 u32 bit_idx; 3231 3232 for (bit_idx = 0; bit_idx < bit_num; bit_idx++) { 3233 gray_bit = (gray_code >> bit_idx) & 0x1; 3234 if (bit_num - bit_idx > 1) 3235 gray_bit ^= (gray_code >> (bit_idx + 1)) & 0x1; 3236 bin |= (gray_bit << bit_idx); 3237 } 3238 3239 return bin; 3240 } 3241 3242 static int rtw89_pci_filter_out(struct rtw89_dev *rtwdev) 3243 { 3244 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3245 struct pci_dev *pdev = rtwpci->pdev; 3246 u16 val16, filter_out_val; 3247 u32 val, phy_offset; 3248 int ret; 3249 3250 if (rtwdev->chip->chip_id != RTL8852C) 3251 return 0; 3252 3253 val = rtw89_read32_mask(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK); 3254 if (val == B_AX_ASPM_CTRL_L1) 3255 return 0; 3256 3257 ret = pci_read_config_dword(pdev, RTW89_PCIE_L1_STS_V1, &val); 3258 if (ret) 3259 return ret; 3260 3261 val = FIELD_GET(RTW89_BCFG_LINK_SPEED_MASK, val); 3262 if (val == 
RTW89_PCIE_GEN1_SPEED) { 3263 phy_offset = R_RAC_DIRECT_OFFSET_G1; 3264 } else if (val == RTW89_PCIE_GEN2_SPEED) { 3265 phy_offset = R_RAC_DIRECT_OFFSET_G2; 3266 val16 = rtw89_read16(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT); 3267 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT, 3268 val16 | B_PCIE_BIT_PINOUT_DIS); 3269 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT, 3270 val16 & ~B_PCIE_BIT_RD_SEL); 3271 3272 val16 = rtw89_read16_mask(rtwdev, 3273 phy_offset + RAC_ANA1F * RAC_MULT, 3274 FILTER_OUT_EQ_MASK); 3275 val16 = gray_code_to_bin(val16, hweight16(val16)); 3276 filter_out_val = rtw89_read16(rtwdev, phy_offset + RAC_ANA24 * 3277 RAC_MULT); 3278 filter_out_val &= ~REG_FILTER_OUT_MASK; 3279 filter_out_val |= FIELD_PREP(REG_FILTER_OUT_MASK, val16); 3280 3281 rtw89_write16(rtwdev, phy_offset + RAC_ANA24 * RAC_MULT, 3282 filter_out_val); 3283 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0A * RAC_MULT, 3284 B_BAC_EQ_SEL); 3285 rtw89_write16_set(rtwdev, 3286 R_RAC_DIRECT_OFFSET_G1 + RAC_ANA0C * RAC_MULT, 3287 B_PCIE_BIT_PSAVE); 3288 } else { 3289 return -EOPNOTSUPP; 3290 } 3291 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0C * RAC_MULT, 3292 B_PCIE_BIT_PSAVE); 3293 3294 return 0; 3295 } 3296 3297 static void rtw89_pci_clkreq_set(struct rtw89_dev *rtwdev, bool enable) 3298 { 3299 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 3300 int ret; 3301 3302 if (rtw89_pci_disable_clkreq) 3303 return; 3304 3305 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL, 3306 PCIE_CLKDLY_HW_30US); 3307 if (ret) 3308 rtw89_err(rtwdev, "failed to set CLKREQ Delay\n"); 3309 3310 if (chip_id == RTL8852A) { 3311 if (enable) 3312 ret = rtw89_pci_config_byte_set(rtwdev, 3313 RTW89_PCIE_L1_CTRL, 3314 RTW89_PCIE_BIT_CLK); 3315 else 3316 ret = rtw89_pci_config_byte_clr(rtwdev, 3317 RTW89_PCIE_L1_CTRL, 3318 RTW89_PCIE_BIT_CLK); 3319 if (ret) 3320 rtw89_err(rtwdev, "failed to %s CLKREQ_L1, ret=%d", 3321 enable ? 
"set" : "unset", ret); 3322 } else if (chip_id == RTL8852C) { 3323 rtw89_write32_set(rtwdev, R_AX_PCIE_LAT_CTRL, 3324 B_AX_CLK_REQ_SEL_OPT | B_AX_CLK_REQ_SEL); 3325 if (enable) 3326 rtw89_write32_set(rtwdev, R_AX_L1_CLK_CTRL, 3327 B_AX_CLK_REQ_N); 3328 else 3329 rtw89_write32_clr(rtwdev, R_AX_L1_CLK_CTRL, 3330 B_AX_CLK_REQ_N); 3331 } 3332 } 3333 3334 static void rtw89_pci_aspm_set(struct rtw89_dev *rtwdev, bool enable) 3335 { 3336 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 3337 u8 value = 0; 3338 int ret; 3339 3340 if (rtw89_pci_disable_aspm_l1) 3341 return; 3342 3343 ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, &value); 3344 if (ret) 3345 rtw89_err(rtwdev, "failed to read ASPM Delay\n"); 3346 3347 value &= ~(RTW89_L1DLY_MASK | RTW89_L0DLY_MASK); 3348 value |= FIELD_PREP(RTW89_L1DLY_MASK, PCIE_L1DLY_16US) | 3349 FIELD_PREP(RTW89_L0DLY_MASK, PCIE_L0SDLY_4US); 3350 3351 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, value); 3352 if (ret) 3353 rtw89_err(rtwdev, "failed to read ASPM Delay\n"); 3354 3355 if (chip_id == RTL8852A || chip_id == RTL8852B) { 3356 if (enable) 3357 ret = rtw89_pci_config_byte_set(rtwdev, 3358 RTW89_PCIE_L1_CTRL, 3359 RTW89_PCIE_BIT_L1); 3360 else 3361 ret = rtw89_pci_config_byte_clr(rtwdev, 3362 RTW89_PCIE_L1_CTRL, 3363 RTW89_PCIE_BIT_L1); 3364 } else if (chip_id == RTL8852C) { 3365 if (enable) 3366 rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1, 3367 B_AX_ASPM_CTRL_L1); 3368 else 3369 rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1, 3370 B_AX_ASPM_CTRL_L1); 3371 } 3372 if (ret) 3373 rtw89_err(rtwdev, "failed to %s ASPM L1, ret=%d", 3374 enable ? "set" : "unset", ret); 3375 } 3376 3377 static void rtw89_pci_recalc_int_mit(struct rtw89_dev *rtwdev) 3378 { 3379 struct rtw89_traffic_stats *stats = &rtwdev->stats; 3380 enum rtw89_tfc_lv tx_tfc_lv = stats->tx_tfc_lv; 3381 enum rtw89_tfc_lv rx_tfc_lv = stats->rx_tfc_lv; 3382 u32 val = 0; 3383 3384 if (!rtwdev->scanning && 3385 (tx_tfc_lv >= RTW89_TFC_HIGH || rx_tfc_lv >= RTW89_TFC_HIGH)) 3386 val = B_AX_RXMIT_RXP2_SEL | B_AX_RXMIT_RXP1_SEL | 3387 FIELD_PREP(B_AX_RXCOUNTER_MATCH_MASK, RTW89_PCI_RXBD_NUM_MAX / 2) | 3388 FIELD_PREP(B_AX_RXTIMER_UNIT_MASK, AX_RXTIMER_UNIT_64US) | 3389 FIELD_PREP(B_AX_RXTIMER_MATCH_MASK, 2048 / 64); 3390 3391 rtw89_write32(rtwdev, R_AX_INT_MIT_RX, val); 3392 } 3393 3394 static void rtw89_pci_link_cfg(struct rtw89_dev *rtwdev) 3395 { 3396 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3397 struct pci_dev *pdev = rtwpci->pdev; 3398 u16 link_ctrl; 3399 int ret; 3400 3401 /* Though there is standard PCIE configuration space to set the 3402 * link control register, but by Realtek's design, driver should 3403 * check if host supports CLKREQ/ASPM to enable the HW module. 3404 * 3405 * These functions are implemented by two HW modules associated, 3406 * one is responsible to access PCIE configuration space to 3407 * follow the host settings, and another is in charge of doing 3408 * CLKREQ/ASPM mechanisms, it is default disabled. Because sometimes 3409 * the host does not support it, and due to some reasons or wrong 3410 * settings (ex. CLKREQ# not Bi-Direction), it could lead to device 3411 * loss if HW misbehaves on the link. 3412 * 3413 * Hence it's designed that driver should first check the PCIE 3414 * configuration space is sync'ed and enabled, then driver can turn 3415 * on the other module that is actually working on the mechanism. 
3416 */ 3417 ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl); 3418 if (ret) { 3419 rtw89_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret); 3420 return; 3421 } 3422 3423 if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN) 3424 rtw89_pci_clkreq_set(rtwdev, true); 3425 3426 if (link_ctrl & PCI_EXP_LNKCTL_ASPM_L1) 3427 rtw89_pci_aspm_set(rtwdev, true); 3428 } 3429 3430 static void rtw89_pci_l1ss_set(struct rtw89_dev *rtwdev, bool enable) 3431 { 3432 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 3433 int ret; 3434 3435 if (chip_id == RTL8852A || chip_id == RTL8852B) { 3436 if (enable) 3437 ret = rtw89_pci_config_byte_set(rtwdev, 3438 RTW89_PCIE_TIMER_CTRL, 3439 RTW89_PCIE_BIT_L1SUB); 3440 else 3441 ret = rtw89_pci_config_byte_clr(rtwdev, 3442 RTW89_PCIE_TIMER_CTRL, 3443 RTW89_PCIE_BIT_L1SUB); 3444 if (ret) 3445 rtw89_err(rtwdev, "failed to %s L1SS, ret=%d", 3446 enable ? "set" : "unset", ret); 3447 } else if (chip_id == RTL8852C) { 3448 ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_L1SS_STS_V1, 3449 RTW89_PCIE_BIT_ASPM_L11 | 3450 RTW89_PCIE_BIT_PCI_L11); 3451 if (ret) 3452 rtw89_warn(rtwdev, "failed to unset ASPM L1.1, ret=%d", ret); 3453 if (enable) 3454 rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1, 3455 B_AX_L1SUB_DISABLE); 3456 else 3457 rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1, 3458 B_AX_L1SUB_DISABLE); 3459 } 3460 } 3461 3462 static void rtw89_pci_l1ss_cfg(struct rtw89_dev *rtwdev) 3463 { 3464 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3465 struct pci_dev *pdev = rtwpci->pdev; 3466 u32 l1ss_cap_ptr, l1ss_ctrl; 3467 3468 if (rtw89_pci_disable_l1ss) 3469 return; 3470 3471 l1ss_cap_ptr = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS); 3472 if (!l1ss_cap_ptr) 3473 return; 3474 3475 pci_read_config_dword(pdev, l1ss_cap_ptr + PCI_L1SS_CTL1, &l1ss_ctrl); 3476 3477 if (l1ss_ctrl & PCI_L1SS_CTL1_L1SS_MASK) 3478 rtw89_pci_l1ss_set(rtwdev, true); 3479 } 3480 3481 static void rtw89_pci_ctrl_dma_all_pcie(struct rtw89_dev *rtwdev, u8 en) 3482 { 3483 const struct rtw89_pci_info *info = rtwdev->pci_info; 3484 u32 val32; 3485 3486 if (en == MAC_AX_FUNC_EN) { 3487 val32 = B_AX_STOP_PCIEIO; 3488 rtw89_write32_clr(rtwdev, info->dma_stop1_reg, val32); 3489 3490 val32 = B_AX_TXHCI_EN | B_AX_RXHCI_EN; 3491 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 3492 } else { 3493 val32 = B_AX_STOP_PCIEIO; 3494 rtw89_write32_set(rtwdev, info->dma_stop1_reg, val32); 3495 3496 val32 = B_AX_TXHCI_EN | B_AX_RXHCI_EN; 3497 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 3498 } 3499 } 3500 3501 static int rtw89_pci_poll_io_idle(struct rtw89_dev *rtwdev) 3502 { 3503 int ret = 0; 3504 u32 sts; 3505 u32 busy = B_AX_PCIEIO_BUSY | B_AX_PCIEIO_TX_BUSY | B_AX_PCIEIO_RX_BUSY; 3506 3507 ret = read_poll_timeout_atomic(rtw89_read32, sts, (sts & busy) == 0x0, 3508 10, 1000, false, rtwdev, 3509 R_AX_PCIE_DMA_BUSY1); 3510 if (ret) { 3511 rtw89_err(rtwdev, "pci dmach busy1 0x%X\n", 3512 rtw89_read32(rtwdev, R_AX_PCIE_DMA_BUSY1)); 3513 return -EINVAL; 3514 } 3515 return ret; 3516 } 3517 3518 static int rtw89_pci_lv1rst_stop_dma(struct rtw89_dev *rtwdev) 3519 { 3520 u32 val, dma_rst = 0; 3521 int ret; 3522 3523 rtw89_pci_ctrl_dma_all_pcie(rtwdev, MAC_AX_FUNC_DIS); 3524 ret = rtw89_pci_poll_io_idle(rtwdev); 3525 if (ret) { 3526 val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG); 3527 rtw89_debug(rtwdev, RTW89_DBG_HCI, 3528 "[PCIe] poll_io_idle fail, before 0x%08x: 0x%08x\n", 3529 R_AX_DBG_ERR_FLAG, val); 3530 if (val & B_AX_TX_STUCK || val & B_AX_PCIE_TXBD_LEN0) 3531 dma_rst |= 
B_AX_HCI_TXDMA_EN; 3532 if (val & B_AX_RX_STUCK) 3533 dma_rst |= B_AX_HCI_RXDMA_EN; 3534 val = rtw89_read32(rtwdev, R_AX_HCI_FUNC_EN); 3535 rtw89_write32(rtwdev, R_AX_HCI_FUNC_EN, val & ~dma_rst); 3536 rtw89_write32(rtwdev, R_AX_HCI_FUNC_EN, val | dma_rst); 3537 ret = rtw89_pci_poll_io_idle(rtwdev); 3538 val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG); 3539 rtw89_debug(rtwdev, RTW89_DBG_HCI, 3540 "[PCIe] poll_io_idle fail, after 0x%08x: 0x%08x\n", 3541 R_AX_DBG_ERR_FLAG, val); 3542 } 3543 3544 return ret; 3545 } 3546 3547 static void rtw89_pci_ctrl_hci_dma_en(struct rtw89_dev *rtwdev, u8 en) 3548 { 3549 u32 val32; 3550 3551 if (en == MAC_AX_FUNC_EN) { 3552 val32 = B_AX_HCI_TXDMA_EN | B_AX_HCI_RXDMA_EN; 3553 rtw89_write32_set(rtwdev, R_AX_HCI_FUNC_EN, val32); 3554 } else { 3555 val32 = B_AX_HCI_TXDMA_EN | B_AX_HCI_RXDMA_EN; 3556 rtw89_write32_clr(rtwdev, R_AX_HCI_FUNC_EN, val32); 3557 } 3558 } 3559 3560 static int rtw89_pci_rst_bdram(struct rtw89_dev *rtwdev) 3561 { 3562 int ret = 0; 3563 u32 val32, sts; 3564 3565 val32 = B_AX_RST_BDRAM; 3566 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 3567 3568 ret = read_poll_timeout_atomic(rtw89_read32, sts, 3569 (sts & B_AX_RST_BDRAM) == 0x0, 1, 100, 3570 true, rtwdev, R_AX_PCIE_INIT_CFG1); 3571 return ret; 3572 } 3573 3574 static int rtw89_pci_lv1rst_start_dma(struct rtw89_dev *rtwdev) 3575 { 3576 u32 ret; 3577 3578 rtw89_pci_ctrl_hci_dma_en(rtwdev, MAC_AX_FUNC_DIS); 3579 rtw89_pci_ctrl_hci_dma_en(rtwdev, MAC_AX_FUNC_EN); 3580 rtw89_pci_clr_idx_all(rtwdev); 3581 3582 ret = rtw89_pci_rst_bdram(rtwdev); 3583 if (ret) 3584 return ret; 3585 3586 rtw89_pci_ctrl_dma_all_pcie(rtwdev, MAC_AX_FUNC_EN); 3587 return ret; 3588 } 3589 3590 static int rtw89_pci_ops_mac_lv1_recovery(struct rtw89_dev *rtwdev, 3591 enum rtw89_lv1_rcvy_step step) 3592 { 3593 int ret; 3594 3595 switch (step) { 3596 case RTW89_LV1_RCVY_STEP_1: 3597 ret = rtw89_pci_lv1rst_stop_dma(rtwdev); 3598 if (ret) 3599 rtw89_err(rtwdev, "lv1 rcvy pci stop dma fail\n"); 3600 3601 break; 3602 3603 case RTW89_LV1_RCVY_STEP_2: 3604 ret = rtw89_pci_lv1rst_start_dma(rtwdev); 3605 if (ret) 3606 rtw89_err(rtwdev, "lv1 rcvy pci start dma fail\n"); 3607 break; 3608 3609 default: 3610 return -EINVAL; 3611 } 3612 3613 return ret; 3614 } 3615 3616 static void rtw89_pci_ops_dump_err_status(struct rtw89_dev *rtwdev) 3617 { 3618 rtw89_info(rtwdev, "R_AX_RPQ_RXBD_IDX =0x%08x\n", 3619 rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX)); 3620 rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n", 3621 rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG)); 3622 rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n", 3623 rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG)); 3624 } 3625 3626 static int rtw89_pci_napi_poll(struct napi_struct *napi, int budget) 3627 { 3628 struct rtw89_dev *rtwdev = container_of(napi, struct rtw89_dev, napi); 3629 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3630 unsigned long flags; 3631 int work_done; 3632 3633 rtwdev->napi_budget_countdown = budget; 3634 3635 rtw89_pci_clear_isr0(rtwdev, B_AX_RPQDMA_INT | B_AX_RPQBD_FULL_INT); 3636 work_done = rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown); 3637 if (work_done == budget) 3638 return budget; 3639 3640 rtw89_pci_clear_isr0(rtwdev, B_AX_RXP1DMA_INT | B_AX_RXDMA_INT | B_AX_RDU_INT); 3641 work_done += rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown); 3642 if (work_done < budget && napi_complete_done(napi, work_done)) { 3643 spin_lock_irqsave(&rtwpci->irq_lock, flags); 3644 if (likely(rtwpci->running)) 3645 
			rtw89_chip_enable_intr(rtwdev, rtwpci);
		spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
	}

	return work_done;
}

static int __maybe_unused rtw89_pci_suspend(struct device *dev)
{
	struct ieee80211_hw *hw = dev_get_drvdata(dev);
	struct rtw89_dev *rtwdev = hw->priv;
	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;

	rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
	rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST);
	rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
	if (chip_id == RTL8852A || chip_id == RTL8852B) {
		rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
				  B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
		rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
				  B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG);
	} else {
		rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1,
				  B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN);
	}

	return 0;
}

static void rtw89_pci_l2_hci_ldo(struct rtw89_dev *rtwdev)
{
	if (rtwdev->chip->chip_id == RTL8852C)
		return;

	/* The hardware needs this register written twice for the setting to
	 * take effect.
	 */
	rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE,
				    RTW89_PCIE_BIT_CFG_RST_MSTATE);
	rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE,
				    RTW89_PCIE_BIT_CFG_RST_MSTATE);
}

static int __maybe_unused rtw89_pci_resume(struct device *dev)
{
	struct ieee80211_hw *hw = dev_get_drvdata(dev);
	struct rtw89_dev *rtwdev = hw->priv;
	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;

	rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
	rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST);
	rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
	if (chip_id == RTL8852A || chip_id == RTL8852B) {
		rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL,
				  B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
		rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1,
				  B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG);
	} else {
		rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1,
				  B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN);
		rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1,
				  B_AX_SEL_REQ_ENTR_L1);
	}
	rtw89_pci_l2_hci_ldo(rtwdev);
	rtw89_pci_filter_out(rtwdev);
	rtw89_pci_link_cfg(rtwdev);
	rtw89_pci_l1ss_cfg(rtwdev);

	return 0;
}

SIMPLE_DEV_PM_OPS(rtw89_pm_ops, rtw89_pci_suspend, rtw89_pci_resume);
EXPORT_SYMBOL(rtw89_pm_ops);

static const struct rtw89_hci_ops rtw89_pci_ops = {
	.tx_write = rtw89_pci_ops_tx_write,
	.tx_kick_off = rtw89_pci_ops_tx_kick_off,
	.flush_queues = rtw89_pci_ops_flush_queues,
	.reset = rtw89_pci_ops_reset,
	.start = rtw89_pci_ops_start,
	.stop = rtw89_pci_ops_stop,
	.pause = rtw89_pci_ops_pause,
	.switch_mode = rtw89_pci_ops_switch_mode,
	.recalc_int_mit = rtw89_pci_recalc_int_mit,

	.read8 = rtw89_pci_ops_read8,
	.read16 = rtw89_pci_ops_read16,
	.read32 = rtw89_pci_ops_read32,
	.write8 = rtw89_pci_ops_write8,
	.write16 = rtw89_pci_ops_write16,
	.write32 = rtw89_pci_ops_write32,

	.mac_pre_init = rtw89_pci_ops_mac_pre_init,
	.mac_post_init = rtw89_pci_ops_mac_post_init,
	.deinit = rtw89_pci_ops_deinit,

	.check_and_reclaim_tx_resource = rtw89_pci_check_and_reclaim_tx_resource,
	.mac_lv1_rcvy = rtw89_pci_ops_mac_lv1_recovery,
	.dump_err_status =
rtw89_pci_ops_dump_err_status, 3742 .napi_poll = rtw89_pci_napi_poll, 3743 3744 .recovery_start = rtw89_pci_ops_recovery_start, 3745 .recovery_complete = rtw89_pci_ops_recovery_complete, 3746 }; 3747 3748 int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) 3749 { 3750 struct rtw89_dev *rtwdev; 3751 const struct rtw89_driver_info *info; 3752 const struct rtw89_pci_info *pci_info; 3753 int ret; 3754 3755 info = (const struct rtw89_driver_info *)id->driver_data; 3756 3757 rtwdev = rtw89_alloc_ieee80211_hw(&pdev->dev, 3758 sizeof(struct rtw89_pci), 3759 info->chip); 3760 if (!rtwdev) { 3761 dev_err(&pdev->dev, "failed to allocate hw\n"); 3762 return -ENOMEM; 3763 } 3764 3765 pci_info = info->bus.pci; 3766 3767 rtwdev->pci_info = info->bus.pci; 3768 rtwdev->hci.ops = &rtw89_pci_ops; 3769 rtwdev->hci.type = RTW89_HCI_TYPE_PCIE; 3770 rtwdev->hci.rpwm_addr = pci_info->rpwm_addr; 3771 rtwdev->hci.cpwm_addr = pci_info->cpwm_addr; 3772 3773 SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev); 3774 3775 ret = rtw89_core_init(rtwdev); 3776 if (ret) { 3777 rtw89_err(rtwdev, "failed to initialise core\n"); 3778 goto err_release_hw; 3779 } 3780 3781 ret = rtw89_pci_claim_device(rtwdev, pdev); 3782 if (ret) { 3783 rtw89_err(rtwdev, "failed to claim pci device\n"); 3784 goto err_core_deinit; 3785 } 3786 3787 ret = rtw89_pci_setup_resource(rtwdev, pdev); 3788 if (ret) { 3789 rtw89_err(rtwdev, "failed to setup pci resource\n"); 3790 goto err_declaim_pci; 3791 } 3792 3793 ret = rtw89_chip_info_setup(rtwdev); 3794 if (ret) { 3795 rtw89_err(rtwdev, "failed to setup chip information\n"); 3796 goto err_clear_resource; 3797 } 3798 3799 rtw89_pci_filter_out(rtwdev); 3800 rtw89_pci_link_cfg(rtwdev); 3801 rtw89_pci_l1ss_cfg(rtwdev); 3802 3803 ret = rtw89_core_register(rtwdev); 3804 if (ret) { 3805 rtw89_err(rtwdev, "failed to register core\n"); 3806 goto err_clear_resource; 3807 } 3808 3809 rtw89_core_napi_init(rtwdev); 3810 3811 ret = rtw89_pci_request_irq(rtwdev, pdev); 3812 if (ret) { 3813 rtw89_err(rtwdev, "failed to request pci irq\n"); 3814 goto err_unregister; 3815 } 3816 3817 return 0; 3818 3819 err_unregister: 3820 rtw89_core_napi_deinit(rtwdev); 3821 rtw89_core_unregister(rtwdev); 3822 err_clear_resource: 3823 rtw89_pci_clear_resource(rtwdev, pdev); 3824 err_declaim_pci: 3825 rtw89_pci_declaim_device(rtwdev, pdev); 3826 err_core_deinit: 3827 rtw89_core_deinit(rtwdev); 3828 err_release_hw: 3829 rtw89_free_ieee80211_hw(rtwdev); 3830 3831 return ret; 3832 } 3833 EXPORT_SYMBOL(rtw89_pci_probe); 3834 3835 void rtw89_pci_remove(struct pci_dev *pdev) 3836 { 3837 struct ieee80211_hw *hw = pci_get_drvdata(pdev); 3838 struct rtw89_dev *rtwdev; 3839 3840 rtwdev = hw->priv; 3841 3842 rtw89_pci_free_irq(rtwdev, pdev); 3843 rtw89_core_napi_deinit(rtwdev); 3844 rtw89_core_unregister(rtwdev); 3845 rtw89_pci_clear_resource(rtwdev, pdev); 3846 rtw89_pci_declaim_device(rtwdev, pdev); 3847 rtw89_core_deinit(rtwdev); 3848 rtw89_free_ieee80211_hw(rtwdev); 3849 } 3850 EXPORT_SYMBOL(rtw89_pci_remove); 3851 3852 MODULE_AUTHOR("Realtek Corporation"); 3853 MODULE_DESCRIPTION("Realtek 802.11ax wireless PCI driver"); 3854 MODULE_LICENSE("Dual BSD/GPL"); 3855
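
/* Usage note (illustrative, not part of this file): rtw89_pci_probe(),
 * rtw89_pci_remove() and rtw89_pm_ops are exported so that the per-chip
 * front-end modules can wire them into a pci_driver.  A minimal sketch,
 * assuming a chip-specific struct rtw89_driver_info named rtw89_8852ae_info
 * defined elsewhere; the device ID shown is also only an example:
 *
 *	static const struct pci_device_id rtw89_8852ae_id_table[] = {
 *		{
 *			PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8852),
 *			.driver_data = (kernel_ulong_t)&rtw89_8852ae_info,
 *		},
 *		{},
 *	};
 *	MODULE_DEVICE_TABLE(pci, rtw89_8852ae_id_table);
 *
 *	static struct pci_driver rtw89_8852ae_driver = {
 *		.name = "rtw89_8852ae",
 *		.id_table = rtw89_8852ae_id_table,
 *		.probe = rtw89_pci_probe,
 *		.remove = rtw89_pci_remove,
 *		.driver.pm = &rtw89_pm_ops,
 *	};
 *	module_pci_driver(rtw89_8852ae_driver);
 */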