// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2020 Realtek Corporation
 */

#include <linux/pci.h>

#include "mac.h"
#include "pci.h"
#include "reg.h"
#include "ser.h"

static bool rtw89_pci_disable_clkreq;
static bool rtw89_pci_disable_aspm_l1;
static bool rtw89_pci_disable_l1ss;
module_param_named(disable_clkreq, rtw89_pci_disable_clkreq, bool, 0644);
module_param_named(disable_aspm_l1, rtw89_pci_disable_aspm_l1, bool, 0644);
module_param_named(disable_aspm_l1ss, rtw89_pci_disable_l1ss, bool, 0644);
MODULE_PARM_DESC(disable_clkreq, "Set Y to disable PCI clkreq support");
MODULE_PARM_DESC(disable_aspm_l1, "Set Y to disable PCI ASPM L1 support");
MODULE_PARM_DESC(disable_aspm_l1ss, "Set Y to disable PCI L1SS support");

static int rtw89_pci_rst_bdram_ax(struct rtw89_dev *rtwdev)
{
	u32 val;
	int ret;

	rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RST_BDRAM);

	ret = read_poll_timeout_atomic(rtw89_read32, val, !(val & B_AX_RST_BDRAM),
				       1, RTW89_PCI_POLL_BDRAM_RST_CNT, false,
				       rtwdev, R_AX_PCIE_INIT_CFG1);

	return ret;
}

static u32 rtw89_pci_dma_recalc(struct rtw89_dev *rtwdev,
				struct rtw89_pci_dma_ring *bd_ring,
				u32 cur_idx, bool tx)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	u32 cnt, cur_rp, wp, rp, len;

	rp = bd_ring->rp;
	wp = bd_ring->wp;
	len = bd_ring->len;

	cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
	if (tx) {
		cnt = cur_rp >= rp ? cur_rp - rp : len - (rp - cur_rp);
	} else {
		if (info->rx_ring_eq_is_full)
			wp += 1;

		cnt = cur_rp >= wp ? cur_rp - wp : len - (wp - cur_rp);
	}

	bd_ring->rp = cur_rp;

	return cnt;
}
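/* Worked example of the recalc above: on a TX ring with len=256, a cached
 * rp=250 and a hardware index cur_rp=5, the hardware has wrapped, so
 * cnt = len - (rp - cur_rp) = 256 - 245 = 11 descriptors were consumed
 * since the last poll.
 */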
static u32 rtw89_pci_txbd_recalc(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 addr_idx = bd_ring->addr.idx;
	u32 cnt, idx;

	idx = rtw89_read32(rtwdev, addr_idx);
	cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, true);

	return cnt;
}

static void rtw89_pci_release_fwcmd(struct rtw89_dev *rtwdev,
				    struct rtw89_pci *rtwpci,
				    u32 cnt, bool release_all)
{
	struct rtw89_pci_tx_data *tx_data;
	struct sk_buff *skb;
	u32 qlen;

	while (cnt--) {
		skb = skb_dequeue(&rtwpci->h2c_queue);
		if (!skb) {
			rtw89_err(rtwdev, "failed to pre-release fwcmd\n");
			return;
		}
		skb_queue_tail(&rtwpci->h2c_release_queue, skb);
	}

	qlen = skb_queue_len(&rtwpci->h2c_release_queue);
	if (!release_all)
		qlen = qlen > RTW89_PCI_MULTITAG ?
		       qlen - RTW89_PCI_MULTITAG : 0;

	while (qlen--) {
		skb = skb_dequeue(&rtwpci->h2c_release_queue);
		if (!skb) {
			rtw89_err(rtwdev, "failed to release fwcmd\n");
			return;
		}
		tx_data = RTW89_PCI_TX_SKB_CB(skb);
		dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

static void rtw89_pci_reclaim_tx_fwcmd(struct rtw89_dev *rtwdev,
				       struct rtw89_pci *rtwpci)
{
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];
	u32 cnt;

	cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring);
	if (!cnt)
		return;
	rtw89_pci_release_fwcmd(rtwdev, rtwpci, cnt, false);
}

static u32 rtw89_pci_rxbd_recalc(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_rx_ring *rx_ring)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	u32 addr_idx = bd_ring->addr.idx;
	u32 cnt, idx;

	idx = rtw89_read32(rtwdev, addr_idx);
	cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, false);

	return cnt;
}

static void rtw89_pci_sync_skb_for_cpu(struct rtw89_dev *rtwdev,
				       struct sk_buff *skb)
{
	struct rtw89_pci_rx_info *rx_info;
	dma_addr_t dma;

	rx_info = RTW89_PCI_RX_SKB_CB(skb);
	dma = rx_info->dma;
	dma_sync_single_for_cpu(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE,
				DMA_FROM_DEVICE);
}

static void rtw89_pci_sync_skb_for_device(struct rtw89_dev *rtwdev,
					  struct sk_buff *skb)
{
	struct rtw89_pci_rx_info *rx_info;
	dma_addr_t dma;

	rx_info = RTW89_PCI_RX_SKB_CB(skb);
	dma = rx_info->dma;
	dma_sync_single_for_device(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE,
				   DMA_FROM_DEVICE);
}

static int rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev,
				      struct sk_buff *skb)
{
	struct rtw89_pci_rxbd_info *rxbd_info;
	struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);

	rxbd_info = (struct rtw89_pci_rxbd_info *)skb->data;
	rx_info->fs = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_FS);
	rx_info->ls = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_LS);
	rx_info->len = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_WRITE_SIZE);
	rx_info->tag = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_TAG);

	return 0;
}

static void rtw89_pci_ctrl_txdma_ch_pcie(struct rtw89_dev *rtwdev, bool enable)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1;
	const struct rtw89_reg_def *dma_stop2 = &info->dma_stop2;

	if (enable) {
		rtw89_write32_clr(rtwdev, dma_stop1->addr, dma_stop1->mask);
		if (dma_stop2->addr)
			rtw89_write32_clr(rtwdev, dma_stop2->addr, dma_stop2->mask);
	} else {
		rtw89_write32_set(rtwdev, dma_stop1->addr, dma_stop1->mask);
		if (dma_stop2->addr)
			rtw89_write32_set(rtwdev, dma_stop2->addr, dma_stop2->mask);
	}
}

static void rtw89_pci_ctrl_txdma_fw_ch_pcie(struct rtw89_dev *rtwdev, bool enable)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1;

	if (enable)
		rtw89_write32_clr(rtwdev, dma_stop1->addr, B_AX_STOP_CH12);
	else
		rtw89_write32_set(rtwdev, dma_stop1->addr, B_AX_STOP_CH12);
}
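/* A received packet may span several RX BDs: the first segment carries the
 * FS flag plus the RX descriptor, the last one carries LS, and both flags
 * come from rtw89_pci_rxbd_info_update() above. The helper below copies one
 * segment's payload into the skb being reassembled.
 */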
static bool
rtw89_skb_put_rx_data(struct rtw89_dev *rtwdev, bool fs, bool ls,
		      struct sk_buff *new,
		      const struct sk_buff *skb, u32 offset,
		      const struct rtw89_pci_rx_info *rx_info,
		      const struct rtw89_rx_desc_info *desc_info)
{
	u32 copy_len = rx_info->len - offset;

	if (unlikely(skb_tailroom(new) < copy_len)) {
		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
			    "invalid rx data length bd_len=%d desc_len=%d offset=%d (fs=%d ls=%d)\n",
			    rx_info->len, desc_info->pkt_size, offset, fs, ls);
		rtw89_hex_dump(rtwdev, RTW89_DBG_TXRX, "rx_data: ",
			       skb->data, rx_info->len);
		/* length of a single segment skb is desc_info->pkt_size */
		if (fs && ls) {
			copy_len = desc_info->pkt_size;
		} else {
			rtw89_info(rtwdev, "drop rx data due to invalid length\n");
			return false;
		}
	}

	skb_put_data(new, skb->data + offset, copy_len);

	return true;
}

static u32 rtw89_pci_get_rx_skb_idx(struct rtw89_dev *rtwdev,
				    struct rtw89_pci_dma_ring *bd_ring)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	u32 wp = bd_ring->wp;

	if (!info->rx_ring_eq_is_full)
		return wp;

	if (++wp >= bd_ring->len)
		wp = 0;

	return wp;
}

static u32 rtw89_pci_rxbd_deliver_skbs(struct rtw89_dev *rtwdev,
				       struct rtw89_pci_rx_ring *rx_ring)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	struct rtw89_pci_rx_info *rx_info;
	struct rtw89_rx_desc_info *desc_info = &rx_ring->diliver_desc;
	struct sk_buff *new = rx_ring->diliver_skb;
	struct sk_buff *skb;
	u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
	u32 skb_idx;
	u32 offset;
	u32 cnt = 1;
	bool fs, ls;
	int ret;

	skb_idx = rtw89_pci_get_rx_skb_idx(rtwdev, bd_ring);
	skb = rx_ring->buf[skb_idx];
	rtw89_pci_sync_skb_for_cpu(rtwdev, skb);

	ret = rtw89_pci_rxbd_info_update(rtwdev, skb);
	if (ret) {
		rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
			  bd_ring->wp, ret);
		goto err_sync_device;
	}

	rx_info = RTW89_PCI_RX_SKB_CB(skb);
	fs = rx_info->fs;
	ls = rx_info->ls;

	if (fs) {
		if (new) {
			rtw89_debug(rtwdev, RTW89_DBG_UNEXP,
				    "skb should not be ready before first segment start\n");
			goto err_sync_device;
		}
		if (desc_info->ready) {
			rtw89_warn(rtwdev, "desc info should not be ready before first segment start\n");
			goto err_sync_device;
		}

		rtw89_chip_query_rxdesc(rtwdev, desc_info, skb->data, rxinfo_size);

		new = rtw89_alloc_skb_for_rx(rtwdev, desc_info->pkt_size);
		if (!new)
			goto err_sync_device;

		rx_ring->diliver_skb = new;

		/* first segment has RX desc */
		offset = desc_info->offset + desc_info->rxd_len;
	} else {
		offset = sizeof(struct rtw89_pci_rxbd_info);
		if (!new) {
			rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "no last skb\n");
			goto err_sync_device;
		}
	}
	if (!rtw89_skb_put_rx_data(rtwdev, fs, ls, new, skb, offset, rx_info, desc_info))
		goto err_sync_device;
	rtw89_pci_sync_skb_for_device(rtwdev, skb);
	rtw89_pci_rxbd_increase(rx_ring, 1);

	if (!desc_info->ready) {
		rtw89_warn(rtwdev, "no rx desc information\n");
		goto err_free_resource;
	}
	if (ls) {
		rtw89_core_rx(rtwdev, desc_info, new);
		rx_ring->diliver_skb = NULL;
		desc_info->ready = false;
	}

	return cnt;

err_sync_device:
	rtw89_pci_sync_skb_for_device(rtwdev, skb);
	rtw89_pci_rxbd_increase(rx_ring, 1);
err_free_resource:
	if (new)
		dev_kfree_skb_any(new);
	rx_ring->diliver_skb = NULL;
	desc_info->ready = false;

	return cnt;
}
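/* A ring's idx register packs both pointers into one word: TXBD_HOST_IDX_MASK
 * holds the host (driver) index and TXBD_HW_IDX_MASK the hardware index.
 * Writing bd_ring->wp back, as the deliver loop below does, advances the host
 * index and hands the consumed buffers back to the device.
 */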
static void rtw89_pci_rxbd_deliver(struct rtw89_dev *rtwdev,
				   struct rtw89_pci_rx_ring *rx_ring,
				   u32 cnt)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	u32 rx_cnt;

	while (cnt && rtwdev->napi_budget_countdown > 0) {
		rx_cnt = rtw89_pci_rxbd_deliver_skbs(rtwdev, rx_ring);
		if (!rx_cnt) {
			rtw89_err(rtwdev, "failed to deliver RXBD skb\n");

			/* skip the remaining RXBD bufs */
			rtw89_pci_rxbd_increase(rx_ring, cnt);
			break;
		}

		cnt -= rx_cnt;
	}

	rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
}

static int rtw89_pci_poll_rxq_dma(struct rtw89_dev *rtwdev,
				  struct rtw89_pci *rtwpci, int budget)
{
	struct rtw89_pci_rx_ring *rx_ring;
	int countdown = rtwdev->napi_budget_countdown;
	u32 cnt;

	rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RXQ];

	cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
	if (!cnt)
		return 0;

	cnt = min_t(u32, budget, cnt);

	rtw89_pci_rxbd_deliver(rtwdev, rx_ring, cnt);

	/* In case of flushing pending SKBs, the countdown may go below zero. */
	if (rtwdev->napi_budget_countdown <= 0)
		return budget;

	return budget - countdown;
}

static void rtw89_pci_tx_status(struct rtw89_dev *rtwdev,
				struct rtw89_pci_tx_ring *tx_ring,
				struct sk_buff *skb, u8 tx_status)
{
	struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
	struct ieee80211_tx_info *info;

	rtw89_core_tx_wait_complete(rtwdev, skb_data, tx_status == RTW89_TX_DONE);

	info = IEEE80211_SKB_CB(skb);
	ieee80211_tx_info_clear_status(info);

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
	if (tx_status == RTW89_TX_DONE) {
		info->flags |= IEEE80211_TX_STAT_ACK;
		tx_ring->tx_acked++;
	} else {
		if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)
			rtw89_debug(rtwdev, RTW89_DBG_FW,
				    "failed to TX, status %x\n", tx_status);
		switch (tx_status) {
		case RTW89_TX_RETRY_LIMIT:
			tx_ring->tx_retry_lmt++;
			break;
		case RTW89_TX_LIFE_TIME:
			tx_ring->tx_life_time++;
			break;
		case RTW89_TX_MACID_DROP:
			tx_ring->tx_mac_id_drop++;
			break;
		default:
			rtw89_warn(rtwdev, "invalid TX status %x\n", tx_status);
			break;
		}
	}

	ieee80211_tx_status_ni(rtwdev->hw, skb);
}

static void rtw89_pci_reclaim_txbd(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_tx_wd *txwd;
	u32 cnt;

	cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring);
	while (cnt--) {
		txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list);
		if (!txwd) {
			rtw89_warn(rtwdev, "No busy txwd pages available\n");
			break;
		}

		list_del_init(&txwd->list);

		/* this skb has been freed by RPP */
		if (skb_queue_len(&txwd->queue) == 0)
			rtw89_pci_enqueue_txwd(tx_ring, txwd);
	}
}

static void rtw89_pci_release_busy_txwd(struct rtw89_dev *rtwdev,
					struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
	struct rtw89_pci_tx_wd *txwd;
	int i;

	for (i = 0; i < wd_ring->page_num; i++) {
		txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list);
		if (!txwd)
			break;

		list_del_init(&txwd->list);
	}
}
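/* A TXWD page owns the skbs queued on it until the device hands back a
 * matching RPP entry on the RPQ ring. The helper below completes those skbs
 * and, once the page's queue is empty, returns it to the free wd_ring so it
 * can carry new frames.
 */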
static void rtw89_pci_release_txwd_skb(struct rtw89_dev *rtwdev,
				       struct rtw89_pci_tx_ring *tx_ring,
				       struct rtw89_pci_tx_wd *txwd, u16 seq,
				       u8 tx_status)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_data *tx_data;
	struct sk_buff *skb, *tmp;
	u8 txch = tx_ring->txch;

	if (!list_empty(&txwd->list)) {
		rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
		/* In low power mode, RPP can be received before the TX BD is
		 * updated. In normal mode, it should not happen, so give it
		 * a warning.
		 */
		if (!rtwpci->low_power && !list_empty(&txwd->list))
			rtw89_warn(rtwdev, "queue %d txwd %d is not idle\n",
				   txch, seq);
	}

	skb_queue_walk_safe(&txwd->queue, skb, tmp) {
		skb_unlink(skb, &txwd->queue);

		tx_data = RTW89_PCI_TX_SKB_CB(skb);
		dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
				 DMA_TO_DEVICE);

		rtw89_pci_tx_status(rtwdev, tx_ring, skb, tx_status);
	}

	if (list_empty(&txwd->list))
		rtw89_pci_enqueue_txwd(tx_ring, txwd);
}

static void rtw89_pci_release_rpp(struct rtw89_dev *rtwdev,
				  struct rtw89_pci_rpp_fmt *rpp)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring;
	struct rtw89_pci_tx_wd_ring *wd_ring;
	struct rtw89_pci_tx_wd *txwd;
	u16 seq;
	u8 qsel, tx_status, txch;

	seq = le32_get_bits(rpp->dword, RTW89_PCI_RPP_SEQ);
	qsel = le32_get_bits(rpp->dword, RTW89_PCI_RPP_QSEL);
	tx_status = le32_get_bits(rpp->dword, RTW89_PCI_RPP_TX_STATUS);
	txch = rtw89_core_get_ch_dma(rtwdev, qsel);

	if (txch == RTW89_TXCH_CH12) {
		rtw89_warn(rtwdev, "should be no fwcmd release report\n");
		return;
	}

	tx_ring = &rtwpci->tx_rings[txch];
	wd_ring = &tx_ring->wd_ring;
	txwd = &wd_ring->pages[seq];

	rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, seq, tx_status);
}

static void rtw89_pci_release_pending_txwd_skb(struct rtw89_dev *rtwdev,
					       struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
	struct rtw89_pci_tx_wd *txwd;
	int i;

	for (i = 0; i < wd_ring->page_num; i++) {
		txwd = &wd_ring->pages[i];

		if (!list_empty(&txwd->list))
			continue;

		rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, i, RTW89_TX_MACID_DROP);
	}
}
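/* TX completions do not arrive on the TX rings themselves: the device posts
 * them as rtw89_pci_rpp_fmt entries inside RPQ RX buffers. The helper below
 * parses one such buffer, an RX desc followed by an array of RPP entries,
 * and releases the corresponding TX skbs.
 */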
static u32 rtw89_pci_release_tx_skbs(struct rtw89_dev *rtwdev,
				     struct rtw89_pci_rx_ring *rx_ring,
				     u32 max_cnt)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	struct rtw89_pci_rx_info *rx_info;
	struct rtw89_pci_rpp_fmt *rpp;
	struct rtw89_rx_desc_info desc_info = {};
	struct sk_buff *skb;
	u32 cnt = 0;
	u32 rpp_size = sizeof(struct rtw89_pci_rpp_fmt);
	u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
	u32 skb_idx;
	u32 offset;
	int ret;

	skb_idx = rtw89_pci_get_rx_skb_idx(rtwdev, bd_ring);
	skb = rx_ring->buf[skb_idx];
	rtw89_pci_sync_skb_for_cpu(rtwdev, skb);

	ret = rtw89_pci_rxbd_info_update(rtwdev, skb);
	if (ret) {
		rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
			  bd_ring->wp, ret);
		goto err_sync_device;
	}

	rx_info = RTW89_PCI_RX_SKB_CB(skb);
	if (!rx_info->fs || !rx_info->ls) {
		rtw89_err(rtwdev, "cannot process RP frame without FS/LS set\n");
		return cnt;
	}

	rtw89_chip_query_rxdesc(rtwdev, &desc_info, skb->data, rxinfo_size);

	/* first segment has RX desc */
	offset = desc_info.offset + desc_info.rxd_len;
	for (; offset + rpp_size <= rx_info->len; offset += rpp_size) {
		rpp = (struct rtw89_pci_rpp_fmt *)(skb->data + offset);
		rtw89_pci_release_rpp(rtwdev, rpp);
	}

	rtw89_pci_sync_skb_for_device(rtwdev, skb);
	rtw89_pci_rxbd_increase(rx_ring, 1);
	cnt++;

	return cnt;

err_sync_device:
	rtw89_pci_sync_skb_for_device(rtwdev, skb);
	return 0;
}

static void rtw89_pci_release_tx(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_rx_ring *rx_ring,
				 u32 cnt)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	u32 release_cnt;

	while (cnt) {
		release_cnt = rtw89_pci_release_tx_skbs(rtwdev, rx_ring, cnt);
		if (!release_cnt) {
			rtw89_err(rtwdev, "failed to release TX skbs\n");

			/* skip the remaining RXBD bufs */
			rtw89_pci_rxbd_increase(rx_ring, cnt);
			break;
		}

		cnt -= release_cnt;
	}

	rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
}

static int rtw89_pci_poll_rpq_dma(struct rtw89_dev *rtwdev,
				  struct rtw89_pci *rtwpci, int budget)
{
	struct rtw89_pci_rx_ring *rx_ring;
	u32 cnt;
	int work_done;

	rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];

	spin_lock_bh(&rtwpci->trx_lock);

	cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
	if (cnt == 0)
		goto out_unlock;

	rtw89_pci_release_tx(rtwdev, rx_ring, cnt);

out_unlock:
	spin_unlock_bh(&rtwpci->trx_lock);

	/* always release all RPQ */
	work_done = min_t(int, cnt, budget);
	rtwdev->napi_budget_countdown -= work_done;

	return work_done;
}

static void rtw89_pci_isr_rxd_unavail(struct rtw89_dev *rtwdev,
				      struct rtw89_pci *rtwpci)
{
	struct rtw89_pci_rx_ring *rx_ring;
	struct rtw89_pci_dma_ring *bd_ring;
	u32 reg_idx;
	u16 hw_idx, hw_idx_next, host_idx;
	int i;

	for (i = 0; i < RTW89_RXCH_NUM; i++) {
		rx_ring = &rtwpci->rx_rings[i];
		bd_ring = &rx_ring->bd_ring;

		reg_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
		hw_idx = FIELD_GET(TXBD_HW_IDX_MASK, reg_idx);
		host_idx = FIELD_GET(TXBD_HOST_IDX_MASK, reg_idx);
		hw_idx_next = (hw_idx + 1) % bd_ring->len;

		if (hw_idx_next == host_idx)
			rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "%d RXD unavailable\n", i);

		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
			    "%d RXD unavailable, idx=0x%08x, len=%d\n",
			    i, reg_idx, bd_ring->len);
	}
}
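/* The recognize helpers below snapshot pending interrupt status, masked by
 * what we enabled, then write the recognized bits back to acknowledge them.
 * The _v1/_v2 variants first consult an indirect ("ind") status register to
 * decide which per-bank status registers are worth reading.
 */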
void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev,
			       struct rtw89_pci *rtwpci,
			       struct rtw89_pci_isrs *isrs)
{
	isrs->halt_c2h_isrs = rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs;
	isrs->isrs[0] = rtw89_read32(rtwdev, R_AX_PCIE_HISR00) & rtwpci->intrs[0];
	isrs->isrs[1] = rtw89_read32(rtwdev, R_AX_PCIE_HISR10) & rtwpci->intrs[1];

	rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs);
	rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isrs->isrs[0]);
	rtw89_write32(rtwdev, R_AX_PCIE_HISR10, isrs->isrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_recognize_intrs);

void rtw89_pci_recognize_intrs_v1(struct rtw89_dev *rtwdev,
				  struct rtw89_pci *rtwpci,
				  struct rtw89_pci_isrs *isrs)
{
	isrs->ind_isrs = rtw89_read32(rtwdev, R_AX_PCIE_HISR00_V1) & rtwpci->ind_intrs;
	isrs->halt_c2h_isrs = isrs->ind_isrs & B_AX_HS0ISR_IND_INT_EN ?
			      rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs : 0;
	isrs->isrs[0] = isrs->ind_isrs & B_AX_HCI_AXIDMA_INT_EN ?
			rtw89_read32(rtwdev, R_AX_HAXI_HISR00) & rtwpci->intrs[0] : 0;
	isrs->isrs[1] = isrs->ind_isrs & B_AX_HS1ISR_IND_INT_EN ?
			rtw89_read32(rtwdev, R_AX_HISR1) & rtwpci->intrs[1] : 0;

	if (isrs->halt_c2h_isrs)
		rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs);
	if (isrs->isrs[0])
		rtw89_write32(rtwdev, R_AX_HAXI_HISR00, isrs->isrs[0]);
	if (isrs->isrs[1])
		rtw89_write32(rtwdev, R_AX_HISR1, isrs->isrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v1);

void rtw89_pci_recognize_intrs_v2(struct rtw89_dev *rtwdev,
				  struct rtw89_pci *rtwpci,
				  struct rtw89_pci_isrs *isrs)
{
	isrs->ind_isrs = rtw89_read32(rtwdev, R_BE_PCIE_HISR) & rtwpci->ind_intrs;
	isrs->halt_c2h_isrs = isrs->ind_isrs & B_BE_HS0ISR_IND_INT ?
			      rtw89_read32(rtwdev, R_BE_HISR0) & rtwpci->halt_c2h_intrs : 0;
	isrs->isrs[0] = isrs->ind_isrs & B_BE_HCI_AXIDMA_INT ?
			rtw89_read32(rtwdev, R_BE_HAXI_HISR00) & rtwpci->intrs[0] : 0;
	isrs->isrs[1] = rtw89_read32(rtwdev, R_BE_PCIE_DMA_ISR);

	if (isrs->halt_c2h_isrs)
		rtw89_write32(rtwdev, R_BE_HISR0, isrs->halt_c2h_isrs);
	if (isrs->isrs[0])
		rtw89_write32(rtwdev, R_BE_HAXI_HISR00, isrs->isrs[0]);
	if (isrs->isrs[1])
		rtw89_write32(rtwdev, R_BE_PCIE_DMA_ISR, isrs->isrs[1]);
	rtw89_write32(rtwdev, R_BE_PCIE_HISR, isrs->ind_isrs);
}
EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v2);

void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, rtwpci->intrs[0]);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, rtwpci->intrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_enable_intr);

void rtw89_pci_disable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_HIMR0, 0);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, 0);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, 0);
}
EXPORT_SYMBOL(rtw89_pci_disable_intr);

void rtw89_pci_enable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, rtwpci->ind_intrs);
	rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
	rtw89_write32(rtwdev, R_AX_HAXI_HIMR00, rtwpci->intrs[0]);
	rtw89_write32(rtwdev, R_AX_HIMR1, rtwpci->intrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_enable_intr_v1);

void rtw89_pci_disable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, 0);
}
EXPORT_SYMBOL(rtw89_pci_disable_intr_v1);

void rtw89_pci_enable_intr_v2(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_BE_HIMR0, rtwpci->halt_c2h_intrs);
	rtw89_write32(rtwdev, R_BE_HAXI_HIMR00, rtwpci->intrs[0]);
	rtw89_write32(rtwdev, R_BE_PCIE_DMA_IMR_0_V1, rtwpci->intrs[1]);
	rtw89_write32(rtwdev, R_BE_PCIE_HIMR0, rtwpci->ind_intrs);
}
EXPORT_SYMBOL(rtw89_pci_enable_intr_v2);

void rtw89_pci_disable_intr_v2(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_BE_PCIE_HIMR0, 0);
	rtw89_write32(rtwdev, R_BE_PCIE_DMA_IMR_0_V1, 0);
}
EXPORT_SYMBOL(rtw89_pci_disable_intr_v2);
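/* SER recovery swaps the interrupt mask rather than leaving it off: the two
 * helpers below reprogram it under irq_lock, with interrupts disabled across
 * the switch, so the ISR never runs against a half-updated mask.
 */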
static void rtw89_pci_ops_recovery_start(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw89_chip_disable_intr(rtwdev, rtwpci);
	rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_START);
	rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static void rtw89_pci_ops_recovery_complete(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw89_chip_disable_intr(rtwdev, rtwpci);
	rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_COMPLETE);
	rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static void rtw89_pci_low_power_interrupt_handler(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	int budget = NAPI_POLL_WEIGHT;

	/* To prevent the RXQ from getting stuck when the budget runs out. */
	rtwdev->napi_budget_countdown = budget;

	rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, budget);
	rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, budget);
}

static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev)
{
	struct rtw89_dev *rtwdev = dev;
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	const struct rtw89_pci_gen_def *gen_def = info->gen_def;
	struct rtw89_pci_isrs isrs;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw89_chip_recognize_intrs(rtwdev, rtwpci, &isrs);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);

	if (unlikely(isrs.isrs[0] & gen_def->isr_rdu))
		rtw89_pci_isr_rxd_unavail(rtwdev, rtwpci);

	if (unlikely(isrs.halt_c2h_isrs & gen_def->isr_halt_c2h))
		rtw89_ser_notify(rtwdev, rtw89_mac_get_err_status(rtwdev));

	if (unlikely(isrs.halt_c2h_isrs & gen_def->isr_wdt_timeout))
		rtw89_ser_notify(rtwdev, MAC_AX_ERR_L2_ERR_WDT_TIMEOUT_INT);

	if (unlikely(rtwpci->under_recovery))
		goto enable_intr;

	if (unlikely(rtwpci->low_power)) {
		rtw89_pci_low_power_interrupt_handler(rtwdev);
		goto enable_intr;
	}

	if (likely(rtwpci->running)) {
		local_bh_disable();
		napi_schedule(&rtwdev->napi);
		local_bh_enable();
	}

	return IRQ_HANDLED;

enable_intr:
	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	if (likely(rtwpci->running))
		rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
	return IRQ_HANDLED;
}

static irqreturn_t rtw89_pci_interrupt_handler(int irq, void *dev)
{
	struct rtw89_dev *rtwdev = dev;
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;
	irqreturn_t irqret = IRQ_WAKE_THREAD;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);

	/* If an interrupt event is already in flight, it can still fire even
	 * after pci_stop() has turned off the IMR.
	 */
	if (unlikely(!rtwpci->running)) {
		irqret = IRQ_HANDLED;
		goto exit;
	}

	rtw89_chip_disable_intr(rtwdev, rtwpci);
exit:
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);

	return irqret;
}
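/* The DEF_*CHADDRS* macros below stamp out per-channel register maps by token
 * pasting; the trailing variadic argument, when present, appends a register
 * generation suffix. For example, DEF_TXCHADDRS(info, ACH0) expands to:
 *
 *	[RTW89_TXCH_ACH0] = {
 *		.num = R_AX_ACH0_TXBD_NUM,
 *		.idx = R_AX_ACH0_TXBD_IDX,
 *		.bdram = R_AX_ACH0_BDRAM_CTRL,
 *		.desa_l = R_AX_ACH0_TXBD_DESA_L,
 *		.desa_h = R_AX_ACH0_TXBD_DESA_H,
 *	}
 *
 * Note that in DEF_TXCHADDRS only the fields written with ##v (bdram and the
 * DESA pair) pick up the suffix, so the _V1 address set keeps the plain num
 * and idx registers for those channels.
 */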
#define DEF_TXCHADDRS_TYPE2(gen, ch_idx, txch, v...) \
	[RTW89_TXCH_##ch_idx] = { \
		.num = R_##gen##_##txch##_TXBD_NUM ##v, \
		.idx = R_##gen##_##txch##_TXBD_IDX ##v, \
		.bdram = 0, \
		.desa_l = R_##gen##_##txch##_TXBD_DESA_L ##v, \
		.desa_h = R_##gen##_##txch##_TXBD_DESA_H ##v, \
	}

#define DEF_TXCHADDRS_TYPE1(info, txch, v...) \
	[RTW89_TXCH_##txch] = { \
		.num = R_AX_##txch##_TXBD_NUM ##v, \
		.idx = R_AX_##txch##_TXBD_IDX ##v, \
		.bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
		.desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
		.desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
	}

#define DEF_TXCHADDRS(info, txch, v...) \
	[RTW89_TXCH_##txch] = { \
		.num = R_AX_##txch##_TXBD_NUM, \
		.idx = R_AX_##txch##_TXBD_IDX, \
		.bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
		.desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
		.desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
	}

#define DEF_RXCHADDRS(gen, ch_idx, rxch, v...) \
	[RTW89_RXCH_##ch_idx] = { \
		.num = R_##gen##_##rxch##_RXBD_NUM ##v, \
		.idx = R_##gen##_##rxch##_RXBD_IDX ##v, \
		.desa_l = R_##gen##_##rxch##_RXBD_DESA_L ##v, \
		.desa_h = R_##gen##_##rxch##_RXBD_DESA_H ##v, \
	}

const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set = {
	.tx = {
		DEF_TXCHADDRS(info, ACH0),
		DEF_TXCHADDRS(info, ACH1),
		DEF_TXCHADDRS(info, ACH2),
		DEF_TXCHADDRS(info, ACH3),
		DEF_TXCHADDRS(info, ACH4),
		DEF_TXCHADDRS(info, ACH5),
		DEF_TXCHADDRS(info, ACH6),
		DEF_TXCHADDRS(info, ACH7),
		DEF_TXCHADDRS(info, CH8),
		DEF_TXCHADDRS(info, CH9),
		DEF_TXCHADDRS_TYPE1(info, CH10),
		DEF_TXCHADDRS_TYPE1(info, CH11),
		DEF_TXCHADDRS(info, CH12),
	},
	.rx = {
		DEF_RXCHADDRS(AX, RXQ, RXQ),
		DEF_RXCHADDRS(AX, RPQ, RPQ),
	},
};
EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set);

const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1 = {
	.tx = {
		DEF_TXCHADDRS(info, ACH0, _V1),
		DEF_TXCHADDRS(info, ACH1, _V1),
		DEF_TXCHADDRS(info, ACH2, _V1),
		DEF_TXCHADDRS(info, ACH3, _V1),
		DEF_TXCHADDRS(info, ACH4, _V1),
		DEF_TXCHADDRS(info, ACH5, _V1),
		DEF_TXCHADDRS(info, ACH6, _V1),
		DEF_TXCHADDRS(info, ACH7, _V1),
		DEF_TXCHADDRS(info, CH8, _V1),
		DEF_TXCHADDRS(info, CH9, _V1),
		DEF_TXCHADDRS_TYPE1(info, CH10, _V1),
		DEF_TXCHADDRS_TYPE1(info, CH11, _V1),
		DEF_TXCHADDRS(info, CH12, _V1),
	},
	.rx = {
		DEF_RXCHADDRS(AX, RXQ, RXQ, _V1),
		DEF_RXCHADDRS(AX, RPQ, RPQ, _V1),
	},
};
EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_v1);

const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_be = {
	.tx = {
		DEF_TXCHADDRS_TYPE2(BE, ACH0, CH0, _V1),
		DEF_TXCHADDRS_TYPE2(BE, ACH1, CH1, _V1),
		DEF_TXCHADDRS_TYPE2(BE, ACH2, CH2, _V1),
		DEF_TXCHADDRS_TYPE2(BE, ACH3, CH3, _V1),
		DEF_TXCHADDRS_TYPE2(BE, ACH4, CH4, _V1),
		DEF_TXCHADDRS_TYPE2(BE, ACH5, CH5, _V1),
		DEF_TXCHADDRS_TYPE2(BE, ACH6, CH6, _V1),
		DEF_TXCHADDRS_TYPE2(BE, ACH7, CH7, _V1),
		DEF_TXCHADDRS_TYPE2(BE, CH8, CH8, _V1),
		DEF_TXCHADDRS_TYPE2(BE, CH9, CH9, _V1),
		DEF_TXCHADDRS_TYPE2(BE, CH10, CH10, _V1),
		DEF_TXCHADDRS_TYPE2(BE, CH11, CH11, _V1),
		DEF_TXCHADDRS_TYPE2(BE, CH12, CH12, _V1),
	},
	.rx = {
		DEF_RXCHADDRS(BE, RXQ, RXQ0, _V1),
		DEF_RXCHADDRS(BE, RPQ, RPQ0, _V1),
	},
};
EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_be);

#undef DEF_TXCHADDRS_TYPE1
#undef DEF_TXCHADDRS
#undef DEF_RXCHADDRS

static int rtw89_pci_get_txch_addrs(struct rtw89_dev *rtwdev,
				    enum rtw89_tx_channel txch,
				    const struct rtw89_pci_ch_dma_addr **addr)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;

	if (txch >= RTW89_TXCH_NUM)
		return -EINVAL;

	*addr = &info->dma_addr_set->tx[txch];

	return 0;
}
static int rtw89_pci_get_rxch_addrs(struct rtw89_dev *rtwdev,
				    enum rtw89_rx_channel rxch,
				    const struct rtw89_pci_ch_dma_addr **addr)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;

	if (rxch >= RTW89_RXCH_NUM)
		return -EINVAL;

	*addr = &info->dma_addr_set->rx[rxch];

	return 0;
}

static u32 rtw89_pci_get_avail_txbd_num(struct rtw89_pci_tx_ring *ring)
{
	struct rtw89_pci_dma_ring *bd_ring = &ring->bd_ring;

	/* reserve one desc so we can tell a full ring from an empty one */
	if (bd_ring->rp > bd_ring->wp)
		return bd_ring->rp - bd_ring->wp - 1;

	return bd_ring->len - (bd_ring->wp - bd_ring->rp) - 1;
}
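/* Example of the accounting above: with len=256, wp=10 and rp=5, five
 * descriptors are in flight and 256 - (10 - 5) - 1 = 250 remain available.
 * The one reserved slot keeps a full ring (wp one behind rp) distinguishable
 * from an empty one (wp == rp).
 */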
static
u32 __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];
	u32 cnt;

	spin_lock_bh(&rtwpci->trx_lock);
	rtw89_pci_reclaim_tx_fwcmd(rtwdev, rtwpci);
	cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	spin_unlock_bh(&rtwpci->trx_lock);

	return cnt;
}

static
u32 __rtw89_pci_check_and_reclaim_tx_resource_noio(struct rtw89_dev *rtwdev,
						   u8 txch)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
	u32 cnt;

	spin_lock_bh(&rtwpci->trx_lock);
	cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	cnt = min(cnt, wd_ring->curr_num);
	spin_unlock_bh(&rtwpci->trx_lock);

	return cnt;
}

static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
						     u8 txch)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 bd_cnt, wd_cnt, min_cnt = 0;
	struct rtw89_pci_rx_ring *rx_ring;
	enum rtw89_debug_mask debug_mask;
	u32 cnt;

	rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];

	spin_lock_bh(&rtwpci->trx_lock);
	bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	wd_cnt = wd_ring->curr_num;

	if (wd_cnt == 0 || bd_cnt == 0) {
		cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
		if (cnt)
			rtw89_pci_release_tx(rtwdev, rx_ring, cnt);
		else if (wd_cnt == 0)
			goto out_unlock;

		bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
		if (bd_cnt == 0)
			rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
	}

	bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	wd_cnt = wd_ring->curr_num;
	min_cnt = min(bd_cnt, wd_cnt);
	if (min_cnt == 0) {
		/* This message can be frequently shown in low power mode or
		 * high traffic with small FIFO chips, and we have recognized
		 * it as normal behavior, so print with mask RTW89_DBG_TXRX in
		 * these situations.
		 */
		if (rtwpci->low_power || chip->small_fifo_size)
			debug_mask = RTW89_DBG_TXRX;
		else
			debug_mask = RTW89_DBG_UNEXP;

		rtw89_debug(rtwdev, debug_mask,
			    "still no tx resource after reclaim: wd_cnt=%d bd_cnt=%d\n",
			    wd_cnt, bd_cnt);
	}

out_unlock:
	spin_unlock_bh(&rtwpci->trx_lock);

	return min_cnt;
}

static u32 rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
						   u8 txch)
{
	if (rtwdev->hci.paused)
		return __rtw89_pci_check_and_reclaim_tx_resource_noio(rtwdev, txch);

	if (txch == RTW89_TXCH_CH12)
		return __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(rtwdev);

	return __rtw89_pci_check_and_reclaim_tx_resource(rtwdev, txch);
}

static void __rtw89_pci_tx_kick_off(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 host_idx, addr;

	spin_lock_bh(&rtwpci->trx_lock);

	addr = bd_ring->addr.idx;
	host_idx = bd_ring->wp;
	rtw89_write16(rtwdev, addr, host_idx);

	spin_unlock_bh(&rtwpci->trx_lock);
}

static void rtw89_pci_tx_bd_ring_update(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring,
					int n_txbd)
{
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 host_idx, len;

	len = bd_ring->len;
	host_idx = bd_ring->wp + n_txbd;
	host_idx = host_idx < len ? host_idx : host_idx - len;

	bd_ring->wp = host_idx;
}

static void rtw89_pci_ops_tx_kick_off(struct rtw89_dev *rtwdev, u8 txch)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];

	if (rtwdev->hci.paused) {
		set_bit(txch, rtwpci->kick_map);
		return;
	}

	__rtw89_pci_tx_kick_off(rtwdev, tx_ring);
}

static void rtw89_pci_tx_kick_off_pending(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring;
	int txch;

	for (txch = 0; txch < RTW89_TXCH_NUM; txch++) {
		if (!test_and_clear_bit(txch, rtwpci->kick_map))
			continue;

		tx_ring = &rtwpci->tx_rings[txch];
		__rtw89_pci_tx_kick_off(rtwdev, tx_ring);
	}
}
static void __pci_flush_txch(struct rtw89_dev *rtwdev, u8 txch, bool drop)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 cur_idx, cur_rp;
	u8 i;

	/* Because the time taken by the I/O is a bit dynamic, it's hard to
	 * define a reasonable fixed total timeout to use the
	 * read_poll_timeout* helpers. Instead, we can bound the number of
	 * polls, so we just use a for loop with udelay here.
	 */
	for (i = 0; i < 60; i++) {
		cur_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
		cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
		if (cur_rp == bd_ring->wp)
			return;

		udelay(1);
	}

	if (!drop)
		rtw89_info(rtwdev, "timed out flushing pci txch: %d\n", txch);
}

static void __rtw89_pci_ops_flush_txchs(struct rtw89_dev *rtwdev, u32 txchs,
					bool drop)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	u8 i;

	for (i = 0; i < RTW89_TXCH_NUM; i++) {
		/* It may be unnecessary to flush FWCMD queue. */
		if (i == RTW89_TXCH_CH12)
			continue;
		if (info->tx_dma_ch_mask & BIT(i))
			continue;

		if (txchs & BIT(i))
			__pci_flush_txch(rtwdev, i, drop);
	}
}

static void rtw89_pci_ops_flush_queues(struct rtw89_dev *rtwdev, u32 queues,
				       bool drop)
{
	__rtw89_pci_ops_flush_txchs(rtwdev, BIT(RTW89_TXCH_NUM) - 1, drop);
}

u32 rtw89_pci_fill_txaddr_info(struct rtw89_dev *rtwdev,
			       void *txaddr_info_addr, u32 total_len,
			       dma_addr_t dma, u8 *add_info_nr)
{
	struct rtw89_pci_tx_addr_info_32 *txaddr_info = txaddr_info_addr;

	txaddr_info->length = cpu_to_le16(total_len);
	txaddr_info->option = cpu_to_le16(RTW89_PCI_ADDR_MSDU_LS |
					  RTW89_PCI_ADDR_NUM(1));
	txaddr_info->dma = cpu_to_le32(dma);

	*add_info_nr = 1;

	return sizeof(*txaddr_info);
}
EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info);
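/* The v1 format below scatters one contiguous DMA mapping across up to
 * RTW89_TXADDR_INFO_NR_V1 entries of at most TXADDR_INFO_LENTHG_V1_MAX bytes
 * each, setting LS (last segment) on the entry that exhausts the length.
 * Illustration with a made-up 2048-byte per-entry limit (not the macro's
 * real value): a 5000-byte buffer becomes three entries of 2048, 2048 and
 * 904 bytes, with LS set only on the third.
 */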
u32 rtw89_pci_fill_txaddr_info_v1(struct rtw89_dev *rtwdev,
				  void *txaddr_info_addr, u32 total_len,
				  dma_addr_t dma, u8 *add_info_nr)
{
	struct rtw89_pci_tx_addr_info_32_v1 *txaddr_info = txaddr_info_addr;
	u32 remain = total_len;
	u32 len;
	u16 length_option;
	int n;

	for (n = 0; n < RTW89_TXADDR_INFO_NR_V1 && remain; n++) {
		len = remain >= TXADDR_INFO_LENTHG_V1_MAX ?
		      TXADDR_INFO_LENTHG_V1_MAX : remain;
		remain -= len;

		length_option = FIELD_PREP(B_PCIADDR_LEN_V1_MASK, len) |
				FIELD_PREP(B_PCIADDR_HIGH_SEL_V1_MASK, 0) |
				FIELD_PREP(B_PCIADDR_LS_V1_MASK, remain == 0);
		txaddr_info->length_opt = cpu_to_le16(length_option);
		txaddr_info->dma_low_lsb = cpu_to_le16(FIELD_GET(GENMASK(15, 0), dma));
		txaddr_info->dma_low_msb = cpu_to_le16(FIELD_GET(GENMASK(31, 16), dma));

		dma += len;
		txaddr_info++;
	}

	WARN_ONCE(remain, "length overflow remain=%u total_len=%u",
		  remain, total_len);

	*add_info_nr = n;

	return n * sizeof(*txaddr_info);
}
EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info_v1);
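/* The submit path below lays out one TXWD page as:
 *
 *	txwd->vaddr
 *	+---------------------+
 *	| TXWD body           | chip->txwd_body_size
 *	| (+ WD info)         | chip->txwd_info_size, only if en_wd_info
 *	+---------------------+
 *	| TXWP info           | seq0..seq3, tags the page with txwd->seq
 *	+---------------------+
 *	| TX addr info        | one or more DMA address/length entries
 *	+---------------------+
 *
 * and txwd->len ends up as the sum of the three parts.
 */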
static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_tx_ring *tx_ring,
				 struct rtw89_pci_tx_wd *txwd,
				 struct rtw89_core_tx_request *tx_req)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
	struct rtw89_pci_tx_wp_info *txwp_info;
	void *txaddr_info_addr;
	struct pci_dev *pdev = rtwpci->pdev;
	struct sk_buff *skb = tx_req->skb;
	struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
	struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
	bool en_wd_info = desc_info->en_wd_info;
	u32 txwd_len;
	u32 txwp_len;
	u32 txaddr_info_len;
	dma_addr_t dma;
	int ret;

	dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma)) {
		rtw89_err(rtwdev, "failed to map skb dma data\n");
		ret = -EBUSY;
		goto err;
	}

	tx_data->dma = dma;
	rcu_assign_pointer(skb_data->wait, NULL);

	txwp_len = sizeof(*txwp_info);
	txwd_len = chip->txwd_body_size;
	txwd_len += en_wd_info ? chip->txwd_info_size : 0;

	txwp_info = txwd->vaddr + txwd_len;
	txwp_info->seq0 = cpu_to_le16(txwd->seq | RTW89_PCI_TXWP_VALID);
	txwp_info->seq1 = 0;
	txwp_info->seq2 = 0;
	txwp_info->seq3 = 0;

	tx_ring->tx_cnt++;
	txaddr_info_addr = txwd->vaddr + txwd_len + txwp_len;
	txaddr_info_len =
		rtw89_chip_fill_txaddr_info(rtwdev, txaddr_info_addr, skb->len,
					    dma, &desc_info->addr_info_nr);

	txwd->len = txwd_len + txwp_len + txaddr_info_len;

	rtw89_chip_fill_txdesc(rtwdev, desc_info, txwd->vaddr);

	skb_queue_tail(&txwd->queue, skb);

	return 0;

err:
	return ret;
}

static int rtw89_pci_fwcmd_submit(struct rtw89_dev *rtwdev,
				  struct rtw89_pci_tx_ring *tx_ring,
				  struct rtw89_pci_tx_bd_32 *txbd,
				  struct rtw89_core_tx_request *tx_req)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
	void *txdesc;
	int txdesc_size = chip->h2c_desc_size;
	struct pci_dev *pdev = rtwpci->pdev;
	struct sk_buff *skb = tx_req->skb;
	struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
	dma_addr_t dma;

	txdesc = skb_push(skb, txdesc_size);
	memset(txdesc, 0, txdesc_size);
	rtw89_chip_fill_txdesc_fwcmd(rtwdev, desc_info, txdesc);

	dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma)) {
		rtw89_err(rtwdev, "failed to map fwcmd dma data\n");
		return -EBUSY;
	}

	tx_data->dma = dma;
	txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS);
	txbd->length = cpu_to_le16(skb->len);
	txbd->dma = cpu_to_le32(tx_data->dma);
	skb_queue_tail(&rtwpci->h2c_queue, skb);

	rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1);

	return 0;
}
static int rtw89_pci_txbd_submit(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_tx_ring *tx_ring,
				 struct rtw89_pci_tx_bd_32 *txbd,
				 struct rtw89_core_tx_request *tx_req)
{
	struct rtw89_pci_tx_wd *txwd;
	int ret;

	/* FWCMD queue doesn't have wd pages. Instead, it submits the CMD
	 * buffer with WD BODY only. So here we don't need to check the free
	 * pages of the wd ring.
	 */
	if (tx_ring->txch == RTW89_TXCH_CH12)
		return rtw89_pci_fwcmd_submit(rtwdev, tx_ring, txbd, tx_req);

	txwd = rtw89_pci_dequeue_txwd(tx_ring);
	if (!txwd) {
		rtw89_err(rtwdev, "no available TXWD\n");
		ret = -ENOSPC;
		goto err;
	}

	ret = rtw89_pci_txwd_submit(rtwdev, tx_ring, txwd, tx_req);
	if (ret) {
		rtw89_err(rtwdev, "failed to submit TXWD %d\n", txwd->seq);
		goto err_enqueue_wd;
	}

	list_add_tail(&txwd->list, &tx_ring->busy_pages);

	txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS);
	txbd->length = cpu_to_le16(txwd->len);
	txbd->dma = cpu_to_le32(txwd->paddr);

	rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1);

	return 0;

err_enqueue_wd:
	rtw89_pci_enqueue_txwd(tx_ring, txwd);
err:
	return ret;
}

static int rtw89_pci_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req,
			      u8 txch)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring;
	struct rtw89_pci_tx_bd_32 *txbd;
	u32 n_avail_txbd;
	int ret = 0;

	/* check the tx type and dma channel for fw cmd queue */
	if ((txch == RTW89_TXCH_CH12 ||
	     tx_req->tx_type == RTW89_CORE_TX_TYPE_FWCMD) &&
	    (txch != RTW89_TXCH_CH12 ||
	     tx_req->tx_type != RTW89_CORE_TX_TYPE_FWCMD)) {
		rtw89_err(rtwdev, "only fw cmd uses dma channel 12\n");
		return -EINVAL;
	}

	tx_ring = &rtwpci->tx_rings[txch];
	spin_lock_bh(&rtwpci->trx_lock);

	n_avail_txbd = rtw89_pci_get_avail_txbd_num(tx_ring);
	if (n_avail_txbd == 0) {
		rtw89_err(rtwdev, "no available TXBD\n");
		ret = -ENOSPC;
		goto err_unlock;
	}

	txbd = rtw89_pci_get_next_txbd(tx_ring);
	ret = rtw89_pci_txbd_submit(rtwdev, tx_ring, txbd, tx_req);
	if (ret) {
		rtw89_err(rtwdev, "failed to submit TXBD\n");
		goto err_unlock;
	}

	spin_unlock_bh(&rtwpci->trx_lock);
	return 0;

err_unlock:
	spin_unlock_bh(&rtwpci->trx_lock);
	return ret;
}

static int rtw89_pci_ops_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req)
{
	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
	int ret;

	ret = rtw89_pci_tx_write(rtwdev, tx_req, desc_info->ch_dma);
	if (ret) {
		rtw89_err(rtwdev, "failed to TX Queue %d\n", desc_info->ch_dma);
		return ret;
	}

	return 0;
}

const struct rtw89_pci_bd_ram rtw89_bd_ram_table_dual[RTW89_TXCH_NUM] = {
	[RTW89_TXCH_ACH0] = {.start_idx = 0,  .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH1] = {.start_idx = 5,  .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH4] = {.start_idx = 20, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH5] = {.start_idx = 25, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH6] = {.start_idx = 30, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH7] = {.start_idx = 35, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_CH8]  = {.start_idx = 40, .max_num = 5, .min_num = 1},
	[RTW89_TXCH_CH9]  = {.start_idx = 45, .max_num = 5, .min_num = 1},
	[RTW89_TXCH_CH10] = {.start_idx = 50, .max_num = 5, .min_num = 1},
	[RTW89_TXCH_CH11] = {.start_idx = 55, .max_num = 5, .min_num = 1},
	[RTW89_TXCH_CH12] = {.start_idx = 60, .max_num = 4, .min_num = 1},
};
EXPORT_SYMBOL(rtw89_bd_ram_table_dual);

const struct rtw89_pci_bd_ram rtw89_bd_ram_table_single[RTW89_TXCH_NUM] = {
	[RTW89_TXCH_ACH0] = {.start_idx = 0,  .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH1] = {.start_idx = 5,  .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_CH8]  = {.start_idx = 20, .max_num = 4, .min_num = 1},
	[RTW89_TXCH_CH9]  = {.start_idx = 24, .max_num = 4, .min_num = 1},
	[RTW89_TXCH_CH12] = {.start_idx = 28, .max_num = 4, .min_num = 1},
};
EXPORT_SYMBOL(rtw89_bd_ram_table_single);
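/* The tables above carve the shared TX BD RAM into per-channel slices:
 * start_idx is the channel's first slot, max_num caps the slots it may
 * occupy and min_num is presumably its guaranteed minimum share. The values
 * are programmed into each channel's bdram register by
 * rtw89_pci_reset_trx_rings() below.
 */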
static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	const struct rtw89_pci_bd_ram *bd_ram_table = *info->bd_ram_table;
	struct rtw89_pci_tx_ring *tx_ring;
	struct rtw89_pci_rx_ring *rx_ring;
	struct rtw89_pci_dma_ring *bd_ring;
	const struct rtw89_pci_bd_ram *bd_ram;
	u32 addr_num;
	u32 addr_idx;
	u32 addr_bdram;
	u32 addr_desa_l;
	u32 val32;
	int i;

	for (i = 0; i < RTW89_TXCH_NUM; i++) {
		if (info->tx_dma_ch_mask & BIT(i))
			continue;

		tx_ring = &rtwpci->tx_rings[i];
		bd_ring = &tx_ring->bd_ring;
		bd_ram = bd_ram_table ? &bd_ram_table[i] : NULL;
		addr_num = bd_ring->addr.num;
		addr_bdram = bd_ring->addr.bdram;
		addr_desa_l = bd_ring->addr.desa_l;
		bd_ring->wp = 0;
		bd_ring->rp = 0;

		rtw89_write16(rtwdev, addr_num, bd_ring->len);
		if (addr_bdram && bd_ram) {
			val32 = FIELD_PREP(BDRAM_SIDX_MASK, bd_ram->start_idx) |
				FIELD_PREP(BDRAM_MAX_MASK, bd_ram->max_num) |
				FIELD_PREP(BDRAM_MIN_MASK, bd_ram->min_num);

			rtw89_write32(rtwdev, addr_bdram, val32);
		}
		rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
	}

	for (i = 0; i < RTW89_RXCH_NUM; i++) {
		rx_ring = &rtwpci->rx_rings[i];
		bd_ring = &rx_ring->bd_ring;
		addr_num = bd_ring->addr.num;
		addr_idx = bd_ring->addr.idx;
		addr_desa_l = bd_ring->addr.desa_l;
		if (info->rx_ring_eq_is_full)
			bd_ring->wp = bd_ring->len - 1;
		else
			bd_ring->wp = 0;
		bd_ring->rp = 0;
		rx_ring->diliver_skb = NULL;
		rx_ring->diliver_desc.ready = false;

		rtw89_write16(rtwdev, addr_num, bd_ring->len);
		rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);

		if (info->rx_ring_eq_is_full)
			rtw89_write16(rtwdev, addr_idx, bd_ring->wp);
	}
}

static void rtw89_pci_release_tx_ring(struct rtw89_dev *rtwdev,
				      struct rtw89_pci_tx_ring *tx_ring)
{
	rtw89_pci_release_busy_txwd(rtwdev, tx_ring);
	rtw89_pci_release_pending_txwd_skb(rtwdev, tx_ring);
}

void rtw89_pci_ops_reset(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	int txch;

	rtw89_pci_reset_trx_rings(rtwdev);

	spin_lock_bh(&rtwpci->trx_lock);
	for (txch = 0; txch < RTW89_TXCH_NUM; txch++) {
		if (info->tx_dma_ch_mask & BIT(txch))
			continue;
		if (txch == RTW89_TXCH_CH12) {
			rtw89_pci_release_fwcmd(rtwdev, rtwpci,
						skb_queue_len(&rtwpci->h2c_queue), true);
			continue;
		}
		rtw89_pci_release_tx_ring(rtwdev, &rtwpci->tx_rings[txch]);
	}
	spin_unlock_bh(&rtwpci->trx_lock);
}

static void rtw89_pci_enable_intr_lock(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtwpci->running = true;
	rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static void rtw89_pci_disable_intr_lock(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtwpci->running = false;
	rtw89_chip_disable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static int rtw89_pci_ops_start(struct rtw89_dev *rtwdev)
{
	rtw89_core_napi_start(rtwdev);
	rtw89_pci_enable_intr_lock(rtwdev);

	return 0;
}

static void rtw89_pci_ops_stop(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;

	rtw89_pci_disable_intr_lock(rtwdev);
	synchronize_irq(pdev->irq);
	rtw89_core_napi_stop(rtwdev);
}

static void rtw89_pci_ops_pause(struct rtw89_dev *rtwdev, bool pause)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;

	if (pause) {
		rtw89_pci_disable_intr_lock(rtwdev);
		synchronize_irq(pdev->irq);
		if (test_bit(RTW89_FLAG_NAPI_RUNNING, rtwdev->flags))
			napi_synchronize(&rtwdev->napi);
	} else {
		rtw89_pci_enable_intr_lock(rtwdev);
		rtw89_pci_tx_kick_off_pending(rtwdev);
	}
}

static
void rtw89_pci_switch_bd_idx_addr(struct rtw89_dev *rtwdev, bool low_power)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	const struct rtw89_pci_bd_idx_addr *bd_idx_addr = info->bd_idx_addr_low_power;
	const struct rtw89_pci_ch_dma_addr_set *dma_addr_set = info->dma_addr_set;
	struct rtw89_pci_tx_ring *tx_ring;
	struct rtw89_pci_rx_ring *rx_ring;
	int i;

	if (WARN(!bd_idx_addr, "only HCI with low power mode needs this\n"))
		return;

	for (i = 0; i < RTW89_TXCH_NUM; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		tx_ring->bd_ring.addr.idx = low_power ?
					    bd_idx_addr->tx_bd_addrs[i] :
					    dma_addr_set->tx[i].idx;
	}

	for (i = 0; i < RTW89_RXCH_NUM; i++) {
		rx_ring = &rtwpci->rx_rings[i];
		rx_ring->bd_ring.addr.idx = low_power ?
					    bd_idx_addr->rx_bd_addrs[i] :
					    dma_addr_set->rx[i].idx;
	}
}
static void rtw89_pci_ops_switch_mode(struct rtw89_dev *rtwdev, bool low_power)
{
	enum rtw89_pci_intr_mask_cfg cfg;

	WARN(!rtwdev->hci.paused, "HCI isn't paused\n");

	cfg = low_power ? RTW89_PCI_INTR_MASK_LOW_POWER : RTW89_PCI_INTR_MASK_NORMAL;
	rtw89_chip_config_intr_mask(rtwdev, cfg);
	rtw89_pci_switch_bd_idx_addr(rtwdev, low_power);
}

static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data);

static u32 rtw89_pci_ops_read32_cmac(struct rtw89_dev *rtwdev, u32 addr)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	u32 val = readl(rtwpci->mmap + addr);
	int count;

	for (count = 0; ; count++) {
		if (val != RTW89_R32_DEAD)
			return val;
		if (count >= MAC_REG_POOL_COUNT) {
			rtw89_warn(rtwdev, "addr %#x = %#x\n", addr, val);
			return RTW89_R32_DEAD;
		}
		rtw89_pci_ops_write32(rtwdev, R_AX_CK_EN, B_AX_CMAC_ALLCKEN);
		val = readl(rtwpci->mmap + addr);
	}

	return val;
}

static u8 rtw89_pci_ops_read8(struct rtw89_dev *rtwdev, u32 addr)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	u32 addr32, val32, shift;

	if (!ACCESS_CMAC(addr))
		return readb(rtwpci->mmap + addr);

	addr32 = addr & ~0x3;
	shift = (addr & 0x3) * 8;
	val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32);
	return val32 >> shift;
}

static u16 rtw89_pci_ops_read16(struct rtw89_dev *rtwdev, u32 addr)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	u32 addr32, val32, shift;

	if (!ACCESS_CMAC(addr))
		return readw(rtwpci->mmap + addr);

	addr32 = addr & ~0x3;
	shift = (addr & 0x3) * 8;
	val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32);
	return val32 >> shift;
}

static u32 rtw89_pci_ops_read32(struct rtw89_dev *rtwdev, u32 addr)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;

	if (!ACCESS_CMAC(addr))
		return readl(rtwpci->mmap + addr);

	return rtw89_pci_ops_read32_cmac(rtwdev, addr);
}

static void rtw89_pci_ops_write8(struct rtw89_dev *rtwdev, u32 addr, u8 data)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;

	writeb(data, rtwpci->mmap + addr);
}

static void rtw89_pci_ops_write16(struct rtw89_dev *rtwdev, u32 addr, u16 data)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;

	writew(data, rtwpci->mmap + addr);
}

static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;

	writel(data, rtwpci->mmap + addr);
}

static void rtw89_pci_ctrl_dma_trx(struct rtw89_dev *rtwdev, bool enable)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;

	if (enable)
		rtw89_write32_set(rtwdev, info->init_cfg_reg,
				  info->rxhci_en_bit | info->txhci_en_bit);
	else
		rtw89_write32_clr(rtwdev, info->init_cfg_reg,
				  info->rxhci_en_bit | info->txhci_en_bit);
}

static void rtw89_pci_ctrl_dma_io(struct rtw89_dev *rtwdev, bool enable)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	const struct rtw89_reg_def *reg = &info->dma_io_stop;

	if (enable)
		rtw89_write32_clr(rtwdev, reg->addr, reg->mask);
	else
		rtw89_write32_set(rtwdev, reg->addr, reg->mask);
}

void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable)
{
	rtw89_pci_ctrl_dma_io(rtwdev, enable);
	rtw89_pci_ctrl_dma_trx(rtwdev, enable);
}
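/* PCIe PHY registers are reached over an internal MDIO interface: the
 * helpers below program the register address and a page selected by link
 * speed (GEN1/GEN2) into R_AX_MDIO_CFG, raise the read or write flag bit,
 * and poll until the hardware clears it. Data travels through
 * R_AX_MDIO_RDATA and R_AX_MDIO_WDATA.
 */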
static int rtw89_pci_check_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 rw_bit)
{
	u16 val;

	rtw89_write8(rtwdev, R_AX_MDIO_CFG, addr & 0x1F);

	val = rtw89_read16(rtwdev, R_AX_MDIO_CFG);
	switch (speed) {
	case PCIE_PHY_GEN1:
		if (addr < 0x20)
			val = u16_replace_bits(val, MDIO_PG0_G1, B_AX_MDIO_PHY_ADDR_MASK);
		else
			val = u16_replace_bits(val, MDIO_PG1_G1, B_AX_MDIO_PHY_ADDR_MASK);
		break;
	case PCIE_PHY_GEN2:
		if (addr < 0x20)
			val = u16_replace_bits(val, MDIO_PG0_G2, B_AX_MDIO_PHY_ADDR_MASK);
		else
			val = u16_replace_bits(val, MDIO_PG1_G2, B_AX_MDIO_PHY_ADDR_MASK);
		break;
	default:
		rtw89_err(rtwdev, "[ERR]Error Speed %d!\n", speed);
		return -EINVAL;
	}
	rtw89_write16(rtwdev, R_AX_MDIO_CFG, val);
	rtw89_write16_set(rtwdev, R_AX_MDIO_CFG, rw_bit);

	return read_poll_timeout(rtw89_read16, val, !(val & rw_bit), 10, 2000,
				 false, rtwdev, R_AX_MDIO_CFG);
}

static int
rtw89_read16_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 *val)
{
	int ret;

	ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_RFLAG);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]MDIO R16 0x%X fail ret=%d!\n", addr, ret);
		return ret;
	}
	*val = rtw89_read16(rtwdev, R_AX_MDIO_RDATA);

	return 0;
}

static int
rtw89_write16_mdio(struct rtw89_dev *rtwdev, u8 addr, u16 data, u8 speed)
{
	int ret;

	rtw89_write16(rtwdev, R_AX_MDIO_WDATA, data);
	ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_WFLAG);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]MDIO W16 0x%X = %x fail ret=%d!\n", addr, data, ret);
		return ret;
	}

	return 0;
}

static int
rtw89_write16_mdio_mask(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u16 data, u8 speed)
{
	u32 shift;
	int ret;
	u16 val;

	ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
	if (ret)
		return ret;

	shift = __ffs(mask);
	val &= ~mask;
	val |= ((data << shift) & mask);

	ret = rtw89_write16_mdio(rtwdev, addr, val, speed);
	if (ret)
		return ret;

	return 0;
}

static int rtw89_write16_mdio_set(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed)
{
	int ret;
	u16 val;

	ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
	if (ret)
		return ret;
	ret = rtw89_write16_mdio(rtwdev, addr, val | mask, speed);
	if (ret)
		return ret;

	return 0;
}

static int rtw89_write16_mdio_clr(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed)
{
	int ret;
	u16 val;

	ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
	if (ret)
		return ret;
	ret = rtw89_write16_mdio(rtwdev, addr, val & ~mask, speed);
	if (ret)
		return ret;

	return 0;
}
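/* DBI appears to be a device-side backdoor to the same PCI config registers:
 * rtw89_pci_write_config_byte() below falls back to it when the host config
 * accessors fail. A write latches the data byte and the dword-aligned
 * address with a byte-enable for the addressed lane, then polls the flag
 * byte until the hardware completes the access.
 */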
1927 rtw89_err(rtwdev, "failed to write DBI register, addr=0x%X\n", 1928 addr); 1929 1930 return ret; 1931 } 1932 1933 static int rtw89_dbi_read8(struct rtw89_dev *rtwdev, u16 addr, u8 *value) 1934 { 1935 u16 read_addr = addr & B_AX_DBI_ADDR_MSK; 1936 u8 flag; 1937 int ret; 1938 1939 rtw89_write16(rtwdev, R_AX_DBI_FLAG, read_addr); 1940 rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_RFLAG >> 16); 1941 1942 ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10, 1943 10 * RTW89_PCI_WR_RETRY_CNT, false, 1944 rtwdev, R_AX_DBI_FLAG + 2); 1945 if (ret) { 1946 rtw89_err(rtwdev, "failed to read DBI register, addr=0x%X\n", 1947 addr); 1948 return ret; 1949 } 1950 1951 read_addr = R_AX_DBI_RDATA + (addr & 3); 1952 *value = rtw89_read8(rtwdev, read_addr); 1953 1954 return 0; 1955 } 1956 1957 static int rtw89_pci_write_config_byte(struct rtw89_dev *rtwdev, u16 addr, 1958 u8 data) 1959 { 1960 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1961 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 1962 struct pci_dev *pdev = rtwpci->pdev; 1963 int ret; 1964 1965 ret = pci_write_config_byte(pdev, addr, data); 1966 if (!ret) 1967 return 0; 1968 1969 if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) 1970 ret = rtw89_dbi_write8(rtwdev, addr, data); 1971 1972 return ret; 1973 } 1974 1975 static int rtw89_pci_read_config_byte(struct rtw89_dev *rtwdev, u16 addr, 1976 u8 *value) 1977 { 1978 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1979 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 1980 struct pci_dev *pdev = rtwpci->pdev; 1981 int ret; 1982 1983 ret = pci_read_config_byte(pdev, addr, value); 1984 if (!ret) 1985 return 0; 1986 1987 if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) 1988 ret = rtw89_dbi_read8(rtwdev, addr, value); 1989 1990 return ret; 1991 } 1992 1993 static int rtw89_pci_config_byte_set(struct rtw89_dev *rtwdev, u16 addr, 1994 u8 bit) 1995 { 1996 u8 value; 1997 int ret; 1998 1999 ret = rtw89_pci_read_config_byte(rtwdev, addr, &value); 2000 if (ret) 2001 return ret; 2002 2003 value |= bit; 2004 ret = rtw89_pci_write_config_byte(rtwdev, addr, value); 2005 2006 return ret; 2007 } 2008 2009 static int rtw89_pci_config_byte_clr(struct rtw89_dev *rtwdev, u16 addr, 2010 u8 bit) 2011 { 2012 u8 value; 2013 int ret; 2014 2015 ret = rtw89_pci_read_config_byte(rtwdev, addr, &value); 2016 if (ret) 2017 return ret; 2018 2019 value &= ~bit; 2020 ret = rtw89_pci_write_config_byte(rtwdev, addr, value); 2021 2022 return ret; 2023 } 2024 2025 static int 2026 __get_target(struct rtw89_dev *rtwdev, u16 *target, enum rtw89_pcie_phy phy_rate) 2027 { 2028 u16 val, tar; 2029 int ret; 2030 2031 /* Enable counter */ 2032 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val); 2033 if (ret) 2034 return ret; 2035 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN, 2036 phy_rate); 2037 if (ret) 2038 return ret; 2039 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val | B_AX_CLK_CALIB_EN, 2040 phy_rate); 2041 if (ret) 2042 return ret; 2043 2044 fsleep(300); 2045 2046 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &tar); 2047 if (ret) 2048 return ret; 2049 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN, 2050 phy_rate); 2051 if (ret) 2052 return ret; 2053 2054 tar = tar & 0x0FFF; 2055 if (tar == 0 || tar == 0x0FFF) { 2056 rtw89_err(rtwdev, "[ERR]Get target failed.\n"); 2057 return -EINVAL; 2058 } 2059 2060 *target = tar; 2061 2062 return 0; 2063 } 2064 2065 
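/* PCIe PHY tuning for RTL8852B/RTL8851B (a summary of the two helpers
 * below, inferred from the code itself): rtw89_pci_autok_x() programs a
 * fixed AUTOK value through GEN1 MDIO, while rtw89_pci_auto_refclk_cal()
 * measures the reference clock with __get_target(), derives a divider and
 * margin from the measured target, writes them back via RAC_SET_PPR_V1,
 * and re-enables hardware calibration (B_AX_CALIB_EN). L1 is disabled for
 * the duration of the sequence and restored afterwards.
 */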
static int rtw89_pci_autok_x(struct rtw89_dev *rtwdev)
2066 {
2067 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2068 	int ret;
2069 
2070 	if (chip_id != RTL8852B && chip_id != RTL8851B)
2071 		return 0;
2072 
2073 	ret = rtw89_write16_mdio_mask(rtwdev, RAC_REG_FLD_0, BAC_AUTOK_N_MASK,
2074 				      PCIE_AUTOK_4, PCIE_PHY_GEN1);
2075 	return ret;
2076 }
2077 
2078 static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en)
2079 {
2080 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2081 	enum rtw89_pcie_phy phy_rate;
2082 	u16 val16, mgn_set, div_set, tar;
2083 	u8 val8, bdr_ori;
2084 	bool l1_flag = false;
2085 	int ret = 0;
2086 
2087 	if (chip_id != RTL8852B && chip_id != RTL8851B)
2088 		return 0;
2089 
2090 	ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_PHY_RATE, &val8);
2091 	if (ret) {
2092 		rtw89_err(rtwdev, "[ERR]pci config read %X\n",
2093 			  RTW89_PCIE_PHY_RATE);
2094 		return ret;
2095 	}
2096 
2097 	if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x1) {
2098 		phy_rate = PCIE_PHY_GEN1;
2099 	} else if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x2) {
2100 		phy_rate = PCIE_PHY_GEN2;
2101 	} else {
2102 		rtw89_err(rtwdev, "[ERR]PCIe PHY rate %#x not supported\n", val8);
2103 		return -EOPNOTSUPP;
2104 	}
2105 	/* Disable L1BD */
2106 	ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, &bdr_ori);
2107 	if (ret) {
2108 		rtw89_err(rtwdev, "[ERR]pci config read %X\n", RTW89_PCIE_L1_CTRL);
2109 		return ret;
2110 	}
2111 
2112 	if (bdr_ori & RTW89_PCIE_BIT_L1) {
2113 		ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL,
2114 						  bdr_ori & ~RTW89_PCIE_BIT_L1);
2115 		if (ret) {
2116 			rtw89_err(rtwdev, "[ERR]pci config write %X\n",
2117 				  RTW89_PCIE_L1_CTRL);
2118 			return ret;
2119 		}
2120 		l1_flag = true;
2121 	}
2122 
2123 	ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16);
2124 	if (ret) {
2125 		rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1);
2126 		goto end;
2127 	}
2128 
2129 	if (val16 & B_AX_CALIB_EN) {
2130 		ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1,
2131 					 val16 & ~B_AX_CALIB_EN, phy_rate);
2132 		if (ret) {
2133 			rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
2134 			goto end;
2135 		}
2136 	}
2137 
2138 	if (!autook_en)
2139 		goto end;
2140 	/* Set div */
2141 	ret = rtw89_write16_mdio_clr(rtwdev, RAC_CTRL_PPR_V1, B_AX_DIV, phy_rate);
2142 	if (ret) {
2143 		rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
2144 		goto end;
2145 	}
2146 
2147 	/* Obtain div and margin */
2148 	ret = __get_target(rtwdev, &tar, phy_rate);
2149 	if (ret) {
2150 		rtw89_err(rtwdev, "[ERR]1st get target fail %d\n", ret);
2151 		goto end;
2152 	}
2153 
2154 	mgn_set = tar * INTF_INTGRA_HOSTREF_V1 / INTF_INTGRA_MINREF_V1 - tar;
2155 
2156 	if (mgn_set >= 128) {
2157 		div_set = 0x0003;
2158 		mgn_set = 0x000F;
2159 	} else if (mgn_set >= 64) {
2160 		div_set = 0x0003;
2161 		mgn_set >>= 3;
2162 	} else if (mgn_set >= 32) {
2163 		div_set = 0x0002;
2164 		mgn_set >>= 2;
2165 	} else if (mgn_set >= 16) {
2166 		div_set = 0x0001;
2167 		mgn_set >>= 1;
2168 	} else if (mgn_set == 0) {
2169 		rtw89_err(rtwdev, "[ERR]cal mgn is 0, tar = %d\n", tar);
2170 		goto end;
2171 	} else {
2172 		div_set = 0x0000;
2173 	}
2174 
2175 	ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16);
2176 	if (ret) {
2177 		rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1);
2178 		goto end;
2179 	}
2180 
2181 	val16 |= u16_encode_bits(div_set, B_AX_DIV);
2182 
2183 	ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val16, phy_rate);
2184 	if (ret) {
2185 		rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
2186
goto end; 2187 } 2188 2189 ret = __get_target(rtwdev, &tar, phy_rate); 2190 if (ret) { 2191 rtw89_err(rtwdev, "[ERR]2nd get target fail %d\n", ret); 2192 goto end; 2193 } 2194 2195 rtw89_debug(rtwdev, RTW89_DBG_HCI, "[TRACE]target = 0x%X, div = 0x%X, margin = 0x%X\n", 2196 tar, div_set, mgn_set); 2197 ret = rtw89_write16_mdio(rtwdev, RAC_SET_PPR_V1, 2198 (tar & 0x0FFF) | (mgn_set << 12), phy_rate); 2199 if (ret) { 2200 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_SET_PPR_V1); 2201 goto end; 2202 } 2203 2204 /* Enable function */ 2205 ret = rtw89_write16_mdio_set(rtwdev, RAC_CTRL_PPR_V1, B_AX_CALIB_EN, phy_rate); 2206 if (ret) { 2207 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 2208 goto end; 2209 } 2210 2211 /* CLK delay = 0 */ 2212 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL, 2213 PCIE_CLKDLY_HW_0); 2214 2215 end: 2216 /* Set L1BD to ori */ 2217 if (l1_flag) { 2218 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, 2219 bdr_ori); 2220 if (ret) { 2221 rtw89_err(rtwdev, "[ERR]pci config write %X\n", 2222 RTW89_PCIE_L1_CTRL); 2223 return ret; 2224 } 2225 } 2226 2227 return ret; 2228 } 2229 2230 static int rtw89_pci_deglitch_setting(struct rtw89_dev *rtwdev) 2231 { 2232 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2233 int ret; 2234 2235 if (chip_id == RTL8852A) { 2236 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH, 2237 PCIE_PHY_GEN1); 2238 if (ret) 2239 return ret; 2240 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH, 2241 PCIE_PHY_GEN2); 2242 if (ret) 2243 return ret; 2244 } else if (chip_id == RTL8852C) { 2245 rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA24 * 2, 2246 B_AX_DEGLITCH); 2247 rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA24 * 2, 2248 B_AX_DEGLITCH); 2249 } 2250 2251 return 0; 2252 } 2253 2254 static void rtw89_pci_rxdma_prefth(struct rtw89_dev *rtwdev) 2255 { 2256 if (rtwdev->chip->chip_id != RTL8852A) 2257 return; 2258 2259 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_DIS_RXDMA_PRE); 2260 } 2261 2262 static void rtw89_pci_l1off_pwroff(struct rtw89_dev *rtwdev) 2263 { 2264 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2265 2266 if (chip_id != RTL8852A && chip_id != RTL8852B && chip_id != RTL8851B) 2267 return; 2268 2269 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL, B_AX_L1OFF_PWR_OFF_EN); 2270 } 2271 2272 static u32 rtw89_pci_l2_rxen_lat(struct rtw89_dev *rtwdev) 2273 { 2274 int ret; 2275 2276 if (rtwdev->chip->chip_id != RTL8852A) 2277 return 0; 2278 2279 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN, 2280 PCIE_PHY_GEN1); 2281 if (ret) 2282 return ret; 2283 2284 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN, 2285 PCIE_PHY_GEN2); 2286 if (ret) 2287 return ret; 2288 2289 return 0; 2290 } 2291 2292 static void rtw89_pci_aphy_pwrcut(struct rtw89_dev *rtwdev) 2293 { 2294 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2295 2296 if (chip_id != RTL8852A && chip_id != RTL8852B && chip_id != RTL8851B) 2297 return; 2298 2299 rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_PSUS_OFF_CAPC_EN); 2300 } 2301 2302 static void rtw89_pci_hci_ldo(struct rtw89_dev *rtwdev) 2303 { 2304 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2305 2306 if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) { 2307 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, 2308 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 2309 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, 2310 B_AX_PCIE_DIS_WLSUS_AFT_PDN); 2311 } else if (rtwdev->chip->chip_id 
== RTL8852C) { 2312 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, 2313 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 2314 } 2315 } 2316 2317 static int rtw89_pci_dphy_delay(struct rtw89_dev *rtwdev) 2318 { 2319 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2320 2321 if (chip_id != RTL8852B && chip_id != RTL8851B) 2322 return 0; 2323 2324 return rtw89_write16_mdio_mask(rtwdev, RAC_REG_REV2, BAC_CMU_EN_DLY_MASK, 2325 PCIE_DPHY_DLY_25US, PCIE_PHY_GEN1); 2326 } 2327 2328 static void rtw89_pci_power_wake(struct rtw89_dev *rtwdev, bool pwr_up) 2329 { 2330 if (pwr_up) 2331 rtw89_write32_set(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL); 2332 else 2333 rtw89_write32_clr(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL); 2334 } 2335 2336 static void rtw89_pci_autoload_hang(struct rtw89_dev *rtwdev) 2337 { 2338 if (rtwdev->chip->chip_id != RTL8852C) 2339 return; 2340 2341 rtw89_write32_set(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3); 2342 rtw89_write32_clr(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3); 2343 } 2344 2345 static void rtw89_pci_l12_vmain(struct rtw89_dev *rtwdev) 2346 { 2347 if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV)) 2348 return; 2349 2350 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_FORCE_PWR_NGAT); 2351 } 2352 2353 static void rtw89_pci_gen2_force_ib(struct rtw89_dev *rtwdev) 2354 { 2355 if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV)) 2356 return; 2357 2358 rtw89_write32_set(rtwdev, R_AX_PMC_DBG_CTRL2, 2359 B_AX_SYSON_DIS_PMCR_AX_WRMSK); 2360 rtw89_write32_set(rtwdev, R_AX_HCI_BG_CTRL, B_AX_BG_CLR_ASYNC_M3); 2361 rtw89_write32_clr(rtwdev, R_AX_PMC_DBG_CTRL2, 2362 B_AX_SYSON_DIS_PMCR_AX_WRMSK); 2363 } 2364 2365 static void rtw89_pci_l1_ent_lat(struct rtw89_dev *rtwdev) 2366 { 2367 if (rtwdev->chip->chip_id != RTL8852C) 2368 return; 2369 2370 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_SEL_REQ_ENTR_L1); 2371 } 2372 2373 static void rtw89_pci_wd_exit_l1(struct rtw89_dev *rtwdev) 2374 { 2375 if (rtwdev->chip->chip_id != RTL8852C) 2376 return; 2377 2378 rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_DMAC0_EXIT_L1_EN); 2379 } 2380 2381 static void rtw89_pci_set_sic(struct rtw89_dev *rtwdev) 2382 { 2383 if (rtwdev->chip->chip_id == RTL8852C) 2384 return; 2385 2386 rtw89_write32_clr(rtwdev, R_AX_PCIE_EXP_CTRL, 2387 B_AX_SIC_EN_FORCE_CLKREQ); 2388 } 2389 2390 static void rtw89_pci_set_lbc(struct rtw89_dev *rtwdev) 2391 { 2392 const struct rtw89_pci_info *info = rtwdev->pci_info; 2393 u32 lbc; 2394 2395 if (rtwdev->chip->chip_id == RTL8852C) 2396 return; 2397 2398 lbc = rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG); 2399 if (info->lbc_en == MAC_AX_PCIE_ENABLE) { 2400 lbc = u32_replace_bits(lbc, info->lbc_tmr, B_AX_LBC_TIMER); 2401 lbc |= B_AX_LBC_FLAG | B_AX_LBC_EN; 2402 rtw89_write32(rtwdev, R_AX_LBC_WATCHDOG, lbc); 2403 } else { 2404 lbc &= ~B_AX_LBC_EN; 2405 } 2406 rtw89_write32_set(rtwdev, R_AX_LBC_WATCHDOG, lbc); 2407 } 2408 2409 static void rtw89_pci_set_io_rcy(struct rtw89_dev *rtwdev) 2410 { 2411 const struct rtw89_pci_info *info = rtwdev->pci_info; 2412 u32 val32; 2413 2414 if (rtwdev->chip->chip_id != RTL8852C) 2415 return; 2416 2417 if (info->io_rcy_en == MAC_AX_PCIE_ENABLE) { 2418 val32 = FIELD_PREP(B_AX_PCIE_WDT_TIMER_M1_MASK, 2419 info->io_rcy_tmr); 2420 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M1, val32); 2421 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M2, val32); 2422 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_E0, val32); 2423 2424 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1); 2425 
rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2); 2426 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0); 2427 } else { 2428 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1); 2429 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2); 2430 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0); 2431 } 2432 2433 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_S1, B_AX_PCIE_IO_RCY_WDT_MODE_S1); 2434 } 2435 2436 static void rtw89_pci_set_dbg(struct rtw89_dev *rtwdev) 2437 { 2438 if (rtwdev->chip->chip_id == RTL8852C) 2439 return; 2440 2441 rtw89_write32_set(rtwdev, R_AX_PCIE_DBG_CTRL, 2442 B_AX_ASFF_FULL_NO_STK | B_AX_EN_STUCK_DBG); 2443 2444 if (rtwdev->chip->chip_id == RTL8852A) 2445 rtw89_write32_set(rtwdev, R_AX_PCIE_EXP_CTRL, 2446 B_AX_EN_CHKDSC_NO_RX_STUCK); 2447 } 2448 2449 static void rtw89_pci_set_keep_reg(struct rtw89_dev *rtwdev) 2450 { 2451 if (rtwdev->chip->chip_id == RTL8852C) 2452 return; 2453 2454 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, 2455 B_AX_PCIE_TXRST_KEEP_REG | B_AX_PCIE_RXRST_KEEP_REG); 2456 } 2457 2458 static void rtw89_pci_clr_idx_all_ax(struct rtw89_dev *rtwdev) 2459 { 2460 const struct rtw89_pci_info *info = rtwdev->pci_info; 2461 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2462 u32 val = B_AX_CLR_ACH0_IDX | B_AX_CLR_ACH1_IDX | B_AX_CLR_ACH2_IDX | 2463 B_AX_CLR_ACH3_IDX | B_AX_CLR_CH8_IDX | B_AX_CLR_CH9_IDX | 2464 B_AX_CLR_CH12_IDX; 2465 u32 rxbd_rwptr_clr = info->rxbd_rwptr_clr_reg; 2466 u32 txbd_rwptr_clr2 = info->txbd_rwptr_clr2_reg; 2467 2468 if (chip_id == RTL8852A || chip_id == RTL8852C) 2469 val |= B_AX_CLR_ACH4_IDX | B_AX_CLR_ACH5_IDX | 2470 B_AX_CLR_ACH6_IDX | B_AX_CLR_ACH7_IDX; 2471 /* clear DMA indexes */ 2472 rtw89_write32_set(rtwdev, R_AX_TXBD_RWPTR_CLR1, val); 2473 if (chip_id == RTL8852A || chip_id == RTL8852C) 2474 rtw89_write32_set(rtwdev, txbd_rwptr_clr2, 2475 B_AX_CLR_CH10_IDX | B_AX_CLR_CH11_IDX); 2476 rtw89_write32_set(rtwdev, rxbd_rwptr_clr, 2477 B_AX_CLR_RXQ_IDX | B_AX_CLR_RPQ_IDX); 2478 } 2479 2480 static int rtw89_poll_txdma_ch_idle_pcie(struct rtw89_dev *rtwdev) 2481 { 2482 const struct rtw89_pci_info *info = rtwdev->pci_info; 2483 u32 ret, check, dma_busy; 2484 u32 dma_busy1 = info->dma_busy1.addr; 2485 u32 dma_busy2 = info->dma_busy2_reg; 2486 2487 check = info->dma_busy1.mask; 2488 2489 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, 2490 10, 100, false, rtwdev, dma_busy1); 2491 if (ret) 2492 return ret; 2493 2494 if (!dma_busy2) 2495 return 0; 2496 2497 check = B_AX_CH10_BUSY | B_AX_CH11_BUSY; 2498 2499 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, 2500 10, 100, false, rtwdev, dma_busy2); 2501 if (ret) 2502 return ret; 2503 2504 return 0; 2505 } 2506 2507 static int rtw89_poll_rxdma_ch_idle_pcie(struct rtw89_dev *rtwdev) 2508 { 2509 const struct rtw89_pci_info *info = rtwdev->pci_info; 2510 u32 ret, check, dma_busy; 2511 u32 dma_busy3 = info->dma_busy3_reg; 2512 2513 check = B_AX_RXQ_BUSY | B_AX_RPQ_BUSY; 2514 2515 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, 2516 10, 100, false, rtwdev, dma_busy3); 2517 if (ret) 2518 return ret; 2519 2520 return 0; 2521 } 2522 2523 static int rtw89_pci_poll_dma_all_idle(struct rtw89_dev *rtwdev) 2524 { 2525 u32 ret; 2526 2527 ret = rtw89_poll_txdma_ch_idle_pcie(rtwdev); 2528 if (ret) { 2529 rtw89_err(rtwdev, "txdma ch busy\n"); 2530 return ret; 2531 } 2532 2533 ret = 
rtw89_poll_rxdma_ch_idle_pcie(rtwdev); 2534 if (ret) { 2535 rtw89_err(rtwdev, "rxdma ch busy\n"); 2536 return ret; 2537 } 2538 2539 return 0; 2540 } 2541 2542 static int rtw89_pci_mode_op(struct rtw89_dev *rtwdev) 2543 { 2544 const struct rtw89_pci_info *info = rtwdev->pci_info; 2545 enum mac_ax_bd_trunc_mode txbd_trunc_mode = info->txbd_trunc_mode; 2546 enum mac_ax_bd_trunc_mode rxbd_trunc_mode = info->rxbd_trunc_mode; 2547 enum mac_ax_rxbd_mode rxbd_mode = info->rxbd_mode; 2548 enum mac_ax_tag_mode tag_mode = info->tag_mode; 2549 enum mac_ax_wd_dma_intvl wd_dma_idle_intvl = info->wd_dma_idle_intvl; 2550 enum mac_ax_wd_dma_intvl wd_dma_act_intvl = info->wd_dma_act_intvl; 2551 enum mac_ax_tx_burst tx_burst = info->tx_burst; 2552 enum mac_ax_rx_burst rx_burst = info->rx_burst; 2553 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2554 u8 cv = rtwdev->hal.cv; 2555 u32 val32; 2556 2557 if (txbd_trunc_mode == MAC_AX_BD_TRUNC) { 2558 if (chip_id == RTL8852A && cv == CHIP_CBV) 2559 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE); 2560 } else if (txbd_trunc_mode == MAC_AX_BD_NORM) { 2561 if (chip_id == RTL8852A || chip_id == RTL8852B) 2562 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE); 2563 } 2564 2565 if (rxbd_trunc_mode == MAC_AX_BD_TRUNC) { 2566 if (chip_id == RTL8852A && cv == CHIP_CBV) 2567 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE); 2568 } else if (rxbd_trunc_mode == MAC_AX_BD_NORM) { 2569 if (chip_id == RTL8852A || chip_id == RTL8852B) 2570 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE); 2571 } 2572 2573 if (rxbd_mode == MAC_AX_RXBD_PKT) { 2574 rtw89_write32_clr(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit); 2575 } else if (rxbd_mode == MAC_AX_RXBD_SEP) { 2576 rtw89_write32_set(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit); 2577 2578 if (chip_id == RTL8852A || chip_id == RTL8852B) 2579 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, 2580 B_AX_PCIE_RX_APPLEN_MASK, 0); 2581 } 2582 2583 if (chip_id == RTL8852A || chip_id == RTL8852B) { 2584 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_TXDMA_MASK, tx_burst); 2585 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_RXDMA_MASK, rx_burst); 2586 } else if (chip_id == RTL8852C) { 2587 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_TXDMA_MASK, tx_burst); 2588 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_RXDMA_MASK, rx_burst); 2589 } 2590 2591 if (chip_id == RTL8852A || chip_id == RTL8852B) { 2592 if (tag_mode == MAC_AX_TAG_SGL) { 2593 val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) & 2594 ~B_AX_LATENCY_CONTROL; 2595 rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 2596 } else if (tag_mode == MAC_AX_TAG_MULTI) { 2597 val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) | 2598 B_AX_LATENCY_CONTROL; 2599 rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 2600 } 2601 } 2602 2603 rtw89_write32_mask(rtwdev, info->exp_ctrl_reg, info->max_tag_num_mask, 2604 info->multi_tag_num); 2605 2606 if (chip_id == RTL8852A || chip_id == RTL8852B) { 2607 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_IDLE, 2608 wd_dma_idle_intvl); 2609 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_ACT, 2610 wd_dma_act_intvl); 2611 } else if (chip_id == RTL8852C) { 2612 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_IDLE_V1_MASK, 2613 wd_dma_idle_intvl); 2614 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_ACT_V1_MASK, 2615 wd_dma_act_intvl); 2616 } 2617 2618 if 
(txbd_trunc_mode == MAC_AX_BD_TRUNC) {
2619 		rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING,
2620 				  B_AX_HOST_ADDR_INFO_8B_SEL);
2621 		rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH);
2622 	} else if (txbd_trunc_mode == MAC_AX_BD_NORM) {
2623 		rtw89_write32_clr(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING,
2624 				  B_AX_HOST_ADDR_INFO_8B_SEL);
2625 		rtw89_write32_set(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH);
2626 	}
2627 
2628 	return 0;
2629 }
2630 
2631 static int rtw89_pci_ops_deinit(struct rtw89_dev *rtwdev)
2632 {
2633 	const struct rtw89_pci_info *info = rtwdev->pci_info;
2634 
2635 	if (rtwdev->chip->chip_id == RTL8852A) {
2636 		/* ltr sw trigger */
2637 		rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_IDLE);
2638 	}
2639 	info->ltr_set(rtwdev, false);
2640 	rtw89_pci_ctrl_dma_all(rtwdev, false);
2641 	rtw89_pci_clr_idx_all(rtwdev);
2642 
2643 	return 0;
2644 }
2645 
2646 static int rtw89_pci_ops_mac_pre_init_ax(struct rtw89_dev *rtwdev)
2647 {
2648 	const struct rtw89_pci_info *info = rtwdev->pci_info;
2649 	int ret;
2650 
2651 	rtw89_pci_rxdma_prefth(rtwdev);
2652 	rtw89_pci_l1off_pwroff(rtwdev);
2653 	rtw89_pci_deglitch_setting(rtwdev);
2654 	ret = rtw89_pci_l2_rxen_lat(rtwdev);
2655 	if (ret) {
2656 		rtw89_err(rtwdev, "[ERR] pcie l2 rxen lat %d\n", ret);
2657 		return ret;
2658 	}
2659 
2660 	rtw89_pci_aphy_pwrcut(rtwdev);
2661 	rtw89_pci_hci_ldo(rtwdev);
2662 	rtw89_pci_dphy_delay(rtwdev);
2663 
2664 	ret = rtw89_pci_autok_x(rtwdev);
2665 	if (ret) {
2666 		rtw89_err(rtwdev, "[ERR] pcie autok_x fail %d\n", ret);
2667 		return ret;
2668 	}
2669 
2670 	ret = rtw89_pci_auto_refclk_cal(rtwdev, false);
2671 	if (ret) {
2672 		rtw89_err(rtwdev, "[ERR] pcie autok fail %d\n", ret);
2673 		return ret;
2674 	}
2675 
2676 	rtw89_pci_power_wake(rtwdev, true);
2677 	rtw89_pci_autoload_hang(rtwdev);
2678 	rtw89_pci_l12_vmain(rtwdev);
2679 	rtw89_pci_gen2_force_ib(rtwdev);
2680 	rtw89_pci_l1_ent_lat(rtwdev);
2681 	rtw89_pci_wd_exit_l1(rtwdev);
2682 	rtw89_pci_set_sic(rtwdev);
2683 	rtw89_pci_set_lbc(rtwdev);
2684 	rtw89_pci_set_io_rcy(rtwdev);
2685 	rtw89_pci_set_dbg(rtwdev);
2686 	rtw89_pci_set_keep_reg(rtwdev);
2687 
2688 	rtw89_write32_set(rtwdev, info->dma_stop1.addr, B_AX_STOP_WPDMA);
2689 
2690 	/* stop DMA activities */
2691 	rtw89_pci_ctrl_dma_all(rtwdev, false);
2692 
2693 	ret = rtw89_pci_poll_dma_all_idle(rtwdev);
2694 	if (ret) {
2695 		rtw89_err(rtwdev, "[ERR] poll pcie dma all idle\n");
2696 		return ret;
2697 	}
2698 
2699 	rtw89_pci_clr_idx_all(rtwdev);
2700 	rtw89_pci_mode_op(rtwdev);
2701 
2702 	/* fill TRX BD indexes */
2703 	rtw89_pci_ops_reset(rtwdev);
2704 
2705 	ret = rtw89_pci_rst_bdram_ax(rtwdev);
2706 	if (ret) {
2707 		rtw89_warn(rtwdev, "reset bdram busy\n");
2708 		return ret;
2709 	}
2710 
2711 	/* disable all channels except the FW CMD channel to download firmware */
2712 	rtw89_pci_ctrl_txdma_ch_pcie(rtwdev, false);
2713 	rtw89_pci_ctrl_txdma_fw_ch_pcie(rtwdev, true);
2714 
2715 	/* start DMA activities */
2716 	rtw89_pci_ctrl_dma_all(rtwdev, true);
2717 
2718 	return 0;
2719 }
2720 
2721 int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev, bool en)
2722 {
2723 	u32 val;
2724 
2725 	if (!en)
2726 		return 0;
2727 
2728 	val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0);
2729 	if (rtw89_pci_ltr_is_err_reg_val(val))
2730 		return -EINVAL;
2731 	val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1);
2732 	if (rtw89_pci_ltr_is_err_reg_val(val))
2733 		return -EINVAL;
2734 	val = rtw89_read32(rtwdev, R_AX_LTR_IDLE_LATENCY);
2735 	if (rtw89_pci_ltr_is_err_reg_val(val))
2736 		return -EINVAL;
2737 	val = rtw89_read32(rtwdev, R_AX_LTR_ACTIVE_LATENCY);
2738 	if
(rtw89_pci_ltr_is_err_reg_val(val)) 2739 return -EINVAL; 2740 2741 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_HW_EN | B_AX_LTR_EN | 2742 B_AX_LTR_WD_NOEMP_CHK); 2743 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_SPACE_IDX_MASK, 2744 PCI_LTR_SPC_500US); 2745 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK, 2746 PCI_LTR_IDLE_TIMER_3_2MS); 2747 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28); 2748 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28); 2749 rtw89_write32(rtwdev, R_AX_LTR_IDLE_LATENCY, 0x90039003); 2750 rtw89_write32(rtwdev, R_AX_LTR_ACTIVE_LATENCY, 0x880b880b); 2751 2752 return 0; 2753 } 2754 EXPORT_SYMBOL(rtw89_pci_ltr_set); 2755 2756 int rtw89_pci_ltr_set_v1(struct rtw89_dev *rtwdev, bool en) 2757 { 2758 u32 dec_ctrl; 2759 u32 val32; 2760 2761 val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0); 2762 if (rtw89_pci_ltr_is_err_reg_val(val32)) 2763 return -EINVAL; 2764 val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1); 2765 if (rtw89_pci_ltr_is_err_reg_val(val32)) 2766 return -EINVAL; 2767 dec_ctrl = rtw89_read32(rtwdev, R_AX_LTR_DEC_CTRL); 2768 if (rtw89_pci_ltr_is_err_reg_val(dec_ctrl)) 2769 return -EINVAL; 2770 val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX3); 2771 if (rtw89_pci_ltr_is_err_reg_val(val32)) 2772 return -EINVAL; 2773 val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX0); 2774 if (rtw89_pci_ltr_is_err_reg_val(val32)) 2775 return -EINVAL; 2776 2777 if (!en) { 2778 dec_ctrl &= ~(LTR_EN_BITS | B_AX_LTR_IDX_DRV_MASK | B_AX_LTR_HW_DEC_EN); 2779 dec_ctrl |= FIELD_PREP(B_AX_LTR_IDX_DRV_MASK, PCIE_LTR_IDX_IDLE) | 2780 B_AX_LTR_REQ_DRV; 2781 } else { 2782 dec_ctrl |= B_AX_LTR_HW_DEC_EN; 2783 } 2784 2785 dec_ctrl &= ~B_AX_LTR_SPACE_IDX_V1_MASK; 2786 dec_ctrl |= FIELD_PREP(B_AX_LTR_SPACE_IDX_V1_MASK, PCI_LTR_SPC_500US); 2787 2788 if (en) 2789 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, 2790 B_AX_LTR_WD_NOEMP_CHK_V1 | B_AX_LTR_HW_EN); 2791 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK, 2792 PCI_LTR_IDLE_TIMER_3_2MS); 2793 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28); 2794 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28); 2795 rtw89_write32(rtwdev, R_AX_LTR_DEC_CTRL, dec_ctrl); 2796 rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX3, 0x90039003); 2797 rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX0, 0x880b880b); 2798 2799 return 0; 2800 } 2801 EXPORT_SYMBOL(rtw89_pci_ltr_set_v1); 2802 2803 static int rtw89_pci_ops_mac_post_init_ax(struct rtw89_dev *rtwdev) 2804 { 2805 const struct rtw89_pci_info *info = rtwdev->pci_info; 2806 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2807 int ret; 2808 2809 ret = info->ltr_set(rtwdev, true); 2810 if (ret) { 2811 rtw89_err(rtwdev, "pci ltr set fail\n"); 2812 return ret; 2813 } 2814 if (chip_id == RTL8852A) { 2815 /* ltr sw trigger */ 2816 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_ACT); 2817 } 2818 if (chip_id == RTL8852A || chip_id == RTL8852B) { 2819 /* ADDR info 8-byte mode */ 2820 rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, 2821 B_AX_HOST_ADDR_INFO_8B_SEL); 2822 rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); 2823 } 2824 2825 /* enable DMA for all queues */ 2826 rtw89_pci_ctrl_txdma_ch_pcie(rtwdev, true); 2827 2828 /* Release PCI IO */ 2829 rtw89_write32_clr(rtwdev, info->dma_stop1.addr, 2830 B_AX_STOP_WPDMA | B_AX_STOP_PCIEIO); 2831 2832 return 0; 2833 } 2834 2835 static int rtw89_pci_claim_device(struct rtw89_dev *rtwdev, 2836 
struct pci_dev *pdev) 2837 { 2838 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2839 int ret; 2840 2841 ret = pci_enable_device(pdev); 2842 if (ret) { 2843 rtw89_err(rtwdev, "failed to enable pci device\n"); 2844 return ret; 2845 } 2846 2847 pci_set_master(pdev); 2848 pci_set_drvdata(pdev, rtwdev->hw); 2849 2850 rtwpci->pdev = pdev; 2851 2852 return 0; 2853 } 2854 2855 static void rtw89_pci_declaim_device(struct rtw89_dev *rtwdev, 2856 struct pci_dev *pdev) 2857 { 2858 pci_disable_device(pdev); 2859 } 2860 2861 static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev, 2862 struct pci_dev *pdev) 2863 { 2864 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2865 unsigned long resource_len; 2866 u8 bar_id = 2; 2867 int ret; 2868 2869 ret = pci_request_regions(pdev, KBUILD_MODNAME); 2870 if (ret) { 2871 rtw89_err(rtwdev, "failed to request pci regions\n"); 2872 goto err; 2873 } 2874 2875 ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 2876 if (ret) { 2877 rtw89_err(rtwdev, "failed to set dma mask to 32-bit\n"); 2878 goto err_release_regions; 2879 } 2880 2881 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 2882 if (ret) { 2883 rtw89_err(rtwdev, "failed to set consistent dma mask to 32-bit\n"); 2884 goto err_release_regions; 2885 } 2886 2887 resource_len = pci_resource_len(pdev, bar_id); 2888 rtwpci->mmap = pci_iomap(pdev, bar_id, resource_len); 2889 if (!rtwpci->mmap) { 2890 rtw89_err(rtwdev, "failed to map pci io\n"); 2891 ret = -EIO; 2892 goto err_release_regions; 2893 } 2894 2895 return 0; 2896 2897 err_release_regions: 2898 pci_release_regions(pdev); 2899 err: 2900 return ret; 2901 } 2902 2903 static void rtw89_pci_clear_mapping(struct rtw89_dev *rtwdev, 2904 struct pci_dev *pdev) 2905 { 2906 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2907 2908 if (rtwpci->mmap) { 2909 pci_iounmap(pdev, rtwpci->mmap); 2910 pci_release_regions(pdev); 2911 } 2912 } 2913 2914 static void rtw89_pci_free_tx_wd_ring(struct rtw89_dev *rtwdev, 2915 struct pci_dev *pdev, 2916 struct rtw89_pci_tx_ring *tx_ring) 2917 { 2918 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 2919 u8 *head = wd_ring->head; 2920 dma_addr_t dma = wd_ring->dma; 2921 u32 page_size = wd_ring->page_size; 2922 u32 page_num = wd_ring->page_num; 2923 u32 ring_sz = page_size * page_num; 2924 2925 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 2926 wd_ring->head = NULL; 2927 } 2928 2929 static void rtw89_pci_free_tx_ring(struct rtw89_dev *rtwdev, 2930 struct pci_dev *pdev, 2931 struct rtw89_pci_tx_ring *tx_ring) 2932 { 2933 int ring_sz; 2934 u8 *head; 2935 dma_addr_t dma; 2936 2937 head = tx_ring->bd_ring.head; 2938 dma = tx_ring->bd_ring.dma; 2939 ring_sz = tx_ring->bd_ring.desc_size * tx_ring->bd_ring.len; 2940 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 2941 2942 tx_ring->bd_ring.head = NULL; 2943 } 2944 2945 static void rtw89_pci_free_tx_rings(struct rtw89_dev *rtwdev, 2946 struct pci_dev *pdev) 2947 { 2948 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2949 const struct rtw89_pci_info *info = rtwdev->pci_info; 2950 struct rtw89_pci_tx_ring *tx_ring; 2951 int i; 2952 2953 for (i = 0; i < RTW89_TXCH_NUM; i++) { 2954 if (info->tx_dma_ch_mask & BIT(i)) 2955 continue; 2956 tx_ring = &rtwpci->tx_rings[i]; 2957 rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring); 2958 rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring); 2959 } 2960 } 2961 2962 static void rtw89_pci_free_rx_ring(struct rtw89_dev *rtwdev, 2963 struct pci_dev *pdev, 2964 struct rtw89_pci_rx_ring 
*rx_ring) 2965 { 2966 struct rtw89_pci_rx_info *rx_info; 2967 struct sk_buff *skb; 2968 dma_addr_t dma; 2969 u32 buf_sz; 2970 u8 *head; 2971 int ring_sz = rx_ring->bd_ring.desc_size * rx_ring->bd_ring.len; 2972 int i; 2973 2974 buf_sz = rx_ring->buf_sz; 2975 for (i = 0; i < rx_ring->bd_ring.len; i++) { 2976 skb = rx_ring->buf[i]; 2977 if (!skb) 2978 continue; 2979 2980 rx_info = RTW89_PCI_RX_SKB_CB(skb); 2981 dma = rx_info->dma; 2982 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE); 2983 dev_kfree_skb(skb); 2984 rx_ring->buf[i] = NULL; 2985 } 2986 2987 head = rx_ring->bd_ring.head; 2988 dma = rx_ring->bd_ring.dma; 2989 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 2990 2991 rx_ring->bd_ring.head = NULL; 2992 } 2993 2994 static void rtw89_pci_free_rx_rings(struct rtw89_dev *rtwdev, 2995 struct pci_dev *pdev) 2996 { 2997 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2998 struct rtw89_pci_rx_ring *rx_ring; 2999 int i; 3000 3001 for (i = 0; i < RTW89_RXCH_NUM; i++) { 3002 rx_ring = &rtwpci->rx_rings[i]; 3003 rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring); 3004 } 3005 } 3006 3007 static void rtw89_pci_free_trx_rings(struct rtw89_dev *rtwdev, 3008 struct pci_dev *pdev) 3009 { 3010 rtw89_pci_free_rx_rings(rtwdev, pdev); 3011 rtw89_pci_free_tx_rings(rtwdev, pdev); 3012 } 3013 3014 static int rtw89_pci_init_rx_bd(struct rtw89_dev *rtwdev, struct pci_dev *pdev, 3015 struct rtw89_pci_rx_ring *rx_ring, 3016 struct sk_buff *skb, int buf_sz, u32 idx) 3017 { 3018 struct rtw89_pci_rx_info *rx_info; 3019 struct rtw89_pci_rx_bd_32 *rx_bd; 3020 dma_addr_t dma; 3021 3022 if (!skb) 3023 return -EINVAL; 3024 3025 dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE); 3026 if (dma_mapping_error(&pdev->dev, dma)) 3027 return -EBUSY; 3028 3029 rx_info = RTW89_PCI_RX_SKB_CB(skb); 3030 rx_bd = RTW89_PCI_RX_BD(rx_ring, idx); 3031 3032 memset(rx_bd, 0, sizeof(*rx_bd)); 3033 rx_bd->buf_size = cpu_to_le16(buf_sz); 3034 rx_bd->dma = cpu_to_le32(dma); 3035 rx_info->dma = dma; 3036 3037 return 0; 3038 } 3039 3040 static int rtw89_pci_alloc_tx_wd_ring(struct rtw89_dev *rtwdev, 3041 struct pci_dev *pdev, 3042 struct rtw89_pci_tx_ring *tx_ring, 3043 enum rtw89_tx_channel txch) 3044 { 3045 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 3046 struct rtw89_pci_tx_wd *txwd; 3047 dma_addr_t dma; 3048 dma_addr_t cur_paddr; 3049 u8 *head; 3050 u8 *cur_vaddr; 3051 u32 page_size = RTW89_PCI_TXWD_PAGE_SIZE; 3052 u32 page_num = RTW89_PCI_TXWD_NUM_MAX; 3053 u32 ring_sz = page_size * page_num; 3054 u32 page_offset; 3055 int i; 3056 3057 /* FWCMD queue doesn't use txwd as pages */ 3058 if (txch == RTW89_TXCH_CH12) 3059 return 0; 3060 3061 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 3062 if (!head) 3063 return -ENOMEM; 3064 3065 INIT_LIST_HEAD(&wd_ring->free_pages); 3066 wd_ring->head = head; 3067 wd_ring->dma = dma; 3068 wd_ring->page_size = page_size; 3069 wd_ring->page_num = page_num; 3070 3071 page_offset = 0; 3072 for (i = 0; i < page_num; i++) { 3073 txwd = &wd_ring->pages[i]; 3074 cur_paddr = dma + page_offset; 3075 cur_vaddr = head + page_offset; 3076 3077 skb_queue_head_init(&txwd->queue); 3078 INIT_LIST_HEAD(&txwd->list); 3079 txwd->paddr = cur_paddr; 3080 txwd->vaddr = cur_vaddr; 3081 txwd->len = page_size; 3082 txwd->seq = i; 3083 rtw89_pci_enqueue_txwd(tx_ring, txwd); 3084 3085 page_offset += page_size; 3086 } 3087 3088 return 0; 3089 } 3090 3091 static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev, 3092 struct pci_dev *pdev, 3093 struct 
rtw89_pci_tx_ring *tx_ring, 3094 u32 desc_size, u32 len, 3095 enum rtw89_tx_channel txch) 3096 { 3097 const struct rtw89_pci_ch_dma_addr *txch_addr; 3098 int ring_sz = desc_size * len; 3099 u8 *head; 3100 dma_addr_t dma; 3101 int ret; 3102 3103 ret = rtw89_pci_alloc_tx_wd_ring(rtwdev, pdev, tx_ring, txch); 3104 if (ret) { 3105 rtw89_err(rtwdev, "failed to alloc txwd ring of txch %d\n", txch); 3106 goto err; 3107 } 3108 3109 ret = rtw89_pci_get_txch_addrs(rtwdev, txch, &txch_addr); 3110 if (ret) { 3111 rtw89_err(rtwdev, "failed to get address of txch %d", txch); 3112 goto err_free_wd_ring; 3113 } 3114 3115 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 3116 if (!head) { 3117 ret = -ENOMEM; 3118 goto err_free_wd_ring; 3119 } 3120 3121 INIT_LIST_HEAD(&tx_ring->busy_pages); 3122 tx_ring->bd_ring.head = head; 3123 tx_ring->bd_ring.dma = dma; 3124 tx_ring->bd_ring.len = len; 3125 tx_ring->bd_ring.desc_size = desc_size; 3126 tx_ring->bd_ring.addr = *txch_addr; 3127 tx_ring->bd_ring.wp = 0; 3128 tx_ring->bd_ring.rp = 0; 3129 tx_ring->txch = txch; 3130 3131 return 0; 3132 3133 err_free_wd_ring: 3134 rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring); 3135 err: 3136 return ret; 3137 } 3138 3139 static int rtw89_pci_alloc_tx_rings(struct rtw89_dev *rtwdev, 3140 struct pci_dev *pdev) 3141 { 3142 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3143 const struct rtw89_pci_info *info = rtwdev->pci_info; 3144 struct rtw89_pci_tx_ring *tx_ring; 3145 u32 desc_size; 3146 u32 len; 3147 u32 i, tx_allocated; 3148 int ret; 3149 3150 for (i = 0; i < RTW89_TXCH_NUM; i++) { 3151 if (info->tx_dma_ch_mask & BIT(i)) 3152 continue; 3153 tx_ring = &rtwpci->tx_rings[i]; 3154 desc_size = sizeof(struct rtw89_pci_tx_bd_32); 3155 len = RTW89_PCI_TXBD_NUM_MAX; 3156 ret = rtw89_pci_alloc_tx_ring(rtwdev, pdev, tx_ring, 3157 desc_size, len, i); 3158 if (ret) { 3159 rtw89_err(rtwdev, "failed to alloc tx ring %d\n", i); 3160 goto err_free; 3161 } 3162 } 3163 3164 return 0; 3165 3166 err_free: 3167 tx_allocated = i; 3168 for (i = 0; i < tx_allocated; i++) { 3169 tx_ring = &rtwpci->tx_rings[i]; 3170 rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring); 3171 } 3172 3173 return ret; 3174 } 3175 3176 static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev, 3177 struct pci_dev *pdev, 3178 struct rtw89_pci_rx_ring *rx_ring, 3179 u32 desc_size, u32 len, u32 rxch) 3180 { 3181 const struct rtw89_pci_info *info = rtwdev->pci_info; 3182 const struct rtw89_pci_ch_dma_addr *rxch_addr; 3183 struct sk_buff *skb; 3184 u8 *head; 3185 dma_addr_t dma; 3186 int ring_sz = desc_size * len; 3187 int buf_sz = RTW89_PCI_RX_BUF_SIZE; 3188 int i, allocated; 3189 int ret; 3190 3191 ret = rtw89_pci_get_rxch_addrs(rtwdev, rxch, &rxch_addr); 3192 if (ret) { 3193 rtw89_err(rtwdev, "failed to get address of rxch %d", rxch); 3194 return ret; 3195 } 3196 3197 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 3198 if (!head) { 3199 ret = -ENOMEM; 3200 goto err; 3201 } 3202 3203 rx_ring->bd_ring.head = head; 3204 rx_ring->bd_ring.dma = dma; 3205 rx_ring->bd_ring.len = len; 3206 rx_ring->bd_ring.desc_size = desc_size; 3207 rx_ring->bd_ring.addr = *rxch_addr; 3208 if (info->rx_ring_eq_is_full) 3209 rx_ring->bd_ring.wp = len - 1; 3210 else 3211 rx_ring->bd_ring.wp = 0; 3212 rx_ring->bd_ring.rp = 0; 3213 rx_ring->buf_sz = buf_sz; 3214 rx_ring->diliver_skb = NULL; 3215 rx_ring->diliver_desc.ready = false; 3216 3217 for (i = 0; i < len; i++) { 3218 skb = dev_alloc_skb(buf_sz); 3219 if (!skb) { 3220 ret = -ENOMEM; 3221 goto 
err_free; 3222 } 3223 3224 memset(skb->data, 0, buf_sz); 3225 rx_ring->buf[i] = skb; 3226 ret = rtw89_pci_init_rx_bd(rtwdev, pdev, rx_ring, skb, 3227 buf_sz, i); 3228 if (ret) { 3229 rtw89_err(rtwdev, "failed to init rx buf %d\n", i); 3230 dev_kfree_skb_any(skb); 3231 rx_ring->buf[i] = NULL; 3232 goto err_free; 3233 } 3234 } 3235 3236 return 0; 3237 3238 err_free: 3239 allocated = i; 3240 for (i = 0; i < allocated; i++) { 3241 skb = rx_ring->buf[i]; 3242 if (!skb) 3243 continue; 3244 dma = *((dma_addr_t *)skb->cb); 3245 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE); 3246 dev_kfree_skb(skb); 3247 rx_ring->buf[i] = NULL; 3248 } 3249 3250 head = rx_ring->bd_ring.head; 3251 dma = rx_ring->bd_ring.dma; 3252 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 3253 3254 rx_ring->bd_ring.head = NULL; 3255 err: 3256 return ret; 3257 } 3258 3259 static int rtw89_pci_alloc_rx_rings(struct rtw89_dev *rtwdev, 3260 struct pci_dev *pdev) 3261 { 3262 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3263 struct rtw89_pci_rx_ring *rx_ring; 3264 u32 desc_size; 3265 u32 len; 3266 int i, rx_allocated; 3267 int ret; 3268 3269 for (i = 0; i < RTW89_RXCH_NUM; i++) { 3270 rx_ring = &rtwpci->rx_rings[i]; 3271 desc_size = sizeof(struct rtw89_pci_rx_bd_32); 3272 len = RTW89_PCI_RXBD_NUM_MAX; 3273 ret = rtw89_pci_alloc_rx_ring(rtwdev, pdev, rx_ring, 3274 desc_size, len, i); 3275 if (ret) { 3276 rtw89_err(rtwdev, "failed to alloc rx ring %d\n", i); 3277 goto err_free; 3278 } 3279 } 3280 3281 return 0; 3282 3283 err_free: 3284 rx_allocated = i; 3285 for (i = 0; i < rx_allocated; i++) { 3286 rx_ring = &rtwpci->rx_rings[i]; 3287 rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring); 3288 } 3289 3290 return ret; 3291 } 3292 3293 static int rtw89_pci_alloc_trx_rings(struct rtw89_dev *rtwdev, 3294 struct pci_dev *pdev) 3295 { 3296 int ret; 3297 3298 ret = rtw89_pci_alloc_tx_rings(rtwdev, pdev); 3299 if (ret) { 3300 rtw89_err(rtwdev, "failed to alloc dma tx rings\n"); 3301 goto err; 3302 } 3303 3304 ret = rtw89_pci_alloc_rx_rings(rtwdev, pdev); 3305 if (ret) { 3306 rtw89_err(rtwdev, "failed to alloc dma rx rings\n"); 3307 goto err_free_tx_rings; 3308 } 3309 3310 return 0; 3311 3312 err_free_tx_rings: 3313 rtw89_pci_free_tx_rings(rtwdev, pdev); 3314 err: 3315 return ret; 3316 } 3317 3318 static void rtw89_pci_h2c_init(struct rtw89_dev *rtwdev, 3319 struct rtw89_pci *rtwpci) 3320 { 3321 skb_queue_head_init(&rtwpci->h2c_queue); 3322 skb_queue_head_init(&rtwpci->h2c_release_queue); 3323 } 3324 3325 static int rtw89_pci_setup_resource(struct rtw89_dev *rtwdev, 3326 struct pci_dev *pdev) 3327 { 3328 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3329 int ret; 3330 3331 ret = rtw89_pci_setup_mapping(rtwdev, pdev); 3332 if (ret) { 3333 rtw89_err(rtwdev, "failed to setup pci mapping\n"); 3334 goto err; 3335 } 3336 3337 ret = rtw89_pci_alloc_trx_rings(rtwdev, pdev); 3338 if (ret) { 3339 rtw89_err(rtwdev, "failed to alloc pci trx rings\n"); 3340 goto err_pci_unmap; 3341 } 3342 3343 rtw89_pci_h2c_init(rtwdev, rtwpci); 3344 3345 spin_lock_init(&rtwpci->irq_lock); 3346 spin_lock_init(&rtwpci->trx_lock); 3347 3348 return 0; 3349 3350 err_pci_unmap: 3351 rtw89_pci_clear_mapping(rtwdev, pdev); 3352 err: 3353 return ret; 3354 } 3355 3356 static void rtw89_pci_clear_resource(struct rtw89_dev *rtwdev, 3357 struct pci_dev *pdev) 3358 { 3359 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3360 3361 rtw89_pci_free_trx_rings(rtwdev, pdev); 3362 rtw89_pci_clear_mapping(rtwdev, pdev); 3363 
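	/* H2C skbs still sitting on h2c_queue were DMA-mapped when queued;
	 * releasing with release_all == true drains both h2c_queue and
	 * h2c_release_queue so every mapping is unmapped and freed.
	 */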
rtw89_pci_release_fwcmd(rtwdev, rtwpci, 3364 skb_queue_len(&rtwpci->h2c_queue), true); 3365 } 3366 3367 void rtw89_pci_config_intr_mask(struct rtw89_dev *rtwdev) 3368 { 3369 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3370 const struct rtw89_chip_info *chip = rtwdev->chip; 3371 u32 hs0isr_ind_int_en = B_AX_HS0ISR_IND_INT_EN; 3372 3373 if (chip->chip_id == RTL8851B) 3374 hs0isr_ind_int_en = B_AX_HS0ISR_IND_INT_EN_WKARND; 3375 3376 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | 0; 3377 3378 if (rtwpci->under_recovery) { 3379 rtwpci->intrs[0] = hs0isr_ind_int_en; 3380 rtwpci->intrs[1] = 0; 3381 } else { 3382 rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN | 3383 B_AX_RXDMA_INT_EN | 3384 B_AX_RXP1DMA_INT_EN | 3385 B_AX_RPQDMA_INT_EN | 3386 B_AX_RXDMA_STUCK_INT_EN | 3387 B_AX_RDU_INT_EN | 3388 B_AX_RPQBD_FULL_INT_EN | 3389 hs0isr_ind_int_en; 3390 3391 rtwpci->intrs[1] = B_AX_HC10ISR_IND_INT_EN; 3392 } 3393 } 3394 EXPORT_SYMBOL(rtw89_pci_config_intr_mask); 3395 3396 static void rtw89_pci_recovery_intr_mask_v1(struct rtw89_dev *rtwdev) 3397 { 3398 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3399 3400 rtwpci->ind_intrs = B_AX_HS0ISR_IND_INT_EN; 3401 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; 3402 rtwpci->intrs[0] = 0; 3403 rtwpci->intrs[1] = 0; 3404 } 3405 3406 static void rtw89_pci_default_intr_mask_v1(struct rtw89_dev *rtwdev) 3407 { 3408 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3409 3410 rtwpci->ind_intrs = B_AX_HCI_AXIDMA_INT_EN | 3411 B_AX_HS1ISR_IND_INT_EN | 3412 B_AX_HS0ISR_IND_INT_EN; 3413 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; 3414 rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN | 3415 B_AX_RXDMA_INT_EN | 3416 B_AX_RXP1DMA_INT_EN | 3417 B_AX_RPQDMA_INT_EN | 3418 B_AX_RXDMA_STUCK_INT_EN | 3419 B_AX_RDU_INT_EN | 3420 B_AX_RPQBD_FULL_INT_EN; 3421 rtwpci->intrs[1] = B_AX_GPIO18_INT_EN; 3422 } 3423 3424 static void rtw89_pci_low_power_intr_mask_v1(struct rtw89_dev *rtwdev) 3425 { 3426 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3427 3428 rtwpci->ind_intrs = B_AX_HS1ISR_IND_INT_EN | 3429 B_AX_HS0ISR_IND_INT_EN; 3430 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; 3431 rtwpci->intrs[0] = 0; 3432 rtwpci->intrs[1] = B_AX_GPIO18_INT_EN; 3433 } 3434 3435 void rtw89_pci_config_intr_mask_v1(struct rtw89_dev *rtwdev) 3436 { 3437 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3438 3439 if (rtwpci->under_recovery) 3440 rtw89_pci_recovery_intr_mask_v1(rtwdev); 3441 else if (rtwpci->low_power) 3442 rtw89_pci_low_power_intr_mask_v1(rtwdev); 3443 else 3444 rtw89_pci_default_intr_mask_v1(rtwdev); 3445 } 3446 EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v1); 3447 3448 static void rtw89_pci_recovery_intr_mask_v2(struct rtw89_dev *rtwdev) 3449 { 3450 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3451 3452 rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0; 3453 rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN; 3454 rtwpci->intrs[0] = 0; 3455 rtwpci->intrs[1] = B_BE_PCIE_RX_RX0P2_IMR0_V1 | 3456 B_BE_PCIE_RX_RPQ0_IMR0_V1; 3457 } 3458 3459 static void rtw89_pci_default_intr_mask_v2(struct rtw89_dev *rtwdev) 3460 { 3461 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3462 3463 rtwpci->ind_intrs = B_BE_HCI_AXIDMA_INT_EN0 | 3464 B_BE_HS0_IND_INT_EN0; 3465 rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN; 3466 rtwpci->intrs[0] = B_BE_RDU_CH1_INT_IMR_V1 | 3467 B_BE_RDU_CH0_INT_IMR_V1; 3468 
rtwpci->intrs[1] = B_BE_PCIE_RX_RX0P2_IMR0_V1 | 3469 B_BE_PCIE_RX_RPQ0_IMR0_V1; 3470 } 3471 3472 static void rtw89_pci_low_power_intr_mask_v2(struct rtw89_dev *rtwdev) 3473 { 3474 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3475 3476 rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0 | 3477 B_BE_HS1_IND_INT_EN0; 3478 rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN; 3479 rtwpci->intrs[0] = 0; 3480 rtwpci->intrs[1] = B_BE_PCIE_RX_RX0P2_IMR0_V1 | 3481 B_BE_PCIE_RX_RPQ0_IMR0_V1; 3482 } 3483 3484 void rtw89_pci_config_intr_mask_v2(struct rtw89_dev *rtwdev) 3485 { 3486 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3487 3488 if (rtwpci->under_recovery) 3489 rtw89_pci_recovery_intr_mask_v2(rtwdev); 3490 else if (rtwpci->low_power) 3491 rtw89_pci_low_power_intr_mask_v2(rtwdev); 3492 else 3493 rtw89_pci_default_intr_mask_v2(rtwdev); 3494 } 3495 EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v2); 3496 3497 static int rtw89_pci_request_irq(struct rtw89_dev *rtwdev, 3498 struct pci_dev *pdev) 3499 { 3500 unsigned long flags = 0; 3501 int ret; 3502 3503 flags |= PCI_IRQ_LEGACY | PCI_IRQ_MSI; 3504 ret = pci_alloc_irq_vectors(pdev, 1, 1, flags); 3505 if (ret < 0) { 3506 rtw89_err(rtwdev, "failed to alloc irq vectors, ret %d\n", ret); 3507 goto err; 3508 } 3509 3510 ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq, 3511 rtw89_pci_interrupt_handler, 3512 rtw89_pci_interrupt_threadfn, 3513 IRQF_SHARED, KBUILD_MODNAME, rtwdev); 3514 if (ret) { 3515 rtw89_err(rtwdev, "failed to request threaded irq\n"); 3516 goto err_free_vector; 3517 } 3518 3519 rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RESET); 3520 3521 return 0; 3522 3523 err_free_vector: 3524 pci_free_irq_vectors(pdev); 3525 err: 3526 return ret; 3527 } 3528 3529 static void rtw89_pci_free_irq(struct rtw89_dev *rtwdev, 3530 struct pci_dev *pdev) 3531 { 3532 devm_free_irq(rtwdev->dev, pdev->irq, rtwdev); 3533 pci_free_irq_vectors(pdev); 3534 } 3535 3536 static u16 gray_code_to_bin(u16 gray_code, u32 bit_num) 3537 { 3538 u16 bin = 0, gray_bit; 3539 u32 bit_idx; 3540 3541 for (bit_idx = 0; bit_idx < bit_num; bit_idx++) { 3542 gray_bit = (gray_code >> bit_idx) & 0x1; 3543 if (bit_num - bit_idx > 1) 3544 gray_bit ^= (gray_code >> (bit_idx + 1)) & 0x1; 3545 bin |= (gray_bit << bit_idx); 3546 } 3547 3548 return bin; 3549 } 3550 3551 static int rtw89_pci_filter_out(struct rtw89_dev *rtwdev) 3552 { 3553 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3554 struct pci_dev *pdev = rtwpci->pdev; 3555 u16 val16, filter_out_val; 3556 u32 val, phy_offset; 3557 int ret; 3558 3559 if (rtwdev->chip->chip_id != RTL8852C) 3560 return 0; 3561 3562 val = rtw89_read32_mask(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK); 3563 if (val == B_AX_ASPM_CTRL_L1) 3564 return 0; 3565 3566 ret = pci_read_config_dword(pdev, RTW89_PCIE_L1_STS_V1, &val); 3567 if (ret) 3568 return ret; 3569 3570 val = FIELD_GET(RTW89_BCFG_LINK_SPEED_MASK, val); 3571 if (val == RTW89_PCIE_GEN1_SPEED) { 3572 phy_offset = R_RAC_DIRECT_OFFSET_G1; 3573 } else if (val == RTW89_PCIE_GEN2_SPEED) { 3574 phy_offset = R_RAC_DIRECT_OFFSET_G2; 3575 val16 = rtw89_read16(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT); 3576 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT, 3577 val16 | B_PCIE_BIT_PINOUT_DIS); 3578 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT, 3579 val16 & ~B_PCIE_BIT_RD_SEL); 3580 3581 val16 = rtw89_read16_mask(rtwdev, 3582 phy_offset + RAC_ANA1F * RAC_MULT, 3583 FILTER_OUT_EQ_MASK); 3584 val16 = 
gray_code_to_bin(val16, hweight16(val16));
3585 		filter_out_val = rtw89_read16(rtwdev, phy_offset + RAC_ANA24 *
3586 					      RAC_MULT);
3587 		filter_out_val &= ~REG_FILTER_OUT_MASK;
3588 		filter_out_val |= FIELD_PREP(REG_FILTER_OUT_MASK, val16);
3589 
3590 		rtw89_write16(rtwdev, phy_offset + RAC_ANA24 * RAC_MULT,
3591 			      filter_out_val);
3592 		rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0A * RAC_MULT,
3593 				  B_BAC_EQ_SEL);
3594 		rtw89_write16_set(rtwdev,
3595 				  R_RAC_DIRECT_OFFSET_G1 + RAC_ANA0C * RAC_MULT,
3596 				  B_PCIE_BIT_PSAVE);
3597 	} else {
3598 		return -EOPNOTSUPP;
3599 	}
3600 	rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0C * RAC_MULT,
3601 			  B_PCIE_BIT_PSAVE);
3602 
3603 	return 0;
3604 }
3605 
3606 static void rtw89_pci_clkreq_set(struct rtw89_dev *rtwdev, bool enable)
3607 {
3608 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
3609 	int ret;
3610 
3611 	if (rtw89_pci_disable_clkreq)
3612 		return;
3613 
3614 	ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL,
3615 					  PCIE_CLKDLY_HW_30US);
3616 	if (ret)
3617 		rtw89_err(rtwdev, "failed to set CLKREQ Delay\n");
3618 
3619 	if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) {
3620 		if (enable)
3621 			ret = rtw89_pci_config_byte_set(rtwdev,
3622 							RTW89_PCIE_L1_CTRL,
3623 							RTW89_PCIE_BIT_CLK);
3624 		else
3625 			ret = rtw89_pci_config_byte_clr(rtwdev,
3626 							RTW89_PCIE_L1_CTRL,
3627 							RTW89_PCIE_BIT_CLK);
3628 		if (ret)
3629 			rtw89_err(rtwdev, "failed to %s CLKREQ_L1, ret=%d\n",
3630 				  enable ? "set" : "unset", ret);
3631 	} else if (chip_id == RTL8852C) {
3632 		rtw89_write32_set(rtwdev, R_AX_PCIE_LAT_CTRL,
3633 				  B_AX_CLK_REQ_SEL_OPT | B_AX_CLK_REQ_SEL);
3634 		if (enable)
3635 			rtw89_write32_set(rtwdev, R_AX_L1_CLK_CTRL,
3636 					  B_AX_CLK_REQ_N);
3637 		else
3638 			rtw89_write32_clr(rtwdev, R_AX_L1_CLK_CTRL,
3639 					  B_AX_CLK_REQ_N);
3640 	}
3641 }
3642 
3643 static void rtw89_pci_aspm_set(struct rtw89_dev *rtwdev, bool enable)
3644 {
3645 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
3646 	u8 value = 0;
3647 	int ret;
3648 
3649 	if (rtw89_pci_disable_aspm_l1)
3650 		return;
3651 
3652 	ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, &value);
3653 	if (ret)
3654 		rtw89_err(rtwdev, "failed to read ASPM Delay\n");
3655 
3656 	value &= ~(RTW89_L1DLY_MASK | RTW89_L0DLY_MASK);
3657 	value |= FIELD_PREP(RTW89_L1DLY_MASK, PCIE_L1DLY_16US) |
3658 		 FIELD_PREP(RTW89_L0DLY_MASK, PCIE_L0SDLY_4US);
3659 
3660 	ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, value);
3661 	if (ret)
3662 		rtw89_err(rtwdev, "failed to set ASPM Delay\n");
3663 
3664 	if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) {
3665 		if (enable)
3666 			ret = rtw89_pci_config_byte_set(rtwdev,
3667 							RTW89_PCIE_L1_CTRL,
3668 							RTW89_PCIE_BIT_L1);
3669 		else
3670 			ret = rtw89_pci_config_byte_clr(rtwdev,
3671 							RTW89_PCIE_L1_CTRL,
3672 							RTW89_PCIE_BIT_L1);
3673 	} else if (chip_id == RTL8852C) {
3674 		if (enable)
3675 			rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1,
3676 					  B_AX_ASPM_CTRL_L1);
3677 		else
3678 			rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1,
3679 					  B_AX_ASPM_CTRL_L1);
3680 	}
3681 	if (ret)
3682 		rtw89_err(rtwdev, "failed to %s ASPM L1, ret=%d\n",
3683 			  enable ? "set" : "unset", ret);
3684 }
3685 
3686 static void rtw89_pci_recalc_int_mit(struct rtw89_dev *rtwdev)
3687 {
3688 	enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
3689 	const struct rtw89_pci_info *info = rtwdev->pci_info;
3690 	struct rtw89_traffic_stats *stats = &rtwdev->stats;
3691 	enum rtw89_tfc_lv tx_tfc_lv = stats->tx_tfc_lv;
3692 	enum rtw89_tfc_lv rx_tfc_lv = stats->rx_tfc_lv;
3693 	u32 val = 0;
3694 
3695 	if (rtwdev->scanning ||
3696 	    (tx_tfc_lv < RTW89_TFC_HIGH && rx_tfc_lv < RTW89_TFC_HIGH))
3697 		goto out;
3698 
3699 	if (chip_gen == RTW89_CHIP_BE)
3700 		val = B_BE_PCIE_MIT_RX0P2_EN | B_BE_PCIE_MIT_RX0P1_EN;
3701 	else
3702 		val = B_AX_RXMIT_RXP2_SEL | B_AX_RXMIT_RXP1_SEL |
3703 		      FIELD_PREP(B_AX_RXCOUNTER_MATCH_MASK, RTW89_PCI_RXBD_NUM_MAX / 2) |
3704 		      FIELD_PREP(B_AX_RXTIMER_UNIT_MASK, AX_RXTIMER_UNIT_64US) |
3705 		      FIELD_PREP(B_AX_RXTIMER_MATCH_MASK, 2048 / 64);
3706 
3707 out:
3708 	rtw89_write32(rtwdev, info->mit_addr, val);
3709 }
3710 
3711 static void rtw89_pci_link_cfg(struct rtw89_dev *rtwdev)
3712 {
3713 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3714 	struct pci_dev *pdev = rtwpci->pdev;
3715 	u16 link_ctrl;
3716 	int ret;
3717 
3718 	/* Although the link control register lives in standard PCIe
3719 	 * configuration space, by Realtek's design the driver must check
3720 	 * whether the host has enabled CLKREQ/ASPM before turning on the
3721 	 * corresponding HW module.
3722 	 *
3723 	 * Two associated HW modules implement these features: one accesses
3724 	 * PCIe configuration space to follow the host settings, the other
3725 	 * performs the actual CLKREQ/ASPM mechanisms and is disabled by
3726 	 * default, because the host may not support them, and wrong
3727 	 * settings (e.g. CLKREQ# not bi-directional) could make the
3728 	 * device drop off the bus if the HW misbehaves on the link.
3729 	 *
3730 	 * Hence the driver first checks that the PCIe configuration space
3731 	 * is synced and enabled, and only then turns on the module that
3732 	 * actually operates the mechanism.
3733 	 */
3734 	ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl);
3735 	if (ret) {
3736 		rtw89_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret);
3737 		return;
3738 	}
3739 
3740 	if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN)
3741 		rtw89_pci_clkreq_set(rtwdev, true);
3742 
3743 	if (link_ctrl & PCI_EXP_LNKCTL_ASPM_L1)
3744 		rtw89_pci_aspm_set(rtwdev, true);
3745 }
3746 
3747 static void rtw89_pci_l1ss_set(struct rtw89_dev *rtwdev, bool enable)
3748 {
3749 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
3750 	int ret;
3751 
3752 	if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) {
3753 		if (enable)
3754 			ret = rtw89_pci_config_byte_set(rtwdev,
3755 							RTW89_PCIE_TIMER_CTRL,
3756 							RTW89_PCIE_BIT_L1SUB);
3757 		else
3758 			ret = rtw89_pci_config_byte_clr(rtwdev,
3759 							RTW89_PCIE_TIMER_CTRL,
3760 							RTW89_PCIE_BIT_L1SUB);
3761 		if (ret)
3762 			rtw89_err(rtwdev, "failed to %s L1SS, ret=%d\n",
3763 				  enable ?
"set" : "unset", ret); 3764 } else if (chip_id == RTL8852C) { 3765 ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_L1SS_STS_V1, 3766 RTW89_PCIE_BIT_ASPM_L11 | 3767 RTW89_PCIE_BIT_PCI_L11); 3768 if (ret) 3769 rtw89_warn(rtwdev, "failed to unset ASPM L1.1, ret=%d", ret); 3770 if (enable) 3771 rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1, 3772 B_AX_L1SUB_DISABLE); 3773 else 3774 rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1, 3775 B_AX_L1SUB_DISABLE); 3776 } 3777 } 3778 3779 static void rtw89_pci_l1ss_cfg(struct rtw89_dev *rtwdev) 3780 { 3781 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3782 struct pci_dev *pdev = rtwpci->pdev; 3783 u32 l1ss_cap_ptr, l1ss_ctrl; 3784 3785 if (rtw89_pci_disable_l1ss) 3786 return; 3787 3788 l1ss_cap_ptr = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS); 3789 if (!l1ss_cap_ptr) 3790 return; 3791 3792 pci_read_config_dword(pdev, l1ss_cap_ptr + PCI_L1SS_CTL1, &l1ss_ctrl); 3793 3794 if (l1ss_ctrl & PCI_L1SS_CTL1_L1SS_MASK) 3795 rtw89_pci_l1ss_set(rtwdev, true); 3796 } 3797 3798 static int rtw89_pci_poll_io_idle_ax(struct rtw89_dev *rtwdev) 3799 { 3800 int ret = 0; 3801 u32 sts; 3802 u32 busy = B_AX_PCIEIO_BUSY | B_AX_PCIEIO_TX_BUSY | B_AX_PCIEIO_RX_BUSY; 3803 3804 ret = read_poll_timeout_atomic(rtw89_read32, sts, (sts & busy) == 0x0, 3805 10, 1000, false, rtwdev, 3806 R_AX_PCIE_DMA_BUSY1); 3807 if (ret) { 3808 rtw89_err(rtwdev, "pci dmach busy1 0x%X\n", 3809 rtw89_read32(rtwdev, R_AX_PCIE_DMA_BUSY1)); 3810 return -EINVAL; 3811 } 3812 return ret; 3813 } 3814 3815 static int rtw89_pci_lv1rst_stop_dma_ax(struct rtw89_dev *rtwdev) 3816 { 3817 u32 val; 3818 int ret; 3819 3820 if (rtwdev->chip->chip_id == RTL8852C) 3821 return 0; 3822 3823 rtw89_pci_ctrl_dma_all(rtwdev, false); 3824 ret = rtw89_pci_poll_io_idle_ax(rtwdev); 3825 if (ret) { 3826 val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG); 3827 rtw89_debug(rtwdev, RTW89_DBG_HCI, 3828 "[PCIe] poll_io_idle fail, before 0x%08x: 0x%08x\n", 3829 R_AX_DBG_ERR_FLAG, val); 3830 if (val & B_AX_TX_STUCK || val & B_AX_PCIE_TXBD_LEN0) 3831 rtw89_mac_ctrl_hci_dma_tx(rtwdev, false); 3832 if (val & B_AX_RX_STUCK) 3833 rtw89_mac_ctrl_hci_dma_rx(rtwdev, false); 3834 rtw89_mac_ctrl_hci_dma_trx(rtwdev, true); 3835 ret = rtw89_pci_poll_io_idle_ax(rtwdev); 3836 val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG); 3837 rtw89_debug(rtwdev, RTW89_DBG_HCI, 3838 "[PCIe] poll_io_idle fail, after 0x%08x: 0x%08x\n", 3839 R_AX_DBG_ERR_FLAG, val); 3840 } 3841 3842 return ret; 3843 } 3844 3845 static int rtw89_pci_lv1rst_start_dma_ax(struct rtw89_dev *rtwdev) 3846 { 3847 u32 ret; 3848 3849 if (rtwdev->chip->chip_id == RTL8852C) 3850 return 0; 3851 3852 rtw89_mac_ctrl_hci_dma_trx(rtwdev, false); 3853 rtw89_mac_ctrl_hci_dma_trx(rtwdev, true); 3854 rtw89_pci_clr_idx_all(rtwdev); 3855 3856 ret = rtw89_pci_rst_bdram_ax(rtwdev); 3857 if (ret) 3858 return ret; 3859 3860 rtw89_pci_ctrl_dma_all(rtwdev, true); 3861 return ret; 3862 } 3863 3864 static int rtw89_pci_ops_mac_lv1_recovery(struct rtw89_dev *rtwdev, 3865 enum rtw89_lv1_rcvy_step step) 3866 { 3867 const struct rtw89_pci_info *info = rtwdev->pci_info; 3868 const struct rtw89_pci_gen_def *gen_def = info->gen_def; 3869 int ret; 3870 3871 switch (step) { 3872 case RTW89_LV1_RCVY_STEP_1: 3873 ret = gen_def->lv1rst_stop_dma(rtwdev); 3874 if (ret) 3875 rtw89_err(rtwdev, "lv1 rcvy pci stop dma fail\n"); 3876 3877 break; 3878 3879 case RTW89_LV1_RCVY_STEP_2: 3880 ret = gen_def->lv1rst_start_dma(rtwdev); 3881 if (ret) 3882 rtw89_err(rtwdev, "lv1 rcvy pci start dma fail\n"); 3883 break; 3884 3885 
static void rtw89_pci_ops_dump_err_status(struct rtw89_dev *rtwdev)
{
	if (rtwdev->chip->chip_gen == RTW89_CHIP_BE)
		return;

	if (rtwdev->chip->chip_id == RTL8852C) {
		rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n",
			   rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG_V1));
		rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n",
			   rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG_V1));
	} else {
		rtw89_info(rtwdev, "R_AX_RPQ_RXBD_IDX =0x%08x\n",
			   rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX));
		rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n",
			   rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG));
		rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n",
			   rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG));
	}
}

static int rtw89_pci_napi_poll(struct napi_struct *napi, int budget)
{
	struct rtw89_dev *rtwdev = container_of(napi, struct rtw89_dev, napi);
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	const struct rtw89_pci_gen_def *gen_def = info->gen_def;
	unsigned long flags;
	int work_done;

	rtwdev->napi_budget_countdown = budget;

	/* Poll the release-report queue first, then the RX queue; re-arm
	 * the interrupt only once both complete below budget.
	 */
	rtw89_write32(rtwdev, gen_def->isr_clear_rpq.addr, gen_def->isr_clear_rpq.data);
	work_done = rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown);
	if (work_done == budget)
		return budget;

	rtw89_write32(rtwdev, gen_def->isr_clear_rxq.addr, gen_def->isr_clear_rxq.data);
	work_done += rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown);
	if (work_done < budget && napi_complete_done(napi, work_done)) {
		spin_lock_irqsave(&rtwpci->irq_lock, flags);
		if (likely(rtwpci->running))
			rtw89_chip_enable_intr(rtwdev, rtwpci);
		spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
	}

	return work_done;
}

static int __maybe_unused rtw89_pci_suspend(struct device *dev)
{
	struct ieee80211_hw *hw = dev_get_drvdata(dev);
	struct rtw89_dev *rtwdev = hw->priv;
	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;

	rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
	rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST);
	rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
	if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) {
		rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
				  B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
		rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
				  B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG);
	} else {
		rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1,
				  B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN);
	}

	return 0;
}

static void rtw89_pci_l2_hci_ldo(struct rtw89_dev *rtwdev)
{
	if (rtwdev->chip->chip_id == RTL8852C)
		return;

	/* The hardware needs this register written twice for the setting
	 * to take effect.
	 */
	rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE,
				    RTW89_PCIE_BIT_CFG_RST_MSTATE);
	rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE,
				    RTW89_PCIE_BIT_CFG_RST_MSTATE);
}

static int __maybe_unused rtw89_pci_resume(struct device *dev)
{
	struct ieee80211_hw *hw = dev_get_drvdata(dev);
	struct rtw89_dev *rtwdev = hw->priv;
	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;

	rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
	rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST);
	rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
	if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) {
		rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL,
				  B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
		rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1,
				  B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG);
	} else {
		rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1,
				  B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN);
		rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1,
				  B_AX_SEL_REQ_ENTR_L1);
	}
	rtw89_pci_l2_hci_ldo(rtwdev);
	rtw89_pci_filter_out(rtwdev);
	rtw89_pci_link_cfg(rtwdev);
	rtw89_pci_l1ss_cfg(rtwdev);

	return 0;
}

SIMPLE_DEV_PM_OPS(rtw89_pm_ops, rtw89_pci_suspend, rtw89_pci_resume);
EXPORT_SYMBOL(rtw89_pm_ops);
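/*
 * Note on the PM wiring above (sketch of the generic macro, for
 * orientation only): SIMPLE_DEV_PM_OPS() roughly expands to
 *
 *	const struct dev_pm_ops rtw89_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(rtw89_pci_suspend, rtw89_pci_resume)
 *	};
 *
 * so the same handler pair also serves the hibernation callbacks. The
 * __maybe_unused annotations keep !CONFIG_PM_SLEEP builds warning-free,
 * since the macro then drops the references to both functions.
 */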
const struct rtw89_pci_gen_def rtw89_pci_gen_ax = {
	.isr_rdu = B_AX_RDU_INT,
	.isr_halt_c2h = B_AX_HALT_C2H_INT_EN,
	.isr_wdt_timeout = B_AX_WDT_TIMEOUT_INT_EN,
	.isr_clear_rpq = {R_AX_PCIE_HISR00, B_AX_RPQDMA_INT | B_AX_RPQBD_FULL_INT},
	.isr_clear_rxq = {R_AX_PCIE_HISR00, B_AX_RXP1DMA_INT | B_AX_RXDMA_INT |
					    B_AX_RDU_INT},

	.mac_pre_init = rtw89_pci_ops_mac_pre_init_ax,
	.mac_pre_deinit = NULL,
	.mac_post_init = rtw89_pci_ops_mac_post_init_ax,

	.clr_idx_all = rtw89_pci_clr_idx_all_ax,
	.rst_bdram = rtw89_pci_rst_bdram_ax,

	.lv1rst_stop_dma = rtw89_pci_lv1rst_stop_dma_ax,
	.lv1rst_start_dma = rtw89_pci_lv1rst_start_dma_ax,
};
EXPORT_SYMBOL(rtw89_pci_gen_ax);

static const struct rtw89_hci_ops rtw89_pci_ops = {
	.tx_write	= rtw89_pci_ops_tx_write,
	.tx_kick_off	= rtw89_pci_ops_tx_kick_off,
	.flush_queues	= rtw89_pci_ops_flush_queues,
	.reset		= rtw89_pci_ops_reset,
	.start		= rtw89_pci_ops_start,
	.stop		= rtw89_pci_ops_stop,
	.pause		= rtw89_pci_ops_pause,
	.switch_mode	= rtw89_pci_ops_switch_mode,
	.recalc_int_mit = rtw89_pci_recalc_int_mit,

	.read8		= rtw89_pci_ops_read8,
	.read16		= rtw89_pci_ops_read16,
	.read32		= rtw89_pci_ops_read32,
	.write8		= rtw89_pci_ops_write8,
	.write16	= rtw89_pci_ops_write16,
	.write32	= rtw89_pci_ops_write32,

	.mac_pre_init	= rtw89_pci_ops_mac_pre_init,
	.mac_pre_deinit	= rtw89_pci_ops_mac_pre_deinit,
	.mac_post_init	= rtw89_pci_ops_mac_post_init,
	.deinit		= rtw89_pci_ops_deinit,

	.check_and_reclaim_tx_resource = rtw89_pci_check_and_reclaim_tx_resource,
	.mac_lv1_rcvy	= rtw89_pci_ops_mac_lv1_recovery,
	.dump_err_status = rtw89_pci_ops_dump_err_status,
	.napi_poll	= rtw89_pci_napi_poll,

	.recovery_start = rtw89_pci_ops_recovery_start,
	.recovery_complete = rtw89_pci_ops_recovery_complete,

	.ctrl_txdma_ch	= rtw89_pci_ctrl_txdma_ch_pcie,
	.ctrl_txdma_fw_ch = rtw89_pci_ctrl_txdma_fw_ch_pcie,
	.ctrl_trxhci	= rtw89_pci_ctrl_dma_trx,
	.poll_txdma_ch	= rtw89_poll_txdma_ch_idle_pcie,
	.clr_idx_all	= rtw89_pci_clr_idx_all,
	.clear		= rtw89_pci_clear_resource,
	.disable_intr	= rtw89_pci_disable_intr_lock,
	.enable_intr	= rtw89_pci_enable_intr_lock,
	.rst_bdram	= rtw89_pci_reset_bdram,
};
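/*
 * Illustrative sketch: an AX-generation chip selects the generation
 * table exported above through its rtw89_pci_info. A trimmed example
 * initializer (the struct name and register choices here are
 * illustrative, not a complete real chip description):
 *
 *	static const struct rtw89_pci_info rtw8852a_pci_info = {
 *		.gen_def	= &rtw89_pci_gen_ax,
 *		.rpwm_addr	= R_AX_PCIE_HRPWM,
 *		.cpwm_addr	= R_AX_CPWM,
 *		...
 *	};
 */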
int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct rtw89_dev *rtwdev;
	const struct rtw89_driver_info *info;
	const struct rtw89_pci_info *pci_info;
	int ret;

	info = (const struct rtw89_driver_info *)id->driver_data;

	rtwdev = rtw89_alloc_ieee80211_hw(&pdev->dev,
					  sizeof(struct rtw89_pci),
					  info->chip);
	if (!rtwdev) {
		dev_err(&pdev->dev, "failed to allocate hw\n");
		return -ENOMEM;
	}

	pci_info = info->bus.pci;

	rtwdev->pci_info = pci_info;
	rtwdev->hci.ops = &rtw89_pci_ops;
	rtwdev->hci.type = RTW89_HCI_TYPE_PCIE;
	rtwdev->hci.rpwm_addr = pci_info->rpwm_addr;
	rtwdev->hci.cpwm_addr = pci_info->cpwm_addr;

	SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);

	ret = rtw89_core_init(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to initialise core\n");
		goto err_release_hw;
	}

	ret = rtw89_pci_claim_device(rtwdev, pdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to claim pci device\n");
		goto err_core_deinit;
	}

	ret = rtw89_pci_setup_resource(rtwdev, pdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to setup pci resource\n");
		goto err_declaim_pci;
	}

	ret = rtw89_chip_info_setup(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to setup chip information\n");
		goto err_clear_resource;
	}

	rtw89_pci_filter_out(rtwdev);
	rtw89_pci_link_cfg(rtwdev);
	rtw89_pci_l1ss_cfg(rtwdev);

	rtw89_core_napi_init(rtwdev);

	ret = rtw89_pci_request_irq(rtwdev, pdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to request pci irq\n");
		goto err_deinit_napi;
	}

	ret = rtw89_core_register(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to register core\n");
		goto err_free_irq;
	}

	return 0;

err_free_irq:
	rtw89_pci_free_irq(rtwdev, pdev);
err_deinit_napi:
	rtw89_core_napi_deinit(rtwdev);
err_clear_resource:
	rtw89_pci_clear_resource(rtwdev, pdev);
err_declaim_pci:
	rtw89_pci_declaim_device(rtwdev, pdev);
err_core_deinit:
	rtw89_core_deinit(rtwdev);
err_release_hw:
	rtw89_free_ieee80211_hw(rtwdev);

	return ret;
}
EXPORT_SYMBOL(rtw89_pci_probe);

void rtw89_pci_remove(struct pci_dev *pdev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
	struct rtw89_dev *rtwdev;

	rtwdev = hw->priv;

	rtw89_pci_free_irq(rtwdev, pdev);
	rtw89_core_napi_deinit(rtwdev);
	rtw89_core_unregister(rtwdev);
	rtw89_pci_clear_resource(rtwdev, pdev);
	rtw89_pci_declaim_device(rtwdev, pdev);
	rtw89_core_deinit(rtwdev);
	rtw89_free_ieee80211_hw(rtwdev);
}
EXPORT_SYMBOL(rtw89_pci_remove);

MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek PCI 802.11ax wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
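/*
 * Usage sketch (illustrative; the identifiers below are placeholders in
 * the style of the per-chip glue modules): each supported chip provides
 * a thin PCI glue module that binds its ID table to the entry points
 * exported above, roughly as follows:
 *
 *	static const struct pci_device_id rtw89_8852ae_id_table[] = {
 *		{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8852),
 *		  .driver_data = (kernel_ulong_t)&rtw89_8852ae_info },
 *		{},
 *	};
 *	MODULE_DEVICE_TABLE(pci, rtw89_8852ae_id_table);
 *
 *	static struct pci_driver rtw89_8852ae_driver = {
 *		.name		= "rtw89_8852ae",
 *		.id_table	= rtw89_8852ae_id_table,
 *		.probe		= rtw89_pci_probe,
 *		.remove		= rtw89_pci_remove,
 *		.driver.pm	= &rtw89_pm_ops,
 *	};
 *	module_pci_driver(rtw89_8852ae_driver);
 */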