// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2020 Realtek Corporation
 */

#if defined(__FreeBSD__)
#define LINUXKPI_PARAM_PREFIX rtw89_pci_
#endif

#include <linux/pci.h>

#include "mac.h"
#include "pci.h"
#include "reg.h"
#include "ser.h"

static bool rtw89_pci_disable_clkreq;
static bool rtw89_pci_disable_aspm_l1;
static bool rtw89_pci_disable_l1ss;
module_param_named(disable_clkreq, rtw89_pci_disable_clkreq, bool, 0644);
module_param_named(disable_aspm_l1, rtw89_pci_disable_aspm_l1, bool, 0644);
module_param_named(disable_aspm_l1ss, rtw89_pci_disable_l1ss, bool, 0644);
MODULE_PARM_DESC(disable_clkreq, "Set Y to disable PCI clkreq support");
MODULE_PARM_DESC(disable_aspm_l1, "Set Y to disable PCI ASPM L1 support");
MODULE_PARM_DESC(disable_aspm_l1ss, "Set Y to disable PCI L1SS support");

static int rtw89_pci_rst_bdram_pcie(struct rtw89_dev *rtwdev)
{
	u32 val;
	int ret;

	rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1,
		      rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) | B_AX_RST_BDRAM);

	ret = read_poll_timeout_atomic(rtw89_read32, val, !(val & B_AX_RST_BDRAM),
				       1, RTW89_PCI_POLL_BDRAM_RST_CNT, false,
				       rtwdev, R_AX_PCIE_INIT_CFG1);

	if (ret)
		return -EBUSY;

	return 0;
}

static u32 rtw89_pci_dma_recalc(struct rtw89_dev *rtwdev,
				struct rtw89_pci_dma_ring *bd_ring,
				u32 cur_idx, bool tx)
{
	u32 cnt, cur_rp, wp, rp, len;

	rp = bd_ring->rp;
	wp = bd_ring->wp;
	len = bd_ring->len;

	cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
	if (tx)
		cnt = cur_rp >= rp ? cur_rp - rp : len - (rp - cur_rp);
	else
		cnt = cur_rp >= wp ? cur_rp - wp : len - (wp - cur_rp);

	bd_ring->rp = cur_rp;

	return cnt;
}

static u32 rtw89_pci_txbd_recalc(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 addr_idx = bd_ring->addr.idx;
	u32 cnt, idx;

	idx = rtw89_read32(rtwdev, addr_idx);
	cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, true);

	return cnt;
}

static void rtw89_pci_release_fwcmd(struct rtw89_dev *rtwdev,
				    struct rtw89_pci *rtwpci,
				    u32 cnt, bool release_all)
{
	struct rtw89_pci_tx_data *tx_data;
	struct sk_buff *skb;
	u32 qlen;

	while (cnt--) {
		skb = skb_dequeue(&rtwpci->h2c_queue);
		if (!skb) {
			rtw89_err(rtwdev, "failed to pre-release fwcmd\n");
			return;
		}
		skb_queue_tail(&rtwpci->h2c_release_queue, skb);
	}

	qlen = skb_queue_len(&rtwpci->h2c_release_queue);
	if (!release_all)
		qlen = qlen > RTW89_PCI_MULTITAG ?
		       qlen - RTW89_PCI_MULTITAG : 0;

	while (qlen--) {
		skb = skb_dequeue(&rtwpci->h2c_release_queue);
		if (!skb) {
			rtw89_err(rtwdev, "failed to release fwcmd\n");
			return;
		}
		tx_data = RTW89_PCI_TX_SKB_CB(skb);
		dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	}
}
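
/* Reclaim TXBDs the hardware has consumed on the firmware command channel
 * (CH12) and pass that count to rtw89_pci_release_fwcmd(). Note that the
 * release path keeps the most recent RTW89_PCI_MULTITAG skbs queued unless
 * release_all is set, presumably because the hardware may still reference
 * them.
 */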
static void rtw89_pci_reclaim_tx_fwcmd(struct rtw89_dev *rtwdev,
				       struct rtw89_pci *rtwpci)
{
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];
	u32 cnt;

	cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring);
	if (!cnt)
		return;
	rtw89_pci_release_fwcmd(rtwdev, rtwpci, cnt, false);
}

static u32 rtw89_pci_rxbd_recalc(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_rx_ring *rx_ring)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	u32 addr_idx = bd_ring->addr.idx;
	u32 cnt, idx;

	idx = rtw89_read32(rtwdev, addr_idx);
	cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, false);

	return cnt;
}

static void rtw89_pci_sync_skb_for_cpu(struct rtw89_dev *rtwdev,
				       struct sk_buff *skb)
{
	struct rtw89_pci_rx_info *rx_info;
	dma_addr_t dma;

	rx_info = RTW89_PCI_RX_SKB_CB(skb);
	dma = rx_info->dma;
	dma_sync_single_for_cpu(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE,
				DMA_FROM_DEVICE);
}

static void rtw89_pci_sync_skb_for_device(struct rtw89_dev *rtwdev,
					  struct sk_buff *skb)
{
	struct rtw89_pci_rx_info *rx_info;
	dma_addr_t dma;

	rx_info = RTW89_PCI_RX_SKB_CB(skb);
	dma = rx_info->dma;
	dma_sync_single_for_device(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE,
				   DMA_FROM_DEVICE);
}

static int rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev,
				      struct sk_buff *skb)
{
	struct rtw89_pci_rxbd_info *rxbd_info;
	struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);

	rxbd_info = (struct rtw89_pci_rxbd_info *)skb->data;
	rx_info->fs = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_FS);
	rx_info->ls = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_LS);
	rx_info->len = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_WRITE_SIZE);
	rx_info->tag = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_TAG);

	return 0;
}

static void rtw89_pci_ctrl_txdma_ch_pcie(struct rtw89_dev *rtwdev, bool enable)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1;
	const struct rtw89_reg_def *dma_stop2 = &info->dma_stop2;

	if (enable) {
		rtw89_write32_clr(rtwdev, dma_stop1->addr, dma_stop1->mask);
		if (dma_stop2->addr)
			rtw89_write32_clr(rtwdev, dma_stop2->addr, dma_stop2->mask);
	} else {
		rtw89_write32_set(rtwdev, dma_stop1->addr, dma_stop1->mask);
		if (dma_stop2->addr)
			rtw89_write32_set(rtwdev, dma_stop2->addr, dma_stop2->mask);
	}
}

static void rtw89_pci_ctrl_txdma_fw_ch_pcie(struct rtw89_dev *rtwdev, bool enable)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1;

	if (enable)
		rtw89_write32_clr(rtwdev, dma_stop1->addr, B_AX_STOP_CH12);
	else
		rtw89_write32_set(rtwdev, dma_stop1->addr, B_AX_STOP_CH12);
}

static bool
rtw89_skb_put_rx_data(struct rtw89_dev *rtwdev, bool fs, bool ls,
		      struct sk_buff *new,
		      const struct sk_buff *skb, u32 offset,
		      const struct rtw89_pci_rx_info *rx_info,
		      const struct rtw89_rx_desc_info *desc_info)
{
	u32 copy_len = rx_info->len - offset;

	if (unlikely(skb_tailroom(new) < copy_len)) {
		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
			    "invalid rx data length bd_len=%d desc_len=%d offset=%d (fs=%d ls=%d)\n",
			    rx_info->len, desc_info->pkt_size, offset, fs, ls);
		rtw89_hex_dump(rtwdev, RTW89_DBG_TXRX, "rx_data: ",
			       skb->data, rx_info->len);
		/* length of a single segment skb is desc_info->pkt_size */
		if (fs && ls) {
			copy_len = desc_info->pkt_size;
		} else {
			rtw89_info(rtwdev, "drop rx data due to invalid length\n");
			return false;
		}
	}

	skb_put_data(new, skb->data + offset, copy_len);

	return true;
}
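
/* Deliver the contents of one RX buffer descriptor. A received packet can
 * span several RXBD segments: the first segment (FS) carries the RX
 * descriptor and triggers allocation of the target skb, follow-up segments
 * carry payload only, and the last segment (LS) hands the assembled skb to
 * the core via rtw89_core_rx().
 */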
static u32 rtw89_pci_rxbd_deliver_skbs(struct rtw89_dev *rtwdev,
				       struct rtw89_pci_rx_ring *rx_ring)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	struct rtw89_pci_rx_info *rx_info;
	struct rtw89_rx_desc_info *desc_info = &rx_ring->diliver_desc;
	struct sk_buff *new = rx_ring->diliver_skb;
	struct sk_buff *skb;
	u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
	u32 offset;
	u32 cnt = 1;
	bool fs, ls;
	int ret;

	skb = rx_ring->buf[bd_ring->wp];
	rtw89_pci_sync_skb_for_cpu(rtwdev, skb);

	ret = rtw89_pci_rxbd_info_update(rtwdev, skb);
	if (ret) {
		rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
			  bd_ring->wp, ret);
		goto err_sync_device;
	}

	rx_info = RTW89_PCI_RX_SKB_CB(skb);
	fs = rx_info->fs;
	ls = rx_info->ls;

	if (fs) {
		if (new) {
			rtw89_debug(rtwdev, RTW89_DBG_UNEXP,
				    "skb should not be ready before first segment start\n");
			goto err_sync_device;
		}
		if (desc_info->ready) {
			rtw89_warn(rtwdev, "desc info should not be ready before first segment start\n");
			goto err_sync_device;
		}

		rtw89_chip_query_rxdesc(rtwdev, desc_info, skb->data, rxinfo_size);

		new = rtw89_alloc_skb_for_rx(rtwdev, desc_info->pkt_size);
		if (!new)
			goto err_sync_device;

		rx_ring->diliver_skb = new;

		/* first segment has RX desc */
		offset = desc_info->offset + desc_info->rxd_len;
	} else {
		offset = sizeof(struct rtw89_pci_rxbd_info);
		if (!new) {
			rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "no last skb\n");
			goto err_sync_device;
		}
	}
	if (!rtw89_skb_put_rx_data(rtwdev, fs, ls, new, skb, offset, rx_info, desc_info))
		goto err_sync_device;
	rtw89_pci_sync_skb_for_device(rtwdev, skb);
	rtw89_pci_rxbd_increase(rx_ring, 1);

	if (!desc_info->ready) {
		rtw89_warn(rtwdev, "no rx desc information\n");
		goto err_free_resource;
	}
	if (ls) {
		rtw89_core_rx(rtwdev, desc_info, new);
		rx_ring->diliver_skb = NULL;
		desc_info->ready = false;
	}

	return cnt;

err_sync_device:
	rtw89_pci_sync_skb_for_device(rtwdev, skb);
	rtw89_pci_rxbd_increase(rx_ring, 1);
err_free_resource:
	if (new)
		dev_kfree_skb_any(new);
	rx_ring->diliver_skb = NULL;
	desc_info->ready = false;

	return cnt;
}

static void rtw89_pci_rxbd_deliver(struct rtw89_dev *rtwdev,
				   struct rtw89_pci_rx_ring *rx_ring,
				   u32 cnt)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	u32 rx_cnt;

	while (cnt && rtwdev->napi_budget_countdown > 0) {
		rx_cnt = rtw89_pci_rxbd_deliver_skbs(rtwdev, rx_ring);
		if (!rx_cnt) {
			rtw89_err(rtwdev, "failed to deliver RXBD skb\n");

			/* skip the remaining RXBD buffers */
			rtw89_pci_rxbd_increase(rx_ring, cnt);
			break;
		}

		cnt -= rx_cnt;
	}

	rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
}
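
/* Poll the RXQ ring within the NAPI budget. rtwdev->napi_budget_countdown
 * is decremented as frames are delivered to the core; once it drops to zero
 * or below, the full budget is reported as consumed.
 */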
static int rtw89_pci_poll_rxq_dma(struct rtw89_dev *rtwdev,
				  struct rtw89_pci *rtwpci, int budget)
{
	struct rtw89_pci_rx_ring *rx_ring;
	int countdown = rtwdev->napi_budget_countdown;
	u32 cnt;

	rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RXQ];

	cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
	if (!cnt)
		return 0;

	cnt = min_t(u32, budget, cnt);

	rtw89_pci_rxbd_deliver(rtwdev, rx_ring, cnt);

	/* In case of flushing pending SKBs, the countdown may go below zero. */
	if (rtwdev->napi_budget_countdown <= 0)
		return budget;

	return budget - countdown;
}

static void rtw89_pci_tx_status(struct rtw89_dev *rtwdev,
				struct rtw89_pci_tx_ring *tx_ring,
				struct sk_buff *skb, u8 tx_status)
{
	struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
	struct ieee80211_tx_info *info;

	rtw89_core_tx_wait_complete(rtwdev, skb_data, tx_status == RTW89_TX_DONE);

	info = IEEE80211_SKB_CB(skb);
	ieee80211_tx_info_clear_status(info);

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
	if (tx_status == RTW89_TX_DONE) {
		info->flags |= IEEE80211_TX_STAT_ACK;
		tx_ring->tx_acked++;
	} else {
		if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)
			rtw89_debug(rtwdev, RTW89_DBG_FW,
				    "failed to TX of status %x\n", tx_status);
		switch (tx_status) {
		case RTW89_TX_RETRY_LIMIT:
			tx_ring->tx_retry_lmt++;
			break;
		case RTW89_TX_LIFE_TIME:
			tx_ring->tx_life_time++;
			break;
		case RTW89_TX_MACID_DROP:
			tx_ring->tx_mac_id_drop++;
			break;
		default:
			rtw89_warn(rtwdev, "invalid TX status %x\n", tx_status);
			break;
		}
	}

	ieee80211_tx_status_ni(rtwdev->hw, skb);
}

static void rtw89_pci_reclaim_txbd(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_tx_wd *txwd;
	u32 cnt;

	cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring);
	while (cnt--) {
		txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list);
		if (!txwd) {
			rtw89_warn(rtwdev, "No busy txwd pages available\n");
			break;
		}

		list_del_init(&txwd->list);

		/* this skb has been freed by RPP */
		if (skb_queue_len(&txwd->queue) == 0)
			rtw89_pci_enqueue_txwd(tx_ring, txwd);
	}
}

static void rtw89_pci_release_busy_txwd(struct rtw89_dev *rtwdev,
					struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
	struct rtw89_pci_tx_wd *txwd;
	int i;

	for (i = 0; i < wd_ring->page_num; i++) {
		txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list);
		if (!txwd)
			break;

		list_del_init(&txwd->list);
	}
}

static void rtw89_pci_release_txwd_skb(struct rtw89_dev *rtwdev,
				       struct rtw89_pci_tx_ring *tx_ring,
				       struct rtw89_pci_tx_wd *txwd, u16 seq,
				       u8 tx_status)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_data *tx_data;
	struct sk_buff *skb, *tmp;
	u8 txch = tx_ring->txch;

	if (!list_empty(&txwd->list)) {
		rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
		/* In low power mode, an RPP can arrive before the TX BD has
		 * been updated. In normal mode this should not happen, so
		 * give it a warning.
		 */
		if (!rtwpci->low_power && !list_empty(&txwd->list))
			rtw89_warn(rtwdev, "queue %d txwd %d is not idle\n",
				   txch, seq);
	}

	skb_queue_walk_safe(&txwd->queue, skb, tmp) {
		skb_unlink(skb, &txwd->queue);

		tx_data = RTW89_PCI_TX_SKB_CB(skb);
		dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
				 DMA_TO_DEVICE);

		rtw89_pci_tx_status(rtwdev, tx_ring, skb, tx_status);
	}

	if (list_empty(&txwd->list))
		rtw89_pci_enqueue_txwd(tx_ring, txwd);
}
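
/* Handle one RPP release report from the hardware: look up the TXWD page
 * addressed by the report's sequence number on the DMA channel derived from
 * its queue selector, and complete the skbs queued on it with the reported
 * TX status.
 */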
static void rtw89_pci_release_rpp(struct rtw89_dev *rtwdev,
				  struct rtw89_pci_rpp_fmt *rpp)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring;
	struct rtw89_pci_tx_wd_ring *wd_ring;
	struct rtw89_pci_tx_wd *txwd;
	u16 seq;
	u8 qsel, tx_status, txch;

	seq = le32_get_bits(rpp->dword, RTW89_PCI_RPP_SEQ);
	qsel = le32_get_bits(rpp->dword, RTW89_PCI_RPP_QSEL);
	tx_status = le32_get_bits(rpp->dword, RTW89_PCI_RPP_TX_STATUS);
	txch = rtw89_core_get_ch_dma(rtwdev, qsel);

	if (txch == RTW89_TXCH_CH12) {
		rtw89_warn(rtwdev, "should no fwcmd release report\n");
		return;
	}

	tx_ring = &rtwpci->tx_rings[txch];
	wd_ring = &tx_ring->wd_ring;
	txwd = &wd_ring->pages[seq];

	rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, seq, tx_status);
}

static void rtw89_pci_release_pending_txwd_skb(struct rtw89_dev *rtwdev,
					       struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
	struct rtw89_pci_tx_wd *txwd;
	int i;

	for (i = 0; i < wd_ring->page_num; i++) {
		txwd = &wd_ring->pages[i];

		if (!list_empty(&txwd->list))
			continue;

		rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, i, RTW89_TX_MACID_DROP);
	}
}

static u32 rtw89_pci_release_tx_skbs(struct rtw89_dev *rtwdev,
				     struct rtw89_pci_rx_ring *rx_ring,
				     u32 max_cnt)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	struct rtw89_pci_rx_info *rx_info;
	struct rtw89_pci_rpp_fmt *rpp;
	struct rtw89_rx_desc_info desc_info = {};
	struct sk_buff *skb;
	u32 cnt = 0;
	u32 rpp_size = sizeof(struct rtw89_pci_rpp_fmt);
	u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
	u32 offset;
	int ret;

	skb = rx_ring->buf[bd_ring->wp];
	rtw89_pci_sync_skb_for_cpu(rtwdev, skb);

	ret = rtw89_pci_rxbd_info_update(rtwdev, skb);
	if (ret) {
		rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
			  bd_ring->wp, ret);
		goto err_sync_device;
	}

	rx_info = RTW89_PCI_RX_SKB_CB(skb);
	if (!rx_info->fs || !rx_info->ls) {
		rtw89_err(rtwdev, "cannot process RP frame not set FS/LS\n");
		return cnt;
	}

	rtw89_chip_query_rxdesc(rtwdev, &desc_info, skb->data, rxinfo_size);

	/* first segment has RX desc */
	offset = desc_info.offset + desc_info.rxd_len;
	for (; offset + rpp_size <= rx_info->len; offset += rpp_size) {
		rpp = (struct rtw89_pci_rpp_fmt *)(skb->data + offset);
		rtw89_pci_release_rpp(rtwdev, rpp);
	}

	rtw89_pci_sync_skb_for_device(rtwdev, skb);
	rtw89_pci_rxbd_increase(rx_ring, 1);
	cnt++;

	return cnt;

err_sync_device:
	rtw89_pci_sync_skb_for_device(rtwdev, skb);
	return 0;
}

static void rtw89_pci_release_tx(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_rx_ring *rx_ring,
				 u32 cnt)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	u32 release_cnt;

	while (cnt) {
		release_cnt = rtw89_pci_release_tx_skbs(rtwdev, rx_ring, cnt);
		if (!release_cnt) {
			rtw89_err(rtwdev, "failed to release TX skbs\n");

			/* skip the remaining RXBD buffers */
			rtw89_pci_rxbd_increase(rx_ring, cnt);
			break;
		}

		cnt -= release_cnt;
	}

	rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
}
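
/* Poll the release packet queue (RPQ). Each processed RXBD may complete
 * many TX skbs, but budget accounting here is per RXBD, and the queue is
 * always drained completely, so the countdown can end up below zero.
 */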
static int rtw89_pci_poll_rpq_dma(struct rtw89_dev *rtwdev,
				  struct rtw89_pci *rtwpci, int budget)
{
	struct rtw89_pci_rx_ring *rx_ring;
	u32 cnt;
	int work_done;

	rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];

	spin_lock_bh(&rtwpci->trx_lock);

	cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
	if (cnt == 0)
		goto out_unlock;

	rtw89_pci_release_tx(rtwdev, rx_ring, cnt);

out_unlock:
	spin_unlock_bh(&rtwpci->trx_lock);

	/* always release all RPQ */
	work_done = min_t(int, cnt, budget);
	rtwdev->napi_budget_countdown -= work_done;

	return work_done;
}

static void rtw89_pci_isr_rxd_unavail(struct rtw89_dev *rtwdev,
				      struct rtw89_pci *rtwpci)
{
	struct rtw89_pci_rx_ring *rx_ring;
	struct rtw89_pci_dma_ring *bd_ring;
	u32 reg_idx;
	u16 hw_idx, hw_idx_next, host_idx;
	int i;

	for (i = 0; i < RTW89_RXCH_NUM; i++) {
		rx_ring = &rtwpci->rx_rings[i];
		bd_ring = &rx_ring->bd_ring;

		reg_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
		hw_idx = FIELD_GET(TXBD_HW_IDX_MASK, reg_idx);
		host_idx = FIELD_GET(TXBD_HOST_IDX_MASK, reg_idx);
		hw_idx_next = (hw_idx + 1) % bd_ring->len;

		if (hw_idx_next == host_idx)
			rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "%d RXD unavailable\n", i);

		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
			    "%d RXD unavailable, idx=0x%08x, len=%d\n",
			    i, reg_idx, bd_ring->len);
	}
}

void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev,
			       struct rtw89_pci *rtwpci,
			       struct rtw89_pci_isrs *isrs)
{
	isrs->halt_c2h_isrs = rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs;
	isrs->isrs[0] = rtw89_read32(rtwdev, R_AX_PCIE_HISR00) & rtwpci->intrs[0];
	isrs->isrs[1] = rtw89_read32(rtwdev, R_AX_PCIE_HISR10) & rtwpci->intrs[1];

	rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs);
	rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isrs->isrs[0]);
	rtw89_write32(rtwdev, R_AX_PCIE_HISR10, isrs->isrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_recognize_intrs);
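
/* V1 chips use an indirect interrupt layout: R_AX_PCIE_HISR00_V1 only
 * reports which sub-ISR groups fired, and each group's status register is
 * read (and acknowledged) only when its indication bit is set.
 */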
void rtw89_pci_recognize_intrs_v1(struct rtw89_dev *rtwdev,
				  struct rtw89_pci *rtwpci,
				  struct rtw89_pci_isrs *isrs)
{
	isrs->ind_isrs = rtw89_read32(rtwdev, R_AX_PCIE_HISR00_V1) & rtwpci->ind_intrs;
	isrs->halt_c2h_isrs = isrs->ind_isrs & B_AX_HS0ISR_IND_INT_EN ?
			      rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs : 0;
	isrs->isrs[0] = isrs->ind_isrs & B_AX_HCI_AXIDMA_INT_EN ?
			rtw89_read32(rtwdev, R_AX_HAXI_HISR00) & rtwpci->intrs[0] : 0;
	isrs->isrs[1] = isrs->ind_isrs & B_AX_HS1ISR_IND_INT_EN ?
			rtw89_read32(rtwdev, R_AX_HISR1) & rtwpci->intrs[1] : 0;

	if (isrs->halt_c2h_isrs)
		rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs);
	if (isrs->isrs[0])
		rtw89_write32(rtwdev, R_AX_HAXI_HISR00, isrs->isrs[0]);
	if (isrs->isrs[1])
		rtw89_write32(rtwdev, R_AX_HISR1, isrs->isrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v1);

static void rtw89_pci_clear_isr0(struct rtw89_dev *rtwdev, u32 isr00)
{
	/* write 1 clear */
	rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isr00);
}

void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, rtwpci->intrs[0]);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, rtwpci->intrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_enable_intr);

void rtw89_pci_disable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_HIMR0, 0);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, 0);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, 0);
}
EXPORT_SYMBOL(rtw89_pci_disable_intr);

void rtw89_pci_enable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, rtwpci->ind_intrs);
	rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
	rtw89_write32(rtwdev, R_AX_HAXI_HIMR00, rtwpci->intrs[0]);
	rtw89_write32(rtwdev, R_AX_HIMR1, rtwpci->intrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_enable_intr_v1);

void rtw89_pci_disable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, 0);
}
EXPORT_SYMBOL(rtw89_pci_disable_intr_v1);

static void rtw89_pci_ops_recovery_start(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw89_chip_disable_intr(rtwdev, rtwpci);
	rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_START);
	rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static void rtw89_pci_ops_recovery_complete(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw89_chip_disable_intr(rtwdev, rtwpci);
	rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_COMPLETE);
	rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static void rtw89_pci_low_power_interrupt_handler(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	int budget = NAPI_POLL_WEIGHT;

	/* Prevent RXQ from getting stuck when the budget runs out. */
	rtwdev->napi_budget_countdown = budget;

	rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, budget);
	rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, budget);
}
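
/* Threaded half of the IRQ: the hard handler below has already masked the
 * interrupt, so recognize and acknowledge the ISRs here, report SER events,
 * and either process RX directly (recovery and low power paths) or schedule
 * NAPI. In the NAPI case the interrupt presumably stays masked until
 * polling completes; the direct paths re-enable it at enable_intr.
 */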
static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev)
{
	struct rtw89_dev *rtwdev = dev;
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_isrs isrs;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw89_chip_recognize_intrs(rtwdev, rtwpci, &isrs);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);

	if (unlikely(isrs.isrs[0] & B_AX_RDU_INT))
		rtw89_pci_isr_rxd_unavail(rtwdev, rtwpci);

	if (unlikely(isrs.halt_c2h_isrs & B_AX_HALT_C2H_INT_EN))
		rtw89_ser_notify(rtwdev, rtw89_mac_get_err_status(rtwdev));

	if (unlikely(isrs.halt_c2h_isrs & B_AX_WDT_TIMEOUT_INT_EN))
		rtw89_ser_notify(rtwdev, MAC_AX_ERR_L2_ERR_WDT_TIMEOUT_INT);

	if (unlikely(rtwpci->under_recovery))
		goto enable_intr;

	if (unlikely(rtwpci->low_power)) {
		rtw89_pci_low_power_interrupt_handler(rtwdev);
		goto enable_intr;
	}

	if (likely(rtwpci->running)) {
		local_bh_disable();
		napi_schedule(&rtwdev->napi);
		local_bh_enable();
	}

	return IRQ_HANDLED;

enable_intr:
	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	if (likely(rtwpci->running))
		rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
	return IRQ_HANDLED;
}

static irqreturn_t rtw89_pci_interrupt_handler(int irq, void *dev)
{
	struct rtw89_dev *rtwdev = dev;
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;
	irqreturn_t irqret = IRQ_WAKE_THREAD;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);

	/* An interrupt event that is already in flight can still trigger
	 * this handler even after pci_stop() has turned off the IMR.
	 */
	if (unlikely(!rtwpci->running)) {
		irqret = IRQ_HANDLED;
		goto exit;
	}

	rtw89_chip_disable_intr(rtwdev, rtwpci);
exit:
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);

	return irqret;
}

#define DEF_TXCHADDRS_TYPE1(info, txch, v...) \
	[RTW89_TXCH_##txch] = { \
		.num = R_AX_##txch##_TXBD_NUM ##v, \
		.idx = R_AX_##txch##_TXBD_IDX ##v, \
		.bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
		.desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
		.desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
	}

#define DEF_TXCHADDRS(info, txch, v...) \
	[RTW89_TXCH_##txch] = { \
		.num = R_AX_##txch##_TXBD_NUM, \
		.idx = R_AX_##txch##_TXBD_IDX, \
		.bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
		.desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
		.desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
	}

#define DEF_RXCHADDRS(info, rxch, v...) \
	[RTW89_RXCH_##rxch] = { \
		.num = R_AX_##rxch##_RXBD_NUM ##v, \
		.idx = R_AX_##rxch##_RXBD_IDX ##v, \
		.desa_l = R_AX_##rxch##_RXBD_DESA_L ##v, \
		.desa_h = R_AX_##rxch##_RXBD_DESA_H ##v, \
	}
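
/* Illustration of the token pasting above: DEF_TXCHADDRS(info, ACH0, _V1)
 * expands to
 *	[RTW89_TXCH_ACH0] = {
 *		.num = R_AX_ACH0_TXBD_NUM,
 *		.idx = R_AX_ACH0_TXBD_IDX,
 *		.bdram = R_AX_ACH0_BDRAM_CTRL_V1,
 *		.desa_l = R_AX_ACH0_TXBD_DESA_L_V1,
 *		.desa_h = R_AX_ACH0_TXBD_DESA_H_V1,
 *	}
 * i.e. only the TYPE1 variant versions the num/idx registers as well.
 */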
const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set = {
	.tx = {
		DEF_TXCHADDRS(info, ACH0),
		DEF_TXCHADDRS(info, ACH1),
		DEF_TXCHADDRS(info, ACH2),
		DEF_TXCHADDRS(info, ACH3),
		DEF_TXCHADDRS(info, ACH4),
		DEF_TXCHADDRS(info, ACH5),
		DEF_TXCHADDRS(info, ACH6),
		DEF_TXCHADDRS(info, ACH7),
		DEF_TXCHADDRS(info, CH8),
		DEF_TXCHADDRS(info, CH9),
		DEF_TXCHADDRS_TYPE1(info, CH10),
		DEF_TXCHADDRS_TYPE1(info, CH11),
		DEF_TXCHADDRS(info, CH12),
	},
	.rx = {
		DEF_RXCHADDRS(info, RXQ),
		DEF_RXCHADDRS(info, RPQ),
	},
};
EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set);

const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1 = {
	.tx = {
		DEF_TXCHADDRS(info, ACH0, _V1),
		DEF_TXCHADDRS(info, ACH1, _V1),
		DEF_TXCHADDRS(info, ACH2, _V1),
		DEF_TXCHADDRS(info, ACH3, _V1),
		DEF_TXCHADDRS(info, ACH4, _V1),
		DEF_TXCHADDRS(info, ACH5, _V1),
		DEF_TXCHADDRS(info, ACH6, _V1),
		DEF_TXCHADDRS(info, ACH7, _V1),
		DEF_TXCHADDRS(info, CH8, _V1),
		DEF_TXCHADDRS(info, CH9, _V1),
		DEF_TXCHADDRS_TYPE1(info, CH10, _V1),
		DEF_TXCHADDRS_TYPE1(info, CH11, _V1),
		DEF_TXCHADDRS(info, CH12, _V1),
	},
	.rx = {
		DEF_RXCHADDRS(info, RXQ, _V1),
		DEF_RXCHADDRS(info, RPQ, _V1),
	},
};
EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_v1);

#undef DEF_TXCHADDRS_TYPE1
#undef DEF_TXCHADDRS
#undef DEF_RXCHADDRS

static int rtw89_pci_get_txch_addrs(struct rtw89_dev *rtwdev,
				    enum rtw89_tx_channel txch,
				    const struct rtw89_pci_ch_dma_addr **addr)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;

	if (txch >= RTW89_TXCH_NUM)
		return -EINVAL;

	*addr = &info->dma_addr_set->tx[txch];

	return 0;
}

static int rtw89_pci_get_rxch_addrs(struct rtw89_dev *rtwdev,
				    enum rtw89_rx_channel rxch,
				    const struct rtw89_pci_ch_dma_addr **addr)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;

	if (rxch >= RTW89_RXCH_NUM)
		return -EINVAL;

	*addr = &info->dma_addr_set->rx[rxch];

	return 0;
}

static u32 rtw89_pci_get_avail_txbd_num(struct rtw89_pci_tx_ring *ring)
{
	struct rtw89_pci_dma_ring *bd_ring = &ring->bd_ring;

	/* one descriptor is reserved to detect whether the ring is full */
	if (bd_ring->rp > bd_ring->wp)
		return bd_ring->rp - bd_ring->wp - 1;

	return bd_ring->len - (bd_ring->wp - bd_ring->rp) - 1;
}
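
/* Worked example for rtw89_pci_get_avail_txbd_num() above (hypothetical
 * values): with len = 256, rp = 5 and wp = 10, usage is wp - rp = 5, so
 * 256 - 5 - 1 = 250 TXBDs remain; with rp = 10 and wp = 5 the writer has
 * wrapped and 10 - 5 - 1 = 4 remain. The "- 1" keeps one slot free so
 * that wp == rp unambiguously means an empty ring.
 */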
static
u32 __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];
	u32 cnt;

	spin_lock_bh(&rtwpci->trx_lock);
	rtw89_pci_reclaim_tx_fwcmd(rtwdev, rtwpci);
	cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	spin_unlock_bh(&rtwpci->trx_lock);

	return cnt;
}

static
u32 __rtw89_pci_check_and_reclaim_tx_resource_noio(struct rtw89_dev *rtwdev,
						   u8 txch)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
	u32 cnt;

	spin_lock_bh(&rtwpci->trx_lock);
	cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	cnt = min(cnt, wd_ring->curr_num);
	spin_unlock_bh(&rtwpci->trx_lock);

	return cnt;
}

static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
						     u8 txch)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 bd_cnt, wd_cnt, min_cnt = 0;
	struct rtw89_pci_rx_ring *rx_ring;
	enum rtw89_debug_mask debug_mask;
	u32 cnt;

	rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];

	spin_lock_bh(&rtwpci->trx_lock);
	bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	wd_cnt = wd_ring->curr_num;

	if (wd_cnt == 0 || bd_cnt == 0) {
		cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
		if (cnt)
			rtw89_pci_release_tx(rtwdev, rx_ring, cnt);
		else if (wd_cnt == 0)
			goto out_unlock;

		bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
		if (bd_cnt == 0)
			rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
	}

	bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	wd_cnt = wd_ring->curr_num;
	min_cnt = min(bd_cnt, wd_cnt);
	if (min_cnt == 0) {
		/* This message can show up frequently in low power mode or
		 * under high traffic with small-FIFO chips, and we have
		 * recognized it as normal behavior, so print it with the
		 * RTW89_DBG_TXRX mask in these situations.
		 */
		if (rtwpci->low_power || chip->small_fifo_size)
			debug_mask = RTW89_DBG_TXRX;
		else
			debug_mask = RTW89_DBG_UNEXP;

		rtw89_debug(rtwdev, debug_mask,
			    "still no tx resource after reclaim: wd_cnt=%d bd_cnt=%d\n",
			    wd_cnt, bd_cnt);
	}

out_unlock:
	spin_unlock_bh(&rtwpci->trx_lock);

	return min_cnt;
}

static u32 rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
						   u8 txch)
{
	if (rtwdev->hci.paused)
		return __rtw89_pci_check_and_reclaim_tx_resource_noio(rtwdev, txch);

	if (txch == RTW89_TXCH_CH12)
		return __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(rtwdev);

	return __rtw89_pci_check_and_reclaim_tx_resource(rtwdev, txch);
}

static void __rtw89_pci_tx_kick_off(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 host_idx, addr;

	spin_lock_bh(&rtwpci->trx_lock);

	addr = bd_ring->addr.idx;
	host_idx = bd_ring->wp;
	rtw89_write16(rtwdev, addr, host_idx);

	spin_unlock_bh(&rtwpci->trx_lock);
}

static void rtw89_pci_tx_bd_ring_update(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring,
					int n_txbd)
{
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 host_idx, len;

	len = bd_ring->len;
	host_idx = bd_ring->wp + n_txbd;
	host_idx = host_idx < len ? host_idx : host_idx - len;

	bd_ring->wp = host_idx;
}
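
/* Kick-offs write the ring's host index to hardware. While the HCI is
 * paused, defer the doorbell by marking the channel in kick_map;
 * rtw89_pci_tx_kick_off_pending() replays the deferred kicks on unpause.
 */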
static void rtw89_pci_ops_tx_kick_off(struct rtw89_dev *rtwdev, u8 txch)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];

	if (rtwdev->hci.paused) {
		set_bit(txch, rtwpci->kick_map);
		return;
	}

	__rtw89_pci_tx_kick_off(rtwdev, tx_ring);
}

static void rtw89_pci_tx_kick_off_pending(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring;
	int txch;

	for (txch = 0; txch < RTW89_TXCH_NUM; txch++) {
		if (!test_and_clear_bit(txch, rtwpci->kick_map))
			continue;

		tx_ring = &rtwpci->tx_rings[txch];
		__rtw89_pci_tx_kick_off(rtwdev, tx_ring);
	}
}

static void __pci_flush_txch(struct rtw89_dev *rtwdev, u8 txch, bool drop)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 cur_idx, cur_rp;
	u8 i;

	/* Because the time taken by the I/O is somewhat dynamic, it's hard to
	 * define a reasonable fixed total timeout for the read_poll_timeout*
	 * helpers. Instead, we can ensure a reasonable number of polls, so we
	 * just use a for loop with udelay here.
	 */
	for (i = 0; i < 60; i++) {
		cur_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
		cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
		if (cur_rp == bd_ring->wp)
			return;

		udelay(1);
	}

	if (!drop)
		rtw89_info(rtwdev, "timed out to flush pci txch: %d\n", txch);
}

static void __rtw89_pci_ops_flush_txchs(struct rtw89_dev *rtwdev, u32 txchs,
					bool drop)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	u8 i;

	for (i = 0; i < RTW89_TXCH_NUM; i++) {
		/* It may be unnecessary to flush FWCMD queue. */
		if (i == RTW89_TXCH_CH12)
			continue;
		if (info->tx_dma_ch_mask & BIT(i))
			continue;

		if (txchs & BIT(i))
			__pci_flush_txch(rtwdev, i, drop);
	}
}

static void rtw89_pci_ops_flush_queues(struct rtw89_dev *rtwdev, u32 queues,
				       bool drop)
{
	__rtw89_pci_ops_flush_txchs(rtwdev, BIT(RTW89_TXCH_NUM) - 1, drop);
}

u32 rtw89_pci_fill_txaddr_info(struct rtw89_dev *rtwdev,
			       void *txaddr_info_addr, u32 total_len,
			       dma_addr_t dma, u8 *add_info_nr)
{
	struct rtw89_pci_tx_addr_info_32 *txaddr_info = txaddr_info_addr;

	txaddr_info->length = cpu_to_le16(total_len);
	txaddr_info->option = cpu_to_le16(RTW89_PCI_ADDR_MSDU_LS |
					  RTW89_PCI_ADDR_NUM(1));
	txaddr_info->dma = cpu_to_le32(dma);

	*add_info_nr = 1;

	return sizeof(*txaddr_info);
}
EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info);
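
/* The V1 layout splits one DMA-mapped buffer into up to
 * RTW89_TXADDR_INFO_NR_V1 address-info entries of at most
 * TXADDR_INFO_LENTHG_V1_MAX bytes each, setting the LS bit only on the
 * entry that covers the tail of the buffer.
 */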
u32 rtw89_pci_fill_txaddr_info_v1(struct rtw89_dev *rtwdev,
				  void *txaddr_info_addr, u32 total_len,
				  dma_addr_t dma, u8 *add_info_nr)
{
	struct rtw89_pci_tx_addr_info_32_v1 *txaddr_info = txaddr_info_addr;
	u32 remain = total_len;
	u32 len;
	u16 length_option;
	int n;

	for (n = 0; n < RTW89_TXADDR_INFO_NR_V1 && remain; n++) {
		len = remain >= TXADDR_INFO_LENTHG_V1_MAX ?
		      TXADDR_INFO_LENTHG_V1_MAX : remain;
		remain -= len;

		length_option = FIELD_PREP(B_PCIADDR_LEN_V1_MASK, len) |
				FIELD_PREP(B_PCIADDR_HIGH_SEL_V1_MASK, 0) |
				FIELD_PREP(B_PCIADDR_LS_V1_MASK, remain == 0);
		txaddr_info->length_opt = cpu_to_le16(length_option);
		txaddr_info->dma_low_lsb = cpu_to_le16(FIELD_GET(GENMASK(15, 0), dma));
		txaddr_info->dma_low_msb = cpu_to_le16(FIELD_GET(GENMASK(31, 16), dma));

		dma += len;
		txaddr_info++;
	}

	WARN_ONCE(remain, "length overflow remain=%u total_len=%u",
		  remain, total_len);

	*add_info_nr = n;

	return n * sizeof(*txaddr_info);
}
EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info_v1);
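
/* Lay out one TXWD page for a data frame: the WD body (plus the optional
 * WD info when en_wd_info is set), followed by the work pointer info and
 * the address info entries that point at the DMA-mapped skb.
 */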
static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_tx_ring *tx_ring,
				 struct rtw89_pci_tx_wd *txwd,
				 struct rtw89_core_tx_request *tx_req)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
	struct rtw89_txwd_info *txwd_info;
	struct rtw89_pci_tx_wp_info *txwp_info;
	void *txaddr_info_addr;
	struct pci_dev *pdev = rtwpci->pdev;
	struct sk_buff *skb = tx_req->skb;
	struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
	struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
	bool en_wd_info = desc_info->en_wd_info;
	u32 txwd_len;
	u32 txwp_len;
	u32 txaddr_info_len;
	dma_addr_t dma;
	int ret;

	dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma)) {
		rtw89_err(rtwdev, "failed to map skb dma data\n");
		ret = -EBUSY;
		goto err;
	}

	tx_data->dma = dma;
	rcu_assign_pointer(skb_data->wait, NULL);

	txwp_len = sizeof(*txwp_info);
	txwd_len = chip->txwd_body_size;
	txwd_len += en_wd_info ? sizeof(*txwd_info) : 0;

#if defined(__linux__)
	txwp_info = txwd->vaddr + txwd_len;
#elif defined(__FreeBSD__)
	txwp_info = (struct rtw89_pci_tx_wp_info *)((u8 *)txwd->vaddr + txwd_len);
#endif
	txwp_info->seq0 = cpu_to_le16(txwd->seq | RTW89_PCI_TXWP_VALID);
	txwp_info->seq1 = 0;
	txwp_info->seq2 = 0;
	txwp_info->seq3 = 0;

	tx_ring->tx_cnt++;
#if defined(__linux__)
	txaddr_info_addr = txwd->vaddr + txwd_len + txwp_len;
#elif defined(__FreeBSD__)
	txaddr_info_addr = (u8 *)txwd->vaddr + txwd_len + txwp_len;
#endif
	txaddr_info_len =
		rtw89_chip_fill_txaddr_info(rtwdev, txaddr_info_addr, skb->len,
					    dma, &desc_info->addr_info_nr);

	txwd->len = txwd_len + txwp_len + txaddr_info_len;

	rtw89_chip_fill_txdesc(rtwdev, desc_info, txwd->vaddr);

	skb_queue_tail(&txwd->queue, skb);

	return 0;

err:
	return ret;
}

static int rtw89_pci_fwcmd_submit(struct rtw89_dev *rtwdev,
				  struct rtw89_pci_tx_ring *tx_ring,
				  struct rtw89_pci_tx_bd_32 *txbd,
				  struct rtw89_core_tx_request *tx_req)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
	void *txdesc;
	int txdesc_size = chip->h2c_desc_size;
	struct pci_dev *pdev = rtwpci->pdev;
	struct sk_buff *skb = tx_req->skb;
	struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
	dma_addr_t dma;

	txdesc = skb_push(skb, txdesc_size);
	memset(txdesc, 0, txdesc_size);
	rtw89_chip_fill_txdesc_fwcmd(rtwdev, desc_info, txdesc);

	dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma)) {
		rtw89_err(rtwdev, "failed to map fwcmd dma data\n");
		return -EBUSY;
	}

	tx_data->dma = dma;
	txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS);
	txbd->length = cpu_to_le16(skb->len);
	txbd->dma = cpu_to_le32(tx_data->dma);
	skb_queue_tail(&rtwpci->h2c_queue, skb);

	rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1);

	return 0;
}

static int rtw89_pci_txbd_submit(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_tx_ring *tx_ring,
				 struct rtw89_pci_tx_bd_32 *txbd,
				 struct rtw89_core_tx_request *tx_req)
{
	struct rtw89_pci_tx_wd *txwd;
	int ret;

	/* FWCMD queue doesn't have wd pages. Instead, it submits the CMD
	 * buffer with WD BODY only. So here we don't need to check the free
	 * pages of the wd ring.
	 */
	if (tx_ring->txch == RTW89_TXCH_CH12)
		return rtw89_pci_fwcmd_submit(rtwdev, tx_ring, txbd, tx_req);

	txwd = rtw89_pci_dequeue_txwd(tx_ring);
	if (!txwd) {
		rtw89_err(rtwdev, "no available TXWD\n");
		ret = -ENOSPC;
		goto err;
	}

	ret = rtw89_pci_txwd_submit(rtwdev, tx_ring, txwd, tx_req);
	if (ret) {
		rtw89_err(rtwdev, "failed to submit TXWD %d\n", txwd->seq);
		goto err_enqueue_wd;
	}

	list_add_tail(&txwd->list, &tx_ring->busy_pages);

	txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS);
	txbd->length = cpu_to_le16(txwd->len);
	txbd->dma = cpu_to_le32(txwd->paddr);

	rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1);

	return 0;

err_enqueue_wd:
	rtw89_pci_enqueue_txwd(tx_ring, txwd);
err:
	return ret;
}

static int rtw89_pci_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req,
			      u8 txch)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring;
	struct rtw89_pci_tx_bd_32 *txbd;
	u32 n_avail_txbd;
	int ret = 0;

	/* check the tx type and dma channel for fw cmd queue */
	if ((txch == RTW89_TXCH_CH12 ||
	     tx_req->tx_type == RTW89_CORE_TX_TYPE_FWCMD) &&
	    (txch != RTW89_TXCH_CH12 ||
	     tx_req->tx_type != RTW89_CORE_TX_TYPE_FWCMD)) {
		rtw89_err(rtwdev, "only fw cmd uses dma channel 12\n");
		return -EINVAL;
	}

	tx_ring = &rtwpci->tx_rings[txch];
	spin_lock_bh(&rtwpci->trx_lock);

	n_avail_txbd = rtw89_pci_get_avail_txbd_num(tx_ring);
	if (n_avail_txbd == 0) {
		rtw89_err(rtwdev, "no available TXBD\n");
		ret = -ENOSPC;
		goto err_unlock;
	}

	txbd = rtw89_pci_get_next_txbd(tx_ring);
	ret = rtw89_pci_txbd_submit(rtwdev, tx_ring, txbd, tx_req);
	if (ret) {
		rtw89_err(rtwdev, "failed to submit TXBD\n");
		goto err_unlock;
	}

	spin_unlock_bh(&rtwpci->trx_lock);
	return 0;

err_unlock:
	spin_unlock_bh(&rtwpci->trx_lock);
	return ret;
}

static int rtw89_pci_ops_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req)
{
	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
	int ret;

	ret = rtw89_pci_tx_write(rtwdev, tx_req, desc_info->ch_dma);
	if (ret) {
		rtw89_err(rtwdev, "failed to TX Queue %d\n", desc_info->ch_dma);
		return ret;
	}

	return 0;
}

const struct rtw89_pci_bd_ram rtw89_bd_ram_table_dual[RTW89_TXCH_NUM] = {
	[RTW89_TXCH_ACH0] = {.start_idx = 0, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH1] = {.start_idx = 5, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH4] = {.start_idx = 20, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH5] = {.start_idx = 25, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH6] = {.start_idx = 30, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH7] = {.start_idx = 35, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_CH8] = {.start_idx = 40, .max_num = 5, .min_num = 1},
	[RTW89_TXCH_CH9] = {.start_idx = 45, .max_num = 5, .min_num = 1},
	[RTW89_TXCH_CH10] = {.start_idx = 50, .max_num = 5, .min_num = 1},
	[RTW89_TXCH_CH11] = {.start_idx = 55, .max_num = 5, .min_num = 1},
	[RTW89_TXCH_CH12] = {.start_idx = 60, .max_num = 4, .min_num = 1},
};
EXPORT_SYMBOL(rtw89_bd_ram_table_dual);
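
/* Each entry carves out a slice of the shared BD RAM for one DMA channel:
 * start_idx is the first unit, while max_num and min_num appear to be the
 * per-channel quota and guaranteed minimum (see the BDRAM_SIDX/MAX/MIN_MASK
 * writes in rtw89_pci_reset_trx_rings()). The single table below serves
 * chips that expose fewer DMA channels; the missing channels are expected
 * to be masked out via tx_dma_ch_mask.
 */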
const struct rtw89_pci_bd_ram rtw89_bd_ram_table_single[RTW89_TXCH_NUM] = {
	[RTW89_TXCH_ACH0] = {.start_idx = 0, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH1] = {.start_idx = 5, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_CH8] = {.start_idx = 20, .max_num = 4, .min_num = 1},
	[RTW89_TXCH_CH9] = {.start_idx = 24, .max_num = 4, .min_num = 1},
	[RTW89_TXCH_CH12] = {.start_idx = 28, .max_num = 4, .min_num = 1},
};
EXPORT_SYMBOL(rtw89_bd_ram_table_single);

static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	const struct rtw89_pci_bd_ram *bd_ram_table = *info->bd_ram_table;
	struct rtw89_pci_tx_ring *tx_ring;
	struct rtw89_pci_rx_ring *rx_ring;
	struct rtw89_pci_dma_ring *bd_ring;
	const struct rtw89_pci_bd_ram *bd_ram;
	u32 addr_num;
	u32 addr_bdram;
	u32 addr_desa_l;
	u32 val32;
	int i;

	for (i = 0; i < RTW89_TXCH_NUM; i++) {
		if (info->tx_dma_ch_mask & BIT(i))
			continue;

		tx_ring = &rtwpci->tx_rings[i];
		bd_ring = &tx_ring->bd_ring;
		bd_ram = &bd_ram_table[i];
		addr_num = bd_ring->addr.num;
		addr_bdram = bd_ring->addr.bdram;
		addr_desa_l = bd_ring->addr.desa_l;
		bd_ring->wp = 0;
		bd_ring->rp = 0;

		val32 = FIELD_PREP(BDRAM_SIDX_MASK, bd_ram->start_idx) |
			FIELD_PREP(BDRAM_MAX_MASK, bd_ram->max_num) |
			FIELD_PREP(BDRAM_MIN_MASK, bd_ram->min_num);

		rtw89_write16(rtwdev, addr_num, bd_ring->len);
		rtw89_write32(rtwdev, addr_bdram, val32);
		rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
	}

	for (i = 0; i < RTW89_RXCH_NUM; i++) {
		rx_ring = &rtwpci->rx_rings[i];
		bd_ring = &rx_ring->bd_ring;
		addr_num = bd_ring->addr.num;
		addr_desa_l = bd_ring->addr.desa_l;
		bd_ring->wp = 0;
		bd_ring->rp = 0;
		rx_ring->diliver_skb = NULL;
		rx_ring->diliver_desc.ready = false;

		rtw89_write16(rtwdev, addr_num, bd_ring->len);
		rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
	}
}

static void rtw89_pci_release_tx_ring(struct rtw89_dev *rtwdev,
				      struct rtw89_pci_tx_ring *tx_ring)
{
	rtw89_pci_release_busy_txwd(rtwdev, tx_ring);
	rtw89_pci_release_pending_txwd_skb(rtwdev, tx_ring);
}

static void rtw89_pci_ops_reset(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	int txch;

	rtw89_pci_reset_trx_rings(rtwdev);

	spin_lock_bh(&rtwpci->trx_lock);
	for (txch = 0; txch < RTW89_TXCH_NUM; txch++) {
		if (info->tx_dma_ch_mask & BIT(txch))
			continue;
		if (txch == RTW89_TXCH_CH12) {
			rtw89_pci_release_fwcmd(rtwdev, rtwpci,
						skb_queue_len(&rtwpci->h2c_queue), true);
			continue;
		}
		rtw89_pci_release_tx_ring(rtwdev, &rtwpci->tx_rings[txch]);
	}
	spin_unlock_bh(&rtwpci->trx_lock);
}

static void rtw89_pci_enable_intr_lock(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtwpci->running = true;
	rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static void rtw89_pci_disable_intr_lock(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtwpci->running = false;
	rtw89_chip_disable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static int rtw89_pci_ops_start(struct rtw89_dev *rtwdev)
{
	rtw89_core_napi_start(rtwdev);
	rtw89_pci_enable_intr_lock(rtwdev);

	return 0;
}

static void rtw89_pci_ops_stop(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;

	rtw89_pci_disable_intr_lock(rtwdev);
	synchronize_irq(pdev->irq);
	rtw89_core_napi_stop(rtwdev);
}

static void rtw89_pci_ops_pause(struct rtw89_dev *rtwdev, bool pause)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;

	if (pause) {
		rtw89_pci_disable_intr_lock(rtwdev);
		synchronize_irq(pdev->irq);
		if (test_bit(RTW89_FLAG_NAPI_RUNNING, rtwdev->flags))
			napi_synchronize(&rtwdev->napi);
	} else {
		rtw89_pci_enable_intr_lock(rtwdev);
		rtw89_pci_tx_kick_off_pending(rtwdev);
	}
}

static
void rtw89_pci_switch_bd_idx_addr(struct rtw89_dev *rtwdev, bool low_power)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	const struct rtw89_pci_bd_idx_addr *bd_idx_addr = info->bd_idx_addr_low_power;
	const struct rtw89_pci_ch_dma_addr_set *dma_addr_set = info->dma_addr_set;
	struct rtw89_pci_tx_ring *tx_ring;
	struct rtw89_pci_rx_ring *rx_ring;
	int i;

	if (WARN(!bd_idx_addr, "only HCI with low power mode needs this\n"))
		return;

	for (i = 0; i < RTW89_TXCH_NUM; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		tx_ring->bd_ring.addr.idx = low_power ?
					    bd_idx_addr->tx_bd_addrs[i] :
					    dma_addr_set->tx[i].idx;
	}

	for (i = 0; i < RTW89_RXCH_NUM; i++) {
		rx_ring = &rtwpci->rx_rings[i];
		rx_ring->bd_ring.addr.idx = low_power ?
					    bd_idx_addr->rx_bd_addrs[i] :
					    dma_addr_set->rx[i].idx;
	}
}

static void rtw89_pci_ops_switch_mode(struct rtw89_dev *rtwdev, bool low_power)
{
	enum rtw89_pci_intr_mask_cfg cfg;

	WARN(!rtwdev->hci.paused, "HCI isn't paused\n");

	cfg = low_power ? RTW89_PCI_INTR_MASK_LOW_POWER : RTW89_PCI_INTR_MASK_NORMAL;
	rtw89_chip_config_intr_mask(rtwdev, cfg);
	rtw89_pci_switch_bd_idx_addr(rtwdev, low_power);
}
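
/* CMAC register reads can return RTW89_R32_DEAD, apparently while the CMAC
 * clock is gated. rtw89_pci_ops_read32_cmac() below retries up to
 * MAC_REG_POOL_COUNT times, re-enabling the CMAC clocks via R_AX_CK_EN
 * between attempts, before giving up and returning the dead value.
 */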
static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data);

static u32 rtw89_pci_ops_read32_cmac(struct rtw89_dev *rtwdev, u32 addr)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
#if defined(__linux__)
	u32 val = readl(rtwpci->mmap + addr);
#elif defined(__FreeBSD__)
	u32 val;

	val = bus_read_4((struct resource *)rtwpci->mmap, addr);
	rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "R32 (%#010x) -> %#010x\n", addr, val);
#endif
	int count;

	for (count = 0; ; count++) {
		if (val != RTW89_R32_DEAD)
			return val;
		if (count >= MAC_REG_POOL_COUNT) {
			rtw89_warn(rtwdev, "addr %#x = %#x\n", addr, val);
			return RTW89_R32_DEAD;
		}
		rtw89_pci_ops_write32(rtwdev, R_AX_CK_EN, B_AX_CMAC_ALLCKEN);
#if defined(__linux__)
		val = readl(rtwpci->mmap + addr);
#elif defined(__FreeBSD__)
		val = bus_read_4((struct resource *)rtwpci->mmap, addr);
		rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "R32 (%#010x) -> %#010x\n", addr, val);
#endif
	}

	return val;
}

static u8 rtw89_pci_ops_read8(struct rtw89_dev *rtwdev, u32 addr)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	u32 addr32, val32, shift;

	if (!ACCESS_CMAC(addr))
#if defined(__linux__)
		return readb(rtwpci->mmap + addr);
#elif defined(__FreeBSD__)
	{
		u8 val;

		val = bus_read_1((struct resource *)rtwpci->mmap, addr);
		rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "R08 (%#010x) -> %#04x\n", addr, val);
		return (val);
	}
#endif

	addr32 = addr & ~0x3;
	shift = (addr & 0x3) * 8;
	val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32);
	return val32 >> shift;
}

static u16 rtw89_pci_ops_read16(struct rtw89_dev *rtwdev, u32 addr)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	u32 addr32, val32, shift;

	if (!ACCESS_CMAC(addr))
#if defined(__linux__)
		return readw(rtwpci->mmap + addr);
#elif defined(__FreeBSD__)
	{
		u16 val;

		val = bus_read_2((struct resource *)rtwpci->mmap, addr);
		rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "R16 (%#010x) -> %#06x\n", addr, val);
		return (val);
	}
#endif

	addr32 = addr & ~0x3;
	shift = (addr & 0x3) * 8;
	val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32);
	return val32 >> shift;
}

static u32 rtw89_pci_ops_read32(struct rtw89_dev *rtwdev, u32 addr)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;

	if (!ACCESS_CMAC(addr))
#if defined(__linux__)
		return readl(rtwpci->mmap + addr);
#elif defined(__FreeBSD__)
	{
		u32 val;

		val = bus_read_4((struct resource *)rtwpci->mmap, addr);
		rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "R32 (%#010x) -> %#010x\n", addr, val);
		return (val);
	}
#endif

	return rtw89_pci_ops_read32_cmac(rtwdev, addr);
}

static void rtw89_pci_ops_write8(struct rtw89_dev *rtwdev, u32 addr, u8 data)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;

#if defined(__linux__)
	writeb(data, rtwpci->mmap + addr);
#elif defined(__FreeBSD__)
	rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "W08 (%#010x) <- %#04x\n", addr, data);
	return (bus_write_1((struct resource *)rtwpci->mmap, addr, data));
#endif
}

static void rtw89_pci_ops_write16(struct rtw89_dev *rtwdev, u32 addr, u16 data)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;

#if defined(__linux__)
	writew(data, rtwpci->mmap + addr);
#elif defined(__FreeBSD__)
	rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "W16 (%#010x) <- %#06x\n", addr, data);
	return (bus_write_2((struct resource *)rtwpci->mmap, addr, data));
#endif
}

static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;

#if defined(__linux__)
	writel(data, rtwpci->mmap + addr);
#elif defined(__FreeBSD__)
	rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "W32 (%#010x) <- %#010x\n", addr, data);
	return (bus_write_4((struct resource *)rtwpci->mmap, addr, data));
#endif
}

static void rtw89_pci_ctrl_dma_trx(struct rtw89_dev *rtwdev, bool enable)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;

	if (enable)
		rtw89_write32_set(rtwdev, info->init_cfg_reg,
				  info->rxhci_en_bit | info->txhci_en_bit);
	else
		rtw89_write32_clr(rtwdev, info->init_cfg_reg,
				  info->rxhci_en_bit | info->txhci_en_bit);
}

static void rtw89_pci_ctrl_dma_io(struct rtw89_dev *rtwdev, bool enable)
{
	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
	u32 reg, mask;

	if (chip_id == RTL8852C) {
		reg = R_AX_HAXI_INIT_CFG1;
		mask = B_AX_STOP_AXI_MST;
	} else {
		reg = R_AX_PCIE_DMA_STOP1;
		mask = B_AX_STOP_PCIEIO;
	}

	if (enable)
		rtw89_write32_clr(rtwdev, reg, mask);
	else
		rtw89_write32_set(rtwdev, reg, mask);
}

static void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable)
{
	rtw89_pci_ctrl_dma_io(rtwdev, enable);
	rtw89_pci_ctrl_dma_trx(rtwdev, enable);
}

static int rtw89_pci_check_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 rw_bit)
{
	u16 val;

	rtw89_write8(rtwdev, R_AX_MDIO_CFG, addr & 0x1F);

	val = rtw89_read16(rtwdev, R_AX_MDIO_CFG);
	switch (speed) {
	case PCIE_PHY_GEN1:
		if (addr < 0x20)
			val = u16_replace_bits(val, MDIO_PG0_G1, B_AX_MDIO_PHY_ADDR_MASK);
		else
			val = u16_replace_bits(val, MDIO_PG1_G1, B_AX_MDIO_PHY_ADDR_MASK);
		break;
	case PCIE_PHY_GEN2:
		if (addr < 0x20)
			val = u16_replace_bits(val, MDIO_PG0_G2, B_AX_MDIO_PHY_ADDR_MASK);
		else
			val = u16_replace_bits(val, MDIO_PG1_G2, B_AX_MDIO_PHY_ADDR_MASK);
		break;
	default:
		rtw89_err(rtwdev, "[ERR]Error Speed %d!\n", speed);
		return -EINVAL;
	}
	rtw89_write16(rtwdev, R_AX_MDIO_CFG, val);
	rtw89_write16_set(rtwdev, R_AX_MDIO_CFG, rw_bit);

	return read_poll_timeout(rtw89_read16, val, !(val & rw_bit), 10, 2000,
				 false, rtwdev, R_AX_MDIO_CFG);
}

static int
rtw89_read16_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 *val)
{
	int ret;

	ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_RFLAG);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]MDIO R16 0x%X fail ret=%d!\n", addr, ret);
		return ret;
	}
	*val = rtw89_read16(rtwdev, R_AX_MDIO_RDATA);

	return 0;
}
rtw89_write16(rtwdev, R_AX_MDIO_WDATA, data); 1832 ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_WFLAG); 1833 if (ret) { 1834 rtw89_err(rtwdev, "[ERR]MDIO W16 0x%X = %x fail ret=%d!\n", addr, data, ret); 1835 return ret; 1836 } 1837 1838 return 0; 1839 } 1840 1841 static int 1842 rtw89_write16_mdio_mask(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u16 data, u8 speed) 1843 { 1844 u32 shift; 1845 int ret; 1846 u16 val; 1847 1848 ret = rtw89_read16_mdio(rtwdev, addr, speed, &val); 1849 if (ret) 1850 return ret; 1851 1852 shift = __ffs(mask); 1853 val &= ~mask; 1854 val |= ((data << shift) & mask); 1855 1856 ret = rtw89_write16_mdio(rtwdev, addr, val, speed); 1857 if (ret) 1858 return ret; 1859 1860 return 0; 1861 } 1862 1863 static int rtw89_write16_mdio_set(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed) 1864 { 1865 int ret; 1866 u16 val; 1867 1868 ret = rtw89_read16_mdio(rtwdev, addr, speed, &val); 1869 if (ret) 1870 return ret; 1871 ret = rtw89_write16_mdio(rtwdev, addr, val | mask, speed); 1872 if (ret) 1873 return ret; 1874 1875 return 0; 1876 } 1877 1878 static int rtw89_write16_mdio_clr(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed) 1879 { 1880 int ret; 1881 u16 val; 1882 1883 ret = rtw89_read16_mdio(rtwdev, addr, speed, &val); 1884 if (ret) 1885 return ret; 1886 ret = rtw89_write16_mdio(rtwdev, addr, val & ~mask, speed); 1887 if (ret) 1888 return ret; 1889 1890 return 0; 1891 } 1892 1893 static int rtw89_pci_write_config_byte(struct rtw89_dev *rtwdev, u16 addr, 1894 u8 data) 1895 { 1896 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1897 struct pci_dev *pdev = rtwpci->pdev; 1898 1899 return pci_write_config_byte(pdev, addr, data); 1900 } 1901 1902 static int rtw89_pci_read_config_byte(struct rtw89_dev *rtwdev, u16 addr, 1903 u8 *value) 1904 { 1905 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1906 struct pci_dev *pdev = rtwpci->pdev; 1907 1908 return pci_read_config_byte(pdev, addr, value); 1909 } 1910 1911 static int rtw89_pci_config_byte_set(struct rtw89_dev *rtwdev, u16 addr, 1912 u8 bit) 1913 { 1914 u8 value; 1915 int ret; 1916 1917 ret = rtw89_pci_read_config_byte(rtwdev, addr, &value); 1918 if (ret) 1919 return ret; 1920 1921 value |= bit; 1922 ret = rtw89_pci_write_config_byte(rtwdev, addr, value); 1923 1924 return ret; 1925 } 1926 1927 static int rtw89_pci_config_byte_clr(struct rtw89_dev *rtwdev, u16 addr, 1928 u8 bit) 1929 { 1930 u8 value; 1931 int ret; 1932 1933 ret = rtw89_pci_read_config_byte(rtwdev, addr, &value); 1934 if (ret) 1935 return ret; 1936 1937 value &= ~bit; 1938 ret = rtw89_pci_write_config_byte(rtwdev, addr, value); 1939 1940 return ret; 1941 } 1942 1943 static int 1944 __get_target(struct rtw89_dev *rtwdev, u16 *target, enum rtw89_pcie_phy phy_rate) 1945 { 1946 u16 val, tar; 1947 int ret; 1948 1949 /* Enable counter */ 1950 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val); 1951 if (ret) 1952 return ret; 1953 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN, 1954 phy_rate); 1955 if (ret) 1956 return ret; 1957 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val | B_AX_CLK_CALIB_EN, 1958 phy_rate); 1959 if (ret) 1960 return ret; 1961 1962 fsleep(300); 1963 1964 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &tar); 1965 if (ret) 1966 return ret; 1967 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN, 1968 phy_rate); 1969 if (ret) 1970 return ret; 1971 1972 tar = tar & 0x0FFF; 1973 if (tar == 0 || tar == 0x0FFF) { 1974 
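/*
 * The counter value is truncated to 12 bits above; an all-zero or
 * all-one (0x0FFF) reading most likely means the calibration counter
 * never ran or saturated, so both extremes are treated as failure.
 */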
rtw89_err(rtwdev, "[ERR]Get target failed.\n"); 1975 return -EINVAL; 1976 } 1977 1978 *target = tar; 1979 1980 return 0; 1981 } 1982 1983 static int rtw89_pci_autok_x(struct rtw89_dev *rtwdev) 1984 { 1985 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 1986 int ret; 1987 1988 if (chip_id != RTL8852B && chip_id != RTL8851B) 1989 return 0; 1990 1991 ret = rtw89_write16_mdio_mask(rtwdev, RAC_REG_FLD_0, BAC_AUTOK_N_MASK, 1992 PCIE_AUTOK_4, PCIE_PHY_GEN1); 1993 return ret; 1994 } 1995 1996 static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en) 1997 { 1998 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 1999 enum rtw89_pcie_phy phy_rate; 2000 u16 val16, mgn_set, div_set, tar; 2001 u8 val8, bdr_ori; 2002 bool l1_flag = false; 2003 int ret = 0; 2004 2005 if (chip_id != RTL8852B && chip_id != RTL8851B) 2006 return 0; 2007 2008 ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_PHY_RATE, &val8); 2009 if (ret) { 2010 rtw89_err(rtwdev, "[ERR]pci config read %X\n", 2011 RTW89_PCIE_PHY_RATE); 2012 return ret; 2013 } 2014 2015 if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x1) { 2016 phy_rate = PCIE_PHY_GEN1; 2017 } else if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x2) { 2018 phy_rate = PCIE_PHY_GEN2; 2019 } else { 2020 rtw89_err(rtwdev, "[ERR]PCIe PHY rate %#x not support\n", val8); 2021 return -EOPNOTSUPP; 2022 } 2023 /* Disable L1BD */ 2024 ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, &bdr_ori); 2025 if (ret) { 2026 rtw89_err(rtwdev, "[ERR]pci config read %X\n", RTW89_PCIE_L1_CTRL); 2027 return ret; 2028 } 2029 2030 if (bdr_ori & RTW89_PCIE_BIT_L1) { 2031 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, 2032 bdr_ori & ~RTW89_PCIE_BIT_L1); 2033 if (ret) { 2034 rtw89_err(rtwdev, "[ERR]pci config write %X\n", 2035 RTW89_PCIE_L1_CTRL); 2036 return ret; 2037 } 2038 l1_flag = true; 2039 } 2040 2041 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16); 2042 if (ret) { 2043 rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1); 2044 goto end; 2045 } 2046 2047 if (val16 & B_AX_CALIB_EN) { 2048 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, 2049 val16 & ~B_AX_CALIB_EN, phy_rate); 2050 if (ret) { 2051 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 2052 goto end; 2053 } 2054 } 2055 2056 if (!autook_en) 2057 goto end; 2058 /* Set div */ 2059 ret = rtw89_write16_mdio_clr(rtwdev, RAC_CTRL_PPR_V1, B_AX_DIV, phy_rate); 2060 if (ret) { 2061 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 2062 goto end; 2063 } 2064 2065 /* Obtain div and margin */ 2066 ret = __get_target(rtwdev, &tar, phy_rate); 2067 if (ret) { 2068 rtw89_err(rtwdev, "[ERR]1st get target fail %d\n", ret); 2069 goto end; 2070 } 2071 2072 mgn_set = tar * INTF_INTGRA_HOSTREF_V1 / INTF_INTGRA_MINREF_V1 - tar; 2073 2074 if (mgn_set >= 128) { 2075 div_set = 0x0003; 2076 mgn_set = 0x000F; 2077 } else if (mgn_set >= 64) { 2078 div_set = 0x0003; 2079 mgn_set >>= 3; 2080 } else if (mgn_set >= 32) { 2081 div_set = 0x0002; 2082 mgn_set >>= 2; 2083 } else if (mgn_set >= 16) { 2084 div_set = 0x0001; 2085 mgn_set >>= 1; 2086 } else if (mgn_set == 0) { 2087 rtw89_err(rtwdev, "[ERR]cal mgn is 0,tar = %d\n", tar); 2088 goto end; 2089 } else { 2090 div_set = 0x0000; 2091 } 2092 2093 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16); 2094 if (ret) { 2095 rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1); 2096 goto end; 2097 } 2098 2099 val16 |= u16_encode_bits(div_set, B_AX_DIV); 2100 2101 ret = 
rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val16, phy_rate); 2102 if (ret) { 2103 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 2104 goto end; 2105 } 2106 2107 ret = __get_target(rtwdev, &tar, phy_rate); 2108 if (ret) { 2109 rtw89_err(rtwdev, "[ERR]2nd get target fail %d\n", ret); 2110 goto end; 2111 } 2112 2113 rtw89_debug(rtwdev, RTW89_DBG_HCI, "[TRACE]target = 0x%X, div = 0x%X, margin = 0x%X\n", 2114 tar, div_set, mgn_set); 2115 ret = rtw89_write16_mdio(rtwdev, RAC_SET_PPR_V1, 2116 (tar & 0x0FFF) | (mgn_set << 12), phy_rate); 2117 if (ret) { 2118 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_SET_PPR_V1); 2119 goto end; 2120 } 2121 2122 /* Enable function */ 2123 ret = rtw89_write16_mdio_set(rtwdev, RAC_CTRL_PPR_V1, B_AX_CALIB_EN, phy_rate); 2124 if (ret) { 2125 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 2126 goto end; 2127 } 2128 2129 /* CLK delay = 0 */ 2130 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL, 2131 PCIE_CLKDLY_HW_0); 2132 2133 end: 2134 /* Set L1BD to ori */ 2135 if (l1_flag) { 2136 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, 2137 bdr_ori); 2138 if (ret) { 2139 rtw89_err(rtwdev, "[ERR]pci config write %X\n", 2140 RTW89_PCIE_L1_CTRL); 2141 return ret; 2142 } 2143 } 2144 2145 return ret; 2146 } 2147 2148 static int rtw89_pci_deglitch_setting(struct rtw89_dev *rtwdev) 2149 { 2150 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2151 int ret; 2152 2153 if (chip_id == RTL8852A) { 2154 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH, 2155 PCIE_PHY_GEN1); 2156 if (ret) 2157 return ret; 2158 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH, 2159 PCIE_PHY_GEN2); 2160 if (ret) 2161 return ret; 2162 } else if (chip_id == RTL8852C) { 2163 rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA24 * 2, 2164 B_AX_DEGLITCH); 2165 rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA24 * 2, 2166 B_AX_DEGLITCH); 2167 } 2168 2169 return 0; 2170 } 2171 2172 static void rtw89_pci_rxdma_prefth(struct rtw89_dev *rtwdev) 2173 { 2174 if (rtwdev->chip->chip_id != RTL8852A) 2175 return; 2176 2177 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_DIS_RXDMA_PRE); 2178 } 2179 2180 static void rtw89_pci_l1off_pwroff(struct rtw89_dev *rtwdev) 2181 { 2182 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2183 2184 if (chip_id != RTL8852A && chip_id != RTL8852B && chip_id != RTL8851B) 2185 return; 2186 2187 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL, B_AX_L1OFF_PWR_OFF_EN); 2188 } 2189 2190 static u32 rtw89_pci_l2_rxen_lat(struct rtw89_dev *rtwdev) 2191 { 2192 int ret; 2193 2194 if (rtwdev->chip->chip_id != RTL8852A) 2195 return 0; 2196 2197 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN, 2198 PCIE_PHY_GEN1); 2199 if (ret) 2200 return ret; 2201 2202 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN, 2203 PCIE_PHY_GEN2); 2204 if (ret) 2205 return ret; 2206 2207 return 0; 2208 } 2209 2210 static void rtw89_pci_aphy_pwrcut(struct rtw89_dev *rtwdev) 2211 { 2212 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2213 2214 if (chip_id != RTL8852A && chip_id != RTL8852B && chip_id != RTL8851B) 2215 return; 2216 2217 rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_PSUS_OFF_CAPC_EN); 2218 } 2219 2220 static void rtw89_pci_hci_ldo(struct rtw89_dev *rtwdev) 2221 { 2222 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2223 2224 if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) { 2225 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, 2226 
B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 2227 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, 2228 B_AX_PCIE_DIS_WLSUS_AFT_PDN); 2229 } else if (rtwdev->chip->chip_id == RTL8852C) { 2230 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, 2231 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 2232 } 2233 } 2234 2235 static int rtw89_pci_dphy_delay(struct rtw89_dev *rtwdev) 2236 { 2237 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2238 2239 if (chip_id != RTL8852B && chip_id != RTL8851B) 2240 return 0; 2241 2242 return rtw89_write16_mdio_mask(rtwdev, RAC_REG_REV2, BAC_CMU_EN_DLY_MASK, 2243 PCIE_DPHY_DLY_25US, PCIE_PHY_GEN1); 2244 } 2245 2246 static void rtw89_pci_power_wake(struct rtw89_dev *rtwdev, bool pwr_up) 2247 { 2248 if (pwr_up) 2249 rtw89_write32_set(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL); 2250 else 2251 rtw89_write32_clr(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL); 2252 } 2253 2254 static void rtw89_pci_autoload_hang(struct rtw89_dev *rtwdev) 2255 { 2256 if (rtwdev->chip->chip_id != RTL8852C) 2257 return; 2258 2259 rtw89_write32_set(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3); 2260 rtw89_write32_clr(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3); 2261 } 2262 2263 static void rtw89_pci_l12_vmain(struct rtw89_dev *rtwdev) 2264 { 2265 if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV)) 2266 return; 2267 2268 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_FORCE_PWR_NGAT); 2269 } 2270 2271 static void rtw89_pci_gen2_force_ib(struct rtw89_dev *rtwdev) 2272 { 2273 if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV)) 2274 return; 2275 2276 rtw89_write32_set(rtwdev, R_AX_PMC_DBG_CTRL2, 2277 B_AX_SYSON_DIS_PMCR_AX_WRMSK); 2278 rtw89_write32_set(rtwdev, R_AX_HCI_BG_CTRL, B_AX_BG_CLR_ASYNC_M3); 2279 rtw89_write32_clr(rtwdev, R_AX_PMC_DBG_CTRL2, 2280 B_AX_SYSON_DIS_PMCR_AX_WRMSK); 2281 } 2282 2283 static void rtw89_pci_l1_ent_lat(struct rtw89_dev *rtwdev) 2284 { 2285 if (rtwdev->chip->chip_id != RTL8852C) 2286 return; 2287 2288 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_SEL_REQ_ENTR_L1); 2289 } 2290 2291 static void rtw89_pci_wd_exit_l1(struct rtw89_dev *rtwdev) 2292 { 2293 if (rtwdev->chip->chip_id != RTL8852C) 2294 return; 2295 2296 rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_DMAC0_EXIT_L1_EN); 2297 } 2298 2299 static void rtw89_pci_set_sic(struct rtw89_dev *rtwdev) 2300 { 2301 if (rtwdev->chip->chip_id == RTL8852C) 2302 return; 2303 2304 rtw89_write32_clr(rtwdev, R_AX_PCIE_EXP_CTRL, 2305 B_AX_SIC_EN_FORCE_CLKREQ); 2306 } 2307 2308 static void rtw89_pci_set_lbc(struct rtw89_dev *rtwdev) 2309 { 2310 const struct rtw89_pci_info *info = rtwdev->pci_info; 2311 u32 lbc; 2312 2313 if (rtwdev->chip->chip_id == RTL8852C) 2314 return; 2315 2316 lbc = rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG); 2317 if (info->lbc_en == MAC_AX_PCIE_ENABLE) { 2318 lbc = u32_replace_bits(lbc, info->lbc_tmr, B_AX_LBC_TIMER); 2319 lbc |= B_AX_LBC_FLAG | B_AX_LBC_EN; 2320 rtw89_write32(rtwdev, R_AX_LBC_WATCHDOG, lbc); 2321 } else { 2322 lbc &= ~B_AX_LBC_EN; 2323 } 2324 rtw89_write32_set(rtwdev, R_AX_LBC_WATCHDOG, lbc); 2325 } 2326 2327 static void rtw89_pci_set_io_rcy(struct rtw89_dev *rtwdev) 2328 { 2329 const struct rtw89_pci_info *info = rtwdev->pci_info; 2330 u32 val32; 2331 2332 if (rtwdev->chip->chip_id != RTL8852C) 2333 return; 2334 2335 if (info->io_rcy_en == MAC_AX_PCIE_ENABLE) { 2336 val32 = FIELD_PREP(B_AX_PCIE_WDT_TIMER_M1_MASK, 2337 info->io_rcy_tmr); 2338 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M1, val32); 2339 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M2, val32); 
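/*
 * The M1/M2/E0 watchdog timers share the same programmed period; the
 * E0 instance is written below, after which the per-instance WDT
 * recovery modes are switched on.
 */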
2340 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_E0, val32); 2341 2342 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1); 2343 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2); 2344 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0); 2345 } else { 2346 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1); 2347 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2); 2348 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0); 2349 } 2350 2351 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_S1, B_AX_PCIE_IO_RCY_WDT_MODE_S1); 2352 } 2353 2354 static void rtw89_pci_set_dbg(struct rtw89_dev *rtwdev) 2355 { 2356 if (rtwdev->chip->chip_id == RTL8852C) 2357 return; 2358 2359 rtw89_write32_set(rtwdev, R_AX_PCIE_DBG_CTRL, 2360 B_AX_ASFF_FULL_NO_STK | B_AX_EN_STUCK_DBG); 2361 2362 if (rtwdev->chip->chip_id == RTL8852A) 2363 rtw89_write32_set(rtwdev, R_AX_PCIE_EXP_CTRL, 2364 B_AX_EN_CHKDSC_NO_RX_STUCK); 2365 } 2366 2367 static void rtw89_pci_set_keep_reg(struct rtw89_dev *rtwdev) 2368 { 2369 if (rtwdev->chip->chip_id == RTL8852C) 2370 return; 2371 2372 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, 2373 B_AX_PCIE_TXRST_KEEP_REG | B_AX_PCIE_RXRST_KEEP_REG); 2374 } 2375 2376 static void rtw89_pci_clr_idx_all(struct rtw89_dev *rtwdev) 2377 { 2378 const struct rtw89_pci_info *info = rtwdev->pci_info; 2379 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2380 u32 val = B_AX_CLR_ACH0_IDX | B_AX_CLR_ACH1_IDX | B_AX_CLR_ACH2_IDX | 2381 B_AX_CLR_ACH3_IDX | B_AX_CLR_CH8_IDX | B_AX_CLR_CH9_IDX | 2382 B_AX_CLR_CH12_IDX; 2383 u32 rxbd_rwptr_clr = info->rxbd_rwptr_clr_reg; 2384 u32 txbd_rwptr_clr2 = info->txbd_rwptr_clr2_reg; 2385 2386 if (chip_id == RTL8852A || chip_id == RTL8852C) 2387 val |= B_AX_CLR_ACH4_IDX | B_AX_CLR_ACH5_IDX | 2388 B_AX_CLR_ACH6_IDX | B_AX_CLR_ACH7_IDX; 2389 /* clear DMA indexes */ 2390 rtw89_write32_set(rtwdev, R_AX_TXBD_RWPTR_CLR1, val); 2391 if (chip_id == RTL8852A || chip_id == RTL8852C) 2392 rtw89_write32_set(rtwdev, txbd_rwptr_clr2, 2393 B_AX_CLR_CH10_IDX | B_AX_CLR_CH11_IDX); 2394 rtw89_write32_set(rtwdev, rxbd_rwptr_clr, 2395 B_AX_CLR_RXQ_IDX | B_AX_CLR_RPQ_IDX); 2396 } 2397 2398 static int rtw89_poll_txdma_ch_idle_pcie(struct rtw89_dev *rtwdev) 2399 { 2400 const struct rtw89_pci_info *info = rtwdev->pci_info; 2401 u32 ret, check, dma_busy; 2402 u32 dma_busy1 = info->dma_busy1.addr; 2403 u32 dma_busy2 = info->dma_busy2_reg; 2404 2405 check = info->dma_busy1.mask; 2406 2407 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, 2408 10, 100, false, rtwdev, dma_busy1); 2409 if (ret) 2410 return ret; 2411 2412 if (!dma_busy2) 2413 return 0; 2414 2415 check = B_AX_CH10_BUSY | B_AX_CH11_BUSY; 2416 2417 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, 2418 10, 100, false, rtwdev, dma_busy2); 2419 if (ret) 2420 return ret; 2421 2422 return 0; 2423 } 2424 2425 static int rtw89_poll_rxdma_ch_idle_pcie(struct rtw89_dev *rtwdev) 2426 { 2427 const struct rtw89_pci_info *info = rtwdev->pci_info; 2428 u32 ret, check, dma_busy; 2429 u32 dma_busy3 = info->dma_busy3_reg; 2430 2431 check = B_AX_RXQ_BUSY | B_AX_RPQ_BUSY; 2432 2433 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, 2434 10, 100, false, rtwdev, dma_busy3); 2435 if (ret) 2436 return ret; 2437 2438 return 0; 2439 } 2440 2441 static int rtw89_pci_poll_dma_all_idle(struct rtw89_dev *rtwdev) 2442 { 2443 u32 ret; 2444 2445 ret = 
rtw89_poll_txdma_ch_idle_pcie(rtwdev); 2446 if (ret) { 2447 rtw89_err(rtwdev, "txdma ch busy\n"); 2448 return ret; 2449 } 2450 2451 ret = rtw89_poll_rxdma_ch_idle_pcie(rtwdev); 2452 if (ret) { 2453 rtw89_err(rtwdev, "rxdma ch busy\n"); 2454 return ret; 2455 } 2456 2457 return 0; 2458 } 2459 2460 static int rtw89_pci_mode_op(struct rtw89_dev *rtwdev) 2461 { 2462 const struct rtw89_pci_info *info = rtwdev->pci_info; 2463 enum mac_ax_bd_trunc_mode txbd_trunc_mode = info->txbd_trunc_mode; 2464 enum mac_ax_bd_trunc_mode rxbd_trunc_mode = info->rxbd_trunc_mode; 2465 enum mac_ax_rxbd_mode rxbd_mode = info->rxbd_mode; 2466 enum mac_ax_tag_mode tag_mode = info->tag_mode; 2467 enum mac_ax_wd_dma_intvl wd_dma_idle_intvl = info->wd_dma_idle_intvl; 2468 enum mac_ax_wd_dma_intvl wd_dma_act_intvl = info->wd_dma_act_intvl; 2469 enum mac_ax_tx_burst tx_burst = info->tx_burst; 2470 enum mac_ax_rx_burst rx_burst = info->rx_burst; 2471 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2472 u8 cv = rtwdev->hal.cv; 2473 u32 val32; 2474 2475 if (txbd_trunc_mode == MAC_AX_BD_TRUNC) { 2476 if (chip_id == RTL8852A && cv == CHIP_CBV) 2477 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE); 2478 } else if (txbd_trunc_mode == MAC_AX_BD_NORM) { 2479 if (chip_id == RTL8852A || chip_id == RTL8852B) 2480 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE); 2481 } 2482 2483 if (rxbd_trunc_mode == MAC_AX_BD_TRUNC) { 2484 if (chip_id == RTL8852A && cv == CHIP_CBV) 2485 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE); 2486 } else if (rxbd_trunc_mode == MAC_AX_BD_NORM) { 2487 if (chip_id == RTL8852A || chip_id == RTL8852B) 2488 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE); 2489 } 2490 2491 if (rxbd_mode == MAC_AX_RXBD_PKT) { 2492 rtw89_write32_clr(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit); 2493 } else if (rxbd_mode == MAC_AX_RXBD_SEP) { 2494 rtw89_write32_set(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit); 2495 2496 if (chip_id == RTL8852A || chip_id == RTL8852B) 2497 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, 2498 B_AX_PCIE_RX_APPLEN_MASK, 0); 2499 } 2500 2501 if (chip_id == RTL8852A || chip_id == RTL8852B) { 2502 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_TXDMA_MASK, tx_burst); 2503 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_RXDMA_MASK, rx_burst); 2504 } else if (chip_id == RTL8852C) { 2505 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_TXDMA_MASK, tx_burst); 2506 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_RXDMA_MASK, rx_burst); 2507 } 2508 2509 if (chip_id == RTL8852A || chip_id == RTL8852B) { 2510 if (tag_mode == MAC_AX_TAG_SGL) { 2511 val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) & 2512 ~B_AX_LATENCY_CONTROL; 2513 rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 2514 } else if (tag_mode == MAC_AX_TAG_MULTI) { 2515 val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) | 2516 B_AX_LATENCY_CONTROL; 2517 rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 2518 } 2519 } 2520 2521 rtw89_write32_mask(rtwdev, info->exp_ctrl_reg, info->max_tag_num_mask, 2522 info->multi_tag_num); 2523 2524 if (chip_id == RTL8852A || chip_id == RTL8852B) { 2525 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_IDLE, 2526 wd_dma_idle_intvl); 2527 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_ACT, 2528 wd_dma_act_intvl); 2529 } else if (chip_id == RTL8852C) { 2530 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_IDLE_V1_MASK, 2531 wd_dma_idle_intvl); 
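/*
 * On 8852C the WD DMA interval fields live in R_AX_HAXI_INIT_CFG1
 * (V1 layout); the idle-state interval was programmed above and the
 * active-state interval follows.
 */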
2532 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_ACT_V1_MASK, 2533 wd_dma_act_intvl); 2534 } 2535 2536 if (txbd_trunc_mode == MAC_AX_BD_TRUNC) { 2537 rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, 2538 B_AX_HOST_ADDR_INFO_8B_SEL); 2539 rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); 2540 } else if (txbd_trunc_mode == MAC_AX_BD_NORM) { 2541 rtw89_write32_clr(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, 2542 B_AX_HOST_ADDR_INFO_8B_SEL); 2543 rtw89_write32_set(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); 2544 } 2545 2546 return 0; 2547 } 2548 2549 static int rtw89_pci_ops_deinit(struct rtw89_dev *rtwdev) 2550 { 2551 const struct rtw89_pci_info *info = rtwdev->pci_info; 2552 2553 if (rtwdev->chip->chip_id == RTL8852A) { 2554 /* ltr sw trigger */ 2555 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_IDLE); 2556 } 2557 info->ltr_set(rtwdev, false); 2558 rtw89_pci_ctrl_dma_all(rtwdev, false); 2559 rtw89_pci_clr_idx_all(rtwdev); 2560 2561 return 0; 2562 } 2563 2564 static int rtw89_pci_ops_mac_pre_init(struct rtw89_dev *rtwdev) 2565 { 2566 const struct rtw89_pci_info *info = rtwdev->pci_info; 2567 int ret; 2568 2569 rtw89_pci_rxdma_prefth(rtwdev); 2570 rtw89_pci_l1off_pwroff(rtwdev); 2571 rtw89_pci_deglitch_setting(rtwdev); 2572 ret = rtw89_pci_l2_rxen_lat(rtwdev); 2573 if (ret) { 2574 rtw89_err(rtwdev, "[ERR] pcie l2 rxen lat %d\n", ret); 2575 return ret; 2576 } 2577 2578 rtw89_pci_aphy_pwrcut(rtwdev); 2579 rtw89_pci_hci_ldo(rtwdev); 2580 rtw89_pci_dphy_delay(rtwdev); 2581 2582 ret = rtw89_pci_autok_x(rtwdev); 2583 if (ret) { 2584 rtw89_err(rtwdev, "[ERR] pcie autok_x fail %d\n", ret); 2585 return ret; 2586 } 2587 2588 ret = rtw89_pci_auto_refclk_cal(rtwdev, false); 2589 if (ret) { 2590 rtw89_err(rtwdev, "[ERR] pcie autok fail %d\n", ret); 2591 return ret; 2592 } 2593 2594 rtw89_pci_power_wake(rtwdev, true); 2595 rtw89_pci_autoload_hang(rtwdev); 2596 rtw89_pci_l12_vmain(rtwdev); 2597 rtw89_pci_gen2_force_ib(rtwdev); 2598 rtw89_pci_l1_ent_lat(rtwdev); 2599 rtw89_pci_wd_exit_l1(rtwdev); 2600 rtw89_pci_set_sic(rtwdev); 2601 rtw89_pci_set_lbc(rtwdev); 2602 rtw89_pci_set_io_rcy(rtwdev); 2603 rtw89_pci_set_dbg(rtwdev); 2604 rtw89_pci_set_keep_reg(rtwdev); 2605 2606 rtw89_write32_set(rtwdev, info->dma_stop1.addr, B_AX_STOP_WPDMA); 2607 2608 /* stop DMA activities */ 2609 rtw89_pci_ctrl_dma_all(rtwdev, false); 2610 2611 ret = rtw89_pci_poll_dma_all_idle(rtwdev); 2612 if (ret) { 2613 rtw89_err(rtwdev, "[ERR] poll pcie dma all idle\n"); 2614 return ret; 2615 } 2616 2617 rtw89_pci_clr_idx_all(rtwdev); 2618 rtw89_pci_mode_op(rtwdev); 2619 2620 /* fill TRX BD indexes */ 2621 rtw89_pci_ops_reset(rtwdev); 2622 2623 ret = rtw89_pci_rst_bdram_pcie(rtwdev); 2624 if (ret) { 2625 rtw89_warn(rtwdev, "reset bdram busy\n"); 2626 return ret; 2627 } 2628 2629 /* disable all channels except to FW CMD channel to download firmware */ 2630 rtw89_pci_ctrl_txdma_ch_pcie(rtwdev, false); 2631 rtw89_pci_ctrl_txdma_fw_ch_pcie(rtwdev, true); 2632 2633 /* start DMA activities */ 2634 rtw89_pci_ctrl_dma_all(rtwdev, true); 2635 2636 return 0; 2637 } 2638 2639 int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev, bool en) 2640 { 2641 u32 val; 2642 2643 if (!en) 2644 return 0; 2645 2646 val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0); 2647 if (rtw89_pci_ltr_is_err_reg_val(val)) 2648 return -EINVAL; 2649 val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1); 2650 if (rtw89_pci_ltr_is_err_reg_val(val)) 2651 return -EINVAL; 2652 val = rtw89_read32(rtwdev, R_AX_LTR_IDLE_LATENCY); 2653 if 
(rtw89_pci_ltr_is_err_reg_val(val)) 2654 return -EINVAL; 2655 val = rtw89_read32(rtwdev, R_AX_LTR_ACTIVE_LATENCY); 2656 if (rtw89_pci_ltr_is_err_reg_val(val)) 2657 return -EINVAL; 2658 2659 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_HW_EN | B_AX_LTR_EN | 2660 B_AX_LTR_WD_NOEMP_CHK); 2661 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_SPACE_IDX_MASK, 2662 PCI_LTR_SPC_500US); 2663 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK, 2664 PCI_LTR_IDLE_TIMER_3_2MS); 2665 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28); 2666 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28); 2667 rtw89_write32(rtwdev, R_AX_LTR_IDLE_LATENCY, 0x90039003); 2668 rtw89_write32(rtwdev, R_AX_LTR_ACTIVE_LATENCY, 0x880b880b); 2669 2670 return 0; 2671 } 2672 EXPORT_SYMBOL(rtw89_pci_ltr_set); 2673 2674 int rtw89_pci_ltr_set_v1(struct rtw89_dev *rtwdev, bool en) 2675 { 2676 u32 dec_ctrl; 2677 u32 val32; 2678 2679 val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0); 2680 if (rtw89_pci_ltr_is_err_reg_val(val32)) 2681 return -EINVAL; 2682 val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1); 2683 if (rtw89_pci_ltr_is_err_reg_val(val32)) 2684 return -EINVAL; 2685 dec_ctrl = rtw89_read32(rtwdev, R_AX_LTR_DEC_CTRL); 2686 if (rtw89_pci_ltr_is_err_reg_val(dec_ctrl)) 2687 return -EINVAL; 2688 val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX3); 2689 if (rtw89_pci_ltr_is_err_reg_val(val32)) 2690 return -EINVAL; 2691 val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX0); 2692 if (rtw89_pci_ltr_is_err_reg_val(val32)) 2693 return -EINVAL; 2694 2695 if (!en) { 2696 dec_ctrl &= ~(LTR_EN_BITS | B_AX_LTR_IDX_DRV_MASK | B_AX_LTR_HW_DEC_EN); 2697 dec_ctrl |= FIELD_PREP(B_AX_LTR_IDX_DRV_MASK, PCIE_LTR_IDX_IDLE) | 2698 B_AX_LTR_REQ_DRV; 2699 } else { 2700 dec_ctrl |= B_AX_LTR_HW_DEC_EN; 2701 } 2702 2703 dec_ctrl &= ~B_AX_LTR_SPACE_IDX_V1_MASK; 2704 dec_ctrl |= FIELD_PREP(B_AX_LTR_SPACE_IDX_V1_MASK, PCI_LTR_SPC_500US); 2705 2706 if (en) 2707 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, 2708 B_AX_LTR_WD_NOEMP_CHK_V1 | B_AX_LTR_HW_EN); 2709 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK, 2710 PCI_LTR_IDLE_TIMER_3_2MS); 2711 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28); 2712 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28); 2713 rtw89_write32(rtwdev, R_AX_LTR_DEC_CTRL, dec_ctrl); 2714 rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX3, 0x90039003); 2715 rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX0, 0x880b880b); 2716 2717 return 0; 2718 } 2719 EXPORT_SYMBOL(rtw89_pci_ltr_set_v1); 2720 2721 static int rtw89_pci_ops_mac_post_init(struct rtw89_dev *rtwdev) 2722 { 2723 const struct rtw89_pci_info *info = rtwdev->pci_info; 2724 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2725 int ret; 2726 2727 ret = info->ltr_set(rtwdev, true); 2728 if (ret) { 2729 rtw89_err(rtwdev, "pci ltr set fail\n"); 2730 return ret; 2731 } 2732 if (chip_id == RTL8852A) { 2733 /* ltr sw trigger */ 2734 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_ACT); 2735 } 2736 if (chip_id == RTL8852A || chip_id == RTL8852B) { 2737 /* ADDR info 8-byte mode */ 2738 rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, 2739 B_AX_HOST_ADDR_INFO_8B_SEL); 2740 rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); 2741 } 2742 2743 /* enable DMA for all queues */ 2744 rtw89_pci_ctrl_txdma_ch_pcie(rtwdev, true); 2745 2746 /* Release PCI IO */ 2747 rtw89_write32_clr(rtwdev, info->dma_stop1.addr, 2748 B_AX_STOP_WPDMA | 
B_AX_STOP_PCIEIO); 2749 2750 return 0; 2751 } 2752 2753 static int rtw89_pci_claim_device(struct rtw89_dev *rtwdev, 2754 struct pci_dev *pdev) 2755 { 2756 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2757 int ret; 2758 2759 ret = pci_enable_device(pdev); 2760 if (ret) { 2761 rtw89_err(rtwdev, "failed to enable pci device\n"); 2762 return ret; 2763 } 2764 2765 pci_set_master(pdev); 2766 pci_set_drvdata(pdev, rtwdev->hw); 2767 2768 rtwpci->pdev = pdev; 2769 2770 return 0; 2771 } 2772 2773 static void rtw89_pci_declaim_device(struct rtw89_dev *rtwdev, 2774 struct pci_dev *pdev) 2775 { 2776 pci_disable_device(pdev); 2777 } 2778 2779 static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev, 2780 struct pci_dev *pdev) 2781 { 2782 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2783 unsigned long resource_len; 2784 u8 bar_id = 2; 2785 int ret; 2786 2787 ret = pci_request_regions(pdev, KBUILD_MODNAME); 2788 if (ret) { 2789 rtw89_err(rtwdev, "failed to request pci regions\n"); 2790 goto err; 2791 } 2792 2793 ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 2794 if (ret) { 2795 rtw89_err(rtwdev, "failed to set dma mask to 32-bit\n"); 2796 goto err_release_regions; 2797 } 2798 2799 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 2800 if (ret) { 2801 rtw89_err(rtwdev, "failed to set consistent dma mask to 32-bit\n"); 2802 goto err_release_regions; 2803 } 2804 2805 #if defined(__FreeBSD__) 2806 linuxkpi_pcim_want_to_use_bus_functions(pdev); 2807 #endif 2808 resource_len = pci_resource_len(pdev, bar_id); 2809 rtwpci->mmap = pci_iomap(pdev, bar_id, resource_len); 2810 if (!rtwpci->mmap) { 2811 rtw89_err(rtwdev, "failed to map pci io\n"); 2812 ret = -EIO; 2813 goto err_release_regions; 2814 } 2815 2816 return 0; 2817 2818 err_release_regions: 2819 pci_release_regions(pdev); 2820 err: 2821 return ret; 2822 } 2823 2824 static void rtw89_pci_clear_mapping(struct rtw89_dev *rtwdev, 2825 struct pci_dev *pdev) 2826 { 2827 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2828 2829 if (rtwpci->mmap) { 2830 pci_iounmap(pdev, rtwpci->mmap); 2831 pci_release_regions(pdev); 2832 } 2833 } 2834 2835 static void rtw89_pci_free_tx_wd_ring(struct rtw89_dev *rtwdev, 2836 struct pci_dev *pdev, 2837 struct rtw89_pci_tx_ring *tx_ring) 2838 { 2839 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 2840 u8 *head = wd_ring->head; 2841 dma_addr_t dma = wd_ring->dma; 2842 u32 page_size = wd_ring->page_size; 2843 u32 page_num = wd_ring->page_num; 2844 u32 ring_sz = page_size * page_num; 2845 2846 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 2847 wd_ring->head = NULL; 2848 } 2849 2850 static void rtw89_pci_free_tx_ring(struct rtw89_dev *rtwdev, 2851 struct pci_dev *pdev, 2852 struct rtw89_pci_tx_ring *tx_ring) 2853 { 2854 int ring_sz; 2855 u8 *head; 2856 dma_addr_t dma; 2857 2858 head = tx_ring->bd_ring.head; 2859 dma = tx_ring->bd_ring.dma; 2860 ring_sz = tx_ring->bd_ring.desc_size * tx_ring->bd_ring.len; 2861 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 2862 2863 tx_ring->bd_ring.head = NULL; 2864 } 2865 2866 static void rtw89_pci_free_tx_rings(struct rtw89_dev *rtwdev, 2867 struct pci_dev *pdev) 2868 { 2869 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2870 const struct rtw89_pci_info *info = rtwdev->pci_info; 2871 struct rtw89_pci_tx_ring *tx_ring; 2872 int i; 2873 2874 for (i = 0; i < RTW89_TXCH_NUM; i++) { 2875 if (info->tx_dma_ch_mask & BIT(i)) 2876 continue; 2877 tx_ring = &rtwpci->tx_rings[i]; 2878 rtw89_pci_free_tx_wd_ring(rtwdev, 
pdev, tx_ring); 2879 rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring); 2880 } 2881 } 2882 2883 static void rtw89_pci_free_rx_ring(struct rtw89_dev *rtwdev, 2884 struct pci_dev *pdev, 2885 struct rtw89_pci_rx_ring *rx_ring) 2886 { 2887 struct rtw89_pci_rx_info *rx_info; 2888 struct sk_buff *skb; 2889 dma_addr_t dma; 2890 u32 buf_sz; 2891 u8 *head; 2892 int ring_sz = rx_ring->bd_ring.desc_size * rx_ring->bd_ring.len; 2893 int i; 2894 2895 buf_sz = rx_ring->buf_sz; 2896 for (i = 0; i < rx_ring->bd_ring.len; i++) { 2897 skb = rx_ring->buf[i]; 2898 if (!skb) 2899 continue; 2900 2901 rx_info = RTW89_PCI_RX_SKB_CB(skb); 2902 dma = rx_info->dma; 2903 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE); 2904 dev_kfree_skb(skb); 2905 rx_ring->buf[i] = NULL; 2906 } 2907 2908 head = rx_ring->bd_ring.head; 2909 dma = rx_ring->bd_ring.dma; 2910 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 2911 2912 rx_ring->bd_ring.head = NULL; 2913 } 2914 2915 static void rtw89_pci_free_rx_rings(struct rtw89_dev *rtwdev, 2916 struct pci_dev *pdev) 2917 { 2918 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2919 struct rtw89_pci_rx_ring *rx_ring; 2920 int i; 2921 2922 for (i = 0; i < RTW89_RXCH_NUM; i++) { 2923 rx_ring = &rtwpci->rx_rings[i]; 2924 rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring); 2925 } 2926 } 2927 2928 static void rtw89_pci_free_trx_rings(struct rtw89_dev *rtwdev, 2929 struct pci_dev *pdev) 2930 { 2931 rtw89_pci_free_rx_rings(rtwdev, pdev); 2932 rtw89_pci_free_tx_rings(rtwdev, pdev); 2933 } 2934 2935 static int rtw89_pci_init_rx_bd(struct rtw89_dev *rtwdev, struct pci_dev *pdev, 2936 struct rtw89_pci_rx_ring *rx_ring, 2937 struct sk_buff *skb, int buf_sz, u32 idx) 2938 { 2939 struct rtw89_pci_rx_info *rx_info; 2940 struct rtw89_pci_rx_bd_32 *rx_bd; 2941 dma_addr_t dma; 2942 2943 if (!skb) 2944 return -EINVAL; 2945 2946 dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE); 2947 if (dma_mapping_error(&pdev->dev, dma)) 2948 return -EBUSY; 2949 2950 rx_info = RTW89_PCI_RX_SKB_CB(skb); 2951 rx_bd = RTW89_PCI_RX_BD(rx_ring, idx); 2952 2953 memset(rx_bd, 0, sizeof(*rx_bd)); 2954 rx_bd->buf_size = cpu_to_le16(buf_sz); 2955 rx_bd->dma = cpu_to_le32(dma); 2956 rx_info->dma = dma; 2957 2958 return 0; 2959 } 2960 2961 static int rtw89_pci_alloc_tx_wd_ring(struct rtw89_dev *rtwdev, 2962 struct pci_dev *pdev, 2963 struct rtw89_pci_tx_ring *tx_ring, 2964 enum rtw89_tx_channel txch) 2965 { 2966 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 2967 struct rtw89_pci_tx_wd *txwd; 2968 dma_addr_t dma; 2969 dma_addr_t cur_paddr; 2970 u8 *head; 2971 u8 *cur_vaddr; 2972 u32 page_size = RTW89_PCI_TXWD_PAGE_SIZE; 2973 u32 page_num = RTW89_PCI_TXWD_NUM_MAX; 2974 u32 ring_sz = page_size * page_num; 2975 u32 page_offset; 2976 int i; 2977 2978 /* FWCMD queue doesn't use txwd as pages */ 2979 if (txch == RTW89_TXCH_CH12) 2980 return 0; 2981 2982 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 2983 if (!head) 2984 return -ENOMEM; 2985 2986 INIT_LIST_HEAD(&wd_ring->free_pages); 2987 wd_ring->head = head; 2988 wd_ring->dma = dma; 2989 wd_ring->page_size = page_size; 2990 wd_ring->page_num = page_num; 2991 2992 page_offset = 0; 2993 for (i = 0; i < page_num; i++) { 2994 txwd = &wd_ring->pages[i]; 2995 cur_paddr = dma + page_offset; 2996 cur_vaddr = head + page_offset; 2997 2998 skb_queue_head_init(&txwd->queue); 2999 INIT_LIST_HEAD(&txwd->list); 3000 txwd->paddr = cur_paddr; 3001 txwd->vaddr = cur_vaddr; 3002 txwd->len = page_size; 3003 txwd->seq = i; 3004 
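/*
 * cur_paddr/cur_vaddr walk the coherent allocation in page_size
 * steps, so each TXWD page carries a matching bus/CPU address pair
 * before it is placed on the ring's free list.
 */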
rtw89_pci_enqueue_txwd(tx_ring, txwd); 3005 3006 page_offset += page_size; 3007 } 3008 3009 return 0; 3010 } 3011 3012 static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev, 3013 struct pci_dev *pdev, 3014 struct rtw89_pci_tx_ring *tx_ring, 3015 u32 desc_size, u32 len, 3016 enum rtw89_tx_channel txch) 3017 { 3018 const struct rtw89_pci_ch_dma_addr *txch_addr; 3019 int ring_sz = desc_size * len; 3020 u8 *head; 3021 dma_addr_t dma; 3022 int ret; 3023 3024 ret = rtw89_pci_alloc_tx_wd_ring(rtwdev, pdev, tx_ring, txch); 3025 if (ret) { 3026 rtw89_err(rtwdev, "failed to alloc txwd ring of txch %d\n", txch); 3027 goto err; 3028 } 3029 3030 ret = rtw89_pci_get_txch_addrs(rtwdev, txch, &txch_addr); 3031 if (ret) { 3032 rtw89_err(rtwdev, "failed to get address of txch %d", txch); 3033 goto err_free_wd_ring; 3034 } 3035 3036 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 3037 if (!head) { 3038 ret = -ENOMEM; 3039 goto err_free_wd_ring; 3040 } 3041 3042 INIT_LIST_HEAD(&tx_ring->busy_pages); 3043 tx_ring->bd_ring.head = head; 3044 tx_ring->bd_ring.dma = dma; 3045 tx_ring->bd_ring.len = len; 3046 tx_ring->bd_ring.desc_size = desc_size; 3047 tx_ring->bd_ring.addr = *txch_addr; 3048 tx_ring->bd_ring.wp = 0; 3049 tx_ring->bd_ring.rp = 0; 3050 tx_ring->txch = txch; 3051 3052 return 0; 3053 3054 err_free_wd_ring: 3055 rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring); 3056 err: 3057 return ret; 3058 } 3059 3060 static int rtw89_pci_alloc_tx_rings(struct rtw89_dev *rtwdev, 3061 struct pci_dev *pdev) 3062 { 3063 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3064 const struct rtw89_pci_info *info = rtwdev->pci_info; 3065 struct rtw89_pci_tx_ring *tx_ring; 3066 u32 desc_size; 3067 u32 len; 3068 u32 i, tx_allocated; 3069 int ret; 3070 3071 for (i = 0; i < RTW89_TXCH_NUM; i++) { 3072 if (info->tx_dma_ch_mask & BIT(i)) 3073 continue; 3074 tx_ring = &rtwpci->tx_rings[i]; 3075 desc_size = sizeof(struct rtw89_pci_tx_bd_32); 3076 len = RTW89_PCI_TXBD_NUM_MAX; 3077 ret = rtw89_pci_alloc_tx_ring(rtwdev, pdev, tx_ring, 3078 desc_size, len, i); 3079 if (ret) { 3080 #if defined(__linux__) 3081 rtw89_err(rtwdev, "failed to alloc tx ring %d\n", i); 3082 #elif defined(__FreeBSD__) 3083 rtw89_err(rtwdev, "failed to alloc tx ring %d: ret=%d\n", i, ret); 3084 #endif 3085 goto err_free; 3086 } 3087 } 3088 3089 return 0; 3090 3091 err_free: 3092 tx_allocated = i; 3093 for (i = 0; i < tx_allocated; i++) { 3094 tx_ring = &rtwpci->tx_rings[i]; 3095 rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring); 3096 } 3097 3098 return ret; 3099 } 3100 3101 static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev, 3102 struct pci_dev *pdev, 3103 struct rtw89_pci_rx_ring *rx_ring, 3104 u32 desc_size, u32 len, u32 rxch) 3105 { 3106 const struct rtw89_pci_ch_dma_addr *rxch_addr; 3107 struct sk_buff *skb; 3108 u8 *head; 3109 dma_addr_t dma; 3110 int ring_sz = desc_size * len; 3111 int buf_sz = RTW89_PCI_RX_BUF_SIZE; 3112 int i, allocated; 3113 int ret; 3114 3115 ret = rtw89_pci_get_rxch_addrs(rtwdev, rxch, &rxch_addr); 3116 if (ret) { 3117 rtw89_err(rtwdev, "failed to get address of rxch %d", rxch); 3118 return ret; 3119 } 3120 3121 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 3122 if (!head) { 3123 ret = -ENOMEM; 3124 goto err; 3125 } 3126 3127 rx_ring->bd_ring.head = head; 3128 rx_ring->bd_ring.dma = dma; 3129 rx_ring->bd_ring.len = len; 3130 rx_ring->bd_ring.desc_size = desc_size; 3131 rx_ring->bd_ring.addr = *rxch_addr; 3132 rx_ring->bd_ring.wp = 0; 3133 rx_ring->bd_ring.rp = 0; 3134 
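/*
 * The loop below backs every RX buffer descriptor with a freshly
 * allocated skb that rtw89_pci_init_rx_bd() DMA-maps FROM_DEVICE;
 * any failure unwinds the partially filled ring via err_free.
 */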
rx_ring->buf_sz = buf_sz; 3135 rx_ring->diliver_skb = NULL; 3136 rx_ring->diliver_desc.ready = false; 3137 3138 for (i = 0; i < len; i++) { 3139 skb = dev_alloc_skb(buf_sz); 3140 if (!skb) { 3141 ret = -ENOMEM; 3142 goto err_free; 3143 } 3144 3145 memset(skb->data, 0, buf_sz); 3146 rx_ring->buf[i] = skb; 3147 ret = rtw89_pci_init_rx_bd(rtwdev, pdev, rx_ring, skb, 3148 buf_sz, i); 3149 if (ret) { 3150 #if defined(__linux__) 3151 rtw89_err(rtwdev, "failed to init rx buf %d\n", i); 3152 #elif defined(__FreeBSD__) 3153 rtw89_err(rtwdev, "failed to init rx buf %d ret=%d\n", i, ret); 3154 #endif 3155 dev_kfree_skb_any(skb); 3156 rx_ring->buf[i] = NULL; 3157 goto err_free; 3158 } 3159 } 3160 3161 return 0; 3162 3163 err_free: 3164 allocated = i; 3165 for (i = 0; i < allocated; i++) { 3166 skb = rx_ring->buf[i]; 3167 if (!skb) 3168 continue; 3169 dma = *((dma_addr_t *)skb->cb); 3170 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE); 3171 dev_kfree_skb(skb); 3172 rx_ring->buf[i] = NULL; 3173 } 3174 3175 head = rx_ring->bd_ring.head; 3176 dma = rx_ring->bd_ring.dma; 3177 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 3178 3179 rx_ring->bd_ring.head = NULL; 3180 err: 3181 return ret; 3182 } 3183 3184 static int rtw89_pci_alloc_rx_rings(struct rtw89_dev *rtwdev, 3185 struct pci_dev *pdev) 3186 { 3187 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3188 struct rtw89_pci_rx_ring *rx_ring; 3189 u32 desc_size; 3190 u32 len; 3191 int i, rx_allocated; 3192 int ret; 3193 3194 for (i = 0; i < RTW89_RXCH_NUM; i++) { 3195 rx_ring = &rtwpci->rx_rings[i]; 3196 desc_size = sizeof(struct rtw89_pci_rx_bd_32); 3197 len = RTW89_PCI_RXBD_NUM_MAX; 3198 ret = rtw89_pci_alloc_rx_ring(rtwdev, pdev, rx_ring, 3199 desc_size, len, i); 3200 if (ret) { 3201 rtw89_err(rtwdev, "failed to alloc rx ring %d\n", i); 3202 goto err_free; 3203 } 3204 } 3205 3206 return 0; 3207 3208 err_free: 3209 rx_allocated = i; 3210 for (i = 0; i < rx_allocated; i++) { 3211 rx_ring = &rtwpci->rx_rings[i]; 3212 rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring); 3213 } 3214 3215 return ret; 3216 } 3217 3218 static int rtw89_pci_alloc_trx_rings(struct rtw89_dev *rtwdev, 3219 struct pci_dev *pdev) 3220 { 3221 int ret; 3222 3223 ret = rtw89_pci_alloc_tx_rings(rtwdev, pdev); 3224 if (ret) { 3225 rtw89_err(rtwdev, "failed to alloc dma tx rings\n"); 3226 goto err; 3227 } 3228 3229 ret = rtw89_pci_alloc_rx_rings(rtwdev, pdev); 3230 if (ret) { 3231 rtw89_err(rtwdev, "failed to alloc dma rx rings\n"); 3232 goto err_free_tx_rings; 3233 } 3234 3235 return 0; 3236 3237 err_free_tx_rings: 3238 rtw89_pci_free_tx_rings(rtwdev, pdev); 3239 err: 3240 return ret; 3241 } 3242 3243 static void rtw89_pci_h2c_init(struct rtw89_dev *rtwdev, 3244 struct rtw89_pci *rtwpci) 3245 { 3246 skb_queue_head_init(&rtwpci->h2c_queue); 3247 skb_queue_head_init(&rtwpci->h2c_release_queue); 3248 } 3249 3250 static int rtw89_pci_setup_resource(struct rtw89_dev *rtwdev, 3251 struct pci_dev *pdev) 3252 { 3253 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3254 int ret; 3255 3256 ret = rtw89_pci_setup_mapping(rtwdev, pdev); 3257 if (ret) { 3258 rtw89_err(rtwdev, "failed to setup pci mapping\n"); 3259 goto err; 3260 } 3261 3262 ret = rtw89_pci_alloc_trx_rings(rtwdev, pdev); 3263 if (ret) { 3264 rtw89_err(rtwdev, "failed to alloc pci trx rings\n"); 3265 goto err_pci_unmap; 3266 } 3267 3268 rtw89_pci_h2c_init(rtwdev, rtwpci); 3269 3270 spin_lock_init(&rtwpci->irq_lock); 3271 spin_lock_init(&rtwpci->trx_lock); 3272 3273 return 0; 3274 3275 err_pci_unmap: 3276 
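/* Unwind in the reverse order of the setup above. */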
rtw89_pci_clear_mapping(rtwdev, pdev); 3277 err: 3278 return ret; 3279 } 3280 3281 static void rtw89_pci_clear_resource(struct rtw89_dev *rtwdev, 3282 struct pci_dev *pdev) 3283 { 3284 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3285 3286 rtw89_pci_free_trx_rings(rtwdev, pdev); 3287 rtw89_pci_clear_mapping(rtwdev, pdev); 3288 rtw89_pci_release_fwcmd(rtwdev, rtwpci, 3289 skb_queue_len(&rtwpci->h2c_queue), true); 3290 } 3291 3292 void rtw89_pci_config_intr_mask(struct rtw89_dev *rtwdev) 3293 { 3294 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3295 const struct rtw89_chip_info *chip = rtwdev->chip; 3296 u32 hs0isr_ind_int_en = B_AX_HS0ISR_IND_INT_EN; 3297 3298 if (chip->chip_id == RTL8851B) 3299 hs0isr_ind_int_en = B_AX_HS0ISR_IND_INT_EN_WKARND; 3300 3301 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | 0; 3302 3303 if (rtwpci->under_recovery) { 3304 rtwpci->intrs[0] = hs0isr_ind_int_en; 3305 rtwpci->intrs[1] = 0; 3306 } else { 3307 rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN | 3308 B_AX_RXDMA_INT_EN | 3309 B_AX_RXP1DMA_INT_EN | 3310 B_AX_RPQDMA_INT_EN | 3311 B_AX_RXDMA_STUCK_INT_EN | 3312 B_AX_RDU_INT_EN | 3313 B_AX_RPQBD_FULL_INT_EN | 3314 hs0isr_ind_int_en; 3315 3316 rtwpci->intrs[1] = B_AX_HC10ISR_IND_INT_EN; 3317 } 3318 } 3319 EXPORT_SYMBOL(rtw89_pci_config_intr_mask); 3320 3321 static void rtw89_pci_recovery_intr_mask_v1(struct rtw89_dev *rtwdev) 3322 { 3323 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3324 3325 rtwpci->ind_intrs = B_AX_HS0ISR_IND_INT_EN; 3326 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; 3327 rtwpci->intrs[0] = 0; 3328 rtwpci->intrs[1] = 0; 3329 } 3330 3331 static void rtw89_pci_default_intr_mask_v1(struct rtw89_dev *rtwdev) 3332 { 3333 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3334 3335 rtwpci->ind_intrs = B_AX_HCI_AXIDMA_INT_EN | 3336 B_AX_HS1ISR_IND_INT_EN | 3337 B_AX_HS0ISR_IND_INT_EN; 3338 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; 3339 rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN | 3340 B_AX_RXDMA_INT_EN | 3341 B_AX_RXP1DMA_INT_EN | 3342 B_AX_RPQDMA_INT_EN | 3343 B_AX_RXDMA_STUCK_INT_EN | 3344 B_AX_RDU_INT_EN | 3345 B_AX_RPQBD_FULL_INT_EN; 3346 rtwpci->intrs[1] = B_AX_GPIO18_INT_EN; 3347 } 3348 3349 static void rtw89_pci_low_power_intr_mask_v1(struct rtw89_dev *rtwdev) 3350 { 3351 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3352 3353 rtwpci->ind_intrs = B_AX_HS1ISR_IND_INT_EN | 3354 B_AX_HS0ISR_IND_INT_EN; 3355 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; 3356 rtwpci->intrs[0] = 0; 3357 rtwpci->intrs[1] = B_AX_GPIO18_INT_EN; 3358 } 3359 3360 void rtw89_pci_config_intr_mask_v1(struct rtw89_dev *rtwdev) 3361 { 3362 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3363 3364 if (rtwpci->under_recovery) 3365 rtw89_pci_recovery_intr_mask_v1(rtwdev); 3366 else if (rtwpci->low_power) 3367 rtw89_pci_low_power_intr_mask_v1(rtwdev); 3368 else 3369 rtw89_pci_default_intr_mask_v1(rtwdev); 3370 } 3371 EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v1); 3372 3373 static int rtw89_pci_request_irq(struct rtw89_dev *rtwdev, 3374 struct pci_dev *pdev) 3375 { 3376 unsigned long flags = 0; 3377 int ret; 3378 3379 flags |= PCI_IRQ_LEGACY | PCI_IRQ_MSI; 3380 ret = pci_alloc_irq_vectors(pdev, 1, 1, flags); 3381 if (ret < 0) { 3382 rtw89_err(rtwdev, "failed to alloc irq vectors, ret %d\n", ret); 3383 goto err; 3384 } 3385 3386 ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq, 3387 rtw89_pci_interrupt_handler, 
3388 rtw89_pci_interrupt_threadfn, 3389 IRQF_SHARED, KBUILD_MODNAME, rtwdev); 3390 if (ret) { 3391 rtw89_err(rtwdev, "failed to request threaded irq\n"); 3392 goto err_free_vector; 3393 } 3394 3395 rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RESET); 3396 3397 return 0; 3398 3399 err_free_vector: 3400 pci_free_irq_vectors(pdev); 3401 err: 3402 return ret; 3403 } 3404 3405 static void rtw89_pci_free_irq(struct rtw89_dev *rtwdev, 3406 struct pci_dev *pdev) 3407 { 3408 devm_free_irq(rtwdev->dev, pdev->irq, rtwdev); 3409 pci_free_irq_vectors(pdev); 3410 } 3411 3412 static u16 gray_code_to_bin(u16 gray_code, u32 bit_num) 3413 { 3414 u16 bin = 0, gray_bit; 3415 u32 bit_idx; 3416 3417 for (bit_idx = 0; bit_idx < bit_num; bit_idx++) { 3418 gray_bit = (gray_code >> bit_idx) & 0x1; 3419 if (bit_num - bit_idx > 1) 3420 gray_bit ^= (gray_code >> (bit_idx + 1)) & 0x1; 3421 bin |= (gray_bit << bit_idx); 3422 } 3423 3424 return bin; 3425 } 3426 3427 static int rtw89_pci_filter_out(struct rtw89_dev *rtwdev) 3428 { 3429 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3430 struct pci_dev *pdev = rtwpci->pdev; 3431 u16 val16, filter_out_val; 3432 u32 val, phy_offset; 3433 int ret; 3434 3435 if (rtwdev->chip->chip_id != RTL8852C) 3436 return 0; 3437 3438 val = rtw89_read32_mask(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK); 3439 if (val == B_AX_ASPM_CTRL_L1) 3440 return 0; 3441 3442 ret = pci_read_config_dword(pdev, RTW89_PCIE_L1_STS_V1, &val); 3443 if (ret) 3444 return ret; 3445 3446 val = FIELD_GET(RTW89_BCFG_LINK_SPEED_MASK, val); 3447 if (val == RTW89_PCIE_GEN1_SPEED) { 3448 phy_offset = R_RAC_DIRECT_OFFSET_G1; 3449 } else if (val == RTW89_PCIE_GEN2_SPEED) { 3450 phy_offset = R_RAC_DIRECT_OFFSET_G2; 3451 val16 = rtw89_read16(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT); 3452 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT, 3453 val16 | B_PCIE_BIT_PINOUT_DIS); 3454 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT, 3455 val16 & ~B_PCIE_BIT_RD_SEL); 3456 3457 val16 = rtw89_read16_mask(rtwdev, 3458 phy_offset + RAC_ANA1F * RAC_MULT, 3459 FILTER_OUT_EQ_MASK); 3460 val16 = gray_code_to_bin(val16, hweight16(val16)); 3461 filter_out_val = rtw89_read16(rtwdev, phy_offset + RAC_ANA24 * 3462 RAC_MULT); 3463 filter_out_val &= ~REG_FILTER_OUT_MASK; 3464 filter_out_val |= FIELD_PREP(REG_FILTER_OUT_MASK, val16); 3465 3466 rtw89_write16(rtwdev, phy_offset + RAC_ANA24 * RAC_MULT, 3467 filter_out_val); 3468 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0A * RAC_MULT, 3469 B_BAC_EQ_SEL); 3470 rtw89_write16_set(rtwdev, 3471 R_RAC_DIRECT_OFFSET_G1 + RAC_ANA0C * RAC_MULT, 3472 B_PCIE_BIT_PSAVE); 3473 } else { 3474 return -EOPNOTSUPP; 3475 } 3476 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0C * RAC_MULT, 3477 B_PCIE_BIT_PSAVE); 3478 3479 return 0; 3480 } 3481 3482 static void rtw89_pci_clkreq_set(struct rtw89_dev *rtwdev, bool enable) 3483 { 3484 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 3485 int ret; 3486 3487 if (rtw89_pci_disable_clkreq) 3488 return; 3489 3490 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL, 3491 PCIE_CLKDLY_HW_30US); 3492 if (ret) 3493 rtw89_err(rtwdev, "failed to set CLKREQ Delay\n"); 3494 3495 if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) { 3496 if (enable) 3497 ret = rtw89_pci_config_byte_set(rtwdev, 3498 RTW89_PCIE_L1_CTRL, 3499 RTW89_PCIE_BIT_CLK); 3500 else 3501 ret = rtw89_pci_config_byte_clr(rtwdev, 3502 RTW89_PCIE_L1_CTRL, 3503 RTW89_PCIE_BIT_CLK); 3504 if (ret) 3505 rtw89_err(rtwdev, "failed 
to %s CLKREQ_L1, ret=%d", 3506 enable ? "set" : "unset", ret); 3507 } else if (chip_id == RTL8852C) { 3508 rtw89_write32_set(rtwdev, R_AX_PCIE_LAT_CTRL, 3509 B_AX_CLK_REQ_SEL_OPT | B_AX_CLK_REQ_SEL); 3510 if (enable) 3511 rtw89_write32_set(rtwdev, R_AX_L1_CLK_CTRL, 3512 B_AX_CLK_REQ_N); 3513 else 3514 rtw89_write32_clr(rtwdev, R_AX_L1_CLK_CTRL, 3515 B_AX_CLK_REQ_N); 3516 } 3517 } 3518 3519 static void rtw89_pci_aspm_set(struct rtw89_dev *rtwdev, bool enable) 3520 { 3521 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 3522 u8 value = 0; 3523 int ret; 3524 3525 if (rtw89_pci_disable_aspm_l1) 3526 return; 3527 3528 ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, &value); 3529 if (ret) 3530 rtw89_err(rtwdev, "failed to read ASPM Delay\n"); 3531 3532 value &= ~(RTW89_L1DLY_MASK | RTW89_L0DLY_MASK); 3533 value |= FIELD_PREP(RTW89_L1DLY_MASK, PCIE_L1DLY_16US) | 3534 FIELD_PREP(RTW89_L0DLY_MASK, PCIE_L0SDLY_4US); 3535 3536 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, value); 3537 if (ret) 3538 rtw89_err(rtwdev, "failed to write ASPM Delay\n"); 3539 3540 if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) { 3541 if (enable) 3542 ret = rtw89_pci_config_byte_set(rtwdev, 3543 RTW89_PCIE_L1_CTRL, 3544 RTW89_PCIE_BIT_L1); 3545 else 3546 ret = rtw89_pci_config_byte_clr(rtwdev, 3547 RTW89_PCIE_L1_CTRL, 3548 RTW89_PCIE_BIT_L1); 3549 } else if (chip_id == RTL8852C) { 3550 if (enable) 3551 rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1, 3552 B_AX_ASPM_CTRL_L1); 3553 else 3554 rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1, 3555 B_AX_ASPM_CTRL_L1); 3556 } 3557 if (ret) 3558 rtw89_err(rtwdev, "failed to %s ASPM L1, ret=%d", 3559 enable ? "set" : "unset", ret); 3560 } 3561 3562 static void rtw89_pci_recalc_int_mit(struct rtw89_dev *rtwdev) 3563 { 3564 struct rtw89_traffic_stats *stats = &rtwdev->stats; 3565 enum rtw89_tfc_lv tx_tfc_lv = stats->tx_tfc_lv; 3566 enum rtw89_tfc_lv rx_tfc_lv = stats->rx_tfc_lv; 3567 u32 val = 0; 3568 3569 if (!rtwdev->scanning && 3570 (tx_tfc_lv >= RTW89_TFC_HIGH || rx_tfc_lv >= RTW89_TFC_HIGH)) 3571 val = B_AX_RXMIT_RXP2_SEL | B_AX_RXMIT_RXP1_SEL | 3572 FIELD_PREP(B_AX_RXCOUNTER_MATCH_MASK, RTW89_PCI_RXBD_NUM_MAX / 2) | 3573 FIELD_PREP(B_AX_RXTIMER_UNIT_MASK, AX_RXTIMER_UNIT_64US) | 3574 FIELD_PREP(B_AX_RXTIMER_MATCH_MASK, 2048 / 64); 3575 3576 rtw89_write32(rtwdev, R_AX_INT_MIT_RX, val); 3577 } 3578 3579 static void rtw89_pci_link_cfg(struct rtw89_dev *rtwdev) 3580 { 3581 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3582 struct pci_dev *pdev = rtwpci->pdev; 3583 u16 link_ctrl; 3584 int ret; 3585 3586 /* Although the standard PCIe link control register lives in 3587 * configuration space, by Realtek's design the driver must first 3588 * check whether the host supports CLKREQ/ASPM before enabling the 3589 * HW module. 3590 * 3591 * Two associated HW modules implement these features: one accesses 3592 * the PCIe configuration space to follow the host settings, and the 3593 * other performs the actual CLKREQ/ASPM mechanisms; the latter is 3594 * disabled by default, because some hosts do not support it and 3595 * wrong settings (e.g. CLKREQ# not wired bi-directionally) could 3596 * make the device drop off the link if the HW misbehaves. 3597 * 3598 * Hence the driver first checks that the PCIe configuration space 3599 * is synced and enabled, and only then turns on the other module 3600 * that actually drives the mechanism.
3601 */ 3602 ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl); 3603 if (ret) { 3604 rtw89_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret); 3605 return; 3606 } 3607 3608 if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN) 3609 rtw89_pci_clkreq_set(rtwdev, true); 3610 3611 if (link_ctrl & PCI_EXP_LNKCTL_ASPM_L1) 3612 rtw89_pci_aspm_set(rtwdev, true); 3613 } 3614 3615 static void rtw89_pci_l1ss_set(struct rtw89_dev *rtwdev, bool enable) 3616 { 3617 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 3618 int ret; 3619 3620 if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) { 3621 if (enable) 3622 ret = rtw89_pci_config_byte_set(rtwdev, 3623 RTW89_PCIE_TIMER_CTRL, 3624 RTW89_PCIE_BIT_L1SUB); 3625 else 3626 ret = rtw89_pci_config_byte_clr(rtwdev, 3627 RTW89_PCIE_TIMER_CTRL, 3628 RTW89_PCIE_BIT_L1SUB); 3629 if (ret) 3630 rtw89_err(rtwdev, "failed to %s L1SS, ret=%d", 3631 enable ? "set" : "unset", ret); 3632 } else if (chip_id == RTL8852C) { 3633 ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_L1SS_STS_V1, 3634 RTW89_PCIE_BIT_ASPM_L11 | 3635 RTW89_PCIE_BIT_PCI_L11); 3636 if (ret) 3637 rtw89_warn(rtwdev, "failed to unset ASPM L1.1, ret=%d", ret); 3638 if (enable) 3639 rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1, 3640 B_AX_L1SUB_DISABLE); 3641 else 3642 rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1, 3643 B_AX_L1SUB_DISABLE); 3644 } 3645 } 3646 3647 static void rtw89_pci_l1ss_cfg(struct rtw89_dev *rtwdev) 3648 { 3649 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3650 struct pci_dev *pdev = rtwpci->pdev; 3651 u32 l1ss_cap_ptr, l1ss_ctrl; 3652 3653 if (rtw89_pci_disable_l1ss) 3654 return; 3655 3656 l1ss_cap_ptr = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS); 3657 if (!l1ss_cap_ptr) 3658 return; 3659 3660 pci_read_config_dword(pdev, l1ss_cap_ptr + PCI_L1SS_CTL1, &l1ss_ctrl); 3661 3662 if (l1ss_ctrl & PCI_L1SS_CTL1_L1SS_MASK) 3663 rtw89_pci_l1ss_set(rtwdev, true); 3664 } 3665 3666 static int rtw89_pci_poll_io_idle(struct rtw89_dev *rtwdev) 3667 { 3668 int ret = 0; 3669 u32 sts; 3670 u32 busy = B_AX_PCIEIO_BUSY | B_AX_PCIEIO_TX_BUSY | B_AX_PCIEIO_RX_BUSY; 3671 3672 ret = read_poll_timeout_atomic(rtw89_read32, sts, (sts & busy) == 0x0, 3673 10, 1000, false, rtwdev, 3674 R_AX_PCIE_DMA_BUSY1); 3675 if (ret) { 3676 rtw89_err(rtwdev, "pci dmach busy1 0x%X\n", 3677 rtw89_read32(rtwdev, R_AX_PCIE_DMA_BUSY1)); 3678 return -EINVAL; 3679 } 3680 return ret; 3681 } 3682 3683 static int rtw89_pci_lv1rst_stop_dma(struct rtw89_dev *rtwdev) 3684 { 3685 u32 val; 3686 int ret; 3687 3688 if (rtwdev->chip->chip_id == RTL8852C) 3689 return 0; 3690 3691 rtw89_pci_ctrl_dma_all(rtwdev, false); 3692 ret = rtw89_pci_poll_io_idle(rtwdev); 3693 if (ret) { 3694 val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG); 3695 rtw89_debug(rtwdev, RTW89_DBG_HCI, 3696 "[PCIe] poll_io_idle fail, before 0x%08x: 0x%08x\n", 3697 R_AX_DBG_ERR_FLAG, val); 3698 if (val & B_AX_TX_STUCK || val & B_AX_PCIE_TXBD_LEN0) 3699 rtw89_mac_ctrl_hci_dma_tx(rtwdev, false); 3700 if (val & B_AX_RX_STUCK) 3701 rtw89_mac_ctrl_hci_dma_rx(rtwdev, false); 3702 rtw89_mac_ctrl_hci_dma_trx(rtwdev, true); 3703 ret = rtw89_pci_poll_io_idle(rtwdev); 3704 val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG); 3705 rtw89_debug(rtwdev, RTW89_DBG_HCI, 3706 "[PCIe] poll_io_idle fail, after 0x%08x: 0x%08x\n", 3707 R_AX_DBG_ERR_FLAG, val); 3708 } 3709 3710 return ret; 3711 } 3712 3713 3714 3715 static int rtw89_pci_rst_bdram(struct rtw89_dev *rtwdev) 3716 { 3717 int ret = 0; 3718 u32 val32, sts; 3719 3720 val32 = B_AX_RST_BDRAM; 
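/*
 * B_AX_RST_BDRAM is self-clearing: set it, then poll until the
 * hardware drops the bit again, mirroring rtw89_pci_rst_bdram_pcie().
 */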
3721 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 3722 3723 ret = read_poll_timeout_atomic(rtw89_read32, sts, 3724 (sts & B_AX_RST_BDRAM) == 0x0, 1, 100, 3725 true, rtwdev, R_AX_PCIE_INIT_CFG1); 3726 return ret; 3727 } 3728 3729 static int rtw89_pci_lv1rst_start_dma(struct rtw89_dev *rtwdev) 3730 { 3731 u32 ret; 3732 3733 if (rtwdev->chip->chip_id == RTL8852C) 3734 return 0; 3735 3736 rtw89_mac_ctrl_hci_dma_trx(rtwdev, false); 3737 rtw89_mac_ctrl_hci_dma_trx(rtwdev, true); 3738 rtw89_pci_clr_idx_all(rtwdev); 3739 3740 ret = rtw89_pci_rst_bdram(rtwdev); 3741 if (ret) 3742 return ret; 3743 3744 rtw89_pci_ctrl_dma_all(rtwdev, true); 3745 return ret; 3746 } 3747 3748 static int rtw89_pci_ops_mac_lv1_recovery(struct rtw89_dev *rtwdev, 3749 enum rtw89_lv1_rcvy_step step) 3750 { 3751 int ret; 3752 3753 switch (step) { 3754 case RTW89_LV1_RCVY_STEP_1: 3755 ret = rtw89_pci_lv1rst_stop_dma(rtwdev); 3756 if (ret) 3757 rtw89_err(rtwdev, "lv1 rcvy pci stop dma fail\n"); 3758 3759 break; 3760 3761 case RTW89_LV1_RCVY_STEP_2: 3762 ret = rtw89_pci_lv1rst_start_dma(rtwdev); 3763 if (ret) 3764 rtw89_err(rtwdev, "lv1 rcvy pci start dma fail\n"); 3765 break; 3766 3767 default: 3768 return -EINVAL; 3769 } 3770 3771 return ret; 3772 } 3773 3774 static void rtw89_pci_ops_dump_err_status(struct rtw89_dev *rtwdev) 3775 { 3776 rtw89_info(rtwdev, "R_AX_RPQ_RXBD_IDX =0x%08x\n", 3777 rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX)); 3778 rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n", 3779 rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG)); 3780 rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n", 3781 rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG)); 3782 } 3783 3784 static int rtw89_pci_napi_poll(struct napi_struct *napi, int budget) 3785 { 3786 struct rtw89_dev *rtwdev = container_of(napi, struct rtw89_dev, napi); 3787 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3788 unsigned long flags; 3789 int work_done; 3790 3791 rtwdev->napi_budget_countdown = budget; 3792 3793 rtw89_pci_clear_isr0(rtwdev, B_AX_RPQDMA_INT | B_AX_RPQBD_FULL_INT); 3794 work_done = rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown); 3795 if (work_done == budget) 3796 return budget; 3797 3798 rtw89_pci_clear_isr0(rtwdev, B_AX_RXP1DMA_INT | B_AX_RXDMA_INT | B_AX_RDU_INT); 3799 work_done += rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown); 3800 if (work_done < budget && napi_complete_done(napi, work_done)) { 3801 spin_lock_irqsave(&rtwpci->irq_lock, flags); 3802 if (likely(rtwpci->running)) 3803 rtw89_chip_enable_intr(rtwdev, rtwpci); 3804 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 3805 } 3806 3807 return work_done; 3808 } 3809 3810 static int __maybe_unused rtw89_pci_suspend(struct device *dev) 3811 { 3812 struct ieee80211_hw *hw = dev_get_drvdata(dev); 3813 struct rtw89_dev *rtwdev = hw->priv; 3814 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 3815 3816 rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); 3817 rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST); 3818 rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); 3819 if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) { 3820 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, 3821 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 3822 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, 3823 B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG); 3824 } else { 3825 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, 3826 B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN); 3827 } 3828 3829 return 0; 3830 } 3831 3832 static void 

static void rtw89_pci_l2_hci_ldo(struct rtw89_dev *rtwdev)
{
	if (rtwdev->chip->chip_id == RTL8852C)
		return;

	/* Hardware requires this register to be written twice for the
	 * setting to take effect.
	 */
	rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE,
				    RTW89_PCIE_BIT_CFG_RST_MSTATE);
	rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE,
				    RTW89_PCIE_BIT_CFG_RST_MSTATE);
}

static int __maybe_unused rtw89_pci_resume(struct device *dev)
{
	struct ieee80211_hw *hw = dev_get_drvdata(dev);
	struct rtw89_dev *rtwdev = hw->priv;
	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;

	rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
	rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST);
	rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
	if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) {
		rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL,
				  B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
		rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1,
				  B_AX_PCIE_PERST_KEEP_REG |
				  B_AX_PCIE_TRAIN_KEEP_REG);
	} else {
		rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1,
				  B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN);
		rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1,
				  B_AX_SEL_REQ_ENTR_L1);
	}
	rtw89_pci_l2_hci_ldo(rtwdev);
	rtw89_pci_filter_out(rtwdev);
	rtw89_pci_link_cfg(rtwdev);
	rtw89_pci_l1ss_cfg(rtwdev);

	return 0;
}

SIMPLE_DEV_PM_OPS(rtw89_pm_ops, rtw89_pci_suspend, rtw89_pci_resume);
EXPORT_SYMBOL(rtw89_pm_ops);

static const struct rtw89_hci_ops rtw89_pci_ops = {
	.tx_write = rtw89_pci_ops_tx_write,
	.tx_kick_off = rtw89_pci_ops_tx_kick_off,
	.flush_queues = rtw89_pci_ops_flush_queues,
	.reset = rtw89_pci_ops_reset,
	.start = rtw89_pci_ops_start,
	.stop = rtw89_pci_ops_stop,
	.pause = rtw89_pci_ops_pause,
	.switch_mode = rtw89_pci_ops_switch_mode,
	.recalc_int_mit = rtw89_pci_recalc_int_mit,

	.read8 = rtw89_pci_ops_read8,
	.read16 = rtw89_pci_ops_read16,
	.read32 = rtw89_pci_ops_read32,
	.write8 = rtw89_pci_ops_write8,
	.write16 = rtw89_pci_ops_write16,
	.write32 = rtw89_pci_ops_write32,

	.mac_pre_init = rtw89_pci_ops_mac_pre_init,
	.mac_post_init = rtw89_pci_ops_mac_post_init,
	.deinit = rtw89_pci_ops_deinit,

	.check_and_reclaim_tx_resource = rtw89_pci_check_and_reclaim_tx_resource,
	.mac_lv1_rcvy = rtw89_pci_ops_mac_lv1_recovery,
	.dump_err_status = rtw89_pci_ops_dump_err_status,
	.napi_poll = rtw89_pci_napi_poll,

	.recovery_start = rtw89_pci_ops_recovery_start,
	.recovery_complete = rtw89_pci_ops_recovery_complete,

	.ctrl_txdma_ch = rtw89_pci_ctrl_txdma_ch_pcie,
	.ctrl_txdma_fw_ch = rtw89_pci_ctrl_txdma_fw_ch_pcie,
	.ctrl_trxhci = rtw89_pci_ctrl_dma_trx,
	.poll_txdma_ch = rtw89_poll_txdma_ch_idle_pcie,
	.clr_idx_all = rtw89_pci_clr_idx_all,
	.clear = rtw89_pci_clear_resource,
	.disable_intr = rtw89_pci_disable_intr_lock,
	.enable_intr = rtw89_pci_enable_intr_lock,
	.rst_bdram = rtw89_pci_rst_bdram_pcie,
};
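
/*
 * Editorial note: rtw89_pci_probe() below unwinds strictly in reverse
 * order of setup. A sketch of the pairing it relies on (inferred from the
 * goto labels, not separately documented):
 *
 *	rtw89_core_init()          <->  rtw89_core_deinit()
 *	rtw89_pci_claim_device()   <->  rtw89_pci_declaim_device()
 *	rtw89_pci_setup_resource() <->  rtw89_pci_clear_resource()
 *	rtw89_core_napi_init()     <->  rtw89_core_napi_deinit()
 *	rtw89_pci_request_irq()    <->  rtw89_pci_free_irq()
 *
 * Any new setup step added to probe should gain a matching error label.
 */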

int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct rtw89_dev *rtwdev;
	const struct rtw89_driver_info *info;
	const struct rtw89_pci_info *pci_info;
	int ret;

	info = (const struct rtw89_driver_info *)id->driver_data;

	rtwdev = rtw89_alloc_ieee80211_hw(&pdev->dev,
					  sizeof(struct rtw89_pci),
					  info->chip);
	if (!rtwdev) {
		dev_err(&pdev->dev, "failed to allocate hw\n");
		return -ENOMEM;
	}

	pci_info = info->bus.pci;

	rtwdev->pci_info = pci_info;
	rtwdev->hci.ops = &rtw89_pci_ops;
	rtwdev->hci.type = RTW89_HCI_TYPE_PCIE;
	rtwdev->hci.rpwm_addr = pci_info->rpwm_addr;
	rtwdev->hci.cpwm_addr = pci_info->cpwm_addr;

	SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);

	ret = rtw89_core_init(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to initialise core\n");
		goto err_release_hw;
	}

	ret = rtw89_pci_claim_device(rtwdev, pdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to claim pci device\n");
		goto err_core_deinit;
	}

	ret = rtw89_pci_setup_resource(rtwdev, pdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to setup pci resource\n");
		goto err_declaim_pci;
	}

	ret = rtw89_chip_info_setup(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to setup chip information\n");
		goto err_clear_resource;
	}

	rtw89_pci_filter_out(rtwdev);
	rtw89_pci_link_cfg(rtwdev);
	rtw89_pci_l1ss_cfg(rtwdev);

	rtw89_core_napi_init(rtwdev);

	ret = rtw89_pci_request_irq(rtwdev, pdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to request pci irq\n");
		goto err_deinit_napi;
	}

	ret = rtw89_core_register(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to register core\n");
		goto err_free_irq;
	}

	return 0;

err_free_irq:
	rtw89_pci_free_irq(rtwdev, pdev);
err_deinit_napi:
	rtw89_core_napi_deinit(rtwdev);
err_clear_resource:
	rtw89_pci_clear_resource(rtwdev, pdev);
err_declaim_pci:
	rtw89_pci_declaim_device(rtwdev, pdev);
err_core_deinit:
	rtw89_core_deinit(rtwdev);
err_release_hw:
	rtw89_free_ieee80211_hw(rtwdev);

	return ret;
}
EXPORT_SYMBOL(rtw89_pci_probe);

void rtw89_pci_remove(struct pci_dev *pdev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
	struct rtw89_dev *rtwdev;

	rtwdev = hw->priv;

	rtw89_pci_free_irq(rtwdev, pdev);
	rtw89_core_napi_deinit(rtwdev);
	rtw89_core_unregister(rtwdev);
	rtw89_pci_clear_resource(rtwdev, pdev);
	rtw89_pci_declaim_device(rtwdev, pdev);
	rtw89_core_deinit(rtwdev);
	rtw89_free_ieee80211_hw(rtwdev);
}
EXPORT_SYMBOL(rtw89_pci_remove);

MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek 802.11ax wireless PCI driver");
MODULE_LICENSE("Dual BSD/GPL");
#if defined(__FreeBSD__)
MODULE_VERSION(rtw89_pci, 1);
MODULE_DEPEND(rtw89_pci, linuxkpi, 1, 1, 1);
MODULE_DEPEND(rtw89_pci, linuxkpi_wlan, 1, 1, 1);
#ifdef CONFIG_RTW89_DEBUGFS
MODULE_DEPEND(rtw89_pci, lindebugfs, 1, 1, 1);
#endif
#endif
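
/*
 * Editorial sketch: this file exports rtw89_pci_probe/rtw89_pci_remove and
 * rtw89_pm_ops but registers no pci_driver itself; a chip-specific module
 * is expected to supply the PCI ID table and glue, roughly like this
 * (names are illustrative, modeled on the 8852AE module, not taken from
 * this file):
 *
 *	static struct pci_driver rtw89_8852ae_driver = {
 *		.name = "rtw89_8852ae",
 *		.id_table = rtw89_8852ae_id_table,
 *		.probe = rtw89_pci_probe,
 *		.remove = rtw89_pci_remove,
 *		.driver.pm = &rtw89_pm_ops,
 *	};
 *	module_pci_driver(rtw89_8852ae_driver);
 */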