1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* Copyright(c) 2020 Realtek Corporation 3 */ 4 5 #if defined(__FreeBSD__) 6 #define LINUXKPI_PARAM_PREFIX rtw89_pci_ 7 #endif 8 9 #include <linux/pci.h> 10 #if defined(__FreeBSD__) 11 #include <sys/rman.h> 12 #endif 13 14 #include "mac.h" 15 #include "pci.h" 16 #include "reg.h" 17 #include "ser.h" 18 19 static bool rtw89_pci_disable_clkreq; 20 static bool rtw89_pci_disable_aspm_l1; 21 static bool rtw89_pci_disable_l1ss; 22 module_param_named(disable_clkreq, rtw89_pci_disable_clkreq, bool, 0644); 23 module_param_named(disable_aspm_l1, rtw89_pci_disable_aspm_l1, bool, 0644); 24 module_param_named(disable_aspm_l1ss, rtw89_pci_disable_l1ss, bool, 0644); 25 MODULE_PARM_DESC(disable_clkreq, "Set Y to disable PCI clkreq support"); 26 MODULE_PARM_DESC(disable_aspm_l1, "Set Y to disable PCI ASPM L1 support"); 27 MODULE_PARM_DESC(disable_aspm_l1ss, "Set Y to disable PCI L1SS support"); 28 29 static int rtw89_pci_get_phy_offset_by_link_speed(struct rtw89_dev *rtwdev, 30 u32 *phy_offset) 31 { 32 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 33 struct pci_dev *pdev = rtwpci->pdev; 34 u32 val; 35 int ret; 36 37 ret = pci_read_config_dword(pdev, RTW89_PCIE_L1_STS_V1, &val); 38 if (ret) 39 return ret; 40 41 val = u32_get_bits(val, RTW89_BCFG_LINK_SPEED_MASK); 42 if (val == RTW89_PCIE_GEN1_SPEED) { 43 *phy_offset = R_RAC_DIRECT_OFFSET_G1; 44 } else if (val == RTW89_PCIE_GEN2_SPEED) { 45 *phy_offset = R_RAC_DIRECT_OFFSET_G2; 46 } else { 47 rtw89_warn(rtwdev, "Unknown PCI link speed %d\n", val); 48 return -EFAULT; 49 } 50 51 return 0; 52 } 53 54 static int rtw89_pci_rst_bdram_ax(struct rtw89_dev *rtwdev) 55 { 56 u32 val; 57 int ret; 58 59 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RST_BDRAM); 60 61 ret = read_poll_timeout_atomic(rtw89_read32, val, !(val & B_AX_RST_BDRAM), 62 1, RTW89_PCI_POLL_BDRAM_RST_CNT, false, 63 rtwdev, R_AX_PCIE_INIT_CFG1); 64 65 return ret; 66 } 67 68 static u32 rtw89_pci_dma_recalc(struct rtw89_dev *rtwdev, 69 struct rtw89_pci_dma_ring *bd_ring, 70 u32 cur_idx, bool tx) 71 { 72 const struct rtw89_pci_info *info = rtwdev->pci_info; 73 u32 cnt, cur_rp, wp, rp, len; 74 75 rp = bd_ring->rp; 76 wp = bd_ring->wp; 77 len = bd_ring->len; 78 79 cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx); 80 if (tx) { 81 cnt = cur_rp >= rp ? cur_rp - rp : len - (rp - cur_rp); 82 } else { 83 if (info->rx_ring_eq_is_full) 84 wp += 1; 85 86 cnt = cur_rp >= wp ? cur_rp - wp : len - (wp - cur_rp); 87 } 88 89 bd_ring->rp = cur_rp; 90 91 return cnt; 92 } 93 94 static u32 rtw89_pci_txbd_recalc(struct rtw89_dev *rtwdev, 95 struct rtw89_pci_tx_ring *tx_ring) 96 { 97 struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring; 98 u32 addr_idx = bd_ring->addr.idx; 99 u32 cnt, idx; 100 101 idx = rtw89_read32(rtwdev, addr_idx); 102 cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, true); 103 104 return cnt; 105 } 106 107 static void rtw89_pci_release_fwcmd(struct rtw89_dev *rtwdev, 108 struct rtw89_pci *rtwpci, 109 u32 cnt, bool release_all) 110 { 111 struct rtw89_pci_tx_data *tx_data; 112 struct sk_buff *skb; 113 u32 qlen; 114 115 while (cnt--) { 116 skb = skb_dequeue(&rtwpci->h2c_queue); 117 if (!skb) { 118 rtw89_err(rtwdev, "failed to pre-release fwcmd\n"); 119 return; 120 } 121 skb_queue_tail(&rtwpci->h2c_release_queue, skb); 122 } 123 124 qlen = skb_queue_len(&rtwpci->h2c_release_queue); 125 if (!release_all) 126 qlen = qlen > RTW89_PCI_MULTITAG ? qlen - RTW89_PCI_MULTITAG : 0; 127 128 while (qlen--) { 129 skb = skb_dequeue(&rtwpci->h2c_release_queue); 130 if (!skb) { 131 rtw89_err(rtwdev, "failed to release fwcmd\n"); 132 return; 133 } 134 tx_data = RTW89_PCI_TX_SKB_CB(skb); 135 dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len, 136 DMA_TO_DEVICE); 137 dev_kfree_skb_any(skb); 138 } 139 } 140 141 static void rtw89_pci_reclaim_tx_fwcmd(struct rtw89_dev *rtwdev, 142 struct rtw89_pci *rtwpci) 143 { 144 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12]; 145 u32 cnt; 146 147 cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring); 148 if (!cnt) 149 return; 150 rtw89_pci_release_fwcmd(rtwdev, rtwpci, cnt, false); 151 } 152 153 static u32 rtw89_pci_rxbd_recalc(struct rtw89_dev *rtwdev, 154 struct rtw89_pci_rx_ring *rx_ring) 155 { 156 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; 157 u32 addr_idx = bd_ring->addr.idx; 158 u32 cnt, idx; 159 160 idx = rtw89_read32(rtwdev, addr_idx); 161 cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, false); 162 163 return cnt; 164 } 165 166 static void rtw89_pci_sync_skb_for_cpu(struct rtw89_dev *rtwdev, 167 struct sk_buff *skb) 168 { 169 struct rtw89_pci_rx_info *rx_info; 170 dma_addr_t dma; 171 172 rx_info = RTW89_PCI_RX_SKB_CB(skb); 173 dma = rx_info->dma; 174 dma_sync_single_for_cpu(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE, 175 DMA_FROM_DEVICE); 176 } 177 178 static void rtw89_pci_sync_skb_for_device(struct rtw89_dev *rtwdev, 179 struct sk_buff *skb) 180 { 181 struct rtw89_pci_rx_info *rx_info; 182 dma_addr_t dma; 183 184 rx_info = RTW89_PCI_RX_SKB_CB(skb); 185 dma = rx_info->dma; 186 dma_sync_single_for_device(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE, 187 DMA_FROM_DEVICE); 188 } 189 190 static void rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev, 191 struct sk_buff *skb) 192 { 193 struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb); 194 struct rtw89_pci_rxbd_info *rxbd_info; 195 __le32 info; 196 197 rxbd_info = (struct rtw89_pci_rxbd_info *)skb->data; 198 info = rxbd_info->dword; 199 200 rx_info->fs = le32_get_bits(info, RTW89_PCI_RXBD_FS); 201 rx_info->ls = le32_get_bits(info, RTW89_PCI_RXBD_LS); 202 rx_info->len = le32_get_bits(info, RTW89_PCI_RXBD_WRITE_SIZE); 203 rx_info->tag = le32_get_bits(info, RTW89_PCI_RXBD_TAG); 204 } 205 206 static int rtw89_pci_validate_rx_tag(struct rtw89_dev *rtwdev, 207 struct rtw89_pci_rx_ring *rx_ring, 208 struct sk_buff *skb) 209 { 210 struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb); 211 const struct rtw89_pci_info *info = rtwdev->pci_info; 212 u32 target_rx_tag; 213 214 if (!info->check_rx_tag) 215 return 0; 216 217 /* valid range is 1 ~ 0x1FFF */ 218 if (rx_ring->target_rx_tag == 0) 219 target_rx_tag = 1; 220 else 221 target_rx_tag = rx_ring->target_rx_tag; 222 223 if (rx_info->tag != target_rx_tag) { 224 rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "mismatch RX tag 0x%x 0x%x\n", 225 rx_info->tag, target_rx_tag); 226 return -EAGAIN; 227 } 228 229 return 0; 230 } 231 232 static 233 int rtw89_pci_sync_skb_for_device_and_validate_rx_info(struct rtw89_dev *rtwdev, 234 struct rtw89_pci_rx_ring *rx_ring, 235 struct sk_buff *skb) 236 { 237 struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb); 238 int rx_tag_retry = 1000; 239 int ret; 240 241 do { 242 rtw89_pci_sync_skb_for_cpu(rtwdev, skb); 243 rtw89_pci_rxbd_info_update(rtwdev, skb); 244 245 ret = rtw89_pci_validate_rx_tag(rtwdev, rx_ring, skb); 246 if (ret != -EAGAIN) 247 break; 248 } while (rx_tag_retry--); 249 250 /* update target rx_tag for next RX */ 251 rx_ring->target_rx_tag = rx_info->tag + 1; 252 253 return ret; 254 } 255 256 static void rtw89_pci_ctrl_txdma_ch_ax(struct rtw89_dev *rtwdev, bool enable) 257 { 258 const struct rtw89_pci_info *info = rtwdev->pci_info; 259 const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1; 260 const struct rtw89_reg_def *dma_stop2 = &info->dma_stop2; 261 262 if (enable) { 263 rtw89_write32_clr(rtwdev, dma_stop1->addr, dma_stop1->mask); 264 if (dma_stop2->addr) 265 rtw89_write32_clr(rtwdev, dma_stop2->addr, dma_stop2->mask); 266 } else { 267 rtw89_write32_set(rtwdev, dma_stop1->addr, dma_stop1->mask); 268 if (dma_stop2->addr) 269 rtw89_write32_set(rtwdev, dma_stop2->addr, dma_stop2->mask); 270 } 271 } 272 273 static void rtw89_pci_ctrl_txdma_fw_ch_ax(struct rtw89_dev *rtwdev, bool enable) 274 { 275 const struct rtw89_pci_info *info = rtwdev->pci_info; 276 const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1; 277 278 if (enable) 279 rtw89_write32_clr(rtwdev, dma_stop1->addr, B_AX_STOP_CH12); 280 else 281 rtw89_write32_set(rtwdev, dma_stop1->addr, B_AX_STOP_CH12); 282 } 283 284 static bool 285 rtw89_skb_put_rx_data(struct rtw89_dev *rtwdev, bool fs, bool ls, 286 struct sk_buff *new, 287 const struct sk_buff *skb, u32 offset, 288 const struct rtw89_pci_rx_info *rx_info, 289 const struct rtw89_rx_desc_info *desc_info) 290 { 291 u32 copy_len = rx_info->len - offset; 292 293 if (unlikely(skb_tailroom(new) < copy_len)) { 294 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 295 "invalid rx data length bd_len=%d desc_len=%d offset=%d (fs=%d ls=%d)\n", 296 rx_info->len, desc_info->pkt_size, offset, fs, ls); 297 rtw89_hex_dump(rtwdev, RTW89_DBG_TXRX, "rx_data: ", 298 skb->data, rx_info->len); 299 /* length of a single segment skb is desc_info->pkt_size */ 300 if (fs && ls) { 301 copy_len = desc_info->pkt_size; 302 } else { 303 rtw89_info(rtwdev, "drop rx data due to invalid length\n"); 304 return false; 305 } 306 } 307 308 skb_put_data(new, skb->data + offset, copy_len); 309 310 return true; 311 } 312 313 static u32 rtw89_pci_get_rx_skb_idx(struct rtw89_dev *rtwdev, 314 struct rtw89_pci_dma_ring *bd_ring) 315 { 316 const struct rtw89_pci_info *info = rtwdev->pci_info; 317 u32 wp = bd_ring->wp; 318 319 if (!info->rx_ring_eq_is_full) 320 return wp; 321 322 if (++wp >= bd_ring->len) 323 wp = 0; 324 325 return wp; 326 } 327 328 static u32 rtw89_pci_rxbd_deliver_skbs(struct rtw89_dev *rtwdev, 329 struct rtw89_pci_rx_ring *rx_ring) 330 { 331 struct rtw89_rx_desc_info *desc_info = &rx_ring->diliver_desc; 332 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; 333 const struct rtw89_pci_info *info = rtwdev->pci_info; 334 struct sk_buff *new = rx_ring->diliver_skb; 335 struct rtw89_pci_rx_info *rx_info; 336 struct sk_buff *skb; 337 u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info); 338 u32 skb_idx; 339 u32 offset; 340 u32 cnt = 1; 341 bool fs, ls; 342 int ret; 343 344 skb_idx = rtw89_pci_get_rx_skb_idx(rtwdev, bd_ring); 345 skb = rx_ring->buf[skb_idx]; 346 347 ret = rtw89_pci_sync_skb_for_device_and_validate_rx_info(rtwdev, rx_ring, skb); 348 if (ret) { 349 rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n", 350 bd_ring->wp, ret); 351 goto err_sync_device; 352 } 353 354 rx_info = RTW89_PCI_RX_SKB_CB(skb); 355 fs = info->no_rxbd_fs ? !new : rx_info->fs; 356 ls = rx_info->ls; 357 358 if (unlikely(!fs || !ls)) 359 rtw89_debug(rtwdev, RTW89_DBG_UNEXP, 360 "unexpected fs/ls=%d/%d tag=%u len=%u new->len=%u\n", 361 fs, ls, rx_info->tag, rx_info->len, new ? new->len : 0); 362 363 if (fs) { 364 if (new) { 365 rtw89_debug(rtwdev, RTW89_DBG_UNEXP, 366 "skb should not be ready before first segment start\n"); 367 goto err_sync_device; 368 } 369 if (desc_info->ready) { 370 rtw89_warn(rtwdev, "desc info should not be ready before first segment start\n"); 371 goto err_sync_device; 372 } 373 374 rtw89_chip_query_rxdesc(rtwdev, desc_info, skb->data, rxinfo_size); 375 376 new = rtw89_alloc_skb_for_rx(rtwdev, desc_info->pkt_size); 377 if (!new) 378 goto err_sync_device; 379 380 rx_ring->diliver_skb = new; 381 382 /* first segment has RX desc */ 383 offset = desc_info->offset + desc_info->rxd_len; 384 } else { 385 offset = sizeof(struct rtw89_pci_rxbd_info); 386 if (!new) { 387 rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "no last skb\n"); 388 goto err_sync_device; 389 } 390 } 391 if (!rtw89_skb_put_rx_data(rtwdev, fs, ls, new, skb, offset, rx_info, desc_info)) 392 goto err_sync_device; 393 rtw89_pci_sync_skb_for_device(rtwdev, skb); 394 rtw89_pci_rxbd_increase(rx_ring, 1); 395 396 if (!desc_info->ready) { 397 rtw89_warn(rtwdev, "no rx desc information\n"); 398 goto err_free_resource; 399 } 400 if (ls) { 401 rtw89_core_rx(rtwdev, desc_info, new); 402 rx_ring->diliver_skb = NULL; 403 desc_info->ready = false; 404 } 405 406 return cnt; 407 408 err_sync_device: 409 rtw89_pci_sync_skb_for_device(rtwdev, skb); 410 rtw89_pci_rxbd_increase(rx_ring, 1); 411 err_free_resource: 412 if (new) 413 dev_kfree_skb_any(new); 414 rx_ring->diliver_skb = NULL; 415 desc_info->ready = false; 416 417 return cnt; 418 } 419 420 static void rtw89_pci_rxbd_deliver(struct rtw89_dev *rtwdev, 421 struct rtw89_pci_rx_ring *rx_ring, 422 u32 cnt) 423 { 424 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; 425 u32 rx_cnt; 426 427 while (cnt && rtwdev->napi_budget_countdown > 0) { 428 rx_cnt = rtw89_pci_rxbd_deliver_skbs(rtwdev, rx_ring); 429 if (!rx_cnt) { 430 rtw89_err(rtwdev, "failed to deliver RXBD skb\n"); 431 432 /* skip the rest RXBD bufs */ 433 rtw89_pci_rxbd_increase(rx_ring, cnt); 434 break; 435 } 436 437 cnt -= rx_cnt; 438 } 439 440 rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp); 441 } 442 443 static int rtw89_pci_poll_rxq_dma(struct rtw89_dev *rtwdev, 444 struct rtw89_pci *rtwpci, int budget) 445 { 446 struct rtw89_pci_rx_ring *rx_ring; 447 int countdown = rtwdev->napi_budget_countdown; 448 u32 cnt; 449 450 rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RXQ]; 451 452 cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring); 453 if (!cnt) 454 return 0; 455 456 cnt = min_t(u32, budget, cnt); 457 458 rtw89_pci_rxbd_deliver(rtwdev, rx_ring, cnt); 459 460 /* In case of flushing pending SKBs, the countdown may exceed. */ 461 if (rtwdev->napi_budget_countdown <= 0) 462 return budget; 463 464 return budget - countdown; 465 } 466 467 static void rtw89_pci_tx_status(struct rtw89_dev *rtwdev, 468 struct rtw89_pci_tx_ring *tx_ring, 469 struct sk_buff *skb, u8 tx_status) 470 { 471 struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb); 472 struct ieee80211_tx_info *info; 473 474 rtw89_core_tx_wait_complete(rtwdev, skb_data, tx_status == RTW89_TX_DONE); 475 476 info = IEEE80211_SKB_CB(skb); 477 ieee80211_tx_info_clear_status(info); 478 479 if (info->flags & IEEE80211_TX_CTL_NO_ACK) 480 info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; 481 if (tx_status == RTW89_TX_DONE) { 482 info->flags |= IEEE80211_TX_STAT_ACK; 483 tx_ring->tx_acked++; 484 } else { 485 if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) 486 rtw89_debug(rtwdev, RTW89_DBG_FW, 487 "failed to TX of status %x\n", tx_status); 488 switch (tx_status) { 489 case RTW89_TX_RETRY_LIMIT: 490 tx_ring->tx_retry_lmt++; 491 break; 492 case RTW89_TX_LIFE_TIME: 493 tx_ring->tx_life_time++; 494 break; 495 case RTW89_TX_MACID_DROP: 496 tx_ring->tx_mac_id_drop++; 497 break; 498 default: 499 rtw89_warn(rtwdev, "invalid TX status %x\n", tx_status); 500 break; 501 } 502 } 503 504 ieee80211_tx_status_ni(rtwdev->hw, skb); 505 } 506 507 static void rtw89_pci_reclaim_txbd(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring) 508 { 509 struct rtw89_pci_tx_wd *txwd; 510 u32 cnt; 511 512 cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring); 513 while (cnt--) { 514 txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list); 515 if (!txwd) { 516 rtw89_warn(rtwdev, "No busy txwd pages available\n"); 517 break; 518 } 519 520 list_del_init(&txwd->list); 521 522 /* this skb has been freed by RPP */ 523 if (skb_queue_len(&txwd->queue) == 0) 524 rtw89_pci_enqueue_txwd(tx_ring, txwd); 525 } 526 } 527 528 static void rtw89_pci_release_busy_txwd(struct rtw89_dev *rtwdev, 529 struct rtw89_pci_tx_ring *tx_ring) 530 { 531 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 532 struct rtw89_pci_tx_wd *txwd; 533 int i; 534 535 for (i = 0; i < wd_ring->page_num; i++) { 536 txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list); 537 if (!txwd) 538 break; 539 540 list_del_init(&txwd->list); 541 } 542 } 543 544 static void rtw89_pci_release_txwd_skb(struct rtw89_dev *rtwdev, 545 struct rtw89_pci_tx_ring *tx_ring, 546 struct rtw89_pci_tx_wd *txwd, u16 seq, 547 u8 tx_status) 548 { 549 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 550 struct rtw89_pci_tx_data *tx_data; 551 struct sk_buff *skb, *tmp; 552 u8 txch = tx_ring->txch; 553 554 if (!list_empty(&txwd->list)) { 555 rtw89_pci_reclaim_txbd(rtwdev, tx_ring); 556 /* In low power mode, RPP can receive before updating of TX BD. 557 * In normal mode, it should not happen so give it a warning. 558 */ 559 if (!rtwpci->low_power && !list_empty(&txwd->list)) 560 rtw89_warn(rtwdev, "queue %d txwd %d is not idle\n", 561 txch, seq); 562 } 563 564 skb_queue_walk_safe(&txwd->queue, skb, tmp) { 565 skb_unlink(skb, &txwd->queue); 566 567 tx_data = RTW89_PCI_TX_SKB_CB(skb); 568 dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len, 569 DMA_TO_DEVICE); 570 571 rtw89_pci_tx_status(rtwdev, tx_ring, skb, tx_status); 572 } 573 574 if (list_empty(&txwd->list)) 575 rtw89_pci_enqueue_txwd(tx_ring, txwd); 576 } 577 578 static void rtw89_pci_release_rpp(struct rtw89_dev *rtwdev, 579 struct rtw89_pci_rpp_fmt *rpp) 580 { 581 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 582 struct rtw89_pci_tx_ring *tx_ring; 583 struct rtw89_pci_tx_wd_ring *wd_ring; 584 struct rtw89_pci_tx_wd *txwd; 585 u16 seq; 586 u8 qsel, tx_status, txch; 587 588 seq = le32_get_bits(rpp->dword, RTW89_PCI_RPP_SEQ); 589 qsel = le32_get_bits(rpp->dword, RTW89_PCI_RPP_QSEL); 590 tx_status = le32_get_bits(rpp->dword, RTW89_PCI_RPP_TX_STATUS); 591 txch = rtw89_core_get_ch_dma(rtwdev, qsel); 592 593 if (txch == RTW89_TXCH_CH12) { 594 rtw89_warn(rtwdev, "should no fwcmd release report\n"); 595 return; 596 } 597 598 tx_ring = &rtwpci->tx_rings[txch]; 599 wd_ring = &tx_ring->wd_ring; 600 txwd = &wd_ring->pages[seq]; 601 602 rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, seq, tx_status); 603 } 604 605 static void rtw89_pci_release_pending_txwd_skb(struct rtw89_dev *rtwdev, 606 struct rtw89_pci_tx_ring *tx_ring) 607 { 608 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 609 struct rtw89_pci_tx_wd *txwd; 610 int i; 611 612 for (i = 0; i < wd_ring->page_num; i++) { 613 txwd = &wd_ring->pages[i]; 614 615 if (!list_empty(&txwd->list)) 616 continue; 617 618 rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, i, RTW89_TX_MACID_DROP); 619 } 620 } 621 622 static u32 rtw89_pci_release_tx_skbs(struct rtw89_dev *rtwdev, 623 struct rtw89_pci_rx_ring *rx_ring, 624 u32 max_cnt) 625 { 626 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; 627 struct rtw89_pci_rx_info *rx_info; 628 struct rtw89_pci_rpp_fmt *rpp; 629 struct rtw89_rx_desc_info desc_info = {}; 630 struct sk_buff *skb; 631 u32 cnt = 0; 632 u32 rpp_size = sizeof(struct rtw89_pci_rpp_fmt); 633 u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info); 634 u32 skb_idx; 635 u32 offset; 636 int ret; 637 638 skb_idx = rtw89_pci_get_rx_skb_idx(rtwdev, bd_ring); 639 skb = rx_ring->buf[skb_idx]; 640 641 ret = rtw89_pci_sync_skb_for_device_and_validate_rx_info(rtwdev, rx_ring, skb); 642 if (ret) { 643 rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n", 644 bd_ring->wp, ret); 645 goto err_sync_device; 646 } 647 648 rx_info = RTW89_PCI_RX_SKB_CB(skb); 649 if (!rx_info->fs || !rx_info->ls) { 650 rtw89_err(rtwdev, "cannot process RP frame not set FS/LS\n"); 651 return cnt; 652 } 653 654 rtw89_chip_query_rxdesc(rtwdev, &desc_info, skb->data, rxinfo_size); 655 656 /* first segment has RX desc */ 657 offset = desc_info.offset + desc_info.rxd_len; 658 for (; offset + rpp_size <= rx_info->len; offset += rpp_size) { 659 rpp = (struct rtw89_pci_rpp_fmt *)(skb->data + offset); 660 rtw89_pci_release_rpp(rtwdev, rpp); 661 } 662 663 rtw89_pci_sync_skb_for_device(rtwdev, skb); 664 rtw89_pci_rxbd_increase(rx_ring, 1); 665 cnt++; 666 667 return cnt; 668 669 err_sync_device: 670 rtw89_pci_sync_skb_for_device(rtwdev, skb); 671 return 0; 672 } 673 674 static void rtw89_pci_release_tx(struct rtw89_dev *rtwdev, 675 struct rtw89_pci_rx_ring *rx_ring, 676 u32 cnt) 677 { 678 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; 679 u32 release_cnt; 680 681 while (cnt) { 682 release_cnt = rtw89_pci_release_tx_skbs(rtwdev, rx_ring, cnt); 683 if (!release_cnt) { 684 rtw89_err(rtwdev, "failed to release TX skbs\n"); 685 686 /* skip the rest RXBD bufs */ 687 rtw89_pci_rxbd_increase(rx_ring, cnt); 688 break; 689 } 690 691 cnt -= release_cnt; 692 } 693 694 rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp); 695 } 696 697 static int rtw89_pci_poll_rpq_dma(struct rtw89_dev *rtwdev, 698 struct rtw89_pci *rtwpci, int budget) 699 { 700 struct rtw89_pci_rx_ring *rx_ring; 701 u32 cnt; 702 int work_done; 703 704 rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ]; 705 706 spin_lock_bh(&rtwpci->trx_lock); 707 708 cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring); 709 if (cnt == 0) 710 goto out_unlock; 711 712 rtw89_pci_release_tx(rtwdev, rx_ring, cnt); 713 714 out_unlock: 715 spin_unlock_bh(&rtwpci->trx_lock); 716 717 /* always release all RPQ */ 718 work_done = min_t(int, cnt, budget); 719 rtwdev->napi_budget_countdown -= work_done; 720 721 return work_done; 722 } 723 724 static void rtw89_pci_isr_rxd_unavail(struct rtw89_dev *rtwdev, 725 struct rtw89_pci *rtwpci) 726 { 727 struct rtw89_pci_rx_ring *rx_ring; 728 struct rtw89_pci_dma_ring *bd_ring; 729 u32 reg_idx; 730 u16 hw_idx, hw_idx_next, host_idx; 731 int i; 732 733 for (i = 0; i < RTW89_RXCH_NUM; i++) { 734 rx_ring = &rtwpci->rx_rings[i]; 735 bd_ring = &rx_ring->bd_ring; 736 737 reg_idx = rtw89_read32(rtwdev, bd_ring->addr.idx); 738 hw_idx = FIELD_GET(TXBD_HW_IDX_MASK, reg_idx); 739 host_idx = FIELD_GET(TXBD_HOST_IDX_MASK, reg_idx); 740 hw_idx_next = (hw_idx + 1) % bd_ring->len; 741 742 if (hw_idx_next == host_idx) 743 rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "%d RXD unavailable\n", i); 744 745 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 746 "%d RXD unavailable, idx=0x%08x, len=%d\n", 747 i, reg_idx, bd_ring->len); 748 } 749 } 750 751 void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev, 752 struct rtw89_pci *rtwpci, 753 struct rtw89_pci_isrs *isrs) 754 { 755 isrs->halt_c2h_isrs = rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs; 756 isrs->isrs[0] = rtw89_read32(rtwdev, R_AX_PCIE_HISR00) & rtwpci->intrs[0]; 757 isrs->isrs[1] = rtw89_read32(rtwdev, R_AX_PCIE_HISR10) & rtwpci->intrs[1]; 758 759 rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs); 760 rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isrs->isrs[0]); 761 rtw89_write32(rtwdev, R_AX_PCIE_HISR10, isrs->isrs[1]); 762 } 763 EXPORT_SYMBOL(rtw89_pci_recognize_intrs); 764 765 void rtw89_pci_recognize_intrs_v1(struct rtw89_dev *rtwdev, 766 struct rtw89_pci *rtwpci, 767 struct rtw89_pci_isrs *isrs) 768 { 769 isrs->ind_isrs = rtw89_read32(rtwdev, R_AX_PCIE_HISR00_V1) & rtwpci->ind_intrs; 770 isrs->halt_c2h_isrs = isrs->ind_isrs & B_AX_HS0ISR_IND_INT_EN ? 771 rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs : 0; 772 isrs->isrs[0] = isrs->ind_isrs & B_AX_HCI_AXIDMA_INT_EN ? 773 rtw89_read32(rtwdev, R_AX_HAXI_HISR00) & rtwpci->intrs[0] : 0; 774 isrs->isrs[1] = isrs->ind_isrs & B_AX_HS1ISR_IND_INT_EN ? 775 rtw89_read32(rtwdev, R_AX_HISR1) & rtwpci->intrs[1] : 0; 776 777 if (isrs->halt_c2h_isrs) 778 rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs); 779 if (isrs->isrs[0]) 780 rtw89_write32(rtwdev, R_AX_HAXI_HISR00, isrs->isrs[0]); 781 if (isrs->isrs[1]) 782 rtw89_write32(rtwdev, R_AX_HISR1, isrs->isrs[1]); 783 } 784 EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v1); 785 786 void rtw89_pci_recognize_intrs_v2(struct rtw89_dev *rtwdev, 787 struct rtw89_pci *rtwpci, 788 struct rtw89_pci_isrs *isrs) 789 { 790 isrs->ind_isrs = rtw89_read32(rtwdev, R_BE_PCIE_HISR) & rtwpci->ind_intrs; 791 isrs->halt_c2h_isrs = isrs->ind_isrs & B_BE_HS0ISR_IND_INT ? 792 rtw89_read32(rtwdev, R_BE_HISR0) & rtwpci->halt_c2h_intrs : 0; 793 isrs->isrs[0] = isrs->ind_isrs & B_BE_HCI_AXIDMA_INT ? 794 rtw89_read32(rtwdev, R_BE_HAXI_HISR00) & rtwpci->intrs[0] : 0; 795 isrs->isrs[1] = rtw89_read32(rtwdev, R_BE_PCIE_DMA_ISR) & rtwpci->intrs[1]; 796 797 if (isrs->halt_c2h_isrs) 798 rtw89_write32(rtwdev, R_BE_HISR0, isrs->halt_c2h_isrs); 799 if (isrs->isrs[0]) 800 rtw89_write32(rtwdev, R_BE_HAXI_HISR00, isrs->isrs[0]); 801 if (isrs->isrs[1]) 802 rtw89_write32(rtwdev, R_BE_PCIE_DMA_ISR, isrs->isrs[1]); 803 rtw89_write32(rtwdev, R_BE_PCIE_HISR, isrs->ind_isrs); 804 } 805 EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v2); 806 807 void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) 808 { 809 rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs); 810 rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, rtwpci->intrs[0]); 811 rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, rtwpci->intrs[1]); 812 } 813 EXPORT_SYMBOL(rtw89_pci_enable_intr); 814 815 void rtw89_pci_disable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) 816 { 817 rtw89_write32(rtwdev, R_AX_HIMR0, 0); 818 rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, 0); 819 rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, 0); 820 } 821 EXPORT_SYMBOL(rtw89_pci_disable_intr); 822 823 void rtw89_pci_enable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) 824 { 825 rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, rtwpci->ind_intrs); 826 rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs); 827 rtw89_write32(rtwdev, R_AX_HAXI_HIMR00, rtwpci->intrs[0]); 828 rtw89_write32(rtwdev, R_AX_HIMR1, rtwpci->intrs[1]); 829 } 830 EXPORT_SYMBOL(rtw89_pci_enable_intr_v1); 831 832 void rtw89_pci_disable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) 833 { 834 rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, 0); 835 } 836 EXPORT_SYMBOL(rtw89_pci_disable_intr_v1); 837 838 void rtw89_pci_enable_intr_v2(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) 839 { 840 rtw89_write32(rtwdev, R_BE_HIMR0, rtwpci->halt_c2h_intrs); 841 rtw89_write32(rtwdev, R_BE_HAXI_HIMR00, rtwpci->intrs[0]); 842 rtw89_write32(rtwdev, R_BE_PCIE_DMA_IMR_0_V1, rtwpci->intrs[1]); 843 rtw89_write32(rtwdev, R_BE_PCIE_HIMR0, rtwpci->ind_intrs); 844 } 845 EXPORT_SYMBOL(rtw89_pci_enable_intr_v2); 846 847 void rtw89_pci_disable_intr_v2(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) 848 { 849 rtw89_write32(rtwdev, R_BE_PCIE_HIMR0, 0); 850 rtw89_write32(rtwdev, R_BE_PCIE_DMA_IMR_0_V1, 0); 851 } 852 EXPORT_SYMBOL(rtw89_pci_disable_intr_v2); 853 854 static void rtw89_pci_ops_recovery_start(struct rtw89_dev *rtwdev) 855 { 856 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 857 unsigned long flags; 858 859 spin_lock_irqsave(&rtwpci->irq_lock, flags); 860 rtw89_chip_disable_intr(rtwdev, rtwpci); 861 rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_START); 862 rtw89_chip_enable_intr(rtwdev, rtwpci); 863 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 864 } 865 866 static void rtw89_pci_ops_recovery_complete(struct rtw89_dev *rtwdev) 867 { 868 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 869 unsigned long flags; 870 871 spin_lock_irqsave(&rtwpci->irq_lock, flags); 872 rtw89_chip_disable_intr(rtwdev, rtwpci); 873 rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_COMPLETE); 874 rtw89_chip_enable_intr(rtwdev, rtwpci); 875 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 876 } 877 878 static void rtw89_pci_low_power_interrupt_handler(struct rtw89_dev *rtwdev) 879 { 880 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 881 int budget = NAPI_POLL_WEIGHT; 882 883 /* To prevent RXQ get stuck due to run out of budget. */ 884 rtwdev->napi_budget_countdown = budget; 885 886 rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, budget); 887 rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, budget); 888 } 889 890 static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev) 891 { 892 struct rtw89_dev *rtwdev = dev; 893 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 894 const struct rtw89_pci_info *info = rtwdev->pci_info; 895 const struct rtw89_pci_gen_def *gen_def = info->gen_def; 896 struct rtw89_pci_isrs isrs; 897 unsigned long flags; 898 899 spin_lock_irqsave(&rtwpci->irq_lock, flags); 900 rtw89_chip_recognize_intrs(rtwdev, rtwpci, &isrs); 901 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 902 903 if (unlikely(isrs.isrs[0] & gen_def->isr_rdu)) 904 rtw89_pci_isr_rxd_unavail(rtwdev, rtwpci); 905 906 if (unlikely(isrs.halt_c2h_isrs & gen_def->isr_halt_c2h)) 907 rtw89_ser_notify(rtwdev, rtw89_mac_get_err_status(rtwdev)); 908 909 if (unlikely(isrs.halt_c2h_isrs & gen_def->isr_wdt_timeout)) 910 rtw89_ser_notify(rtwdev, MAC_AX_ERR_L2_ERR_WDT_TIMEOUT_INT); 911 912 if (unlikely(rtwpci->under_recovery)) 913 goto enable_intr; 914 915 if (unlikely(rtwpci->low_power)) { 916 rtw89_pci_low_power_interrupt_handler(rtwdev); 917 goto enable_intr; 918 } 919 920 if (likely(rtwpci->running)) { 921 local_bh_disable(); 922 napi_schedule(&rtwdev->napi); 923 local_bh_enable(); 924 } 925 926 return IRQ_HANDLED; 927 928 enable_intr: 929 spin_lock_irqsave(&rtwpci->irq_lock, flags); 930 if (likely(rtwpci->running)) 931 rtw89_chip_enable_intr(rtwdev, rtwpci); 932 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 933 return IRQ_HANDLED; 934 } 935 936 static irqreturn_t rtw89_pci_interrupt_handler(int irq, void *dev) 937 { 938 struct rtw89_dev *rtwdev = dev; 939 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 940 unsigned long flags; 941 irqreturn_t irqret = IRQ_WAKE_THREAD; 942 943 spin_lock_irqsave(&rtwpci->irq_lock, flags); 944 945 /* If interrupt event is on the road, it is still trigger interrupt 946 * even we have done pci_stop() to turn off IMR. 947 */ 948 if (unlikely(!rtwpci->running)) { 949 irqret = IRQ_HANDLED; 950 goto exit; 951 } 952 953 rtw89_chip_disable_intr(rtwdev, rtwpci); 954 exit: 955 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 956 957 return irqret; 958 } 959 960 #define DEF_TXCHADDRS_TYPE2(gen, ch_idx, txch, v...) \ 961 [RTW89_TXCH_##ch_idx] = { \ 962 .num = R_##gen##_##txch##_TXBD_NUM ##v, \ 963 .idx = R_##gen##_##txch##_TXBD_IDX ##v, \ 964 .bdram = 0, \ 965 .desa_l = R_##gen##_##txch##_TXBD_DESA_L ##v, \ 966 .desa_h = R_##gen##_##txch##_TXBD_DESA_H ##v, \ 967 } 968 969 #define DEF_TXCHADDRS_TYPE1(info, txch, v...) \ 970 [RTW89_TXCH_##txch] = { \ 971 .num = R_AX_##txch##_TXBD_NUM ##v, \ 972 .idx = R_AX_##txch##_TXBD_IDX ##v, \ 973 .bdram = R_AX_##txch##_BDRAM_CTRL ##v, \ 974 .desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \ 975 .desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \ 976 } 977 978 #define DEF_TXCHADDRS(info, txch, v...) \ 979 [RTW89_TXCH_##txch] = { \ 980 .num = R_AX_##txch##_TXBD_NUM, \ 981 .idx = R_AX_##txch##_TXBD_IDX, \ 982 .bdram = R_AX_##txch##_BDRAM_CTRL ##v, \ 983 .desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \ 984 .desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \ 985 } 986 987 #define DEF_RXCHADDRS(gen, ch_idx, rxch, v...) \ 988 [RTW89_RXCH_##ch_idx] = { \ 989 .num = R_##gen##_##rxch##_RXBD_NUM ##v, \ 990 .idx = R_##gen##_##rxch##_RXBD_IDX ##v, \ 991 .desa_l = R_##gen##_##rxch##_RXBD_DESA_L ##v, \ 992 .desa_h = R_##gen##_##rxch##_RXBD_DESA_H ##v, \ 993 } 994 995 const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set = { 996 .tx = { 997 DEF_TXCHADDRS(info, ACH0), 998 DEF_TXCHADDRS(info, ACH1), 999 DEF_TXCHADDRS(info, ACH2), 1000 DEF_TXCHADDRS(info, ACH3), 1001 DEF_TXCHADDRS(info, ACH4), 1002 DEF_TXCHADDRS(info, ACH5), 1003 DEF_TXCHADDRS(info, ACH6), 1004 DEF_TXCHADDRS(info, ACH7), 1005 DEF_TXCHADDRS(info, CH8), 1006 DEF_TXCHADDRS(info, CH9), 1007 DEF_TXCHADDRS_TYPE1(info, CH10), 1008 DEF_TXCHADDRS_TYPE1(info, CH11), 1009 DEF_TXCHADDRS(info, CH12), 1010 }, 1011 .rx = { 1012 DEF_RXCHADDRS(AX, RXQ, RXQ), 1013 DEF_RXCHADDRS(AX, RPQ, RPQ), 1014 }, 1015 }; 1016 EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set); 1017 1018 const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1 = { 1019 .tx = { 1020 DEF_TXCHADDRS(info, ACH0, _V1), 1021 DEF_TXCHADDRS(info, ACH1, _V1), 1022 DEF_TXCHADDRS(info, ACH2, _V1), 1023 DEF_TXCHADDRS(info, ACH3, _V1), 1024 DEF_TXCHADDRS(info, ACH4, _V1), 1025 DEF_TXCHADDRS(info, ACH5, _V1), 1026 DEF_TXCHADDRS(info, ACH6, _V1), 1027 DEF_TXCHADDRS(info, ACH7, _V1), 1028 DEF_TXCHADDRS(info, CH8, _V1), 1029 DEF_TXCHADDRS(info, CH9, _V1), 1030 DEF_TXCHADDRS_TYPE1(info, CH10, _V1), 1031 DEF_TXCHADDRS_TYPE1(info, CH11, _V1), 1032 DEF_TXCHADDRS(info, CH12, _V1), 1033 }, 1034 .rx = { 1035 DEF_RXCHADDRS(AX, RXQ, RXQ, _V1), 1036 DEF_RXCHADDRS(AX, RPQ, RPQ, _V1), 1037 }, 1038 }; 1039 EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_v1); 1040 1041 const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_be = { 1042 .tx = { 1043 DEF_TXCHADDRS_TYPE2(BE, ACH0, CH0, _V1), 1044 DEF_TXCHADDRS_TYPE2(BE, ACH1, CH1, _V1), 1045 DEF_TXCHADDRS_TYPE2(BE, ACH2, CH2, _V1), 1046 DEF_TXCHADDRS_TYPE2(BE, ACH3, CH3, _V1), 1047 DEF_TXCHADDRS_TYPE2(BE, ACH4, CH4, _V1), 1048 DEF_TXCHADDRS_TYPE2(BE, ACH5, CH5, _V1), 1049 DEF_TXCHADDRS_TYPE2(BE, ACH6, CH6, _V1), 1050 DEF_TXCHADDRS_TYPE2(BE, ACH7, CH7, _V1), 1051 DEF_TXCHADDRS_TYPE2(BE, CH8, CH8, _V1), 1052 DEF_TXCHADDRS_TYPE2(BE, CH9, CH9, _V1), 1053 DEF_TXCHADDRS_TYPE2(BE, CH10, CH10, _V1), 1054 DEF_TXCHADDRS_TYPE2(BE, CH11, CH11, _V1), 1055 DEF_TXCHADDRS_TYPE2(BE, CH12, CH12, _V1), 1056 }, 1057 .rx = { 1058 DEF_RXCHADDRS(BE, RXQ, RXQ0, _V1), 1059 DEF_RXCHADDRS(BE, RPQ, RPQ0, _V1), 1060 }, 1061 }; 1062 EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_be); 1063 1064 #undef DEF_TXCHADDRS_TYPE1 1065 #undef DEF_TXCHADDRS 1066 #undef DEF_RXCHADDRS 1067 1068 static int rtw89_pci_get_txch_addrs(struct rtw89_dev *rtwdev, 1069 enum rtw89_tx_channel txch, 1070 const struct rtw89_pci_ch_dma_addr **addr) 1071 { 1072 const struct rtw89_pci_info *info = rtwdev->pci_info; 1073 1074 if (txch >= RTW89_TXCH_NUM) 1075 return -EINVAL; 1076 1077 *addr = &info->dma_addr_set->tx[txch]; 1078 1079 return 0; 1080 } 1081 1082 static int rtw89_pci_get_rxch_addrs(struct rtw89_dev *rtwdev, 1083 enum rtw89_rx_channel rxch, 1084 const struct rtw89_pci_ch_dma_addr **addr) 1085 { 1086 const struct rtw89_pci_info *info = rtwdev->pci_info; 1087 1088 if (rxch >= RTW89_RXCH_NUM) 1089 return -EINVAL; 1090 1091 *addr = &info->dma_addr_set->rx[rxch]; 1092 1093 return 0; 1094 } 1095 1096 static u32 rtw89_pci_get_avail_txbd_num(struct rtw89_pci_tx_ring *ring) 1097 { 1098 struct rtw89_pci_dma_ring *bd_ring = &ring->bd_ring; 1099 1100 /* reserved 1 desc check ring is full or not */ 1101 if (bd_ring->rp > bd_ring->wp) 1102 return bd_ring->rp - bd_ring->wp - 1; 1103 1104 return bd_ring->len - (bd_ring->wp - bd_ring->rp) - 1; 1105 } 1106 1107 static 1108 u32 __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(struct rtw89_dev *rtwdev) 1109 { 1110 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1111 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12]; 1112 u32 cnt; 1113 1114 spin_lock_bh(&rtwpci->trx_lock); 1115 rtw89_pci_reclaim_tx_fwcmd(rtwdev, rtwpci); 1116 cnt = rtw89_pci_get_avail_txbd_num(tx_ring); 1117 spin_unlock_bh(&rtwpci->trx_lock); 1118 1119 return cnt; 1120 } 1121 1122 static 1123 u32 __rtw89_pci_check_and_reclaim_tx_resource_noio(struct rtw89_dev *rtwdev, 1124 u8 txch) 1125 { 1126 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1127 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch]; 1128 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 1129 u32 cnt; 1130 1131 spin_lock_bh(&rtwpci->trx_lock); 1132 cnt = rtw89_pci_get_avail_txbd_num(tx_ring); 1133 if (txch != RTW89_TXCH_CH12) 1134 cnt = min(cnt, wd_ring->curr_num); 1135 spin_unlock_bh(&rtwpci->trx_lock); 1136 1137 return cnt; 1138 } 1139 1140 static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev, 1141 u8 txch) 1142 { 1143 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1144 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch]; 1145 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 1146 const struct rtw89_chip_info *chip = rtwdev->chip; 1147 u32 bd_cnt, wd_cnt, min_cnt = 0; 1148 struct rtw89_pci_rx_ring *rx_ring; 1149 enum rtw89_debug_mask debug_mask; 1150 u32 cnt; 1151 1152 rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ]; 1153 1154 spin_lock_bh(&rtwpci->trx_lock); 1155 bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring); 1156 wd_cnt = wd_ring->curr_num; 1157 1158 if (wd_cnt == 0 || bd_cnt == 0) { 1159 cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring); 1160 if (cnt) 1161 rtw89_pci_release_tx(rtwdev, rx_ring, cnt); 1162 else if (wd_cnt == 0) 1163 goto out_unlock; 1164 1165 bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring); 1166 if (bd_cnt == 0) 1167 rtw89_pci_reclaim_txbd(rtwdev, tx_ring); 1168 } 1169 1170 bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring); 1171 wd_cnt = wd_ring->curr_num; 1172 min_cnt = min(bd_cnt, wd_cnt); 1173 if (min_cnt == 0) { 1174 /* This message can be frequently shown in low power mode or 1175 * high traffic with small FIFO chips, and we have recognized it as normal 1176 * behavior, so print with mask RTW89_DBG_TXRX in these situations. 1177 */ 1178 if (rtwpci->low_power || chip->small_fifo_size) 1179 debug_mask = RTW89_DBG_TXRX; 1180 else 1181 debug_mask = RTW89_DBG_UNEXP; 1182 1183 rtw89_debug(rtwdev, debug_mask, 1184 "still no tx resource after reclaim: wd_cnt=%d bd_cnt=%d\n", 1185 wd_cnt, bd_cnt); 1186 } 1187 1188 out_unlock: 1189 spin_unlock_bh(&rtwpci->trx_lock); 1190 1191 return min_cnt; 1192 } 1193 1194 static u32 rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev, 1195 u8 txch) 1196 { 1197 if (rtwdev->hci.paused) 1198 return __rtw89_pci_check_and_reclaim_tx_resource_noio(rtwdev, txch); 1199 1200 if (txch == RTW89_TXCH_CH12) 1201 return __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(rtwdev); 1202 1203 return __rtw89_pci_check_and_reclaim_tx_resource(rtwdev, txch); 1204 } 1205 1206 static void __rtw89_pci_tx_kick_off(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring) 1207 { 1208 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1209 struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring; 1210 u32 host_idx, addr; 1211 1212 spin_lock_bh(&rtwpci->trx_lock); 1213 1214 addr = bd_ring->addr.idx; 1215 host_idx = bd_ring->wp; 1216 rtw89_write16(rtwdev, addr, host_idx); 1217 1218 spin_unlock_bh(&rtwpci->trx_lock); 1219 } 1220 1221 static void rtw89_pci_tx_bd_ring_update(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring, 1222 int n_txbd) 1223 { 1224 struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring; 1225 u32 host_idx, len; 1226 1227 len = bd_ring->len; 1228 host_idx = bd_ring->wp + n_txbd; 1229 host_idx = host_idx < len ? host_idx : host_idx - len; 1230 1231 bd_ring->wp = host_idx; 1232 } 1233 1234 static void rtw89_pci_ops_tx_kick_off(struct rtw89_dev *rtwdev, u8 txch) 1235 { 1236 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1237 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch]; 1238 1239 if (rtwdev->hci.paused) { 1240 set_bit(txch, rtwpci->kick_map); 1241 return; 1242 } 1243 1244 __rtw89_pci_tx_kick_off(rtwdev, tx_ring); 1245 } 1246 1247 static void rtw89_pci_tx_kick_off_pending(struct rtw89_dev *rtwdev) 1248 { 1249 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1250 struct rtw89_pci_tx_ring *tx_ring; 1251 int txch; 1252 1253 for (txch = 0; txch < RTW89_TXCH_NUM; txch++) { 1254 if (!test_and_clear_bit(txch, rtwpci->kick_map)) 1255 continue; 1256 1257 tx_ring = &rtwpci->tx_rings[txch]; 1258 __rtw89_pci_tx_kick_off(rtwdev, tx_ring); 1259 } 1260 } 1261 1262 static void __pci_flush_txch(struct rtw89_dev *rtwdev, u8 txch, bool drop) 1263 { 1264 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1265 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch]; 1266 struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring; 1267 u32 cur_idx, cur_rp; 1268 u8 i; 1269 1270 /* Because the time taked by the I/O is a bit dynamic, it's hard to 1271 * define a reasonable fixed total timeout to use read_poll_timeout* 1272 * helper. Instead, we can ensure a reasonable polling times, so we 1273 * just use for loop with udelay here. 1274 */ 1275 for (i = 0; i < 60; i++) { 1276 cur_idx = rtw89_read32(rtwdev, bd_ring->addr.idx); 1277 cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx); 1278 if (cur_rp == bd_ring->wp) 1279 return; 1280 1281 udelay(1); 1282 } 1283 1284 if (!drop) 1285 rtw89_info(rtwdev, "timed out to flush pci txch: %d\n", txch); 1286 } 1287 1288 static void __rtw89_pci_ops_flush_txchs(struct rtw89_dev *rtwdev, u32 txchs, 1289 bool drop) 1290 { 1291 const struct rtw89_pci_info *info = rtwdev->pci_info; 1292 u8 i; 1293 1294 for (i = 0; i < RTW89_TXCH_NUM; i++) { 1295 /* It may be unnecessary to flush FWCMD queue. */ 1296 if (i == RTW89_TXCH_CH12) 1297 continue; 1298 if (info->tx_dma_ch_mask & BIT(i)) 1299 continue; 1300 1301 if (txchs & BIT(i)) 1302 __pci_flush_txch(rtwdev, i, drop); 1303 } 1304 } 1305 1306 static void rtw89_pci_ops_flush_queues(struct rtw89_dev *rtwdev, u32 queues, 1307 bool drop) 1308 { 1309 __rtw89_pci_ops_flush_txchs(rtwdev, BIT(RTW89_TXCH_NUM) - 1, drop); 1310 } 1311 1312 u32 rtw89_pci_fill_txaddr_info(struct rtw89_dev *rtwdev, 1313 void *txaddr_info_addr, u32 total_len, 1314 dma_addr_t dma, u8 *add_info_nr) 1315 { 1316 struct rtw89_pci_tx_addr_info_32 *txaddr_info = txaddr_info_addr; 1317 __le16 option; 1318 1319 txaddr_info->length = cpu_to_le16(total_len); 1320 option = cpu_to_le16(RTW89_PCI_ADDR_MSDU_LS | RTW89_PCI_ADDR_NUM(1)); 1321 option |= le16_encode_bits(upper_32_bits(dma), RTW89_PCI_ADDR_HIGH_MASK); 1322 txaddr_info->option = option; 1323 txaddr_info->dma = cpu_to_le32(dma); 1324 1325 *add_info_nr = 1; 1326 1327 return sizeof(*txaddr_info); 1328 } 1329 EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info); 1330 1331 u32 rtw89_pci_fill_txaddr_info_v1(struct rtw89_dev *rtwdev, 1332 void *txaddr_info_addr, u32 total_len, 1333 dma_addr_t dma, u8 *add_info_nr) 1334 { 1335 struct rtw89_pci_tx_addr_info_32_v1 *txaddr_info = txaddr_info_addr; 1336 u32 remain = total_len; 1337 u32 len; 1338 u16 length_option; 1339 int n; 1340 1341 for (n = 0; n < RTW89_TXADDR_INFO_NR_V1 && remain; n++) { 1342 len = remain >= TXADDR_INFO_LENTHG_V1_MAX ? 1343 TXADDR_INFO_LENTHG_V1_MAX : remain; 1344 remain -= len; 1345 1346 length_option = FIELD_PREP(B_PCIADDR_LEN_V1_MASK, len) | 1347 FIELD_PREP(B_PCIADDR_HIGH_SEL_V1_MASK, 0) | 1348 FIELD_PREP(B_PCIADDR_LS_V1_MASK, remain == 0); 1349 length_option |= u16_encode_bits(upper_32_bits(dma), 1350 B_PCIADDR_HIGH_SEL_V1_MASK); 1351 txaddr_info->length_opt = cpu_to_le16(length_option); 1352 txaddr_info->dma_low_lsb = cpu_to_le16(FIELD_GET(GENMASK(15, 0), dma)); 1353 txaddr_info->dma_low_msb = cpu_to_le16(FIELD_GET(GENMASK(31, 16), dma)); 1354 1355 dma += len; 1356 txaddr_info++; 1357 } 1358 1359 WARN_ONCE(remain, "length overflow remain=%u total_len=%u", 1360 remain, total_len); 1361 1362 *add_info_nr = n; 1363 1364 return n * sizeof(*txaddr_info); 1365 } 1366 EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info_v1); 1367 1368 static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev, 1369 struct rtw89_pci_tx_ring *tx_ring, 1370 struct rtw89_pci_tx_wd *txwd, 1371 struct rtw89_core_tx_request *tx_req) 1372 { 1373 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1374 const struct rtw89_chip_info *chip = rtwdev->chip; 1375 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; 1376 struct rtw89_pci_tx_wp_info *txwp_info; 1377 void *txaddr_info_addr; 1378 struct pci_dev *pdev = rtwpci->pdev; 1379 struct sk_buff *skb = tx_req->skb; 1380 struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb); 1381 struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb); 1382 bool en_wd_info = desc_info->en_wd_info; 1383 u32 txwd_len; 1384 u32 txwp_len; 1385 u32 txaddr_info_len; 1386 dma_addr_t dma; 1387 int ret; 1388 1389 dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE); 1390 if (dma_mapping_error(&pdev->dev, dma)) { 1391 rtw89_err(rtwdev, "failed to map skb dma data\n"); 1392 ret = -EBUSY; 1393 goto err; 1394 } 1395 1396 tx_data->dma = dma; 1397 rcu_assign_pointer(skb_data->wait, NULL); 1398 1399 txwp_len = sizeof(*txwp_info); 1400 txwd_len = chip->txwd_body_size; 1401 txwd_len += en_wd_info ? chip->txwd_info_size : 0; 1402 1403 #if defined(__linux__) 1404 txwp_info = txwd->vaddr + txwd_len; 1405 #elif defined(__FreeBSD__) 1406 txwp_info = (struct rtw89_pci_tx_wp_info *)((u8 *)txwd->vaddr + txwd_len); 1407 #endif 1408 txwp_info->seq0 = cpu_to_le16(txwd->seq | RTW89_PCI_TXWP_VALID); 1409 txwp_info->seq1 = 0; 1410 txwp_info->seq2 = 0; 1411 txwp_info->seq3 = 0; 1412 1413 tx_ring->tx_cnt++; 1414 #if defined(__linux__) 1415 txaddr_info_addr = txwd->vaddr + txwd_len + txwp_len; 1416 #elif defined(__FreeBSD__) 1417 txaddr_info_addr = (u8 *)txwd->vaddr + txwd_len + txwp_len; 1418 #endif 1419 txaddr_info_len = 1420 rtw89_chip_fill_txaddr_info(rtwdev, txaddr_info_addr, skb->len, 1421 dma, &desc_info->addr_info_nr); 1422 1423 txwd->len = txwd_len + txwp_len + txaddr_info_len; 1424 1425 rtw89_chip_fill_txdesc(rtwdev, desc_info, txwd->vaddr); 1426 1427 skb_queue_tail(&txwd->queue, skb); 1428 1429 return 0; 1430 1431 err: 1432 return ret; 1433 } 1434 1435 static int rtw89_pci_fwcmd_submit(struct rtw89_dev *rtwdev, 1436 struct rtw89_pci_tx_ring *tx_ring, 1437 struct rtw89_pci_tx_bd_32 *txbd, 1438 struct rtw89_core_tx_request *tx_req) 1439 { 1440 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1441 const struct rtw89_chip_info *chip = rtwdev->chip; 1442 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; 1443 void *txdesc; 1444 int txdesc_size = chip->h2c_desc_size; 1445 struct pci_dev *pdev = rtwpci->pdev; 1446 struct sk_buff *skb = tx_req->skb; 1447 struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb); 1448 dma_addr_t dma; 1449 __le16 opt; 1450 1451 txdesc = skb_push(skb, txdesc_size); 1452 memset(txdesc, 0, txdesc_size); 1453 rtw89_chip_fill_txdesc_fwcmd(rtwdev, desc_info, txdesc); 1454 1455 dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE); 1456 if (dma_mapping_error(&pdev->dev, dma)) { 1457 rtw89_err(rtwdev, "failed to map fwcmd dma data\n"); 1458 return -EBUSY; 1459 } 1460 1461 tx_data->dma = dma; 1462 opt = cpu_to_le16(RTW89_PCI_TXBD_OPT_LS); 1463 opt |= le16_encode_bits(upper_32_bits(dma), RTW89_PCI_TXBD_OPT_DMA_HI); 1464 txbd->opt = opt; 1465 txbd->length = cpu_to_le16(skb->len); 1466 txbd->dma = cpu_to_le32(tx_data->dma); 1467 skb_queue_tail(&rtwpci->h2c_queue, skb); 1468 1469 rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1); 1470 1471 return 0; 1472 } 1473 1474 static int rtw89_pci_txbd_submit(struct rtw89_dev *rtwdev, 1475 struct rtw89_pci_tx_ring *tx_ring, 1476 struct rtw89_pci_tx_bd_32 *txbd, 1477 struct rtw89_core_tx_request *tx_req) 1478 { 1479 struct rtw89_pci_tx_wd *txwd; 1480 __le16 opt; 1481 int ret; 1482 1483 /* FWCMD queue doesn't have wd pages. Instead, it submits the CMD 1484 * buffer with WD BODY only. So here we don't need to check the free 1485 * pages of the wd ring. 1486 */ 1487 if (tx_ring->txch == RTW89_TXCH_CH12) 1488 return rtw89_pci_fwcmd_submit(rtwdev, tx_ring, txbd, tx_req); 1489 1490 txwd = rtw89_pci_dequeue_txwd(tx_ring); 1491 if (!txwd) { 1492 rtw89_err(rtwdev, "no available TXWD\n"); 1493 ret = -ENOSPC; 1494 goto err; 1495 } 1496 1497 ret = rtw89_pci_txwd_submit(rtwdev, tx_ring, txwd, tx_req); 1498 if (ret) { 1499 rtw89_err(rtwdev, "failed to submit TXWD %d\n", txwd->seq); 1500 goto err_enqueue_wd; 1501 } 1502 1503 list_add_tail(&txwd->list, &tx_ring->busy_pages); 1504 1505 opt = cpu_to_le16(RTW89_PCI_TXBD_OPT_LS); 1506 opt |= le16_encode_bits(upper_32_bits(txwd->paddr), RTW89_PCI_TXBD_OPT_DMA_HI); 1507 txbd->opt = opt; 1508 txbd->length = cpu_to_le16(txwd->len); 1509 txbd->dma = cpu_to_le32(txwd->paddr); 1510 1511 rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1); 1512 1513 return 0; 1514 1515 err_enqueue_wd: 1516 rtw89_pci_enqueue_txwd(tx_ring, txwd); 1517 err: 1518 return ret; 1519 } 1520 1521 static int rtw89_pci_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req, 1522 u8 txch) 1523 { 1524 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1525 struct rtw89_pci_tx_ring *tx_ring; 1526 struct rtw89_pci_tx_bd_32 *txbd; 1527 u32 n_avail_txbd; 1528 int ret = 0; 1529 1530 /* check the tx type and dma channel for fw cmd queue */ 1531 if ((txch == RTW89_TXCH_CH12 || 1532 tx_req->tx_type == RTW89_CORE_TX_TYPE_FWCMD) && 1533 (txch != RTW89_TXCH_CH12 || 1534 tx_req->tx_type != RTW89_CORE_TX_TYPE_FWCMD)) { 1535 rtw89_err(rtwdev, "only fw cmd uses dma channel 12\n"); 1536 return -EINVAL; 1537 } 1538 1539 tx_ring = &rtwpci->tx_rings[txch]; 1540 spin_lock_bh(&rtwpci->trx_lock); 1541 1542 n_avail_txbd = rtw89_pci_get_avail_txbd_num(tx_ring); 1543 if (n_avail_txbd == 0) { 1544 rtw89_err(rtwdev, "no available TXBD\n"); 1545 ret = -ENOSPC; 1546 goto err_unlock; 1547 } 1548 1549 txbd = rtw89_pci_get_next_txbd(tx_ring); 1550 ret = rtw89_pci_txbd_submit(rtwdev, tx_ring, txbd, tx_req); 1551 if (ret) { 1552 rtw89_err(rtwdev, "failed to submit TXBD\n"); 1553 goto err_unlock; 1554 } 1555 1556 spin_unlock_bh(&rtwpci->trx_lock); 1557 return 0; 1558 1559 err_unlock: 1560 spin_unlock_bh(&rtwpci->trx_lock); 1561 return ret; 1562 } 1563 1564 static int rtw89_pci_ops_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req) 1565 { 1566 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; 1567 int ret; 1568 1569 ret = rtw89_pci_tx_write(rtwdev, tx_req, desc_info->ch_dma); 1570 if (ret) { 1571 rtw89_err(rtwdev, "failed to TX Queue %d\n", desc_info->ch_dma); 1572 return ret; 1573 } 1574 1575 return 0; 1576 } 1577 1578 const struct rtw89_pci_bd_ram rtw89_bd_ram_table_dual[RTW89_TXCH_NUM] = { 1579 [RTW89_TXCH_ACH0] = {.start_idx = 0, .max_num = 5, .min_num = 2}, 1580 [RTW89_TXCH_ACH1] = {.start_idx = 5, .max_num = 5, .min_num = 2}, 1581 [RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2}, 1582 [RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2}, 1583 [RTW89_TXCH_ACH4] = {.start_idx = 20, .max_num = 5, .min_num = 2}, 1584 [RTW89_TXCH_ACH5] = {.start_idx = 25, .max_num = 5, .min_num = 2}, 1585 [RTW89_TXCH_ACH6] = {.start_idx = 30, .max_num = 5, .min_num = 2}, 1586 [RTW89_TXCH_ACH7] = {.start_idx = 35, .max_num = 5, .min_num = 2}, 1587 [RTW89_TXCH_CH8] = {.start_idx = 40, .max_num = 5, .min_num = 1}, 1588 [RTW89_TXCH_CH9] = {.start_idx = 45, .max_num = 5, .min_num = 1}, 1589 [RTW89_TXCH_CH10] = {.start_idx = 50, .max_num = 5, .min_num = 1}, 1590 [RTW89_TXCH_CH11] = {.start_idx = 55, .max_num = 5, .min_num = 1}, 1591 [RTW89_TXCH_CH12] = {.start_idx = 60, .max_num = 4, .min_num = 1}, 1592 }; 1593 EXPORT_SYMBOL(rtw89_bd_ram_table_dual); 1594 1595 const struct rtw89_pci_bd_ram rtw89_bd_ram_table_single[RTW89_TXCH_NUM] = { 1596 [RTW89_TXCH_ACH0] = {.start_idx = 0, .max_num = 5, .min_num = 2}, 1597 [RTW89_TXCH_ACH1] = {.start_idx = 5, .max_num = 5, .min_num = 2}, 1598 [RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2}, 1599 [RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2}, 1600 [RTW89_TXCH_CH8] = {.start_idx = 20, .max_num = 4, .min_num = 1}, 1601 [RTW89_TXCH_CH9] = {.start_idx = 24, .max_num = 4, .min_num = 1}, 1602 [RTW89_TXCH_CH12] = {.start_idx = 28, .max_num = 4, .min_num = 1}, 1603 }; 1604 EXPORT_SYMBOL(rtw89_bd_ram_table_single); 1605 1606 static void rtw89_pci_init_wp_16sel(struct rtw89_dev *rtwdev) 1607 { 1608 const struct rtw89_pci_info *info = rtwdev->pci_info; 1609 u32 addr = info->wp_sel_addr; 1610 u32 val; 1611 int i; 1612 1613 if (!info->wp_sel_addr) 1614 return; 1615 1616 for (i = 0; i < 16; i += 4) { 1617 val = u32_encode_bits(i + 0, MASKBYTE0) | 1618 u32_encode_bits(i + 1, MASKBYTE1) | 1619 u32_encode_bits(i + 2, MASKBYTE2) | 1620 u32_encode_bits(i + 3, MASKBYTE3); 1621 rtw89_write32(rtwdev, addr + i, val); 1622 } 1623 } 1624 1625 static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev) 1626 { 1627 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1628 const struct rtw89_pci_info *info = rtwdev->pci_info; 1629 const struct rtw89_pci_bd_ram *bd_ram_table = *info->bd_ram_table; 1630 struct rtw89_pci_tx_ring *tx_ring; 1631 struct rtw89_pci_rx_ring *rx_ring; 1632 struct rtw89_pci_dma_ring *bd_ring; 1633 const struct rtw89_pci_bd_ram *bd_ram; 1634 u32 addr_num; 1635 u32 addr_idx; 1636 u32 addr_bdram; 1637 u32 addr_desa_l; 1638 u32 val32; 1639 int i; 1640 1641 for (i = 0; i < RTW89_TXCH_NUM; i++) { 1642 if (info->tx_dma_ch_mask & BIT(i)) 1643 continue; 1644 1645 tx_ring = &rtwpci->tx_rings[i]; 1646 bd_ring = &tx_ring->bd_ring; 1647 bd_ram = bd_ram_table ? &bd_ram_table[i] : NULL; 1648 addr_num = bd_ring->addr.num; 1649 addr_bdram = bd_ring->addr.bdram; 1650 addr_desa_l = bd_ring->addr.desa_l; 1651 bd_ring->wp = 0; 1652 bd_ring->rp = 0; 1653 1654 rtw89_write16(rtwdev, addr_num, bd_ring->len); 1655 if (addr_bdram && bd_ram) { 1656 val32 = FIELD_PREP(BDRAM_SIDX_MASK, bd_ram->start_idx) | 1657 FIELD_PREP(BDRAM_MAX_MASK, bd_ram->max_num) | 1658 FIELD_PREP(BDRAM_MIN_MASK, bd_ram->min_num); 1659 1660 rtw89_write32(rtwdev, addr_bdram, val32); 1661 } 1662 rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma); 1663 rtw89_write32(rtwdev, addr_desa_l + 4, upper_32_bits(bd_ring->dma)); 1664 } 1665 1666 for (i = 0; i < RTW89_RXCH_NUM; i++) { 1667 rx_ring = &rtwpci->rx_rings[i]; 1668 bd_ring = &rx_ring->bd_ring; 1669 addr_num = bd_ring->addr.num; 1670 addr_idx = bd_ring->addr.idx; 1671 addr_desa_l = bd_ring->addr.desa_l; 1672 if (info->rx_ring_eq_is_full) 1673 bd_ring->wp = bd_ring->len - 1; 1674 else 1675 bd_ring->wp = 0; 1676 bd_ring->rp = 0; 1677 rx_ring->diliver_skb = NULL; 1678 rx_ring->diliver_desc.ready = false; 1679 rx_ring->target_rx_tag = 0; 1680 1681 rtw89_write16(rtwdev, addr_num, bd_ring->len); 1682 rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma); 1683 rtw89_write32(rtwdev, addr_desa_l + 4, upper_32_bits(bd_ring->dma)); 1684 1685 if (info->rx_ring_eq_is_full) 1686 rtw89_write16(rtwdev, addr_idx, bd_ring->wp); 1687 } 1688 1689 rtw89_pci_init_wp_16sel(rtwdev); 1690 } 1691 1692 static void rtw89_pci_release_tx_ring(struct rtw89_dev *rtwdev, 1693 struct rtw89_pci_tx_ring *tx_ring) 1694 { 1695 rtw89_pci_release_busy_txwd(rtwdev, tx_ring); 1696 rtw89_pci_release_pending_txwd_skb(rtwdev, tx_ring); 1697 } 1698 1699 void rtw89_pci_ops_reset(struct rtw89_dev *rtwdev) 1700 { 1701 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1702 const struct rtw89_pci_info *info = rtwdev->pci_info; 1703 int txch; 1704 1705 rtw89_pci_reset_trx_rings(rtwdev); 1706 1707 spin_lock_bh(&rtwpci->trx_lock); 1708 for (txch = 0; txch < RTW89_TXCH_NUM; txch++) { 1709 if (info->tx_dma_ch_mask & BIT(txch)) 1710 continue; 1711 if (txch == RTW89_TXCH_CH12) { 1712 rtw89_pci_release_fwcmd(rtwdev, rtwpci, 1713 skb_queue_len(&rtwpci->h2c_queue), true); 1714 continue; 1715 } 1716 rtw89_pci_release_tx_ring(rtwdev, &rtwpci->tx_rings[txch]); 1717 } 1718 spin_unlock_bh(&rtwpci->trx_lock); 1719 } 1720 1721 static void rtw89_pci_enable_intr_lock(struct rtw89_dev *rtwdev) 1722 { 1723 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1724 unsigned long flags; 1725 1726 spin_lock_irqsave(&rtwpci->irq_lock, flags); 1727 rtwpci->running = true; 1728 rtw89_chip_enable_intr(rtwdev, rtwpci); 1729 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 1730 } 1731 1732 static void rtw89_pci_disable_intr_lock(struct rtw89_dev *rtwdev) 1733 { 1734 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1735 unsigned long flags; 1736 1737 spin_lock_irqsave(&rtwpci->irq_lock, flags); 1738 rtwpci->running = false; 1739 rtw89_chip_disable_intr(rtwdev, rtwpci); 1740 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 1741 } 1742 1743 static int rtw89_pci_ops_start(struct rtw89_dev *rtwdev) 1744 { 1745 rtw89_core_napi_start(rtwdev); 1746 rtw89_pci_enable_intr_lock(rtwdev); 1747 1748 return 0; 1749 } 1750 1751 static void rtw89_pci_ops_stop(struct rtw89_dev *rtwdev) 1752 { 1753 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1754 struct pci_dev *pdev = rtwpci->pdev; 1755 1756 rtw89_pci_disable_intr_lock(rtwdev); 1757 synchronize_irq(pdev->irq); 1758 rtw89_core_napi_stop(rtwdev); 1759 } 1760 1761 static void rtw89_pci_ops_pause(struct rtw89_dev *rtwdev, bool pause) 1762 { 1763 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1764 struct pci_dev *pdev = rtwpci->pdev; 1765 1766 if (pause) { 1767 rtw89_pci_disable_intr_lock(rtwdev); 1768 synchronize_irq(pdev->irq); 1769 if (test_bit(RTW89_FLAG_NAPI_RUNNING, rtwdev->flags)) 1770 napi_synchronize(&rtwdev->napi); 1771 } else { 1772 rtw89_pci_enable_intr_lock(rtwdev); 1773 rtw89_pci_tx_kick_off_pending(rtwdev); 1774 } 1775 } 1776 1777 static 1778 void rtw89_pci_switch_bd_idx_addr(struct rtw89_dev *rtwdev, bool low_power) 1779 { 1780 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1781 const struct rtw89_pci_info *info = rtwdev->pci_info; 1782 const struct rtw89_pci_bd_idx_addr *bd_idx_addr = info->bd_idx_addr_low_power; 1783 const struct rtw89_pci_ch_dma_addr_set *dma_addr_set = info->dma_addr_set; 1784 struct rtw89_pci_tx_ring *tx_ring; 1785 struct rtw89_pci_rx_ring *rx_ring; 1786 int i; 1787 1788 if (WARN(!bd_idx_addr, "only HCI with low power mode needs this\n")) 1789 return; 1790 1791 for (i = 0; i < RTW89_TXCH_NUM; i++) { 1792 tx_ring = &rtwpci->tx_rings[i]; 1793 tx_ring->bd_ring.addr.idx = low_power ? 1794 bd_idx_addr->tx_bd_addrs[i] : 1795 dma_addr_set->tx[i].idx; 1796 } 1797 1798 for (i = 0; i < RTW89_RXCH_NUM; i++) { 1799 rx_ring = &rtwpci->rx_rings[i]; 1800 rx_ring->bd_ring.addr.idx = low_power ? 1801 bd_idx_addr->rx_bd_addrs[i] : 1802 dma_addr_set->rx[i].idx; 1803 } 1804 } 1805 1806 static void rtw89_pci_ops_switch_mode(struct rtw89_dev *rtwdev, bool low_power) 1807 { 1808 enum rtw89_pci_intr_mask_cfg cfg; 1809 1810 WARN(!rtwdev->hci.paused, "HCI isn't paused\n"); 1811 1812 cfg = low_power ? RTW89_PCI_INTR_MASK_LOW_POWER : RTW89_PCI_INTR_MASK_NORMAL; 1813 rtw89_chip_config_intr_mask(rtwdev, cfg); 1814 rtw89_pci_switch_bd_idx_addr(rtwdev, low_power); 1815 } 1816 1817 static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data); 1818 1819 static u32 rtw89_pci_ops_read32_cmac(struct rtw89_dev *rtwdev, u32 addr) 1820 { 1821 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1822 #if defined(__linux__) 1823 u32 val = readl(rtwpci->mmap + addr); 1824 #elif defined(__FreeBSD__) 1825 u32 val; 1826 1827 val = bus_read_4((struct resource *)rtwpci->mmap, addr); 1828 rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "R32 (%#010x) -> %#010x\n", addr, val); 1829 #endif 1830 int count; 1831 1832 for (count = 0; ; count++) { 1833 if (val != RTW89_R32_DEAD) 1834 return val; 1835 if (count >= MAC_REG_POOL_COUNT) { 1836 rtw89_warn(rtwdev, "addr %#x = %#x\n", addr, val); 1837 return RTW89_R32_DEAD; 1838 } 1839 rtw89_pci_ops_write32(rtwdev, R_AX_CK_EN, B_AX_CMAC_ALLCKEN); 1840 #if defined(__linux__) 1841 val = readl(rtwpci->mmap + addr); 1842 #elif defined(__FreeBSD__) 1843 val = bus_read_4((struct resource *)rtwpci->mmap, addr); 1844 rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "R32 (%#010x) -> %#010x\n", addr, val); 1845 #endif 1846 } 1847 1848 return val; 1849 } 1850 1851 static u8 rtw89_pci_ops_read8(struct rtw89_dev *rtwdev, u32 addr) 1852 { 1853 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1854 u32 addr32, val32, shift; 1855 1856 if (!ACCESS_CMAC(addr)) 1857 #if defined(__linux__) 1858 return readb(rtwpci->mmap + addr); 1859 #elif defined(__FreeBSD__) 1860 { 1861 u8 val; 1862 1863 val = bus_read_1((struct resource *)rtwpci->mmap, addr); 1864 rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "R08 (%#010x) -> %#04x\n", addr, val); 1865 return (val); 1866 } 1867 #endif 1868 1869 addr32 = addr & ~0x3; 1870 shift = (addr & 0x3) * 8; 1871 val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32); 1872 return val32 >> shift; 1873 } 1874 1875 static u16 rtw89_pci_ops_read16(struct rtw89_dev *rtwdev, u32 addr) 1876 { 1877 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1878 u32 addr32, val32, shift; 1879 1880 if (!ACCESS_CMAC(addr)) 1881 #if defined(__linux__) 1882 return readw(rtwpci->mmap + addr); 1883 #elif defined(__FreeBSD__) 1884 { 1885 u16 val; 1886 1887 val = bus_read_2((struct resource *)rtwpci->mmap, addr); 1888 rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "R16 (%#010x) -> %#06x\n", addr, val); 1889 return (val); 1890 } 1891 #endif 1892 1893 addr32 = addr & ~0x3; 1894 shift = (addr & 0x3) * 8; 1895 val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32); 1896 return val32 >> shift; 1897 } 1898 1899 static u32 rtw89_pci_ops_read32(struct rtw89_dev *rtwdev, u32 addr) 1900 { 1901 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1902 1903 if (!ACCESS_CMAC(addr)) 1904 #if defined(__linux__) 1905 return readl(rtwpci->mmap + addr); 1906 #elif defined(__FreeBSD__) 1907 { 1908 u32 val; 1909 1910 val = bus_read_4((struct resource *)rtwpci->mmap, addr); 1911 rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "R32 (%#010x) -> %#010x\n", addr, val); 1912 return (val); 1913 } 1914 #endif 1915 1916 return rtw89_pci_ops_read32_cmac(rtwdev, addr); 1917 } 1918 1919 static void rtw89_pci_ops_write8(struct rtw89_dev *rtwdev, u32 addr, u8 data) 1920 { 1921 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1922 1923 #if defined(__linux__) 1924 writeb(data, rtwpci->mmap + addr); 1925 #elif defined(__FreeBSD__) 1926 rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "W08 (%#010x) <- %#04x\n", addr, data); 1927 return (bus_write_1((struct resource *)rtwpci->mmap, addr, data)); 1928 #endif 1929 } 1930 1931 static void rtw89_pci_ops_write16(struct rtw89_dev *rtwdev, u32 addr, u16 data) 1932 { 1933 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1934 1935 #if defined(__linux__) 1936 writew(data, rtwpci->mmap + addr); 1937 #elif defined(__FreeBSD__) 1938 rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "W16 (%#010x) <- %#06x\n", addr, data); 1939 return (bus_write_2((struct resource *)rtwpci->mmap, addr, data)); 1940 #endif 1941 } 1942 1943 static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data) 1944 { 1945 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1946 1947 #if defined(__linux__) 1948 writel(data, rtwpci->mmap + addr); 1949 #elif defined(__FreeBSD__) 1950 rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "W32 (%#010x) <- %#010x\n", addr, data); 1951 return (bus_write_4((struct resource *)rtwpci->mmap, addr, data)); 1952 #endif 1953 } 1954 1955 static void rtw89_pci_ctrl_dma_trx(struct rtw89_dev *rtwdev, bool enable) 1956 { 1957 const struct rtw89_pci_info *info = rtwdev->pci_info; 1958 1959 if (enable) 1960 rtw89_write32_set(rtwdev, info->init_cfg_reg, 1961 info->rxhci_en_bit | info->txhci_en_bit); 1962 else 1963 rtw89_write32_clr(rtwdev, info->init_cfg_reg, 1964 info->rxhci_en_bit | info->txhci_en_bit); 1965 } 1966 1967 static void rtw89_pci_ctrl_dma_io(struct rtw89_dev *rtwdev, bool enable) 1968 { 1969 const struct rtw89_pci_info *info = rtwdev->pci_info; 1970 const struct rtw89_reg_def *reg = &info->dma_io_stop; 1971 1972 if (enable) 1973 rtw89_write32_clr(rtwdev, reg->addr, reg->mask); 1974 else 1975 rtw89_write32_set(rtwdev, reg->addr, reg->mask); 1976 } 1977 1978 void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable) 1979 { 1980 rtw89_pci_ctrl_dma_io(rtwdev, enable); 1981 rtw89_pci_ctrl_dma_trx(rtwdev, enable); 1982 } 1983 1984 static int rtw89_pci_check_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 rw_bit) 1985 { 1986 u16 val; 1987 1988 rtw89_write8(rtwdev, R_AX_MDIO_CFG, addr & 0x1F); 1989 1990 val = rtw89_read16(rtwdev, R_AX_MDIO_CFG); 1991 switch (speed) { 1992 case PCIE_PHY_GEN1: 1993 if (addr < 0x20) 1994 val = u16_replace_bits(val, MDIO_PG0_G1, B_AX_MDIO_PHY_ADDR_MASK); 1995 else 1996 val = u16_replace_bits(val, MDIO_PG1_G1, B_AX_MDIO_PHY_ADDR_MASK); 1997 break; 1998 case PCIE_PHY_GEN2: 1999 if (addr < 0x20) 2000 val = u16_replace_bits(val, MDIO_PG0_G2, B_AX_MDIO_PHY_ADDR_MASK); 2001 else 2002 val = u16_replace_bits(val, MDIO_PG1_G2, B_AX_MDIO_PHY_ADDR_MASK); 2003 break; 2004 default: 2005 rtw89_err(rtwdev, "[ERR]Error Speed %d!\n", speed); 2006 return -EINVAL; 2007 } 2008 rtw89_write16(rtwdev, R_AX_MDIO_CFG, val); 2009 rtw89_write16_set(rtwdev, R_AX_MDIO_CFG, rw_bit); 2010 2011 return read_poll_timeout(rtw89_read16, val, !(val & rw_bit), 10, 2000, 2012 false, rtwdev, R_AX_MDIO_CFG); 2013 } 2014 2015 static int 2016 rtw89_read16_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 *val) 2017 { 2018 int ret; 2019 2020 ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_RFLAG); 2021 if (ret) { 2022 rtw89_err(rtwdev, "[ERR]MDIO R16 0x%X fail ret=%d!\n", addr, ret); 2023 return ret; 2024 } 2025 *val = rtw89_read16(rtwdev, R_AX_MDIO_RDATA); 2026 2027 return 0; 2028 } 2029 2030 static int 2031 rtw89_write16_mdio(struct rtw89_dev *rtwdev, u8 addr, u16 data, u8 speed) 2032 { 2033 int ret; 2034 2035 rtw89_write16(rtwdev, R_AX_MDIO_WDATA, data); 2036 ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_WFLAG); 2037 if (ret) { 2038 rtw89_err(rtwdev, "[ERR]MDIO W16 0x%X = %x fail ret=%d!\n", addr, data, ret); 2039 return ret; 2040 } 2041 2042 return 0; 2043 } 2044 2045 static int 2046 rtw89_write16_mdio_mask(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u16 data, u8 speed) 2047 { 2048 u32 shift; 2049 int ret; 2050 u16 val; 2051 2052 ret = rtw89_read16_mdio(rtwdev, addr, speed, &val); 2053 if (ret) 2054 return ret; 2055 2056 shift = __ffs(mask); 2057 val &= ~mask; 2058 val |= ((data << shift) & mask); 2059 2060 ret = rtw89_write16_mdio(rtwdev, addr, val, speed); 2061 if (ret) 2062 return ret; 2063 2064 return 0; 2065 } 2066 2067 static int rtw89_write16_mdio_set(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed) 2068 { 2069 int ret; 2070 u16 val; 2071 2072 ret = rtw89_read16_mdio(rtwdev, addr, speed, &val); 2073 if (ret) 2074 return ret; 2075 ret = rtw89_write16_mdio(rtwdev, addr, val | mask, speed); 2076 if (ret) 2077 return ret; 2078 2079 return 0; 2080 } 2081 2082 static int rtw89_write16_mdio_clr(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed) 2083 { 2084 int ret; 2085 u16 val; 2086 2087 ret = rtw89_read16_mdio(rtwdev, addr, speed, &val); 2088 if (ret) 2089 return ret; 2090 ret = rtw89_write16_mdio(rtwdev, addr, val & ~mask, speed); 2091 if (ret) 2092 return ret; 2093 2094 return 0; 2095 } 2096 2097 static int rtw89_dbi_write8(struct rtw89_dev *rtwdev, u16 addr, u8 data) 2098 { 2099 u16 addr_2lsb = addr & B_AX_DBI_2LSB; 2100 u16 write_addr; 2101 u8 flag; 2102 int ret; 2103 2104 write_addr = addr & B_AX_DBI_ADDR_MSK; 2105 write_addr |= u16_encode_bits(BIT(addr_2lsb), B_AX_DBI_WREN_MSK); 2106 rtw89_write8(rtwdev, R_AX_DBI_WDATA + addr_2lsb, data); 2107 rtw89_write16(rtwdev, R_AX_DBI_FLAG, write_addr); 2108 rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_WFLAG >> 16); 2109 2110 ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10, 2111 10 * RTW89_PCI_WR_RETRY_CNT, false, 2112 rtwdev, R_AX_DBI_FLAG + 2); 2113 if (ret) 2114 rtw89_err(rtwdev, "failed to write DBI register, addr=0x%X\n", 2115 addr); 2116 2117 return ret; 2118 } 2119 2120 static int rtw89_dbi_read8(struct rtw89_dev *rtwdev, u16 addr, u8 *value) 2121 { 2122 u16 read_addr = addr & B_AX_DBI_ADDR_MSK; 2123 u8 flag; 2124 int ret; 2125 2126 rtw89_write16(rtwdev, R_AX_DBI_FLAG, read_addr); 2127 rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_RFLAG >> 16); 2128 2129 ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10, 2130 10 * RTW89_PCI_WR_RETRY_CNT, false, 2131 rtwdev, R_AX_DBI_FLAG + 2); 2132 if (ret) { 2133 rtw89_err(rtwdev, "failed to read DBI register, addr=0x%X\n", 2134 addr); 2135 return ret; 2136 } 2137 2138 read_addr = R_AX_DBI_RDATA + (addr & 3); 2139 *value = rtw89_read8(rtwdev, read_addr); 2140 2141 return 0; 2142 } 2143 2144 static int rtw89_pci_write_config_byte(struct rtw89_dev *rtwdev, u16 addr, 2145 u8 data) 2146 { 2147 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2148 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2149 struct pci_dev *pdev = rtwpci->pdev; 2150 int ret; 2151 2152 ret = pci_write_config_byte(pdev, addr, data); 2153 if (!ret) 2154 return 0; 2155 2156 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) 2157 ret = rtw89_dbi_write8(rtwdev, addr, data); 2158 2159 return ret; 2160 } 2161 2162 static int rtw89_pci_read_config_byte(struct rtw89_dev *rtwdev, u16 addr, 2163 u8 *value) 2164 { 2165 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2166 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2167 struct pci_dev *pdev = rtwpci->pdev; 2168 int ret; 2169 2170 ret = pci_read_config_byte(pdev, addr, value); 2171 if (!ret) 2172 return 0; 2173 2174 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) 2175 ret = rtw89_dbi_read8(rtwdev, addr, value); 2176 2177 return ret; 2178 } 2179 2180 static int rtw89_pci_config_byte_set(struct rtw89_dev *rtwdev, u16 addr, 2181 u8 bit) 2182 { 2183 u8 value; 2184 int ret; 2185 2186 ret = rtw89_pci_read_config_byte(rtwdev, addr, &value); 2187 if (ret) 2188 return ret; 2189 2190 value |= bit; 2191 ret = rtw89_pci_write_config_byte(rtwdev, addr, value); 2192 2193 return ret; 2194 } 2195 2196 static int rtw89_pci_config_byte_clr(struct rtw89_dev *rtwdev, u16 addr, 2197 u8 bit) 2198 { 2199 u8 value; 2200 int ret; 2201 2202 ret = rtw89_pci_read_config_byte(rtwdev, addr, &value); 2203 if (ret) 2204 return ret; 2205 2206 value &= ~bit; 2207 ret = rtw89_pci_write_config_byte(rtwdev, addr, value); 2208 2209 return ret; 2210 } 2211 2212 static int 2213 __get_target(struct rtw89_dev *rtwdev, u16 *target, enum rtw89_pcie_phy phy_rate) 2214 { 2215 u16 val, tar; 2216 int ret; 2217 2218 /* Enable counter */ 2219 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val); 2220 if (ret) 2221 return ret; 2222 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN, 2223 phy_rate); 2224 if (ret) 2225 return ret; 2226 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val | B_AX_CLK_CALIB_EN, 2227 phy_rate); 2228 if (ret) 2229 return ret; 2230 2231 fsleep(300); 2232 2233 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &tar); 2234 if (ret) 2235 return ret; 2236 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN, 2237 phy_rate); 2238 if (ret) 2239 return ret; 2240 2241 tar = tar & 0x0FFF; 2242 if (tar == 0 || tar == 0x0FFF) { 2243 rtw89_err(rtwdev, "[ERR]Get target failed.\n"); 2244 return -EINVAL; 2245 } 2246 2247 *target = tar; 2248 2249 return 0; 2250 } 2251 2252 static int rtw89_pci_autok_x(struct rtw89_dev *rtwdev) 2253 { 2254 int ret; 2255 2256 if (!rtw89_is_rtl885xb(rtwdev)) 2257 return 0; 2258 2259 ret = rtw89_write16_mdio_mask(rtwdev, RAC_REG_FLD_0, BAC_AUTOK_N_MASK, 2260 PCIE_AUTOK_4, PCIE_PHY_GEN1); 2261 return ret; 2262 } 2263 2264 static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en) 2265 { 2266 enum rtw89_pcie_phy phy_rate; 2267 u16 val16, mgn_set, div_set, tar; 2268 u8 val8, bdr_ori; 2269 bool l1_flag = false; 2270 int ret = 0; 2271 2272 if (!rtw89_is_rtl885xb(rtwdev)) 2273 return 0; 2274 2275 ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_PHY_RATE, &val8); 2276 if (ret) { 2277 rtw89_err(rtwdev, "[ERR]pci config read %X\n", 2278 RTW89_PCIE_PHY_RATE); 2279 return ret; 2280 } 2281 2282 if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x1) { 2283 phy_rate = PCIE_PHY_GEN1; 2284 } else if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x2) { 2285 phy_rate = PCIE_PHY_GEN2; 2286 } else { 2287 rtw89_err(rtwdev, "[ERR]PCIe PHY rate %#x not support\n", val8); 2288 return -EOPNOTSUPP; 2289 } 2290 /* Disable L1BD */ 2291 ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, &bdr_ori); 2292 if (ret) { 2293 rtw89_err(rtwdev, "[ERR]pci config read %X\n", RTW89_PCIE_L1_CTRL); 2294 return ret; 2295 } 2296 2297 if (bdr_ori & RTW89_PCIE_BIT_L1) { 2298 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, 2299 bdr_ori & ~RTW89_PCIE_BIT_L1); 2300 if (ret) { 2301 rtw89_err(rtwdev, "[ERR]pci config write %X\n", 2302 RTW89_PCIE_L1_CTRL); 2303 return ret; 2304 } 2305 l1_flag = true; 2306 } 2307 2308 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16); 2309 if (ret) { 2310 rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1); 2311 goto end; 2312 } 2313 2314 if (val16 & B_AX_CALIB_EN) { 2315 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, 2316 val16 & ~B_AX_CALIB_EN, phy_rate); 2317 if (ret) { 2318 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 2319 goto end; 2320 } 2321 } 2322 2323 if (!autook_en) 2324 goto end; 2325 /* Set div */ 2326 ret = rtw89_write16_mdio_clr(rtwdev, RAC_CTRL_PPR_V1, B_AX_DIV, phy_rate); 2327 if (ret) { 2328 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 2329 goto end; 2330 } 2331 2332 /* Obtain div and margin */ 2333 ret = __get_target(rtwdev, &tar, phy_rate); 2334 if (ret) { 2335 rtw89_err(rtwdev, "[ERR]1st get target fail %d\n", ret); 2336 goto end; 2337 } 2338 2339 mgn_set = tar * INTF_INTGRA_HOSTREF_V1 / INTF_INTGRA_MINREF_V1 - tar; 2340 2341 if (mgn_set >= 128) { 2342 div_set = 0x0003; 2343 mgn_set = 0x000F; 2344 } else if (mgn_set >= 64) { 2345 div_set = 0x0003; 2346 mgn_set >>= 3; 2347 } else if (mgn_set >= 32) { 2348 div_set = 0x0002; 2349 mgn_set >>= 2; 2350 } else if (mgn_set >= 16) { 2351 div_set = 0x0001; 2352 mgn_set >>= 1; 2353 } else if (mgn_set == 0) { 2354 rtw89_err(rtwdev, "[ERR]cal mgn is 0,tar = %d\n", tar); 2355 goto end; 2356 } else { 2357 div_set = 0x0000; 2358 } 2359 2360 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16); 2361 if (ret) { 2362 rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1); 2363 goto end; 2364 } 2365 2366 val16 |= u16_encode_bits(div_set, B_AX_DIV); 2367 2368 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val16, phy_rate); 2369 if (ret) { 2370 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 2371 goto end; 2372 } 2373 2374 ret = __get_target(rtwdev, &tar, phy_rate); 2375 if (ret) { 2376 rtw89_err(rtwdev, "[ERR]2nd get target fail %d\n", ret); 2377 goto end; 2378 } 2379 2380 rtw89_debug(rtwdev, RTW89_DBG_HCI, "[TRACE]target = 0x%X, div = 0x%X, margin = 0x%X\n", 2381 tar, div_set, mgn_set); 2382 ret = rtw89_write16_mdio(rtwdev, RAC_SET_PPR_V1, 2383 (tar & 0x0FFF) | (mgn_set << 12), phy_rate); 2384 if (ret) { 2385 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_SET_PPR_V1); 2386 goto end; 2387 } 2388 2389 /* Enable function */ 2390 ret = rtw89_write16_mdio_set(rtwdev, RAC_CTRL_PPR_V1, B_AX_CALIB_EN, phy_rate); 2391 if (ret) { 2392 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 2393 goto end; 2394 } 2395 2396 /* CLK delay = 0 */ 2397 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL, 2398 PCIE_CLKDLY_HW_0); 2399 2400 end: 2401 /* Set L1BD to ori */ 2402 if (l1_flag) { 2403 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, 2404 bdr_ori); 2405 if (ret) { 2406 rtw89_err(rtwdev, "[ERR]pci config write %X\n", 2407 RTW89_PCIE_L1_CTRL); 2408 return ret; 2409 } 2410 } 2411 2412 return ret; 2413 } 2414 2415 static int rtw89_pci_deglitch_setting(struct rtw89_dev *rtwdev) 2416 { 2417 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2418 int ret; 2419 2420 if (chip_id == RTL8852A) { 2421 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH, 2422 PCIE_PHY_GEN1); 2423 if (ret) 2424 return ret; 2425 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH, 2426 PCIE_PHY_GEN2); 2427 if (ret) 2428 return ret; 2429 } else if (chip_id == RTL8852C) { 2430 rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA24 * 2, 2431 B_AX_DEGLITCH); 2432 rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA24 * 2, 2433 B_AX_DEGLITCH); 2434 } 2435 2436 return 0; 2437 } 2438 2439 static void rtw89_pci_disable_eq_ax(struct rtw89_dev *rtwdev) 2440 { 2441 u16 g1_oobs, g2_oobs; 2442 u32 backup_aspm; 2443 u32 phy_offset; 2444 u16 offset_cal; 2445 u16 oobs_val; 2446 int ret; 2447 u8 gen; 2448 2449 if (rtwdev->chip->chip_id != RTL8852C) 2450 return; 2451 2452 g1_oobs = rtw89_read16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G1 + 2453 RAC_ANA09 * RAC_MULT, BAC_OOBS_SEL); 2454 g2_oobs = rtw89_read16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G2 + 2455 RAC_ANA09 * RAC_MULT, BAC_OOBS_SEL); 2456 if (g1_oobs && g2_oobs) 2457 return; 2458 2459 backup_aspm = rtw89_read32(rtwdev, R_AX_PCIE_MIX_CFG_V1); 2460 rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK); 2461 2462 ret = rtw89_pci_get_phy_offset_by_link_speed(rtwdev, &phy_offset); 2463 if (ret) 2464 goto out; 2465 2466 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0D * RAC_MULT, BAC_RX_TEST_EN); 2467 rtw89_write16(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT, ADDR_SEL_PINOUT_DIS_VAL); 2468 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT, B_PCIE_BIT_RD_SEL); 2469 2470 oobs_val = rtw89_read16_mask(rtwdev, phy_offset + RAC_ANA1F * RAC_MULT, 2471 OOBS_LEVEL_MASK); 2472 2473 rtw89_write16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA03 * RAC_MULT, 2474 OOBS_SEN_MASK, oobs_val); 2475 rtw89_write16_set(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA09 * RAC_MULT, 2476 BAC_OOBS_SEL); 2477 2478 rtw89_write16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA03 * RAC_MULT, 2479 OOBS_SEN_MASK, oobs_val); 2480 rtw89_write16_set(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA09 * RAC_MULT, 2481 BAC_OOBS_SEL); 2482 2483 /* offset K */ 2484 for (gen = 1; gen <= 2; gen++) { 2485 phy_offset = gen == 1 ? R_RAC_DIRECT_OFFSET_G1 : 2486 R_RAC_DIRECT_OFFSET_G2; 2487 2488 rtw89_write16_clr(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT, 2489 B_PCIE_BIT_RD_SEL); 2490 } 2491 2492 offset_cal = rtw89_read16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G1 + 2493 RAC_ANA1F * RAC_MULT, OFFSET_CAL_MASK); 2494 2495 for (gen = 1; gen <= 2; gen++) { 2496 phy_offset = gen == 1 ? R_RAC_DIRECT_OFFSET_G1 : 2497 R_RAC_DIRECT_OFFSET_G2; 2498 2499 rtw89_write16_mask(rtwdev, phy_offset + RAC_ANA0B * RAC_MULT, 2500 MANUAL_LVL_MASK, offset_cal); 2501 rtw89_write16_clr(rtwdev, phy_offset + RAC_ANA0D * RAC_MULT, 2502 OFFSET_CAL_MODE); 2503 } 2504 2505 out: 2506 rtw89_write32(rtwdev, R_AX_PCIE_MIX_CFG_V1, backup_aspm); 2507 } 2508 2509 static void rtw89_pci_ber(struct rtw89_dev *rtwdev) 2510 { 2511 u32 phy_offset; 2512 2513 if (!test_bit(RTW89_QUIRK_PCI_BER, rtwdev->quirks)) 2514 return; 2515 2516 phy_offset = R_RAC_DIRECT_OFFSET_G1; 2517 rtw89_write16(rtwdev, phy_offset + RAC_ANA1E * RAC_MULT, RAC_ANA1E_G1_VAL); 2518 rtw89_write16(rtwdev, phy_offset + RAC_ANA2E * RAC_MULT, RAC_ANA2E_VAL); 2519 2520 phy_offset = R_RAC_DIRECT_OFFSET_G2; 2521 rtw89_write16(rtwdev, phy_offset + RAC_ANA1E * RAC_MULT, RAC_ANA1E_G2_VAL); 2522 rtw89_write16(rtwdev, phy_offset + RAC_ANA2E * RAC_MULT, RAC_ANA2E_VAL); 2523 } 2524 2525 static void rtw89_pci_rxdma_prefth(struct rtw89_dev *rtwdev) 2526 { 2527 if (rtwdev->chip->chip_id != RTL8852A) 2528 return; 2529 2530 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_DIS_RXDMA_PRE); 2531 } 2532 2533 static void rtw89_pci_l1off_pwroff(struct rtw89_dev *rtwdev) 2534 { 2535 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2536 2537 if (chip_id != RTL8852A && !rtw89_is_rtl885xb(rtwdev)) 2538 return; 2539 2540 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL, B_AX_L1OFF_PWR_OFF_EN); 2541 } 2542 2543 static u32 rtw89_pci_l2_rxen_lat(struct rtw89_dev *rtwdev) 2544 { 2545 int ret; 2546 2547 if (rtwdev->chip->chip_id != RTL8852A) 2548 return 0; 2549 2550 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN, 2551 PCIE_PHY_GEN1); 2552 if (ret) 2553 return ret; 2554 2555 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN, 2556 PCIE_PHY_GEN2); 2557 if (ret) 2558 return ret; 2559 2560 return 0; 2561 } 2562 2563 static void rtw89_pci_aphy_pwrcut(struct rtw89_dev *rtwdev) 2564 { 2565 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2566 2567 if (chip_id != RTL8852A && !rtw89_is_rtl885xb(rtwdev)) 2568 return; 2569 2570 rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_PSUS_OFF_CAPC_EN); 2571 } 2572 2573 static void rtw89_pci_hci_ldo(struct rtw89_dev *rtwdev) 2574 { 2575 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2576 2577 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 2578 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, 2579 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 2580 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, 2581 B_AX_PCIE_DIS_WLSUS_AFT_PDN); 2582 } else if (rtwdev->chip->chip_id == RTL8852C) { 2583 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, 2584 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 2585 } 2586 } 2587 2588 static int rtw89_pci_dphy_delay(struct rtw89_dev *rtwdev) 2589 { 2590 if (!rtw89_is_rtl885xb(rtwdev)) 2591 return 0; 2592 2593 return rtw89_write16_mdio_mask(rtwdev, RAC_REG_REV2, BAC_CMU_EN_DLY_MASK, 2594 PCIE_DPHY_DLY_25US, PCIE_PHY_GEN1); 2595 } 2596 2597 static void rtw89_pci_power_wake_ax(struct rtw89_dev *rtwdev, bool pwr_up) 2598 { 2599 if (pwr_up) 2600 rtw89_write32_set(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL); 2601 else 2602 rtw89_write32_clr(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL); 2603 } 2604 2605 static void rtw89_pci_autoload_hang(struct rtw89_dev *rtwdev) 2606 { 2607 if (rtwdev->chip->chip_id != RTL8852C) 2608 return; 2609 2610 rtw89_write32_set(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3); 2611 rtw89_write32_clr(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3); 2612 } 2613 2614 static void rtw89_pci_l12_vmain(struct rtw89_dev *rtwdev) 2615 { 2616 if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV)) 2617 return; 2618 2619 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_FORCE_PWR_NGAT); 2620 } 2621 2622 static void rtw89_pci_gen2_force_ib(struct rtw89_dev *rtwdev) 2623 { 2624 if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV)) 2625 return; 2626 2627 rtw89_write32_set(rtwdev, R_AX_PMC_DBG_CTRL2, 2628 B_AX_SYSON_DIS_PMCR_AX_WRMSK); 2629 rtw89_write32_set(rtwdev, R_AX_HCI_BG_CTRL, B_AX_BG_CLR_ASYNC_M3); 2630 rtw89_write32_clr(rtwdev, R_AX_PMC_DBG_CTRL2, 2631 B_AX_SYSON_DIS_PMCR_AX_WRMSK); 2632 } 2633 2634 static void rtw89_pci_l1_ent_lat(struct rtw89_dev *rtwdev) 2635 { 2636 if (rtwdev->chip->chip_id != RTL8852C) 2637 return; 2638 2639 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_SEL_REQ_ENTR_L1); 2640 } 2641 2642 static void rtw89_pci_wd_exit_l1(struct rtw89_dev *rtwdev) 2643 { 2644 if (rtwdev->chip->chip_id != RTL8852C) 2645 return; 2646 2647 rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_DMAC0_EXIT_L1_EN); 2648 } 2649 2650 static void rtw89_pci_set_sic(struct rtw89_dev *rtwdev) 2651 { 2652 if (rtwdev->chip->chip_id == RTL8852C) 2653 return; 2654 2655 rtw89_write32_clr(rtwdev, R_AX_PCIE_EXP_CTRL, 2656 B_AX_SIC_EN_FORCE_CLKREQ); 2657 } 2658 2659 static void rtw89_pci_set_lbc(struct rtw89_dev *rtwdev) 2660 { 2661 const struct rtw89_pci_info *info = rtwdev->pci_info; 2662 u32 lbc; 2663 2664 if (rtwdev->chip->chip_id == RTL8852C) 2665 return; 2666 2667 lbc = rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG); 2668 if (info->lbc_en == MAC_AX_PCIE_ENABLE) { 2669 lbc = u32_replace_bits(lbc, info->lbc_tmr, B_AX_LBC_TIMER); 2670 lbc |= B_AX_LBC_FLAG | B_AX_LBC_EN; 2671 rtw89_write32(rtwdev, R_AX_LBC_WATCHDOG, lbc); 2672 } else { 2673 lbc &= ~B_AX_LBC_EN; 2674 } 2675 rtw89_write32_set(rtwdev, R_AX_LBC_WATCHDOG, lbc); 2676 } 2677 2678 static void rtw89_pci_set_io_rcy(struct rtw89_dev *rtwdev) 2679 { 2680 const struct rtw89_pci_info *info = rtwdev->pci_info; 2681 u32 val32; 2682 2683 if (rtwdev->chip->chip_id != RTL8852C) 2684 return; 2685 2686 if (info->io_rcy_en == MAC_AX_PCIE_ENABLE) { 2687 val32 = FIELD_PREP(B_AX_PCIE_WDT_TIMER_M1_MASK, 2688 info->io_rcy_tmr); 2689 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M1, val32); 2690 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M2, val32); 2691 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_E0, val32); 2692 2693 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1); 2694 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2); 2695 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0); 2696 } else { 2697 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1); 2698 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2); 2699 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0); 2700 } 2701 2702 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_S1, B_AX_PCIE_IO_RCY_WDT_MODE_S1); 2703 } 2704 2705 static void rtw89_pci_set_dbg(struct rtw89_dev *rtwdev) 2706 { 2707 if (rtwdev->chip->chip_id == RTL8852C) 2708 return; 2709 2710 rtw89_write32_set(rtwdev, R_AX_PCIE_DBG_CTRL, 2711 B_AX_ASFF_FULL_NO_STK | B_AX_EN_STUCK_DBG); 2712 2713 rtw89_write32_mask(rtwdev, R_AX_PCIE_EXP_CTRL, 2714 B_AX_EN_STUCK_DBG | B_AX_ASFF_FULL_NO_STK, 2715 B_AX_EN_STUCK_DBG); 2716 2717 if (rtwdev->chip->chip_id == RTL8852A) 2718 rtw89_write32_set(rtwdev, R_AX_PCIE_EXP_CTRL, 2719 B_AX_EN_CHKDSC_NO_RX_STUCK); 2720 } 2721 2722 static void rtw89_pci_set_keep_reg(struct rtw89_dev *rtwdev) 2723 { 2724 if (rtwdev->chip->chip_id == RTL8852C) 2725 return; 2726 2727 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, 2728 B_AX_PCIE_TXRST_KEEP_REG | B_AX_PCIE_RXRST_KEEP_REG); 2729 } 2730 2731 static void rtw89_pci_clr_idx_all_ax(struct rtw89_dev *rtwdev) 2732 { 2733 const struct rtw89_pci_info *info = rtwdev->pci_info; 2734 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2735 u32 val = B_AX_CLR_ACH0_IDX | B_AX_CLR_ACH1_IDX | B_AX_CLR_ACH2_IDX | 2736 B_AX_CLR_ACH3_IDX | B_AX_CLR_CH8_IDX | B_AX_CLR_CH9_IDX | 2737 B_AX_CLR_CH12_IDX; 2738 u32 rxbd_rwptr_clr = info->rxbd_rwptr_clr_reg; 2739 u32 txbd_rwptr_clr2 = info->txbd_rwptr_clr2_reg; 2740 2741 if (chip_id == RTL8852A || chip_id == RTL8852C) 2742 val |= B_AX_CLR_ACH4_IDX | B_AX_CLR_ACH5_IDX | 2743 B_AX_CLR_ACH6_IDX | B_AX_CLR_ACH7_IDX; 2744 /* clear DMA indexes */ 2745 rtw89_write32_set(rtwdev, R_AX_TXBD_RWPTR_CLR1, val); 2746 if (chip_id == RTL8852A || chip_id == RTL8852C) 2747 rtw89_write32_set(rtwdev, txbd_rwptr_clr2, 2748 B_AX_CLR_CH10_IDX | B_AX_CLR_CH11_IDX); 2749 rtw89_write32_set(rtwdev, rxbd_rwptr_clr, 2750 B_AX_CLR_RXQ_IDX | B_AX_CLR_RPQ_IDX); 2751 } 2752 2753 static int rtw89_pci_poll_txdma_ch_idle_ax(struct rtw89_dev *rtwdev) 2754 { 2755 const struct rtw89_pci_info *info = rtwdev->pci_info; 2756 u32 dma_busy1 = info->dma_busy1.addr; 2757 u32 dma_busy2 = info->dma_busy2_reg; 2758 u32 check, dma_busy; 2759 int ret; 2760 2761 check = info->dma_busy1.mask; 2762 2763 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, 2764 10, 100, false, rtwdev, dma_busy1); 2765 if (ret) 2766 return ret; 2767 2768 if (!dma_busy2) 2769 return 0; 2770 2771 check = B_AX_CH10_BUSY | B_AX_CH11_BUSY; 2772 2773 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, 2774 10, 100, false, rtwdev, dma_busy2); 2775 if (ret) 2776 return ret; 2777 2778 return 0; 2779 } 2780 2781 static int rtw89_pci_poll_rxdma_ch_idle_ax(struct rtw89_dev *rtwdev) 2782 { 2783 const struct rtw89_pci_info *info = rtwdev->pci_info; 2784 u32 dma_busy3 = info->dma_busy3_reg; 2785 u32 check, dma_busy; 2786 int ret; 2787 2788 check = B_AX_RXQ_BUSY | B_AX_RPQ_BUSY; 2789 2790 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, 2791 10, 100, false, rtwdev, dma_busy3); 2792 if (ret) 2793 return ret; 2794 2795 return 0; 2796 } 2797 2798 static int rtw89_pci_poll_dma_all_idle(struct rtw89_dev *rtwdev) 2799 { 2800 u32 ret; 2801 2802 ret = rtw89_pci_poll_txdma_ch_idle_ax(rtwdev); 2803 if (ret) { 2804 rtw89_err(rtwdev, "txdma ch busy\n"); 2805 return ret; 2806 } 2807 2808 ret = rtw89_pci_poll_rxdma_ch_idle_ax(rtwdev); 2809 if (ret) { 2810 rtw89_err(rtwdev, "rxdma ch busy\n"); 2811 return ret; 2812 } 2813 2814 return 0; 2815 } 2816 2817 static int rtw89_pci_mode_op(struct rtw89_dev *rtwdev) 2818 { 2819 const struct rtw89_pci_info *info = rtwdev->pci_info; 2820 enum mac_ax_bd_trunc_mode txbd_trunc_mode = info->txbd_trunc_mode; 2821 enum mac_ax_bd_trunc_mode rxbd_trunc_mode = info->rxbd_trunc_mode; 2822 enum mac_ax_rxbd_mode rxbd_mode = info->rxbd_mode; 2823 enum mac_ax_tag_mode tag_mode = info->tag_mode; 2824 enum mac_ax_wd_dma_intvl wd_dma_idle_intvl = info->wd_dma_idle_intvl; 2825 enum mac_ax_wd_dma_intvl wd_dma_act_intvl = info->wd_dma_act_intvl; 2826 enum mac_ax_tx_burst tx_burst = info->tx_burst; 2827 enum mac_ax_rx_burst rx_burst = info->rx_burst; 2828 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2829 u8 cv = rtwdev->hal.cv; 2830 u32 val32; 2831 2832 if (txbd_trunc_mode == MAC_AX_BD_TRUNC) { 2833 if (chip_id == RTL8852A && cv == CHIP_CBV) 2834 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE); 2835 } else if (txbd_trunc_mode == MAC_AX_BD_NORM) { 2836 if (chip_id == RTL8852A || chip_id == RTL8852B) 2837 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE); 2838 } 2839 2840 if (rxbd_trunc_mode == MAC_AX_BD_TRUNC) { 2841 if (chip_id == RTL8852A && cv == CHIP_CBV) 2842 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE); 2843 } else if (rxbd_trunc_mode == MAC_AX_BD_NORM) { 2844 if (chip_id == RTL8852A || chip_id == RTL8852B) 2845 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE); 2846 } 2847 2848 if (rxbd_mode == MAC_AX_RXBD_PKT) { 2849 rtw89_write32_clr(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit); 2850 } else if (rxbd_mode == MAC_AX_RXBD_SEP) { 2851 rtw89_write32_set(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit); 2852 2853 if (chip_id == RTL8852A || chip_id == RTL8852B) 2854 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, 2855 B_AX_PCIE_RX_APPLEN_MASK, 0); 2856 } 2857 2858 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 2859 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_TXDMA_MASK, tx_burst); 2860 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_RXDMA_MASK, rx_burst); 2861 } else if (chip_id == RTL8852C) { 2862 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_TXDMA_MASK, tx_burst); 2863 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_RXDMA_MASK, rx_burst); 2864 } 2865 2866 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 2867 if (tag_mode == MAC_AX_TAG_SGL) { 2868 val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) & 2869 ~B_AX_LATENCY_CONTROL; 2870 rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 2871 } else if (tag_mode == MAC_AX_TAG_MULTI) { 2872 val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) | 2873 B_AX_LATENCY_CONTROL; 2874 rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 2875 } 2876 } 2877 2878 rtw89_write32_mask(rtwdev, info->exp_ctrl_reg, info->max_tag_num_mask, 2879 info->multi_tag_num); 2880 2881 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 2882 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_IDLE, 2883 wd_dma_idle_intvl); 2884 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_ACT, 2885 wd_dma_act_intvl); 2886 } else if (chip_id == RTL8852C) { 2887 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_IDLE_V1_MASK, 2888 wd_dma_idle_intvl); 2889 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_ACT_V1_MASK, 2890 wd_dma_act_intvl); 2891 } 2892 2893 if (txbd_trunc_mode == MAC_AX_BD_TRUNC) { 2894 rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, 2895 B_AX_HOST_ADDR_INFO_8B_SEL); 2896 rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); 2897 } else if (txbd_trunc_mode == MAC_AX_BD_NORM) { 2898 rtw89_write32_clr(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, 2899 B_AX_HOST_ADDR_INFO_8B_SEL); 2900 rtw89_write32_set(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); 2901 } 2902 2903 return 0; 2904 } 2905 2906 static int rtw89_pci_ops_deinit(struct rtw89_dev *rtwdev) 2907 { 2908 const struct rtw89_pci_info *info = rtwdev->pci_info; 2909 2910 rtw89_pci_power_wake(rtwdev, false); 2911 2912 if (rtwdev->chip->chip_id == RTL8852A) { 2913 /* ltr sw trigger */ 2914 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_IDLE); 2915 } 2916 info->ltr_set(rtwdev, false); 2917 rtw89_pci_ctrl_dma_all(rtwdev, false); 2918 rtw89_pci_clr_idx_all(rtwdev); 2919 2920 return 0; 2921 } 2922 2923 static int rtw89_pci_ops_mac_pre_init_ax(struct rtw89_dev *rtwdev) 2924 { 2925 const struct rtw89_pci_info *info = rtwdev->pci_info; 2926 int ret; 2927 2928 rtw89_pci_ber(rtwdev); 2929 rtw89_pci_rxdma_prefth(rtwdev); 2930 rtw89_pci_l1off_pwroff(rtwdev); 2931 rtw89_pci_deglitch_setting(rtwdev); 2932 ret = rtw89_pci_l2_rxen_lat(rtwdev); 2933 if (ret) { 2934 rtw89_err(rtwdev, "[ERR] pcie l2 rxen lat %d\n", ret); 2935 return ret; 2936 } 2937 2938 rtw89_pci_aphy_pwrcut(rtwdev); 2939 rtw89_pci_hci_ldo(rtwdev); 2940 rtw89_pci_dphy_delay(rtwdev); 2941 2942 ret = rtw89_pci_autok_x(rtwdev); 2943 if (ret) { 2944 rtw89_err(rtwdev, "[ERR] pcie autok_x fail %d\n", ret); 2945 return ret; 2946 } 2947 2948 ret = rtw89_pci_auto_refclk_cal(rtwdev, false); 2949 if (ret) { 2950 rtw89_err(rtwdev, "[ERR] pcie autok fail %d\n", ret); 2951 return ret; 2952 } 2953 2954 rtw89_pci_power_wake_ax(rtwdev, true); 2955 rtw89_pci_autoload_hang(rtwdev); 2956 rtw89_pci_l12_vmain(rtwdev); 2957 rtw89_pci_gen2_force_ib(rtwdev); 2958 rtw89_pci_l1_ent_lat(rtwdev); 2959 rtw89_pci_wd_exit_l1(rtwdev); 2960 rtw89_pci_set_sic(rtwdev); 2961 rtw89_pci_set_lbc(rtwdev); 2962 rtw89_pci_set_io_rcy(rtwdev); 2963 rtw89_pci_set_dbg(rtwdev); 2964 rtw89_pci_set_keep_reg(rtwdev); 2965 2966 rtw89_write32_set(rtwdev, info->dma_stop1.addr, B_AX_STOP_WPDMA); 2967 2968 /* stop DMA activities */ 2969 rtw89_pci_ctrl_dma_all(rtwdev, false); 2970 2971 ret = rtw89_pci_poll_dma_all_idle(rtwdev); 2972 if (ret) { 2973 rtw89_err(rtwdev, "[ERR] poll pcie dma all idle\n"); 2974 return ret; 2975 } 2976 2977 rtw89_pci_clr_idx_all(rtwdev); 2978 rtw89_pci_mode_op(rtwdev); 2979 2980 /* fill TRX BD indexes */ 2981 rtw89_pci_ops_reset(rtwdev); 2982 2983 ret = rtw89_pci_rst_bdram_ax(rtwdev); 2984 if (ret) { 2985 rtw89_warn(rtwdev, "reset bdram busy\n"); 2986 return ret; 2987 } 2988 2989 /* disable all channels except to FW CMD channel to download firmware */ 2990 rtw89_pci_ctrl_txdma_ch_ax(rtwdev, false); 2991 rtw89_pci_ctrl_txdma_fw_ch_ax(rtwdev, true); 2992 2993 /* start DMA activities */ 2994 rtw89_pci_ctrl_dma_all(rtwdev, true); 2995 2996 return 0; 2997 } 2998 2999 static int rtw89_pci_ops_mac_pre_deinit_ax(struct rtw89_dev *rtwdev) 3000 { 3001 rtw89_pci_power_wake_ax(rtwdev, false); 3002 3003 return 0; 3004 } 3005 3006 int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev, bool en) 3007 { 3008 u32 val; 3009 3010 if (!en) 3011 return 0; 3012 3013 val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0); 3014 if (rtw89_pci_ltr_is_err_reg_val(val)) 3015 return -EINVAL; 3016 val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1); 3017 if (rtw89_pci_ltr_is_err_reg_val(val)) 3018 return -EINVAL; 3019 val = rtw89_read32(rtwdev, R_AX_LTR_IDLE_LATENCY); 3020 if (rtw89_pci_ltr_is_err_reg_val(val)) 3021 return -EINVAL; 3022 val = rtw89_read32(rtwdev, R_AX_LTR_ACTIVE_LATENCY); 3023 if (rtw89_pci_ltr_is_err_reg_val(val)) 3024 return -EINVAL; 3025 3026 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_HW_EN | B_AX_LTR_EN | 3027 B_AX_LTR_WD_NOEMP_CHK); 3028 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_SPACE_IDX_MASK, 3029 PCI_LTR_SPC_500US); 3030 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK, 3031 PCI_LTR_IDLE_TIMER_3_2MS); 3032 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28); 3033 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28); 3034 rtw89_write32(rtwdev, R_AX_LTR_IDLE_LATENCY, 0x90039003); 3035 rtw89_write32(rtwdev, R_AX_LTR_ACTIVE_LATENCY, 0x880b880b); 3036 3037 return 0; 3038 } 3039 EXPORT_SYMBOL(rtw89_pci_ltr_set); 3040 3041 int rtw89_pci_ltr_set_v1(struct rtw89_dev *rtwdev, bool en) 3042 { 3043 u32 dec_ctrl; 3044 u32 val32; 3045 3046 val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0); 3047 if (rtw89_pci_ltr_is_err_reg_val(val32)) 3048 return -EINVAL; 3049 val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1); 3050 if (rtw89_pci_ltr_is_err_reg_val(val32)) 3051 return -EINVAL; 3052 dec_ctrl = rtw89_read32(rtwdev, R_AX_LTR_DEC_CTRL); 3053 if (rtw89_pci_ltr_is_err_reg_val(dec_ctrl)) 3054 return -EINVAL; 3055 val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX3); 3056 if (rtw89_pci_ltr_is_err_reg_val(val32)) 3057 return -EINVAL; 3058 val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX0); 3059 if (rtw89_pci_ltr_is_err_reg_val(val32)) 3060 return -EINVAL; 3061 3062 if (!en) { 3063 dec_ctrl &= ~(LTR_EN_BITS | B_AX_LTR_IDX_DRV_MASK | B_AX_LTR_HW_DEC_EN); 3064 dec_ctrl |= FIELD_PREP(B_AX_LTR_IDX_DRV_MASK, PCIE_LTR_IDX_IDLE) | 3065 B_AX_LTR_REQ_DRV; 3066 } else { 3067 dec_ctrl |= B_AX_LTR_HW_DEC_EN; 3068 } 3069 3070 dec_ctrl &= ~B_AX_LTR_SPACE_IDX_V1_MASK; 3071 dec_ctrl |= FIELD_PREP(B_AX_LTR_SPACE_IDX_V1_MASK, PCI_LTR_SPC_500US); 3072 3073 if (en) 3074 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, 3075 B_AX_LTR_WD_NOEMP_CHK_V1 | B_AX_LTR_HW_EN); 3076 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK, 3077 PCI_LTR_IDLE_TIMER_3_2MS); 3078 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28); 3079 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28); 3080 rtw89_write32(rtwdev, R_AX_LTR_DEC_CTRL, dec_ctrl); 3081 rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX3, 0x90039003); 3082 rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX0, 0x880b880b); 3083 3084 return 0; 3085 } 3086 EXPORT_SYMBOL(rtw89_pci_ltr_set_v1); 3087 3088 static int rtw89_pci_ops_mac_post_init_ax(struct rtw89_dev *rtwdev) 3089 { 3090 const struct rtw89_pci_info *info = rtwdev->pci_info; 3091 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 3092 int ret; 3093 3094 ret = info->ltr_set(rtwdev, true); 3095 if (ret) { 3096 rtw89_err(rtwdev, "pci ltr set fail\n"); 3097 return ret; 3098 } 3099 if (chip_id == RTL8852A) { 3100 /* ltr sw trigger */ 3101 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_ACT); 3102 } 3103 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 3104 /* ADDR info 8-byte mode */ 3105 rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, 3106 B_AX_HOST_ADDR_INFO_8B_SEL); 3107 rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); 3108 } 3109 3110 /* enable DMA for all queues */ 3111 rtw89_pci_ctrl_txdma_ch_ax(rtwdev, true); 3112 3113 /* Release PCI IO */ 3114 rtw89_write32_clr(rtwdev, info->dma_stop1.addr, 3115 B_AX_STOP_WPDMA | B_AX_STOP_PCIEIO); 3116 3117 return 0; 3118 } 3119 3120 static int rtw89_pci_claim_device(struct rtw89_dev *rtwdev, 3121 struct pci_dev *pdev) 3122 { 3123 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3124 int ret; 3125 3126 ret = pci_enable_device(pdev); 3127 if (ret) { 3128 rtw89_err(rtwdev, "failed to enable pci device\n"); 3129 return ret; 3130 } 3131 3132 pci_set_master(pdev); 3133 pci_set_drvdata(pdev, rtwdev->hw); 3134 3135 rtwpci->pdev = pdev; 3136 3137 return 0; 3138 } 3139 3140 static void rtw89_pci_declaim_device(struct rtw89_dev *rtwdev, 3141 struct pci_dev *pdev) 3142 { 3143 pci_disable_device(pdev); 3144 } 3145 3146 static bool rtw89_pci_chip_is_manual_dac(struct rtw89_dev *rtwdev) 3147 { 3148 const struct rtw89_chip_info *chip = rtwdev->chip; 3149 3150 switch (chip->chip_id) { 3151 case RTL8852A: 3152 case RTL8852B: 3153 case RTL8851B: 3154 case RTL8852BT: 3155 return true; 3156 default: 3157 return false; 3158 } 3159 } 3160 3161 static bool rtw89_pci_is_dac_compatible_bridge(struct rtw89_dev *rtwdev) 3162 { 3163 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3164 struct pci_dev *bridge = pci_upstream_bridge(rtwpci->pdev); 3165 3166 if (!rtw89_pci_chip_is_manual_dac(rtwdev)) 3167 return true; 3168 3169 if (!bridge) 3170 return false; 3171 3172 switch (bridge->vendor) { 3173 case PCI_VENDOR_ID_INTEL: 3174 return true; 3175 case PCI_VENDOR_ID_ASMEDIA: 3176 if (bridge->device == 0x2806) 3177 return true; 3178 break; 3179 } 3180 3181 return false; 3182 } 3183 3184 static int rtw89_pci_cfg_dac(struct rtw89_dev *rtwdev, bool force) 3185 { 3186 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3187 struct pci_dev *pdev = rtwpci->pdev; 3188 int ret; 3189 u8 val; 3190 3191 if (!rtwpci->enable_dac && !force) 3192 return 0; 3193 3194 if (!rtw89_pci_chip_is_manual_dac(rtwdev)) 3195 return 0; 3196 3197 /* Configure DAC only via PCI config API, not DBI interfaces */ 3198 ret = pci_read_config_byte(pdev, RTW89_PCIE_L1_CTRL, &val); 3199 if (ret) 3200 return ret; 3201 3202 val |= RTW89_PCIE_BIT_EN_64BITS; 3203 return pci_write_config_byte(pdev, RTW89_PCIE_L1_CTRL, val); 3204 } 3205 3206 static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev, 3207 struct pci_dev *pdev) 3208 { 3209 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3210 unsigned long resource_len; 3211 u8 bar_id = 2; 3212 int ret; 3213 3214 ret = pci_request_regions(pdev, KBUILD_MODNAME); 3215 if (ret) { 3216 rtw89_err(rtwdev, "failed to request pci regions\n"); 3217 goto err; 3218 } 3219 3220 if (!rtw89_pci_is_dac_compatible_bridge(rtwdev)) 3221 goto try_dac_done; 3222 3223 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36)); 3224 if (!ret) { 3225 ret = rtw89_pci_cfg_dac(rtwdev, true); 3226 if (!ret) { 3227 rtwpci->enable_dac = true; 3228 goto try_dac_done; 3229 } 3230 3231 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 3232 if (ret) { 3233 rtw89_err(rtwdev, 3234 "failed to set dma and consistent mask to 32/36-bit\n"); 3235 goto err_release_regions; 3236 } 3237 } 3238 try_dac_done: 3239 3240 #if defined(__FreeBSD__) 3241 linuxkpi_pcim_want_to_use_bus_functions(pdev); 3242 #endif 3243 resource_len = pci_resource_len(pdev, bar_id); 3244 rtwpci->mmap = pci_iomap(pdev, bar_id, resource_len); 3245 if (!rtwpci->mmap) { 3246 rtw89_err(rtwdev, "failed to map pci io\n"); 3247 ret = -EIO; 3248 goto err_release_regions; 3249 } 3250 3251 return 0; 3252 3253 err_release_regions: 3254 pci_release_regions(pdev); 3255 err: 3256 return ret; 3257 } 3258 3259 static void rtw89_pci_clear_mapping(struct rtw89_dev *rtwdev, 3260 struct pci_dev *pdev) 3261 { 3262 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3263 3264 if (rtwpci->mmap) { 3265 pci_iounmap(pdev, rtwpci->mmap); 3266 pci_release_regions(pdev); 3267 } 3268 } 3269 3270 static void rtw89_pci_free_tx_wd_ring(struct rtw89_dev *rtwdev, 3271 struct pci_dev *pdev, 3272 struct rtw89_pci_tx_ring *tx_ring) 3273 { 3274 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 3275 u8 *head = wd_ring->head; 3276 dma_addr_t dma = wd_ring->dma; 3277 u32 page_size = wd_ring->page_size; 3278 u32 page_num = wd_ring->page_num; 3279 u32 ring_sz = page_size * page_num; 3280 3281 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 3282 wd_ring->head = NULL; 3283 } 3284 3285 static void rtw89_pci_free_tx_ring(struct rtw89_dev *rtwdev, 3286 struct pci_dev *pdev, 3287 struct rtw89_pci_tx_ring *tx_ring) 3288 { 3289 int ring_sz; 3290 u8 *head; 3291 dma_addr_t dma; 3292 3293 head = tx_ring->bd_ring.head; 3294 dma = tx_ring->bd_ring.dma; 3295 ring_sz = tx_ring->bd_ring.desc_size * tx_ring->bd_ring.len; 3296 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 3297 3298 tx_ring->bd_ring.head = NULL; 3299 } 3300 3301 static void rtw89_pci_free_tx_rings(struct rtw89_dev *rtwdev, 3302 struct pci_dev *pdev) 3303 { 3304 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3305 const struct rtw89_pci_info *info = rtwdev->pci_info; 3306 struct rtw89_pci_tx_ring *tx_ring; 3307 int i; 3308 3309 for (i = 0; i < RTW89_TXCH_NUM; i++) { 3310 if (info->tx_dma_ch_mask & BIT(i)) 3311 continue; 3312 tx_ring = &rtwpci->tx_rings[i]; 3313 rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring); 3314 rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring); 3315 } 3316 } 3317 3318 static void rtw89_pci_free_rx_ring(struct rtw89_dev *rtwdev, 3319 struct pci_dev *pdev, 3320 struct rtw89_pci_rx_ring *rx_ring) 3321 { 3322 struct rtw89_pci_rx_info *rx_info; 3323 struct sk_buff *skb; 3324 dma_addr_t dma; 3325 u32 buf_sz; 3326 u8 *head; 3327 int ring_sz = rx_ring->bd_ring.desc_size * rx_ring->bd_ring.len; 3328 int i; 3329 3330 buf_sz = rx_ring->buf_sz; 3331 for (i = 0; i < rx_ring->bd_ring.len; i++) { 3332 skb = rx_ring->buf[i]; 3333 if (!skb) 3334 continue; 3335 3336 rx_info = RTW89_PCI_RX_SKB_CB(skb); 3337 dma = rx_info->dma; 3338 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE); 3339 dev_kfree_skb(skb); 3340 rx_ring->buf[i] = NULL; 3341 } 3342 3343 head = rx_ring->bd_ring.head; 3344 dma = rx_ring->bd_ring.dma; 3345 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 3346 3347 rx_ring->bd_ring.head = NULL; 3348 } 3349 3350 static void rtw89_pci_free_rx_rings(struct rtw89_dev *rtwdev, 3351 struct pci_dev *pdev) 3352 { 3353 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3354 struct rtw89_pci_rx_ring *rx_ring; 3355 int i; 3356 3357 for (i = 0; i < RTW89_RXCH_NUM; i++) { 3358 rx_ring = &rtwpci->rx_rings[i]; 3359 rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring); 3360 } 3361 } 3362 3363 static void rtw89_pci_free_trx_rings(struct rtw89_dev *rtwdev, 3364 struct pci_dev *pdev) 3365 { 3366 rtw89_pci_free_rx_rings(rtwdev, pdev); 3367 rtw89_pci_free_tx_rings(rtwdev, pdev); 3368 } 3369 3370 static int rtw89_pci_init_rx_bd(struct rtw89_dev *rtwdev, struct pci_dev *pdev, 3371 struct rtw89_pci_rx_ring *rx_ring, 3372 struct sk_buff *skb, int buf_sz, u32 idx) 3373 { 3374 struct rtw89_pci_rx_info *rx_info; 3375 struct rtw89_pci_rx_bd_32 *rx_bd; 3376 dma_addr_t dma; 3377 3378 if (!skb) 3379 return -EINVAL; 3380 3381 dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE); 3382 if (dma_mapping_error(&pdev->dev, dma)) 3383 return -EBUSY; 3384 3385 rx_info = RTW89_PCI_RX_SKB_CB(skb); 3386 rx_bd = RTW89_PCI_RX_BD(rx_ring, idx); 3387 3388 memset(rx_bd, 0, sizeof(*rx_bd)); 3389 rx_bd->buf_size = cpu_to_le16(buf_sz); 3390 rx_bd->dma = cpu_to_le32(dma); 3391 rx_bd->opt = le16_encode_bits(upper_32_bits(dma), RTW89_PCI_RXBD_OPT_DMA_HI); 3392 rx_info->dma = dma; 3393 3394 return 0; 3395 } 3396 3397 static int rtw89_pci_alloc_tx_wd_ring(struct rtw89_dev *rtwdev, 3398 struct pci_dev *pdev, 3399 struct rtw89_pci_tx_ring *tx_ring, 3400 enum rtw89_tx_channel txch) 3401 { 3402 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 3403 struct rtw89_pci_tx_wd *txwd; 3404 dma_addr_t dma; 3405 dma_addr_t cur_paddr; 3406 u8 *head; 3407 u8 *cur_vaddr; 3408 u32 page_size = RTW89_PCI_TXWD_PAGE_SIZE; 3409 u32 page_num = RTW89_PCI_TXWD_NUM_MAX; 3410 u32 ring_sz = page_size * page_num; 3411 u32 page_offset; 3412 int i; 3413 3414 /* FWCMD queue doesn't use txwd as pages */ 3415 if (txch == RTW89_TXCH_CH12) 3416 return 0; 3417 3418 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 3419 if (!head) 3420 return -ENOMEM; 3421 3422 INIT_LIST_HEAD(&wd_ring->free_pages); 3423 wd_ring->head = head; 3424 wd_ring->dma = dma; 3425 wd_ring->page_size = page_size; 3426 wd_ring->page_num = page_num; 3427 3428 page_offset = 0; 3429 for (i = 0; i < page_num; i++) { 3430 txwd = &wd_ring->pages[i]; 3431 cur_paddr = dma + page_offset; 3432 cur_vaddr = head + page_offset; 3433 3434 skb_queue_head_init(&txwd->queue); 3435 INIT_LIST_HEAD(&txwd->list); 3436 txwd->paddr = cur_paddr; 3437 txwd->vaddr = cur_vaddr; 3438 txwd->len = page_size; 3439 txwd->seq = i; 3440 rtw89_pci_enqueue_txwd(tx_ring, txwd); 3441 3442 page_offset += page_size; 3443 } 3444 3445 return 0; 3446 } 3447 3448 static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev, 3449 struct pci_dev *pdev, 3450 struct rtw89_pci_tx_ring *tx_ring, 3451 u32 desc_size, u32 len, 3452 enum rtw89_tx_channel txch) 3453 { 3454 const struct rtw89_pci_ch_dma_addr *txch_addr; 3455 int ring_sz = desc_size * len; 3456 u8 *head; 3457 dma_addr_t dma; 3458 int ret; 3459 3460 ret = rtw89_pci_alloc_tx_wd_ring(rtwdev, pdev, tx_ring, txch); 3461 if (ret) { 3462 rtw89_err(rtwdev, "failed to alloc txwd ring of txch %d\n", txch); 3463 goto err; 3464 } 3465 3466 ret = rtw89_pci_get_txch_addrs(rtwdev, txch, &txch_addr); 3467 if (ret) { 3468 rtw89_err(rtwdev, "failed to get address of txch %d", txch); 3469 goto err_free_wd_ring; 3470 } 3471 3472 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 3473 if (!head) { 3474 ret = -ENOMEM; 3475 goto err_free_wd_ring; 3476 } 3477 3478 INIT_LIST_HEAD(&tx_ring->busy_pages); 3479 tx_ring->bd_ring.head = head; 3480 tx_ring->bd_ring.dma = dma; 3481 tx_ring->bd_ring.len = len; 3482 tx_ring->bd_ring.desc_size = desc_size; 3483 tx_ring->bd_ring.addr = *txch_addr; 3484 tx_ring->bd_ring.wp = 0; 3485 tx_ring->bd_ring.rp = 0; 3486 tx_ring->txch = txch; 3487 3488 return 0; 3489 3490 err_free_wd_ring: 3491 rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring); 3492 err: 3493 return ret; 3494 } 3495 3496 static int rtw89_pci_alloc_tx_rings(struct rtw89_dev *rtwdev, 3497 struct pci_dev *pdev) 3498 { 3499 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3500 const struct rtw89_pci_info *info = rtwdev->pci_info; 3501 struct rtw89_pci_tx_ring *tx_ring; 3502 u32 desc_size; 3503 u32 len; 3504 u32 i, tx_allocated; 3505 int ret; 3506 3507 for (i = 0; i < RTW89_TXCH_NUM; i++) { 3508 if (info->tx_dma_ch_mask & BIT(i)) 3509 continue; 3510 tx_ring = &rtwpci->tx_rings[i]; 3511 desc_size = sizeof(struct rtw89_pci_tx_bd_32); 3512 len = RTW89_PCI_TXBD_NUM_MAX; 3513 ret = rtw89_pci_alloc_tx_ring(rtwdev, pdev, tx_ring, 3514 desc_size, len, i); 3515 if (ret) { 3516 #if defined(__linux__) 3517 rtw89_err(rtwdev, "failed to alloc tx ring %d\n", i); 3518 #elif defined(__FreeBSD__) 3519 rtw89_err(rtwdev, "failed to alloc tx ring %d: ret=%d\n", i, ret); 3520 #endif 3521 goto err_free; 3522 } 3523 } 3524 3525 return 0; 3526 3527 err_free: 3528 tx_allocated = i; 3529 for (i = 0; i < tx_allocated; i++) { 3530 tx_ring = &rtwpci->tx_rings[i]; 3531 rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring); 3532 } 3533 3534 return ret; 3535 } 3536 3537 static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev, 3538 struct pci_dev *pdev, 3539 struct rtw89_pci_rx_ring *rx_ring, 3540 u32 desc_size, u32 len, u32 rxch) 3541 { 3542 const struct rtw89_pci_info *info = rtwdev->pci_info; 3543 const struct rtw89_pci_ch_dma_addr *rxch_addr; 3544 struct sk_buff *skb; 3545 u8 *head; 3546 dma_addr_t dma; 3547 int ring_sz = desc_size * len; 3548 int buf_sz = RTW89_PCI_RX_BUF_SIZE; 3549 int i, allocated; 3550 int ret; 3551 3552 ret = rtw89_pci_get_rxch_addrs(rtwdev, rxch, &rxch_addr); 3553 if (ret) { 3554 rtw89_err(rtwdev, "failed to get address of rxch %d", rxch); 3555 return ret; 3556 } 3557 3558 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 3559 if (!head) { 3560 ret = -ENOMEM; 3561 goto err; 3562 } 3563 3564 rx_ring->bd_ring.head = head; 3565 rx_ring->bd_ring.dma = dma; 3566 rx_ring->bd_ring.len = len; 3567 rx_ring->bd_ring.desc_size = desc_size; 3568 rx_ring->bd_ring.addr = *rxch_addr; 3569 if (info->rx_ring_eq_is_full) 3570 rx_ring->bd_ring.wp = len - 1; 3571 else 3572 rx_ring->bd_ring.wp = 0; 3573 rx_ring->bd_ring.rp = 0; 3574 rx_ring->buf_sz = buf_sz; 3575 rx_ring->diliver_skb = NULL; 3576 rx_ring->diliver_desc.ready = false; 3577 rx_ring->target_rx_tag = 0; 3578 3579 for (i = 0; i < len; i++) { 3580 skb = dev_alloc_skb(buf_sz); 3581 if (!skb) { 3582 ret = -ENOMEM; 3583 goto err_free; 3584 } 3585 3586 memset(skb->data, 0, buf_sz); 3587 rx_ring->buf[i] = skb; 3588 ret = rtw89_pci_init_rx_bd(rtwdev, pdev, rx_ring, skb, 3589 buf_sz, i); 3590 if (ret) { 3591 #if defined(__linux__) 3592 rtw89_err(rtwdev, "failed to init rx buf %d\n", i); 3593 #elif defined(__FreeBSD__) 3594 rtw89_err(rtwdev, "failed to init rx buf %d ret=%d\n", i, ret); 3595 #endif 3596 dev_kfree_skb_any(skb); 3597 rx_ring->buf[i] = NULL; 3598 goto err_free; 3599 } 3600 } 3601 3602 return 0; 3603 3604 err_free: 3605 allocated = i; 3606 for (i = 0; i < allocated; i++) { 3607 skb = rx_ring->buf[i]; 3608 if (!skb) 3609 continue; 3610 dma = *((dma_addr_t *)skb->cb); 3611 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE); 3612 dev_kfree_skb(skb); 3613 rx_ring->buf[i] = NULL; 3614 } 3615 3616 head = rx_ring->bd_ring.head; 3617 dma = rx_ring->bd_ring.dma; 3618 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 3619 3620 rx_ring->bd_ring.head = NULL; 3621 err: 3622 return ret; 3623 } 3624 3625 static int rtw89_pci_alloc_rx_rings(struct rtw89_dev *rtwdev, 3626 struct pci_dev *pdev) 3627 { 3628 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3629 struct rtw89_pci_rx_ring *rx_ring; 3630 u32 desc_size; 3631 u32 len; 3632 int i, rx_allocated; 3633 int ret; 3634 3635 for (i = 0; i < RTW89_RXCH_NUM; i++) { 3636 rx_ring = &rtwpci->rx_rings[i]; 3637 desc_size = sizeof(struct rtw89_pci_rx_bd_32); 3638 len = RTW89_PCI_RXBD_NUM_MAX; 3639 ret = rtw89_pci_alloc_rx_ring(rtwdev, pdev, rx_ring, 3640 desc_size, len, i); 3641 if (ret) { 3642 rtw89_err(rtwdev, "failed to alloc rx ring %d\n", i); 3643 goto err_free; 3644 } 3645 } 3646 3647 return 0; 3648 3649 err_free: 3650 rx_allocated = i; 3651 for (i = 0; i < rx_allocated; i++) { 3652 rx_ring = &rtwpci->rx_rings[i]; 3653 rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring); 3654 } 3655 3656 return ret; 3657 } 3658 3659 static int rtw89_pci_alloc_trx_rings(struct rtw89_dev *rtwdev, 3660 struct pci_dev *pdev) 3661 { 3662 int ret; 3663 3664 ret = rtw89_pci_alloc_tx_rings(rtwdev, pdev); 3665 if (ret) { 3666 rtw89_err(rtwdev, "failed to alloc dma tx rings\n"); 3667 goto err; 3668 } 3669 3670 ret = rtw89_pci_alloc_rx_rings(rtwdev, pdev); 3671 if (ret) { 3672 rtw89_err(rtwdev, "failed to alloc dma rx rings\n"); 3673 goto err_free_tx_rings; 3674 } 3675 3676 return 0; 3677 3678 err_free_tx_rings: 3679 rtw89_pci_free_tx_rings(rtwdev, pdev); 3680 err: 3681 return ret; 3682 } 3683 3684 static void rtw89_pci_h2c_init(struct rtw89_dev *rtwdev, 3685 struct rtw89_pci *rtwpci) 3686 { 3687 skb_queue_head_init(&rtwpci->h2c_queue); 3688 skb_queue_head_init(&rtwpci->h2c_release_queue); 3689 } 3690 3691 static int rtw89_pci_setup_resource(struct rtw89_dev *rtwdev, 3692 struct pci_dev *pdev) 3693 { 3694 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3695 int ret; 3696 3697 ret = rtw89_pci_setup_mapping(rtwdev, pdev); 3698 if (ret) { 3699 rtw89_err(rtwdev, "failed to setup pci mapping\n"); 3700 goto err; 3701 } 3702 3703 ret = rtw89_pci_alloc_trx_rings(rtwdev, pdev); 3704 if (ret) { 3705 rtw89_err(rtwdev, "failed to alloc pci trx rings\n"); 3706 goto err_pci_unmap; 3707 } 3708 3709 rtw89_pci_h2c_init(rtwdev, rtwpci); 3710 3711 spin_lock_init(&rtwpci->irq_lock); 3712 spin_lock_init(&rtwpci->trx_lock); 3713 3714 return 0; 3715 3716 err_pci_unmap: 3717 rtw89_pci_clear_mapping(rtwdev, pdev); 3718 err: 3719 return ret; 3720 } 3721 3722 static void rtw89_pci_clear_resource(struct rtw89_dev *rtwdev, 3723 struct pci_dev *pdev) 3724 { 3725 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3726 3727 rtw89_pci_free_trx_rings(rtwdev, pdev); 3728 rtw89_pci_clear_mapping(rtwdev, pdev); 3729 rtw89_pci_release_fwcmd(rtwdev, rtwpci, 3730 skb_queue_len(&rtwpci->h2c_queue), true); 3731 } 3732 3733 void rtw89_pci_config_intr_mask(struct rtw89_dev *rtwdev) 3734 { 3735 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3736 const struct rtw89_chip_info *chip = rtwdev->chip; 3737 u32 hs0isr_ind_int_en = B_AX_HS0ISR_IND_INT_EN; 3738 3739 if (chip->chip_id == RTL8851B) 3740 hs0isr_ind_int_en = B_AX_HS0ISR_IND_INT_EN_WKARND; 3741 3742 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | 0; 3743 3744 if (rtwpci->under_recovery) { 3745 rtwpci->intrs[0] = hs0isr_ind_int_en; 3746 rtwpci->intrs[1] = 0; 3747 } else { 3748 rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN | 3749 B_AX_RXDMA_INT_EN | 3750 B_AX_RXP1DMA_INT_EN | 3751 B_AX_RPQDMA_INT_EN | 3752 B_AX_RXDMA_STUCK_INT_EN | 3753 B_AX_RDU_INT_EN | 3754 B_AX_RPQBD_FULL_INT_EN | 3755 hs0isr_ind_int_en; 3756 3757 rtwpci->intrs[1] = B_AX_HC10ISR_IND_INT_EN; 3758 } 3759 } 3760 EXPORT_SYMBOL(rtw89_pci_config_intr_mask); 3761 3762 static void rtw89_pci_recovery_intr_mask_v1(struct rtw89_dev *rtwdev) 3763 { 3764 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3765 3766 rtwpci->ind_intrs = B_AX_HS0ISR_IND_INT_EN; 3767 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; 3768 rtwpci->intrs[0] = 0; 3769 rtwpci->intrs[1] = 0; 3770 } 3771 3772 static void rtw89_pci_default_intr_mask_v1(struct rtw89_dev *rtwdev) 3773 { 3774 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3775 3776 rtwpci->ind_intrs = B_AX_HCI_AXIDMA_INT_EN | 3777 B_AX_HS1ISR_IND_INT_EN | 3778 B_AX_HS0ISR_IND_INT_EN; 3779 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; 3780 rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN | 3781 B_AX_RXDMA_INT_EN | 3782 B_AX_RXP1DMA_INT_EN | 3783 B_AX_RPQDMA_INT_EN | 3784 B_AX_RXDMA_STUCK_INT_EN | 3785 B_AX_RDU_INT_EN | 3786 B_AX_RPQBD_FULL_INT_EN; 3787 rtwpci->intrs[1] = B_AX_GPIO18_INT_EN; 3788 } 3789 3790 static void rtw89_pci_low_power_intr_mask_v1(struct rtw89_dev *rtwdev) 3791 { 3792 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3793 3794 rtwpci->ind_intrs = B_AX_HS1ISR_IND_INT_EN | 3795 B_AX_HS0ISR_IND_INT_EN; 3796 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; 3797 rtwpci->intrs[0] = 0; 3798 rtwpci->intrs[1] = B_AX_GPIO18_INT_EN; 3799 } 3800 3801 void rtw89_pci_config_intr_mask_v1(struct rtw89_dev *rtwdev) 3802 { 3803 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3804 3805 if (rtwpci->under_recovery) 3806 rtw89_pci_recovery_intr_mask_v1(rtwdev); 3807 else if (rtwpci->low_power) 3808 rtw89_pci_low_power_intr_mask_v1(rtwdev); 3809 else 3810 rtw89_pci_default_intr_mask_v1(rtwdev); 3811 } 3812 EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v1); 3813 3814 static void rtw89_pci_recovery_intr_mask_v2(struct rtw89_dev *rtwdev) 3815 { 3816 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3817 3818 rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0; 3819 rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN; 3820 rtwpci->intrs[0] = 0; 3821 rtwpci->intrs[1] = 0; 3822 } 3823 3824 static void rtw89_pci_default_intr_mask_v2(struct rtw89_dev *rtwdev) 3825 { 3826 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3827 3828 rtwpci->ind_intrs = B_BE_HCI_AXIDMA_INT_EN0 | 3829 B_BE_HS0_IND_INT_EN0; 3830 rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN; 3831 rtwpci->intrs[0] = B_BE_RDU_CH1_INT_IMR_V1 | 3832 B_BE_RDU_CH0_INT_IMR_V1; 3833 rtwpci->intrs[1] = B_BE_PCIE_RX_RX0P2_IMR0_V1 | 3834 B_BE_PCIE_RX_RPQ0_IMR0_V1; 3835 } 3836 3837 static void rtw89_pci_low_power_intr_mask_v2(struct rtw89_dev *rtwdev) 3838 { 3839 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3840 3841 rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0 | 3842 B_BE_HS1_IND_INT_EN0; 3843 rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN; 3844 rtwpci->intrs[0] = 0; 3845 rtwpci->intrs[1] = B_BE_PCIE_RX_RX0P2_IMR0_V1 | 3846 B_BE_PCIE_RX_RPQ0_IMR0_V1; 3847 } 3848 3849 void rtw89_pci_config_intr_mask_v2(struct rtw89_dev *rtwdev) 3850 { 3851 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3852 3853 if (rtwpci->under_recovery) 3854 rtw89_pci_recovery_intr_mask_v2(rtwdev); 3855 else if (rtwpci->low_power) 3856 rtw89_pci_low_power_intr_mask_v2(rtwdev); 3857 else 3858 rtw89_pci_default_intr_mask_v2(rtwdev); 3859 } 3860 EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v2); 3861 3862 static int rtw89_pci_request_irq(struct rtw89_dev *rtwdev, 3863 struct pci_dev *pdev) 3864 { 3865 unsigned long flags = 0; 3866 int ret; 3867 3868 flags |= PCI_IRQ_INTX | PCI_IRQ_MSI; 3869 ret = pci_alloc_irq_vectors(pdev, 1, 1, flags); 3870 if (ret < 0) { 3871 rtw89_err(rtwdev, "failed to alloc irq vectors, ret %d\n", ret); 3872 goto err; 3873 } 3874 3875 ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq, 3876 rtw89_pci_interrupt_handler, 3877 rtw89_pci_interrupt_threadfn, 3878 IRQF_SHARED, KBUILD_MODNAME, rtwdev); 3879 if (ret) { 3880 rtw89_err(rtwdev, "failed to request threaded irq\n"); 3881 goto err_free_vector; 3882 } 3883 3884 rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RESET); 3885 3886 return 0; 3887 3888 err_free_vector: 3889 pci_free_irq_vectors(pdev); 3890 err: 3891 return ret; 3892 } 3893 3894 static void rtw89_pci_free_irq(struct rtw89_dev *rtwdev, 3895 struct pci_dev *pdev) 3896 { 3897 devm_free_irq(rtwdev->dev, pdev->irq, rtwdev); 3898 pci_free_irq_vectors(pdev); 3899 } 3900 3901 static u16 gray_code_to_bin(u16 gray_code) 3902 { 3903 u16 binary = gray_code; 3904 3905 while (gray_code) { 3906 gray_code >>= 1; 3907 binary ^= gray_code; 3908 } 3909 3910 return binary; 3911 } 3912 3913 static int rtw89_pci_filter_out(struct rtw89_dev *rtwdev) 3914 { 3915 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3916 struct pci_dev *pdev = rtwpci->pdev; 3917 u16 val16, filter_out_val; 3918 u32 val, phy_offset; 3919 int ret; 3920 3921 if (rtwdev->chip->chip_id != RTL8852C) 3922 return 0; 3923 3924 val = rtw89_read32_mask(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK); 3925 if (val == B_AX_ASPM_CTRL_L1) 3926 return 0; 3927 3928 ret = pci_read_config_dword(pdev, RTW89_PCIE_L1_STS_V1, &val); 3929 if (ret) 3930 return ret; 3931 3932 val = FIELD_GET(RTW89_BCFG_LINK_SPEED_MASK, val); 3933 if (val == RTW89_PCIE_GEN1_SPEED) { 3934 phy_offset = R_RAC_DIRECT_OFFSET_G1; 3935 } else if (val == RTW89_PCIE_GEN2_SPEED) { 3936 phy_offset = R_RAC_DIRECT_OFFSET_G2; 3937 val16 = rtw89_read16(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT); 3938 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT, 3939 val16 | B_PCIE_BIT_PINOUT_DIS); 3940 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT, 3941 val16 & ~B_PCIE_BIT_RD_SEL); 3942 3943 val16 = rtw89_read16_mask(rtwdev, 3944 phy_offset + RAC_ANA1F * RAC_MULT, 3945 FILTER_OUT_EQ_MASK); 3946 val16 = gray_code_to_bin(val16); 3947 filter_out_val = rtw89_read16(rtwdev, phy_offset + RAC_ANA24 * 3948 RAC_MULT); 3949 filter_out_val &= ~REG_FILTER_OUT_MASK; 3950 filter_out_val |= FIELD_PREP(REG_FILTER_OUT_MASK, val16); 3951 3952 rtw89_write16(rtwdev, phy_offset + RAC_ANA24 * RAC_MULT, 3953 filter_out_val); 3954 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0A * RAC_MULT, 3955 B_BAC_EQ_SEL); 3956 rtw89_write16_set(rtwdev, 3957 R_RAC_DIRECT_OFFSET_G1 + RAC_ANA0C * RAC_MULT, 3958 B_PCIE_BIT_PSAVE); 3959 } else { 3960 return -EOPNOTSUPP; 3961 } 3962 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0C * RAC_MULT, 3963 B_PCIE_BIT_PSAVE); 3964 3965 return 0; 3966 } 3967 3968 static void rtw89_pci_clkreq_set(struct rtw89_dev *rtwdev, bool enable) 3969 { 3970 const struct rtw89_pci_info *info = rtwdev->pci_info; 3971 const struct rtw89_pci_gen_def *gen_def = info->gen_def; 3972 3973 if (rtw89_pci_disable_clkreq) 3974 return; 3975 3976 gen_def->clkreq_set(rtwdev, enable); 3977 } 3978 3979 static void rtw89_pci_clkreq_set_ax(struct rtw89_dev *rtwdev, bool enable) 3980 { 3981 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 3982 int ret; 3983 3984 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL, 3985 PCIE_CLKDLY_HW_30US); 3986 if (ret) 3987 rtw89_err(rtwdev, "failed to set CLKREQ Delay\n"); 3988 3989 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 3990 if (enable) 3991 ret = rtw89_pci_config_byte_set(rtwdev, 3992 RTW89_PCIE_L1_CTRL, 3993 RTW89_PCIE_BIT_CLK); 3994 else 3995 ret = rtw89_pci_config_byte_clr(rtwdev, 3996 RTW89_PCIE_L1_CTRL, 3997 RTW89_PCIE_BIT_CLK); 3998 if (ret) 3999 rtw89_err(rtwdev, "failed to %s CLKREQ_L1, ret=%d", 4000 enable ? "set" : "unset", ret); 4001 } else if (chip_id == RTL8852C) { 4002 rtw89_write32_set(rtwdev, R_AX_PCIE_LAT_CTRL, 4003 B_AX_CLK_REQ_SEL_OPT | B_AX_CLK_REQ_SEL); 4004 if (enable) 4005 rtw89_write32_set(rtwdev, R_AX_L1_CLK_CTRL, 4006 B_AX_CLK_REQ_N); 4007 else 4008 rtw89_write32_clr(rtwdev, R_AX_L1_CLK_CTRL, 4009 B_AX_CLK_REQ_N); 4010 } 4011 } 4012 4013 static void rtw89_pci_aspm_set(struct rtw89_dev *rtwdev, bool enable) 4014 { 4015 const struct rtw89_pci_info *info = rtwdev->pci_info; 4016 const struct rtw89_pci_gen_def *gen_def = info->gen_def; 4017 4018 if (rtw89_pci_disable_aspm_l1) 4019 return; 4020 4021 gen_def->aspm_set(rtwdev, enable); 4022 } 4023 4024 static void rtw89_pci_aspm_set_ax(struct rtw89_dev *rtwdev, bool enable) 4025 { 4026 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 4027 u8 value = 0; 4028 int ret; 4029 4030 ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, &value); 4031 if (ret) 4032 rtw89_warn(rtwdev, "failed to read ASPM Delay\n"); 4033 4034 u8p_replace_bits(&value, PCIE_L1DLY_16US, RTW89_L1DLY_MASK); 4035 u8p_replace_bits(&value, PCIE_L0SDLY_4US, RTW89_L0DLY_MASK); 4036 4037 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, value); 4038 if (ret) 4039 rtw89_warn(rtwdev, "failed to read ASPM Delay\n"); 4040 4041 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 4042 if (enable) 4043 ret = rtw89_pci_config_byte_set(rtwdev, 4044 RTW89_PCIE_L1_CTRL, 4045 RTW89_PCIE_BIT_L1); 4046 else 4047 ret = rtw89_pci_config_byte_clr(rtwdev, 4048 RTW89_PCIE_L1_CTRL, 4049 RTW89_PCIE_BIT_L1); 4050 } else if (chip_id == RTL8852C) { 4051 if (enable) 4052 rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1, 4053 B_AX_ASPM_CTRL_L1); 4054 else 4055 rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1, 4056 B_AX_ASPM_CTRL_L1); 4057 } 4058 if (ret) 4059 rtw89_err(rtwdev, "failed to %s ASPM L1, ret=%d", 4060 enable ? "set" : "unset", ret); 4061 } 4062 4063 static void rtw89_pci_recalc_int_mit(struct rtw89_dev *rtwdev) 4064 { 4065 enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen; 4066 const struct rtw89_pci_info *info = rtwdev->pci_info; 4067 struct rtw89_traffic_stats *stats = &rtwdev->stats; 4068 enum rtw89_tfc_lv tx_tfc_lv = stats->tx_tfc_lv; 4069 enum rtw89_tfc_lv rx_tfc_lv = stats->rx_tfc_lv; 4070 u32 val = 0; 4071 4072 if (rtwdev->scanning || 4073 (tx_tfc_lv < RTW89_TFC_HIGH && rx_tfc_lv < RTW89_TFC_HIGH)) 4074 goto out; 4075 4076 if (chip_gen == RTW89_CHIP_BE) 4077 val = B_BE_PCIE_MIT_RX0P2_EN | B_BE_PCIE_MIT_RX0P1_EN; 4078 else 4079 val = B_AX_RXMIT_RXP2_SEL | B_AX_RXMIT_RXP1_SEL | 4080 FIELD_PREP(B_AX_RXCOUNTER_MATCH_MASK, RTW89_PCI_RXBD_NUM_MAX / 2) | 4081 FIELD_PREP(B_AX_RXTIMER_UNIT_MASK, AX_RXTIMER_UNIT_64US) | 4082 FIELD_PREP(B_AX_RXTIMER_MATCH_MASK, 2048 / 64); 4083 4084 out: 4085 rtw89_write32(rtwdev, info->mit_addr, val); 4086 } 4087 4088 static void rtw89_pci_link_cfg(struct rtw89_dev *rtwdev) 4089 { 4090 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 4091 struct pci_dev *pdev = rtwpci->pdev; 4092 u16 link_ctrl; 4093 int ret; 4094 4095 /* Though there is standard PCIE configuration space to set the 4096 * link control register, but by Realtek's design, driver should 4097 * check if host supports CLKREQ/ASPM to enable the HW module. 4098 * 4099 * These functions are implemented by two HW modules associated, 4100 * one is responsible to access PCIE configuration space to 4101 * follow the host settings, and another is in charge of doing 4102 * CLKREQ/ASPM mechanisms, it is default disabled. Because sometimes 4103 * the host does not support it, and due to some reasons or wrong 4104 * settings (ex. CLKREQ# not Bi-Direction), it could lead to device 4105 * loss if HW misbehaves on the link. 4106 * 4107 * Hence it's designed that driver should first check the PCIE 4108 * configuration space is sync'ed and enabled, then driver can turn 4109 * on the other module that is actually working on the mechanism. 4110 */ 4111 ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl); 4112 if (ret) { 4113 rtw89_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret); 4114 return; 4115 } 4116 4117 if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN) 4118 rtw89_pci_clkreq_set(rtwdev, true); 4119 4120 if (link_ctrl & PCI_EXP_LNKCTL_ASPM_L1) 4121 rtw89_pci_aspm_set(rtwdev, true); 4122 } 4123 4124 static void rtw89_pci_l1ss_set(struct rtw89_dev *rtwdev, bool enable) 4125 { 4126 const struct rtw89_pci_info *info = rtwdev->pci_info; 4127 const struct rtw89_pci_gen_def *gen_def = info->gen_def; 4128 4129 if (rtw89_pci_disable_l1ss) 4130 return; 4131 4132 gen_def->l1ss_set(rtwdev, enable); 4133 } 4134 4135 static void rtw89_pci_l1ss_set_ax(struct rtw89_dev *rtwdev, bool enable) 4136 { 4137 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 4138 int ret; 4139 4140 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 4141 if (enable) 4142 ret = rtw89_pci_config_byte_set(rtwdev, 4143 RTW89_PCIE_TIMER_CTRL, 4144 RTW89_PCIE_BIT_L1SUB); 4145 else 4146 ret = rtw89_pci_config_byte_clr(rtwdev, 4147 RTW89_PCIE_TIMER_CTRL, 4148 RTW89_PCIE_BIT_L1SUB); 4149 if (ret) 4150 rtw89_err(rtwdev, "failed to %s L1SS, ret=%d", 4151 enable ? "set" : "unset", ret); 4152 } else if (chip_id == RTL8852C) { 4153 ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_L1SS_STS_V1, 4154 RTW89_PCIE_BIT_ASPM_L11 | 4155 RTW89_PCIE_BIT_PCI_L11); 4156 if (ret) 4157 rtw89_warn(rtwdev, "failed to unset ASPM L1.1, ret=%d", ret); 4158 if (enable) 4159 rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1, 4160 B_AX_L1SUB_DISABLE); 4161 else 4162 rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1, 4163 B_AX_L1SUB_DISABLE); 4164 } 4165 } 4166 4167 static void rtw89_pci_l1ss_cfg(struct rtw89_dev *rtwdev) 4168 { 4169 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 4170 struct pci_dev *pdev = rtwpci->pdev; 4171 u32 l1ss_cap_ptr, l1ss_ctrl; 4172 4173 if (rtw89_pci_disable_l1ss) 4174 return; 4175 4176 l1ss_cap_ptr = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS); 4177 if (!l1ss_cap_ptr) 4178 return; 4179 4180 pci_read_config_dword(pdev, l1ss_cap_ptr + PCI_L1SS_CTL1, &l1ss_ctrl); 4181 4182 if (l1ss_ctrl & PCI_L1SS_CTL1_L1SS_MASK) 4183 rtw89_pci_l1ss_set(rtwdev, true); 4184 } 4185 4186 static void rtw89_pci_cpl_timeout_cfg(struct rtw89_dev *rtwdev) 4187 { 4188 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 4189 struct pci_dev *pdev = rtwpci->pdev; 4190 4191 pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2, 4192 PCI_EXP_DEVCTL2_COMP_TMOUT_DIS); 4193 } 4194 4195 static int rtw89_pci_poll_io_idle_ax(struct rtw89_dev *rtwdev) 4196 { 4197 int ret = 0; 4198 u32 sts; 4199 u32 busy = B_AX_PCIEIO_BUSY | B_AX_PCIEIO_TX_BUSY | B_AX_PCIEIO_RX_BUSY; 4200 4201 ret = read_poll_timeout_atomic(rtw89_read32, sts, (sts & busy) == 0x0, 4202 10, 1000, false, rtwdev, 4203 R_AX_PCIE_DMA_BUSY1); 4204 if (ret) { 4205 rtw89_err(rtwdev, "pci dmach busy1 0x%X\n", 4206 rtw89_read32(rtwdev, R_AX_PCIE_DMA_BUSY1)); 4207 return -EINVAL; 4208 } 4209 return ret; 4210 } 4211 4212 static int rtw89_pci_lv1rst_stop_dma_ax(struct rtw89_dev *rtwdev) 4213 { 4214 u32 val; 4215 int ret; 4216 4217 if (rtwdev->chip->chip_id == RTL8852C) 4218 return 0; 4219 4220 rtw89_pci_ctrl_dma_all(rtwdev, false); 4221 ret = rtw89_pci_poll_io_idle_ax(rtwdev); 4222 if (ret) { 4223 val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG); 4224 rtw89_debug(rtwdev, RTW89_DBG_HCI, 4225 "[PCIe] poll_io_idle fail, before 0x%08x: 0x%08x\n", 4226 R_AX_DBG_ERR_FLAG, val); 4227 if (val & B_AX_TX_STUCK || val & B_AX_PCIE_TXBD_LEN0) 4228 rtw89_mac_ctrl_hci_dma_tx(rtwdev, false); 4229 if (val & B_AX_RX_STUCK) 4230 rtw89_mac_ctrl_hci_dma_rx(rtwdev, false); 4231 rtw89_mac_ctrl_hci_dma_trx(rtwdev, true); 4232 ret = rtw89_pci_poll_io_idle_ax(rtwdev); 4233 val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG); 4234 rtw89_debug(rtwdev, RTW89_DBG_HCI, 4235 "[PCIe] poll_io_idle fail, after 0x%08x: 0x%08x\n", 4236 R_AX_DBG_ERR_FLAG, val); 4237 } 4238 4239 return ret; 4240 } 4241 4242 static int rtw89_pci_lv1rst_start_dma_ax(struct rtw89_dev *rtwdev) 4243 { 4244 u32 ret; 4245 4246 if (rtwdev->chip->chip_id == RTL8852C) 4247 return 0; 4248 4249 rtw89_mac_ctrl_hci_dma_trx(rtwdev, false); 4250 rtw89_mac_ctrl_hci_dma_trx(rtwdev, true); 4251 rtw89_pci_clr_idx_all(rtwdev); 4252 4253 ret = rtw89_pci_rst_bdram_ax(rtwdev); 4254 if (ret) 4255 return ret; 4256 4257 rtw89_pci_ctrl_dma_all(rtwdev, true); 4258 return ret; 4259 } 4260 4261 static int rtw89_pci_ops_mac_lv1_recovery(struct rtw89_dev *rtwdev, 4262 enum rtw89_lv1_rcvy_step step) 4263 { 4264 const struct rtw89_pci_info *info = rtwdev->pci_info; 4265 const struct rtw89_pci_gen_def *gen_def = info->gen_def; 4266 int ret; 4267 4268 switch (step) { 4269 case RTW89_LV1_RCVY_STEP_1: 4270 ret = gen_def->lv1rst_stop_dma(rtwdev); 4271 if (ret) 4272 rtw89_err(rtwdev, "lv1 rcvy pci stop dma fail\n"); 4273 4274 break; 4275 4276 case RTW89_LV1_RCVY_STEP_2: 4277 ret = gen_def->lv1rst_start_dma(rtwdev); 4278 if (ret) 4279 rtw89_err(rtwdev, "lv1 rcvy pci start dma fail\n"); 4280 break; 4281 4282 default: 4283 return -EINVAL; 4284 } 4285 4286 return ret; 4287 } 4288 4289 static void rtw89_pci_ops_dump_err_status(struct rtw89_dev *rtwdev) 4290 { 4291 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) 4292 return; 4293 4294 if (rtwdev->chip->chip_id == RTL8852C) { 4295 rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n", 4296 rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG_V1)); 4297 rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n", 4298 rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG_V1)); 4299 } else { 4300 rtw89_info(rtwdev, "R_AX_RPQ_RXBD_IDX =0x%08x\n", 4301 rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX)); 4302 rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n", 4303 rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG)); 4304 rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n", 4305 rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG)); 4306 } 4307 } 4308 4309 static int rtw89_pci_napi_poll(struct napi_struct *napi, int budget) 4310 { 4311 struct rtw89_dev *rtwdev = container_of(napi, struct rtw89_dev, napi); 4312 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 4313 const struct rtw89_pci_info *info = rtwdev->pci_info; 4314 const struct rtw89_pci_gen_def *gen_def = info->gen_def; 4315 unsigned long flags; 4316 int work_done; 4317 4318 rtwdev->napi_budget_countdown = budget; 4319 4320 rtw89_write32(rtwdev, gen_def->isr_clear_rpq.addr, gen_def->isr_clear_rpq.data); 4321 work_done = rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown); 4322 if (work_done == budget) 4323 return budget; 4324 4325 rtw89_write32(rtwdev, gen_def->isr_clear_rxq.addr, gen_def->isr_clear_rxq.data); 4326 work_done += rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown); 4327 if (work_done < budget && napi_complete_done(napi, work_done)) { 4328 spin_lock_irqsave(&rtwpci->irq_lock, flags); 4329 if (likely(rtwpci->running)) 4330 rtw89_chip_enable_intr(rtwdev, rtwpci); 4331 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 4332 } 4333 4334 return work_done; 4335 } 4336 4337 static 4338 void rtw89_check_pci_ssid_quirks(struct rtw89_dev *rtwdev, 4339 struct pci_dev *pdev, 4340 const struct rtw89_pci_ssid_quirk *ssid_quirks) 4341 { 4342 int i; 4343 4344 if (!ssid_quirks) 4345 return; 4346 4347 for (i = 0; i < 200; i++, ssid_quirks++) { 4348 if (ssid_quirks->vendor == 0 && ssid_quirks->device == 0) 4349 break; 4350 4351 if (ssid_quirks->vendor != pdev->vendor || 4352 ssid_quirks->device != pdev->device || 4353 ssid_quirks->subsystem_vendor != pdev->subsystem_vendor || 4354 ssid_quirks->subsystem_device != pdev->subsystem_device) 4355 continue; 4356 4357 bitmap_or(rtwdev->quirks, rtwdev->quirks, &ssid_quirks->bitmap, 4358 NUM_OF_RTW89_QUIRKS); 4359 rtwdev->custid = ssid_quirks->custid; 4360 break; 4361 } 4362 4363 rtw89_debug(rtwdev, RTW89_DBG_HCI, "quirks=%*ph custid=%d\n", 4364 (int)sizeof(rtwdev->quirks), rtwdev->quirks, rtwdev->custid); 4365 } 4366 4367 static int __maybe_unused rtw89_pci_suspend(struct device *dev) 4368 { 4369 struct ieee80211_hw *hw = dev_get_drvdata(dev); 4370 struct rtw89_dev *rtwdev = hw->priv; 4371 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 4372 4373 rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); 4374 rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST); 4375 rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); 4376 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 4377 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, 4378 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 4379 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, 4380 B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG); 4381 } else { 4382 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, 4383 B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN); 4384 } 4385 4386 return 0; 4387 } 4388 4389 static void rtw89_pci_l2_hci_ldo(struct rtw89_dev *rtwdev) 4390 { 4391 if (rtwdev->chip->chip_id == RTL8852C) 4392 return; 4393 4394 /* Hardware need write the reg twice to ensure the setting work */ 4395 rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE, 4396 RTW89_PCIE_BIT_CFG_RST_MSTATE); 4397 rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE, 4398 RTW89_PCIE_BIT_CFG_RST_MSTATE); 4399 } 4400 4401 void rtw89_pci_basic_cfg(struct rtw89_dev *rtwdev, bool resume) 4402 { 4403 if (resume) 4404 rtw89_pci_cfg_dac(rtwdev, false); 4405 4406 rtw89_pci_disable_eq(rtwdev); 4407 rtw89_pci_filter_out(rtwdev); 4408 rtw89_pci_cpl_timeout_cfg(rtwdev); 4409 rtw89_pci_link_cfg(rtwdev); 4410 rtw89_pci_l1ss_cfg(rtwdev); 4411 } 4412 4413 static int __maybe_unused rtw89_pci_resume(struct device *dev) 4414 { 4415 struct ieee80211_hw *hw = dev_get_drvdata(dev); 4416 struct rtw89_dev *rtwdev = hw->priv; 4417 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 4418 4419 rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); 4420 rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST); 4421 rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); 4422 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 4423 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, 4424 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 4425 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, 4426 B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG); 4427 } else { 4428 rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1, 4429 B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN); 4430 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, 4431 B_AX_SEL_REQ_ENTR_L1); 4432 } 4433 rtw89_pci_l2_hci_ldo(rtwdev); 4434 4435 rtw89_pci_basic_cfg(rtwdev, true); 4436 4437 return 0; 4438 } 4439 4440 SIMPLE_DEV_PM_OPS(rtw89_pm_ops, rtw89_pci_suspend, rtw89_pci_resume); 4441 EXPORT_SYMBOL(rtw89_pm_ops); 4442 4443 static pci_ers_result_t rtw89_pci_io_error_detected(struct pci_dev *pdev, 4444 pci_channel_state_t state) 4445 { 4446 struct net_device *netdev = pci_get_drvdata(pdev); 4447 4448 netif_device_detach(netdev); 4449 4450 return PCI_ERS_RESULT_NEED_RESET; 4451 } 4452 4453 static pci_ers_result_t rtw89_pci_io_slot_reset(struct pci_dev *pdev) 4454 { 4455 struct ieee80211_hw *hw = pci_get_drvdata(pdev); 4456 struct rtw89_dev *rtwdev = hw->priv; 4457 4458 rtw89_ser_notify(rtwdev, MAC_AX_ERR_ASSERTION); 4459 4460 return PCI_ERS_RESULT_RECOVERED; 4461 } 4462 4463 static void rtw89_pci_io_resume(struct pci_dev *pdev) 4464 { 4465 struct net_device *netdev = pci_get_drvdata(pdev); 4466 4467 /* ack any pending wake events, disable PME */ 4468 pci_enable_wake(pdev, PCI_D0, 0); 4469 4470 netif_device_attach(netdev); 4471 } 4472 4473 const struct pci_error_handlers rtw89_pci_err_handler = { 4474 .error_detected = rtw89_pci_io_error_detected, 4475 .slot_reset = rtw89_pci_io_slot_reset, 4476 .resume = rtw89_pci_io_resume, 4477 }; 4478 EXPORT_SYMBOL(rtw89_pci_err_handler); 4479 4480 const struct rtw89_pci_gen_def rtw89_pci_gen_ax = { 4481 .isr_rdu = B_AX_RDU_INT, 4482 .isr_halt_c2h = B_AX_HALT_C2H_INT_EN, 4483 .isr_wdt_timeout = B_AX_WDT_TIMEOUT_INT_EN, 4484 .isr_clear_rpq = {R_AX_PCIE_HISR00, B_AX_RPQDMA_INT | B_AX_RPQBD_FULL_INT}, 4485 .isr_clear_rxq = {R_AX_PCIE_HISR00, B_AX_RXP1DMA_INT | B_AX_RXDMA_INT | 4486 B_AX_RDU_INT}, 4487 4488 .mac_pre_init = rtw89_pci_ops_mac_pre_init_ax, 4489 .mac_pre_deinit = rtw89_pci_ops_mac_pre_deinit_ax, 4490 .mac_post_init = rtw89_pci_ops_mac_post_init_ax, 4491 4492 .clr_idx_all = rtw89_pci_clr_idx_all_ax, 4493 .rst_bdram = rtw89_pci_rst_bdram_ax, 4494 4495 .lv1rst_stop_dma = rtw89_pci_lv1rst_stop_dma_ax, 4496 .lv1rst_start_dma = rtw89_pci_lv1rst_start_dma_ax, 4497 4498 .ctrl_txdma_ch = rtw89_pci_ctrl_txdma_ch_ax, 4499 .ctrl_txdma_fw_ch = rtw89_pci_ctrl_txdma_fw_ch_ax, 4500 .poll_txdma_ch_idle = rtw89_pci_poll_txdma_ch_idle_ax, 4501 4502 .aspm_set = rtw89_pci_aspm_set_ax, 4503 .clkreq_set = rtw89_pci_clkreq_set_ax, 4504 .l1ss_set = rtw89_pci_l1ss_set_ax, 4505 4506 .disable_eq = rtw89_pci_disable_eq_ax, 4507 .power_wake = rtw89_pci_power_wake_ax, 4508 }; 4509 EXPORT_SYMBOL(rtw89_pci_gen_ax); 4510 4511 static const struct rtw89_hci_ops rtw89_pci_ops = { 4512 .tx_write = rtw89_pci_ops_tx_write, 4513 .tx_kick_off = rtw89_pci_ops_tx_kick_off, 4514 .flush_queues = rtw89_pci_ops_flush_queues, 4515 .reset = rtw89_pci_ops_reset, 4516 .start = rtw89_pci_ops_start, 4517 .stop = rtw89_pci_ops_stop, 4518 .pause = rtw89_pci_ops_pause, 4519 .switch_mode = rtw89_pci_ops_switch_mode, 4520 .recalc_int_mit = rtw89_pci_recalc_int_mit, 4521 4522 .read8 = rtw89_pci_ops_read8, 4523 .read16 = rtw89_pci_ops_read16, 4524 .read32 = rtw89_pci_ops_read32, 4525 .write8 = rtw89_pci_ops_write8, 4526 .write16 = rtw89_pci_ops_write16, 4527 .write32 = rtw89_pci_ops_write32, 4528 4529 .mac_pre_init = rtw89_pci_ops_mac_pre_init, 4530 .mac_pre_deinit = rtw89_pci_ops_mac_pre_deinit, 4531 .mac_post_init = rtw89_pci_ops_mac_post_init, 4532 .deinit = rtw89_pci_ops_deinit, 4533 4534 .check_and_reclaim_tx_resource = rtw89_pci_check_and_reclaim_tx_resource, 4535 .mac_lv1_rcvy = rtw89_pci_ops_mac_lv1_recovery, 4536 .dump_err_status = rtw89_pci_ops_dump_err_status, 4537 .napi_poll = rtw89_pci_napi_poll, 4538 4539 .recovery_start = rtw89_pci_ops_recovery_start, 4540 .recovery_complete = rtw89_pci_ops_recovery_complete, 4541 4542 .ctrl_txdma_ch = rtw89_pci_ctrl_txdma_ch, 4543 .ctrl_txdma_fw_ch = rtw89_pci_ctrl_txdma_fw_ch, 4544 .ctrl_trxhci = rtw89_pci_ctrl_dma_trx, 4545 .poll_txdma_ch_idle = rtw89_pci_poll_txdma_ch_idle, 4546 4547 .clr_idx_all = rtw89_pci_clr_idx_all, 4548 .clear = rtw89_pci_clear_resource, 4549 .disable_intr = rtw89_pci_disable_intr_lock, 4550 .enable_intr = rtw89_pci_enable_intr_lock, 4551 .rst_bdram = rtw89_pci_reset_bdram, 4552 }; 4553 4554 int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) 4555 { 4556 struct rtw89_dev *rtwdev; 4557 const struct rtw89_driver_info *info; 4558 const struct rtw89_pci_info *pci_info; 4559 int ret; 4560 4561 info = (const struct rtw89_driver_info *)id->driver_data; 4562 4563 rtwdev = rtw89_alloc_ieee80211_hw(&pdev->dev, 4564 sizeof(struct rtw89_pci), 4565 info->chip, info->variant); 4566 if (!rtwdev) { 4567 dev_err(&pdev->dev, "failed to allocate hw\n"); 4568 return -ENOMEM; 4569 } 4570 4571 pci_info = info->bus.pci; 4572 4573 rtwdev->pci_info = info->bus.pci; 4574 rtwdev->hci.ops = &rtw89_pci_ops; 4575 rtwdev->hci.type = RTW89_HCI_TYPE_PCIE; 4576 rtwdev->hci.dle_type = RTW89_HCI_DLE_TYPE_PCIE; 4577 rtwdev->hci.rpwm_addr = pci_info->rpwm_addr; 4578 rtwdev->hci.cpwm_addr = pci_info->cpwm_addr; 4579 4580 rtw89_check_quirks(rtwdev, info->quirks); 4581 rtw89_check_pci_ssid_quirks(rtwdev, pdev, pci_info->ssid_quirks); 4582 4583 SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev); 4584 4585 ret = rtw89_core_init(rtwdev); 4586 if (ret) { 4587 rtw89_err(rtwdev, "failed to initialise core\n"); 4588 goto err_release_hw; 4589 } 4590 4591 ret = rtw89_pci_claim_device(rtwdev, pdev); 4592 if (ret) { 4593 rtw89_err(rtwdev, "failed to claim pci device\n"); 4594 goto err_core_deinit; 4595 } 4596 4597 ret = rtw89_pci_setup_resource(rtwdev, pdev); 4598 if (ret) { 4599 rtw89_err(rtwdev, "failed to setup pci resource\n"); 4600 goto err_declaim_pci; 4601 } 4602 4603 ret = rtw89_chip_info_setup(rtwdev); 4604 if (ret) { 4605 rtw89_err(rtwdev, "failed to setup chip information\n"); 4606 goto err_clear_resource; 4607 } 4608 4609 rtw89_pci_basic_cfg(rtwdev, false); 4610 4611 ret = rtw89_core_napi_init(rtwdev); 4612 if (ret) { 4613 rtw89_err(rtwdev, "failed to init napi\n"); 4614 goto err_clear_resource; 4615 } 4616 4617 ret = rtw89_pci_request_irq(rtwdev, pdev); 4618 if (ret) { 4619 rtw89_err(rtwdev, "failed to request pci irq\n"); 4620 goto err_deinit_napi; 4621 } 4622 4623 ret = rtw89_core_register(rtwdev); 4624 if (ret) { 4625 rtw89_err(rtwdev, "failed to register core\n"); 4626 goto err_free_irq; 4627 } 4628 4629 set_bit(RTW89_FLAG_PROBE_DONE, rtwdev->flags); 4630 4631 return 0; 4632 4633 err_free_irq: 4634 rtw89_pci_free_irq(rtwdev, pdev); 4635 err_deinit_napi: 4636 rtw89_core_napi_deinit(rtwdev); 4637 err_clear_resource: 4638 rtw89_pci_clear_resource(rtwdev, pdev); 4639 err_declaim_pci: 4640 rtw89_pci_declaim_device(rtwdev, pdev); 4641 err_core_deinit: 4642 rtw89_core_deinit(rtwdev); 4643 err_release_hw: 4644 rtw89_free_ieee80211_hw(rtwdev); 4645 4646 return ret; 4647 } 4648 EXPORT_SYMBOL(rtw89_pci_probe); 4649 4650 void rtw89_pci_remove(struct pci_dev *pdev) 4651 { 4652 struct ieee80211_hw *hw = pci_get_drvdata(pdev); 4653 struct rtw89_dev *rtwdev; 4654 4655 rtwdev = hw->priv; 4656 4657 rtw89_pci_free_irq(rtwdev, pdev); 4658 rtw89_core_napi_deinit(rtwdev); 4659 rtw89_core_unregister(rtwdev); 4660 rtw89_pci_clear_resource(rtwdev, pdev); 4661 rtw89_pci_declaim_device(rtwdev, pdev); 4662 rtw89_core_deinit(rtwdev); 4663 rtw89_free_ieee80211_hw(rtwdev); 4664 } 4665 EXPORT_SYMBOL(rtw89_pci_remove); 4666 4667 MODULE_AUTHOR("Realtek Corporation"); 4668 MODULE_DESCRIPTION("Realtek PCI 802.11ax wireless driver"); 4669 MODULE_LICENSE("Dual BSD/GPL"); 4670 #if defined(__FreeBSD__) 4671 MODULE_VERSION(rtw89_pci, 1); 4672 MODULE_DEPEND(rtw89_pci, linuxkpi, 1, 1, 1); 4673 MODULE_DEPEND(rtw89_pci, linuxkpi_wlan, 1, 1, 1); 4674 #ifdef CONFIG_RTW89_DEBUGFS 4675 MODULE_DEPEND(rtw89_pci, lindebugfs, 1, 1, 1); 4676 #endif 4677 #endif 4678