// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2020  Realtek Corporation
 */

#if defined(__FreeBSD__)
#define LINUXKPI_PARAM_PREFIX rtw89_pci_
#endif

#include <linux/pci.h>
#if defined(__FreeBSD__)
#include <sys/rman.h>
#endif

#include "mac.h"
#include "pci.h"
#include "reg.h"
#include "ser.h"

static bool rtw89_pci_disable_clkreq;
static bool rtw89_pci_disable_aspm_l1;
static bool rtw89_pci_disable_l1ss;
module_param_named(disable_clkreq, rtw89_pci_disable_clkreq, bool, 0644);
module_param_named(disable_aspm_l1, rtw89_pci_disable_aspm_l1, bool, 0644);
module_param_named(disable_aspm_l1ss, rtw89_pci_disable_l1ss, bool, 0644);
MODULE_PARM_DESC(disable_clkreq, "Set Y to disable PCI clkreq support");
MODULE_PARM_DESC(disable_aspm_l1, "Set Y to disable PCI ASPM L1 support");
MODULE_PARM_DESC(disable_aspm_l1ss, "Set Y to disable PCI L1SS support");

static int rtw89_pci_get_phy_offset_by_link_speed(struct rtw89_dev *rtwdev,
						  u32 *phy_offset)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;
	u32 val;
	int ret;

	ret = pci_read_config_dword(pdev, RTW89_PCIE_L1_STS_V1, &val);
	if (ret)
		return ret;

	val = u32_get_bits(val, RTW89_BCFG_LINK_SPEED_MASK);
	if (val == RTW89_PCIE_GEN1_SPEED) {
		*phy_offset = R_RAC_DIRECT_OFFSET_G1;
	} else if (val == RTW89_PCIE_GEN2_SPEED) {
		*phy_offset = R_RAC_DIRECT_OFFSET_G2;
	} else {
		rtw89_warn(rtwdev, "Unknown PCI link speed %d\n", val);
		return -EFAULT;
	}

	return 0;
}

static int rtw89_pci_rst_bdram_ax(struct rtw89_dev *rtwdev)
{
	u32 val;
	int ret;

	rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RST_BDRAM);

	ret = read_poll_timeout_atomic(rtw89_read32, val, !(val & B_AX_RST_BDRAM),
				       1, RTW89_PCI_POLL_BDRAM_RST_CNT, false,
				       rtwdev, R_AX_PCIE_INIT_CFG1);

	return ret;
}

static u32 rtw89_pci_dma_recalc(struct rtw89_dev *rtwdev,
				struct rtw89_pci_dma_ring *bd_ring,
				u32 cur_idx, bool tx)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	u32 cnt, cur_rp, wp, rp, len;

	rp = bd_ring->rp;
	wp = bd_ring->wp;
	len = bd_ring->len;

	cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
	if (tx) {
		cnt = cur_rp >= rp ? cur_rp - rp : len - (rp - cur_rp);
	} else {
		if (info->rx_ring_eq_is_full)
			wp += 1;

		cnt = cur_rp >= wp ? cur_rp - wp : len - (wp - cur_rp);
	}

	bd_ring->rp = cur_rp;

	return cnt;
}

static u32 rtw89_pci_txbd_recalc(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 addr_idx = bd_ring->addr.idx;
	u32 cnt, idx;

	idx = rtw89_read32(rtwdev, addr_idx);
	cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, true);

	return cnt;
}
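
/* Illustrative note (not driver code): rtw89_pci_dma_recalc() above measures
 * how far the hardware index advanced, including across a wrap-around. For
 * example, with len = 256, rp = 250 and a hardware index cur_rp = 4,
 * cur_rp < rp, so cnt = len - (rp - cur_rp) = 256 - 246 = 10 completed
 * buffers (slots 250..255 plus 0..3).
 */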

static void rtw89_pci_release_fwcmd(struct rtw89_dev *rtwdev,
				    struct rtw89_pci *rtwpci,
				    u32 cnt, bool release_all)
{
	struct rtw89_pci_tx_data *tx_data;
	struct sk_buff *skb;
	u32 qlen;

	while (cnt--) {
		skb = skb_dequeue(&rtwpci->h2c_queue);
		if (!skb) {
			rtw89_err(rtwdev, "failed to pre-release fwcmd\n");
			return;
		}
		skb_queue_tail(&rtwpci->h2c_release_queue, skb);
	}

	qlen = skb_queue_len(&rtwpci->h2c_release_queue);
	if (!release_all)
		qlen = qlen > RTW89_PCI_MULTITAG ? qlen - RTW89_PCI_MULTITAG : 0;

	while (qlen--) {
		skb = skb_dequeue(&rtwpci->h2c_release_queue);
		if (!skb) {
			rtw89_err(rtwdev, "failed to release fwcmd\n");
			return;
		}
		tx_data = RTW89_PCI_TX_SKB_CB(skb);
		dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

static void rtw89_pci_reclaim_tx_fwcmd(struct rtw89_dev *rtwdev,
				       struct rtw89_pci *rtwpci)
{
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];
	u32 cnt;

	cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring);
	if (!cnt)
		return;
	rtw89_pci_release_fwcmd(rtwdev, rtwpci, cnt, false);
}

static u32 rtw89_pci_rxbd_recalc(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_rx_ring *rx_ring)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	u32 addr_idx = bd_ring->addr.idx;
	u32 cnt, idx;

	idx = rtw89_read32(rtwdev, addr_idx);
	cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, false);

	return cnt;
}

static void rtw89_pci_sync_skb_for_cpu(struct rtw89_dev *rtwdev,
				       struct sk_buff *skb)
{
	struct rtw89_pci_rx_info *rx_info;
	dma_addr_t dma;

	rx_info = RTW89_PCI_RX_SKB_CB(skb);
	dma = rx_info->dma;
	dma_sync_single_for_cpu(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE,
				DMA_FROM_DEVICE);
}

static void rtw89_pci_sync_skb_for_device(struct rtw89_dev *rtwdev,
					  struct sk_buff *skb)
{
	struct rtw89_pci_rx_info *rx_info;
	dma_addr_t dma;

	rx_info = RTW89_PCI_RX_SKB_CB(skb);
	dma = rx_info->dma;
	dma_sync_single_for_device(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE,
				   DMA_FROM_DEVICE);
}

static void rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev,
				       struct sk_buff *skb)
{
	struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
	struct rtw89_pci_rxbd_info *rxbd_info;
	__le32 info;

	rxbd_info = (struct rtw89_pci_rxbd_info *)skb->data;
	info = rxbd_info->dword;

	rx_info->fs = le32_get_bits(info, RTW89_PCI_RXBD_FS);
	rx_info->ls = le32_get_bits(info, RTW89_PCI_RXBD_LS);
	rx_info->len = le32_get_bits(info, RTW89_PCI_RXBD_WRITE_SIZE);
	rx_info->tag = le32_get_bits(info, RTW89_PCI_RXBD_TAG);
}

static int rtw89_pci_validate_rx_tag(struct rtw89_dev *rtwdev,
				     struct rtw89_pci_rx_ring *rx_ring,
				     struct sk_buff *skb)
{
	struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	u32 target_rx_tag;

	if (!info->check_rx_tag)
		return 0;

	/* valid range is 1 ~ 0x1FFF */
	if (rx_ring->target_rx_tag == 0)
		target_rx_tag = 1;
	else
		target_rx_tag = rx_ring->target_rx_tag;

	if (rx_info->tag != target_rx_tag) {
		rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "mismatch RX tag 0x%x 0x%x\n",
			    rx_info->tag, target_rx_tag);
		return -EAGAIN;
	}

	return 0;
}

static
int rtw89_pci_sync_skb_for_device_and_validate_rx_info(struct rtw89_dev *rtwdev,
						       struct rtw89_pci_rx_ring *rx_ring,
						       struct sk_buff *skb)
{
	struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
	int rx_tag_retry = 100;
	int ret;

	do {
		rtw89_pci_sync_skb_for_cpu(rtwdev, skb);
		rtw89_pci_rxbd_info_update(rtwdev, skb);

		ret = rtw89_pci_validate_rx_tag(rtwdev, rx_ring, skb);
		if (ret != -EAGAIN)
			break;
	} while (rx_tag_retry--);

	/* update target rx_tag for next RX */
	rx_ring->target_rx_tag = rx_info->tag + 1;

	return ret;
}
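
/* Note on the RX tag handshake above: the hardware stamps each RXBD with a
 * 13-bit sequence tag (valid range 1 ~ 0x1FFF; target_rx_tag is reset to 0
 * in rtw89_pci_reset_trx_rings()). The retry loop re-syncs the buffer for
 * the CPU up to 100 times, waiting for the DMA write of the expected tag to
 * become visible before trusting the fs/ls/len fields.
 */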

static void rtw89_pci_ctrl_txdma_ch_ax(struct rtw89_dev *rtwdev, bool enable)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1;
	const struct rtw89_reg_def *dma_stop2 = &info->dma_stop2;

	if (enable) {
		rtw89_write32_clr(rtwdev, dma_stop1->addr, dma_stop1->mask);
		if (dma_stop2->addr)
			rtw89_write32_clr(rtwdev, dma_stop2->addr, dma_stop2->mask);
	} else {
		rtw89_write32_set(rtwdev, dma_stop1->addr, dma_stop1->mask);
		if (dma_stop2->addr)
			rtw89_write32_set(rtwdev, dma_stop2->addr, dma_stop2->mask);
	}
}

static void rtw89_pci_ctrl_txdma_fw_ch_ax(struct rtw89_dev *rtwdev, bool enable)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1;

	if (enable)
		rtw89_write32_clr(rtwdev, dma_stop1->addr, B_AX_STOP_CH12);
	else
		rtw89_write32_set(rtwdev, dma_stop1->addr, B_AX_STOP_CH12);
}

static bool
rtw89_skb_put_rx_data(struct rtw89_dev *rtwdev, bool fs, bool ls,
		      struct sk_buff *new,
		      const struct sk_buff *skb, u32 offset,
		      const struct rtw89_pci_rx_info *rx_info,
		      const struct rtw89_rx_desc_info *desc_info)
{
	u32 copy_len = rx_info->len - offset;

	if (unlikely(skb_tailroom(new) < copy_len)) {
		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
			    "invalid rx data length bd_len=%d desc_len=%d offset=%d (fs=%d ls=%d)\n",
			    rx_info->len, desc_info->pkt_size, offset, fs, ls);
		rtw89_hex_dump(rtwdev, RTW89_DBG_TXRX, "rx_data: ",
			       skb->data, rx_info->len);
		/* length of a single segment skb is desc_info->pkt_size */
		if (fs && ls) {
			copy_len = desc_info->pkt_size;
		} else {
			rtw89_info(rtwdev, "drop rx data due to invalid length\n");
			return false;
		}
	}

	skb_put_data(new, skb->data + offset, copy_len);

	return true;
}

static u32 rtw89_pci_get_rx_skb_idx(struct rtw89_dev *rtwdev,
				    struct rtw89_pci_dma_ring *bd_ring)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	u32 wp = bd_ring->wp;

	if (!info->rx_ring_eq_is_full)
		return wp;

	if (++wp >= bd_ring->len)
		wp = 0;

	return wp;
}
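
/* Reassembly flow implemented below: a received frame may span several RX
 * buffers. The first segment (fs) carries the RX descriptor, from which
 * pkt_size is parsed and a target skb ("new") is allocated; every segment's
 * payload is appended after its rtw89_pci_rxbd_info header; the last segment
 * (ls) hands the completed skb to rtw89_core_rx().
 */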

static u32 rtw89_pci_rxbd_deliver_skbs(struct rtw89_dev *rtwdev,
				       struct rtw89_pci_rx_ring *rx_ring)
{
	struct rtw89_rx_desc_info *desc_info = &rx_ring->diliver_desc;
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	struct sk_buff *new = rx_ring->diliver_skb;
	struct rtw89_pci_rx_info *rx_info;
	struct sk_buff *skb;
	u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
	u32 skb_idx;
	u32 offset;
	u32 cnt = 1;
	bool fs, ls;
	int ret;

	skb_idx = rtw89_pci_get_rx_skb_idx(rtwdev, bd_ring);
	skb = rx_ring->buf[skb_idx];

	ret = rtw89_pci_sync_skb_for_device_and_validate_rx_info(rtwdev, rx_ring, skb);
	if (ret) {
		rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
			  bd_ring->wp, ret);
		goto err_sync_device;
	}

	rx_info = RTW89_PCI_RX_SKB_CB(skb);
	fs = info->no_rxbd_fs ? !new : rx_info->fs;
	ls = rx_info->ls;

	if (unlikely(!fs || !ls))
		rtw89_debug(rtwdev, RTW89_DBG_UNEXP,
			    "unexpected fs/ls=%d/%d tag=%u len=%u new->len=%u\n",
			    fs, ls, rx_info->tag, rx_info->len, new ? new->len : 0);

	if (fs) {
		if (new) {
			rtw89_debug(rtwdev, RTW89_DBG_UNEXP,
				    "skb should not be ready before first segment start\n");
			goto err_sync_device;
		}
		if (desc_info->ready) {
			rtw89_warn(rtwdev, "desc info should not be ready before first segment start\n");
			goto err_sync_device;
		}

		rtw89_chip_query_rxdesc(rtwdev, desc_info, skb->data, rxinfo_size);

		new = rtw89_alloc_skb_for_rx(rtwdev, desc_info->pkt_size);
		if (!new)
			goto err_sync_device;

		rx_ring->diliver_skb = new;

		/* first segment has RX desc */
		offset = desc_info->offset + desc_info->rxd_len;
	} else {
		offset = sizeof(struct rtw89_pci_rxbd_info);
		if (!new) {
			rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "no last skb\n");
			goto err_sync_device;
		}
	}
	if (!rtw89_skb_put_rx_data(rtwdev, fs, ls, new, skb, offset, rx_info, desc_info))
		goto err_sync_device;
	rtw89_pci_sync_skb_for_device(rtwdev, skb);
	rtw89_pci_rxbd_increase(rx_ring, 1);

	if (!desc_info->ready) {
		rtw89_warn(rtwdev, "no rx desc information\n");
		goto err_free_resource;
	}
	if (ls) {
		rtw89_core_rx(rtwdev, desc_info, new);
		rx_ring->diliver_skb = NULL;
		desc_info->ready = false;
	}

	return cnt;

err_sync_device:
	rtw89_pci_sync_skb_for_device(rtwdev, skb);
	rtw89_pci_rxbd_increase(rx_ring, 1);
err_free_resource:
	if (new)
		dev_kfree_skb_any(new);
	rx_ring->diliver_skb = NULL;
	desc_info->ready = false;

	return cnt;
}

static void rtw89_pci_rxbd_deliver(struct rtw89_dev *rtwdev,
				   struct rtw89_pci_rx_ring *rx_ring,
				   u32 cnt)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	u32 rx_cnt;

	while (cnt && rtwdev->napi_budget_countdown > 0) {
		rx_cnt = rtw89_pci_rxbd_deliver_skbs(rtwdev, rx_ring);
		if (!rx_cnt) {
			rtw89_err(rtwdev, "failed to deliver RXBD skb\n");

			/* skip the remaining RXBD bufs */
			rtw89_pci_rxbd_increase(rx_ring, cnt);
			break;
		}

		cnt -= rx_cnt;
	}

	rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
}
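
/* Writing the host index (bd_ring->addr.idx) with the updated wp, as done at
 * the end of rtw89_pci_rxbd_deliver() above, is what returns the processed
 * RXBD slots to the DMA engine for reuse.
 */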

static int rtw89_pci_poll_rxq_dma(struct rtw89_dev *rtwdev,
				  struct rtw89_pci *rtwpci, int budget)
{
	struct rtw89_pci_rx_ring *rx_ring;
	int countdown = rtwdev->napi_budget_countdown;
	u32 cnt;

	rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RXQ];

	cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
	if (!cnt)
		return 0;

	cnt = min_t(u32, budget, cnt);

	rtw89_pci_rxbd_deliver(rtwdev, rx_ring, cnt);

	/* In case of flushing pending SKBs, the countdown may be exceeded. */
	if (rtwdev->napi_budget_countdown <= 0)
		return budget;

	return budget - countdown;
}

static void rtw89_pci_tx_status(struct rtw89_dev *rtwdev,
				struct rtw89_pci_tx_ring *tx_ring,
				struct sk_buff *skb, u8 tx_status)
{
	struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
	struct ieee80211_tx_info *info;

	rtw89_core_tx_wait_complete(rtwdev, skb_data, tx_status == RTW89_TX_DONE);

	info = IEEE80211_SKB_CB(skb);
	ieee80211_tx_info_clear_status(info);

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
	if (tx_status == RTW89_TX_DONE) {
		info->flags |= IEEE80211_TX_STAT_ACK;
		tx_ring->tx_acked++;
	} else {
		if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)
			rtw89_debug(rtwdev, RTW89_DBG_FW,
				    "failed to TX of status %x\n", tx_status);
		switch (tx_status) {
		case RTW89_TX_RETRY_LIMIT:
			tx_ring->tx_retry_lmt++;
			break;
		case RTW89_TX_LIFE_TIME:
			tx_ring->tx_life_time++;
			break;
		case RTW89_TX_MACID_DROP:
			tx_ring->tx_mac_id_drop++;
			break;
		default:
			rtw89_warn(rtwdev, "invalid TX status %x\n", tx_status);
			break;
		}
	}

	ieee80211_tx_status_ni(rtwdev->hw, skb);
}

static void rtw89_pci_reclaim_txbd(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_tx_wd *txwd;
	u32 cnt;

	cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring);
	while (cnt--) {
		txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list);
		if (!txwd) {
			rtw89_warn(rtwdev, "No busy txwd pages available\n");
			break;
		}

		list_del_init(&txwd->list);

		/* this skb has been freed by RPP */
		if (skb_queue_len(&txwd->queue) == 0)
			rtw89_pci_enqueue_txwd(tx_ring, txwd);
	}
}

static void rtw89_pci_release_busy_txwd(struct rtw89_dev *rtwdev,
					struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
	struct rtw89_pci_tx_wd *txwd;
	int i;

	for (i = 0; i < wd_ring->page_num; i++) {
		txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list);
		if (!txwd)
			break;

		list_del_init(&txwd->list);
	}
}
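
/* TXWD page lifecycle: a page is dequeued from the wd_ring on submission,
 * parked on tx_ring->busy_pages while the hardware owns it, and re-enqueued
 * to the wd_ring once its TXBD has been consumed and the RPP (release
 * report) has freed all skbs queued on it.
 */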

static void rtw89_pci_release_txwd_skb(struct rtw89_dev *rtwdev,
				       struct rtw89_pci_tx_ring *tx_ring,
				       struct rtw89_pci_tx_wd *txwd, u16 seq,
				       u8 tx_status)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_data *tx_data;
	struct sk_buff *skb, *tmp;
	u8 txch = tx_ring->txch;

	if (!list_empty(&txwd->list)) {
		rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
		/* In low power mode, an RPP can be received before the TX BD
		 * is updated. In normal mode, that should not happen, so give
		 * it a warning.
		 */
		if (!rtwpci->low_power && !list_empty(&txwd->list))
			rtw89_warn(rtwdev, "queue %d txwd %d is not idle\n",
				   txch, seq);
	}

	skb_queue_walk_safe(&txwd->queue, skb, tmp) {
		skb_unlink(skb, &txwd->queue);

		tx_data = RTW89_PCI_TX_SKB_CB(skb);
		dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
				 DMA_TO_DEVICE);

		rtw89_pci_tx_status(rtwdev, tx_ring, skb, tx_status);
	}

	if (list_empty(&txwd->list))
		rtw89_pci_enqueue_txwd(tx_ring, txwd);
}

static void rtw89_pci_release_rpp(struct rtw89_dev *rtwdev,
				  struct rtw89_pci_rpp_fmt *rpp)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring;
	struct rtw89_pci_tx_wd_ring *wd_ring;
	struct rtw89_pci_tx_wd *txwd;
	u16 seq;
	u8 qsel, tx_status, txch;

	seq = le32_get_bits(rpp->dword, RTW89_PCI_RPP_SEQ);
	qsel = le32_get_bits(rpp->dword, RTW89_PCI_RPP_QSEL);
	tx_status = le32_get_bits(rpp->dword, RTW89_PCI_RPP_TX_STATUS);
	txch = rtw89_core_get_ch_dma(rtwdev, qsel);

	if (txch == RTW89_TXCH_CH12) {
		rtw89_warn(rtwdev, "should be no fwcmd release report\n");
		return;
	}

	tx_ring = &rtwpci->tx_rings[txch];
	wd_ring = &tx_ring->wd_ring;
	txwd = &wd_ring->pages[seq];

	rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, seq, tx_status);
}

static void rtw89_pci_release_pending_txwd_skb(struct rtw89_dev *rtwdev,
					       struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
	struct rtw89_pci_tx_wd *txwd;
	int i;

	for (i = 0; i < wd_ring->page_num; i++) {
		txwd = &wd_ring->pages[i];

		if (!list_empty(&txwd->list))
			continue;

		rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, i, RTW89_TX_MACID_DROP);
	}
}

static u32 rtw89_pci_release_tx_skbs(struct rtw89_dev *rtwdev,
				     struct rtw89_pci_rx_ring *rx_ring,
				     u32 max_cnt)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	struct rtw89_pci_rx_info *rx_info;
	struct rtw89_pci_rpp_fmt *rpp;
	struct rtw89_rx_desc_info desc_info = {};
	struct sk_buff *skb;
	u32 cnt = 0;
	u32 rpp_size = sizeof(struct rtw89_pci_rpp_fmt);
	u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
	u32 skb_idx;
	u32 offset;
	int ret;

	skb_idx = rtw89_pci_get_rx_skb_idx(rtwdev, bd_ring);
	skb = rx_ring->buf[skb_idx];

	ret = rtw89_pci_sync_skb_for_device_and_validate_rx_info(rtwdev, rx_ring, skb);
	if (ret) {
		rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
			  bd_ring->wp, ret);
		goto err_sync_device;
	}

	rx_info = RTW89_PCI_RX_SKB_CB(skb);
	if (!rx_info->fs || !rx_info->ls) {
		rtw89_err(rtwdev, "cannot process RP frame not set FS/LS\n");
		return cnt;
	}

	rtw89_chip_query_rxdesc(rtwdev, &desc_info, skb->data, rxinfo_size);

	/* first segment has RX desc */
	offset = desc_info.offset + desc_info.rxd_len;
	for (; offset + rpp_size <= rx_info->len; offset += rpp_size) {
		rpp = (struct rtw89_pci_rpp_fmt *)(skb->data + offset);
		rtw89_pci_release_rpp(rtwdev, rpp);
	}

	rtw89_pci_sync_skb_for_device(rtwdev, skb);
	rtw89_pci_rxbd_increase(rx_ring, 1);
	cnt++;

	return cnt;

err_sync_device:
	rtw89_pci_sync_skb_for_device(rtwdev, skb);
	return 0;
}
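
/* RPQ buffer layout handled above: one RX buffer on the release-report queue
 * holds an rtw89_pci_rxbd_info header and an RX descriptor, followed by a
 * packed array of rtw89_pci_rpp_fmt entries; each entry names the queue
 * (qsel), TXWD sequence and TX status of one completed transmission.
 */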

static void rtw89_pci_release_tx(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_rx_ring *rx_ring,
				 u32 cnt)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	u32 release_cnt;

	while (cnt) {
		release_cnt = rtw89_pci_release_tx_skbs(rtwdev, rx_ring, cnt);
		if (!release_cnt) {
			rtw89_err(rtwdev, "failed to release TX skbs\n");

			/* skip the remaining RXBD bufs */
			rtw89_pci_rxbd_increase(rx_ring, cnt);
			break;
		}

		cnt -= release_cnt;
	}

	rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
}

static int rtw89_pci_poll_rpq_dma(struct rtw89_dev *rtwdev,
				  struct rtw89_pci *rtwpci, int budget)
{
	struct rtw89_pci_rx_ring *rx_ring;
	u32 cnt;
	int work_done;

	rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];

	spin_lock_bh(&rtwpci->trx_lock);

	cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
	if (cnt == 0)
		goto out_unlock;

	rtw89_pci_release_tx(rtwdev, rx_ring, cnt);

out_unlock:
	spin_unlock_bh(&rtwpci->trx_lock);

	/* always release all RPQ */
	work_done = min_t(int, cnt, budget);
	rtwdev->napi_budget_countdown -= work_done;

	return work_done;
}

static void rtw89_pci_isr_rxd_unavail(struct rtw89_dev *rtwdev,
				      struct rtw89_pci *rtwpci)
{
	struct rtw89_pci_rx_ring *rx_ring;
	struct rtw89_pci_dma_ring *bd_ring;
	u32 reg_idx;
	u16 hw_idx, hw_idx_next, host_idx;
	int i;

	for (i = 0; i < RTW89_RXCH_NUM; i++) {
		rx_ring = &rtwpci->rx_rings[i];
		bd_ring = &rx_ring->bd_ring;

		reg_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
		hw_idx = FIELD_GET(TXBD_HW_IDX_MASK, reg_idx);
		host_idx = FIELD_GET(TXBD_HOST_IDX_MASK, reg_idx);
		hw_idx_next = (hw_idx + 1) % bd_ring->len;

		if (hw_idx_next == host_idx)
			rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "%d RXD unavailable\n", i);

		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
			    "%d RXD unavailable, idx=0x%08x, len=%d\n",
			    i, reg_idx, bd_ring->len);
	}
}

void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev,
			       struct rtw89_pci *rtwpci,
			       struct rtw89_pci_isrs *isrs)
{
	isrs->halt_c2h_isrs = rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs;
	isrs->isrs[0] = rtw89_read32(rtwdev, R_AX_PCIE_HISR00) & rtwpci->intrs[0];
	isrs->isrs[1] = rtw89_read32(rtwdev, R_AX_PCIE_HISR10) & rtwpci->intrs[1];

	rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs);
	rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isrs->isrs[0]);
	rtw89_write32(rtwdev, R_AX_PCIE_HISR10, isrs->isrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_recognize_intrs);
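
/* The HISR status registers are write-one-to-clear: reading the status,
 * masking it with the enabled bits, and writing the observed bits back (as
 * above and in the variants below) acknowledges exactly the events this
 * handler will service.
 */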

void rtw89_pci_recognize_intrs_v1(struct rtw89_dev *rtwdev,
				  struct rtw89_pci *rtwpci,
				  struct rtw89_pci_isrs *isrs)
{
	isrs->ind_isrs = rtw89_read32(rtwdev, R_AX_PCIE_HISR00_V1) & rtwpci->ind_intrs;
	isrs->halt_c2h_isrs = isrs->ind_isrs & B_AX_HS0ISR_IND_INT_EN ?
			      rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs : 0;
	isrs->isrs[0] = isrs->ind_isrs & B_AX_HCI_AXIDMA_INT_EN ?
			rtw89_read32(rtwdev, R_AX_HAXI_HISR00) & rtwpci->intrs[0] : 0;
	isrs->isrs[1] = isrs->ind_isrs & B_AX_HS1ISR_IND_INT_EN ?
			rtw89_read32(rtwdev, R_AX_HISR1) & rtwpci->intrs[1] : 0;

	if (isrs->halt_c2h_isrs)
		rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs);
	if (isrs->isrs[0])
		rtw89_write32(rtwdev, R_AX_HAXI_HISR00, isrs->isrs[0]);
	if (isrs->isrs[1])
		rtw89_write32(rtwdev, R_AX_HISR1, isrs->isrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v1);

void rtw89_pci_recognize_intrs_v2(struct rtw89_dev *rtwdev,
				  struct rtw89_pci *rtwpci,
				  struct rtw89_pci_isrs *isrs)
{
	isrs->ind_isrs = rtw89_read32(rtwdev, R_BE_PCIE_HISR) & rtwpci->ind_intrs;
	isrs->halt_c2h_isrs = isrs->ind_isrs & B_BE_HS0ISR_IND_INT ?
			      rtw89_read32(rtwdev, R_BE_HISR0) & rtwpci->halt_c2h_intrs : 0;
	isrs->isrs[0] = isrs->ind_isrs & B_BE_HCI_AXIDMA_INT ?
			rtw89_read32(rtwdev, R_BE_HAXI_HISR00) & rtwpci->intrs[0] : 0;
	isrs->isrs[1] = rtw89_read32(rtwdev, R_BE_PCIE_DMA_ISR) & rtwpci->intrs[1];

	if (isrs->halt_c2h_isrs)
		rtw89_write32(rtwdev, R_BE_HISR0, isrs->halt_c2h_isrs);
	if (isrs->isrs[0])
		rtw89_write32(rtwdev, R_BE_HAXI_HISR00, isrs->isrs[0]);
	if (isrs->isrs[1])
		rtw89_write32(rtwdev, R_BE_PCIE_DMA_ISR, isrs->isrs[1]);
	rtw89_write32(rtwdev, R_BE_PCIE_HISR, isrs->ind_isrs);
}
EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v2);

void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, rtwpci->intrs[0]);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, rtwpci->intrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_enable_intr);

void rtw89_pci_disable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_HIMR0, 0);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, 0);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, 0);
}
EXPORT_SYMBOL(rtw89_pci_disable_intr);

void rtw89_pci_enable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, rtwpci->ind_intrs);
	rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
	rtw89_write32(rtwdev, R_AX_HAXI_HIMR00, rtwpci->intrs[0]);
	rtw89_write32(rtwdev, R_AX_HIMR1, rtwpci->intrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_enable_intr_v1);

void rtw89_pci_disable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, 0);
}
EXPORT_SYMBOL(rtw89_pci_disable_intr_v1);

void rtw89_pci_enable_intr_v2(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_BE_HIMR0, rtwpci->halt_c2h_intrs);
	rtw89_write32(rtwdev, R_BE_HAXI_HIMR00, rtwpci->intrs[0]);
	rtw89_write32(rtwdev, R_BE_PCIE_DMA_IMR_0_V1, rtwpci->intrs[1]);
	rtw89_write32(rtwdev, R_BE_PCIE_HIMR0, rtwpci->ind_intrs);
}
EXPORT_SYMBOL(rtw89_pci_enable_intr_v2);

void rtw89_pci_disable_intr_v2(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_BE_PCIE_HIMR0, 0);
	rtw89_write32(rtwdev, R_BE_PCIE_DMA_IMR_0_V1, 0);
}
EXPORT_SYMBOL(rtw89_pci_disable_intr_v2);
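
/* Three interrupt-register generations are handled above: the plain AX
 * layout (R_AX_PCIE_HISR00/10 read directly), the AX v1 layout with an
 * indirect indicator register (R_AX_PCIE_HISR00_V1) gating which leaf
 * registers are read, and the BE v2 layout (R_BE_PCIE_HISR plus a dedicated
 * DMA ISR).
 */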

static void rtw89_pci_ops_recovery_start(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw89_chip_disable_intr(rtwdev, rtwpci);
	rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_START);
	rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static void rtw89_pci_ops_recovery_complete(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw89_chip_disable_intr(rtwdev, rtwpci);
	rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_COMPLETE);
	rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static void rtw89_pci_low_power_interrupt_handler(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	int budget = NAPI_POLL_WEIGHT;

	/* To prevent the RXQ from getting stuck by running out of budget. */
	rtwdev->napi_budget_countdown = budget;

	rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, budget);
	rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, budget);
}

static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev)
{
	struct rtw89_dev *rtwdev = dev;
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	const struct rtw89_pci_gen_def *gen_def = info->gen_def;
	struct rtw89_pci_isrs isrs;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw89_chip_recognize_intrs(rtwdev, rtwpci, &isrs);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);

	if (unlikely(isrs.isrs[0] & gen_def->isr_rdu))
		rtw89_pci_isr_rxd_unavail(rtwdev, rtwpci);

	if (unlikely(isrs.halt_c2h_isrs & gen_def->isr_halt_c2h))
		rtw89_ser_notify(rtwdev, rtw89_mac_get_err_status(rtwdev));

	if (unlikely(isrs.halt_c2h_isrs & gen_def->isr_wdt_timeout))
		rtw89_ser_notify(rtwdev, MAC_AX_ERR_L2_ERR_WDT_TIMEOUT_INT);

	if (unlikely(rtwpci->under_recovery))
		goto enable_intr;

	if (unlikely(rtwpci->low_power)) {
		rtw89_pci_low_power_interrupt_handler(rtwdev);
		goto enable_intr;
	}

	if (likely(rtwpci->running)) {
		local_bh_disable();
		napi_schedule(&rtwdev->napi);
		local_bh_enable();
	}

	return IRQ_HANDLED;

enable_intr:
	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	if (likely(rtwpci->running))
		rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
	return IRQ_HANDLED;
}

static irqreturn_t rtw89_pci_interrupt_handler(int irq, void *dev)
{
	struct rtw89_dev *rtwdev = dev;
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;
	irqreturn_t irqret = IRQ_WAKE_THREAD;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);

	/* If an interrupt event is already on the way, it can still trigger
	 * an interrupt even after pci_stop() has turned off the IMR.
	 */
	if (unlikely(!rtwpci->running)) {
		irqret = IRQ_HANDLED;
		goto exit;
	}

	rtw89_chip_disable_intr(rtwdev, rtwpci);
exit:
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);

	return irqret;
}

#define DEF_TXCHADDRS_TYPE2(gen, ch_idx, txch, v...) \
	[RTW89_TXCH_##ch_idx] = { \
		.num = R_##gen##_##txch##_TXBD_NUM ##v, \
		.idx = R_##gen##_##txch##_TXBD_IDX ##v, \
		.bdram = 0, \
		.desa_l = R_##gen##_##txch##_TXBD_DESA_L ##v, \
		.desa_h = R_##gen##_##txch##_TXBD_DESA_H ##v, \
	}
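
/* Token-pasting example: DEF_TXCHADDRS_TYPE2(BE, ACH0, CH0, _V1) expands to
 *   [RTW89_TXCH_ACH0] = { .num = R_BE_CH0_TXBD_NUM_V1,
 *                         .idx = R_BE_CH0_TXBD_IDX_V1, ... },
 * mapping one logical TX channel to its generation-specific registers.
 */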

#define DEF_TXCHADDRS_TYPE1(info, txch, v...) \
	[RTW89_TXCH_##txch] = { \
		.num = R_AX_##txch##_TXBD_NUM ##v, \
		.idx = R_AX_##txch##_TXBD_IDX ##v, \
		.bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
		.desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
		.desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
	}

#define DEF_TXCHADDRS(info, txch, v...) \
	[RTW89_TXCH_##txch] = { \
		.num = R_AX_##txch##_TXBD_NUM, \
		.idx = R_AX_##txch##_TXBD_IDX, \
		.bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
		.desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
		.desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
	}

#define DEF_RXCHADDRS(gen, ch_idx, rxch, v...) \
	[RTW89_RXCH_##ch_idx] = { \
		.num = R_##gen##_##rxch##_RXBD_NUM ##v, \
		.idx = R_##gen##_##rxch##_RXBD_IDX ##v, \
		.desa_l = R_##gen##_##rxch##_RXBD_DESA_L ##v, \
		.desa_h = R_##gen##_##rxch##_RXBD_DESA_H ##v, \
	}

const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set = {
	.tx = {
		DEF_TXCHADDRS(info, ACH0),
		DEF_TXCHADDRS(info, ACH1),
		DEF_TXCHADDRS(info, ACH2),
		DEF_TXCHADDRS(info, ACH3),
		DEF_TXCHADDRS(info, ACH4),
		DEF_TXCHADDRS(info, ACH5),
		DEF_TXCHADDRS(info, ACH6),
		DEF_TXCHADDRS(info, ACH7),
		DEF_TXCHADDRS(info, CH8),
		DEF_TXCHADDRS(info, CH9),
		DEF_TXCHADDRS_TYPE1(info, CH10),
		DEF_TXCHADDRS_TYPE1(info, CH11),
		DEF_TXCHADDRS(info, CH12),
	},
	.rx = {
		DEF_RXCHADDRS(AX, RXQ, RXQ),
		DEF_RXCHADDRS(AX, RPQ, RPQ),
	},
};
EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set);

const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1 = {
	.tx = {
		DEF_TXCHADDRS(info, ACH0, _V1),
		DEF_TXCHADDRS(info, ACH1, _V1),
		DEF_TXCHADDRS(info, ACH2, _V1),
		DEF_TXCHADDRS(info, ACH3, _V1),
		DEF_TXCHADDRS(info, ACH4, _V1),
		DEF_TXCHADDRS(info, ACH5, _V1),
		DEF_TXCHADDRS(info, ACH6, _V1),
		DEF_TXCHADDRS(info, ACH7, _V1),
		DEF_TXCHADDRS(info, CH8, _V1),
		DEF_TXCHADDRS(info, CH9, _V1),
		DEF_TXCHADDRS_TYPE1(info, CH10, _V1),
		DEF_TXCHADDRS_TYPE1(info, CH11, _V1),
		DEF_TXCHADDRS(info, CH12, _V1),
	},
	.rx = {
		DEF_RXCHADDRS(AX, RXQ, RXQ, _V1),
		DEF_RXCHADDRS(AX, RPQ, RPQ, _V1),
	},
};
EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_v1);

const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_be = {
	.tx = {
		DEF_TXCHADDRS_TYPE2(BE, ACH0, CH0, _V1),
		DEF_TXCHADDRS_TYPE2(BE, ACH1, CH1, _V1),
		DEF_TXCHADDRS_TYPE2(BE, ACH2, CH2, _V1),
		DEF_TXCHADDRS_TYPE2(BE, ACH3, CH3, _V1),
		DEF_TXCHADDRS_TYPE2(BE, ACH4, CH4, _V1),
		DEF_TXCHADDRS_TYPE2(BE, ACH5, CH5, _V1),
		DEF_TXCHADDRS_TYPE2(BE, ACH6, CH6, _V1),
		DEF_TXCHADDRS_TYPE2(BE, ACH7, CH7, _V1),
		DEF_TXCHADDRS_TYPE2(BE, CH8, CH8, _V1),
		DEF_TXCHADDRS_TYPE2(BE, CH9, CH9, _V1),
		DEF_TXCHADDRS_TYPE2(BE, CH10, CH10, _V1),
		DEF_TXCHADDRS_TYPE2(BE, CH11, CH11, _V1),
		DEF_TXCHADDRS_TYPE2(BE, CH12, CH12, _V1),
	},
	.rx = {
		DEF_RXCHADDRS(BE, RXQ, RXQ0, _V1),
		DEF_RXCHADDRS(BE, RPQ, RPQ0, _V1),
	},
};
EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_be);

#undef DEF_TXCHADDRS_TYPE1
#undef DEF_TXCHADDRS
#undef DEF_RXCHADDRS
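
/* Illustrative lookup (not driver code) using the accessor below:
 *
 *	const struct rtw89_pci_ch_dma_addr *addr;
 *
 *	if (!rtw89_pci_get_txch_addrs(rtwdev, RTW89_TXCH_ACH0, &addr))
 *		rtw89_write16(rtwdev, addr->num, bd_ring->len);
 */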

static int rtw89_pci_get_txch_addrs(struct rtw89_dev *rtwdev,
				    enum rtw89_tx_channel txch,
				    const struct rtw89_pci_ch_dma_addr **addr)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;

	if (txch >= RTW89_TXCH_NUM)
		return -EINVAL;

	*addr = &info->dma_addr_set->tx[txch];

	return 0;
}

static int rtw89_pci_get_rxch_addrs(struct rtw89_dev *rtwdev,
				    enum rtw89_rx_channel rxch,
				    const struct rtw89_pci_ch_dma_addr **addr)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;

	if (rxch >= RTW89_RXCH_NUM)
		return -EINVAL;

	*addr = &info->dma_addr_set->rx[rxch];

	return 0;
}

static u32 rtw89_pci_get_avail_txbd_num(struct rtw89_pci_tx_ring *ring)
{
	struct rtw89_pci_dma_ring *bd_ring = &ring->bd_ring;

	/* one desc is reserved to tell a full ring from an empty one */
	if (bd_ring->rp > bd_ring->wp)
		return bd_ring->rp - bd_ring->wp - 1;

	return bd_ring->len - (bd_ring->wp - bd_ring->rp) - 1;
}

static
u32 __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];
	u32 cnt;

	spin_lock_bh(&rtwpci->trx_lock);
	rtw89_pci_reclaim_tx_fwcmd(rtwdev, rtwpci);
	cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	spin_unlock_bh(&rtwpci->trx_lock);

	return cnt;
}

static
u32 __rtw89_pci_check_and_reclaim_tx_resource_noio(struct rtw89_dev *rtwdev,
						   u8 txch)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
	u32 cnt;

	spin_lock_bh(&rtwpci->trx_lock);
	cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	if (txch != RTW89_TXCH_CH12)
		cnt = min(cnt, wd_ring->curr_num);
	spin_unlock_bh(&rtwpci->trx_lock);

	return cnt;
}
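
/* Worked example for rtw89_pci_get_avail_txbd_num() above: with len = 256
 * and wp == rp, len - (wp - rp) - 1 = 255 TXBDs are usable; one slot stays
 * reserved so a completely full ring is distinguishable from an empty one.
 */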

static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
						     u8 txch)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 bd_cnt, wd_cnt, min_cnt = 0;
	struct rtw89_pci_rx_ring *rx_ring;
	enum rtw89_debug_mask debug_mask;
	u32 cnt;

	rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];

	spin_lock_bh(&rtwpci->trx_lock);
	bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	wd_cnt = wd_ring->curr_num;

	if (wd_cnt == 0 || bd_cnt == 0) {
		cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
		if (cnt)
			rtw89_pci_release_tx(rtwdev, rx_ring, cnt);
		else if (wd_cnt == 0)
			goto out_unlock;

		bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
		if (bd_cnt == 0)
			rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
	}

	bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	wd_cnt = wd_ring->curr_num;
	min_cnt = min(bd_cnt, wd_cnt);
	if (min_cnt == 0) {
		/* This message can show up frequently in low power mode or
		 * under high traffic with small-FIFO chips, and we have
		 * recognized it as normal behavior, so print it with the
		 * mask RTW89_DBG_TXRX in these situations.
		 */
		if (rtwpci->low_power || chip->small_fifo_size)
			debug_mask = RTW89_DBG_TXRX;
		else
			debug_mask = RTW89_DBG_UNEXP;

		rtw89_debug(rtwdev, debug_mask,
			    "still no tx resource after reclaim: wd_cnt=%d bd_cnt=%d\n",
			    wd_cnt, bd_cnt);
	}

out_unlock:
	spin_unlock_bh(&rtwpci->trx_lock);

	return min_cnt;
}

static u32 rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
						   u8 txch)
{
	if (rtwdev->hci.paused)
		return __rtw89_pci_check_and_reclaim_tx_resource_noio(rtwdev, txch);

	if (txch == RTW89_TXCH_CH12)
		return __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(rtwdev);

	return __rtw89_pci_check_and_reclaim_tx_resource(rtwdev, txch);
}

static void __rtw89_pci_tx_kick_off(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 host_idx, addr;

	spin_lock_bh(&rtwpci->trx_lock);

	addr = bd_ring->addr.idx;
	host_idx = bd_ring->wp;
	rtw89_write16(rtwdev, addr, host_idx);

	spin_unlock_bh(&rtwpci->trx_lock);
}

static void rtw89_pci_tx_bd_ring_update(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring,
					int n_txbd)
{
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 host_idx, len;

	len = bd_ring->len;
	host_idx = bd_ring->wp + n_txbd;
	host_idx = host_idx < len ? host_idx : host_idx - len;

	bd_ring->wp = host_idx;
}

static void rtw89_pci_ops_tx_kick_off(struct rtw89_dev *rtwdev, u8 txch)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];

	if (rtwdev->hci.paused) {
		set_bit(txch, rtwpci->kick_map);
		return;
	}

	__rtw89_pci_tx_kick_off(rtwdev, tx_ring);
}

static void rtw89_pci_tx_kick_off_pending(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring;
	int txch;

	for (txch = 0; txch < RTW89_TXCH_NUM; txch++) {
		if (!test_and_clear_bit(txch, rtwpci->kick_map))
			continue;

		tx_ring = &rtwpci->tx_rings[txch];
		__rtw89_pci_tx_kick_off(rtwdev, tx_ring);
	}
}

static void __pci_flush_txch(struct rtw89_dev *rtwdev, u8 txch, bool drop)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 cur_idx, cur_rp;
	u8 i;

	/* Because the time taken by the I/O is somewhat dynamic, it's hard to
	 * define a reasonable fixed total timeout for the read_poll_timeout*
	 * helpers. Instead, we can ensure a reasonable number of polling
	 * attempts, so we just use a for loop with udelay here.
	 */
	for (i = 0; i < 60; i++) {
		cur_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
		cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
		if (cur_rp == bd_ring->wp)
			return;

		udelay(1);
	}

	if (!drop)
		rtw89_info(rtwdev, "timed out to flush pci txch: %d\n", txch);
}

static void __rtw89_pci_ops_flush_txchs(struct rtw89_dev *rtwdev, u32 txchs,
					bool drop)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	u8 i;

	for (i = 0; i < RTW89_TXCH_NUM; i++) {
		/* It may be unnecessary to flush FWCMD queue. */
		if (i == RTW89_TXCH_CH12)
			continue;
		if (info->tx_dma_ch_mask & BIT(i))
			continue;

		if (txchs & BIT(i))
			__pci_flush_txch(rtwdev, i, drop);
	}
}

static void rtw89_pci_ops_flush_queues(struct rtw89_dev *rtwdev, u32 queues,
				       bool drop)
{
	__rtw89_pci_ops_flush_txchs(rtwdev, BIT(RTW89_TXCH_NUM) - 1, drop);
}

u32 rtw89_pci_fill_txaddr_info(struct rtw89_dev *rtwdev,
			       void *txaddr_info_addr, u32 total_len,
			       dma_addr_t dma, u8 *add_info_nr)
{
	struct rtw89_pci_tx_addr_info_32 *txaddr_info = txaddr_info_addr;
	__le16 option;

	txaddr_info->length = cpu_to_le16(total_len);
	option = cpu_to_le16(RTW89_PCI_ADDR_MSDU_LS | RTW89_PCI_ADDR_NUM(1));
	option |= le16_encode_bits(upper_32_bits(dma), RTW89_PCI_ADDR_HIGH_MASK);
	txaddr_info->option = option;
	txaddr_info->dma = cpu_to_le32(dma);

	*add_info_nr = 1;

	return sizeof(*txaddr_info);
}
EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info);
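
/* The v1 variant below scatters one mapped buffer across up to
 * RTW89_TXADDR_INFO_NR_V1 entries of at most TXADDR_INFO_LENTHG_V1_MAX bytes
 * each, setting the LS (last-segment) bit only on the entry that exhausts
 * total_len.
 */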

u32 rtw89_pci_fill_txaddr_info_v1(struct rtw89_dev *rtwdev,
				  void *txaddr_info_addr, u32 total_len,
				  dma_addr_t dma, u8 *add_info_nr)
{
	struct rtw89_pci_tx_addr_info_32_v1 *txaddr_info = txaddr_info_addr;
	u32 remain = total_len;
	u32 len;
	u16 length_option;
	int n;

	for (n = 0; n < RTW89_TXADDR_INFO_NR_V1 && remain; n++) {
		len = remain >= TXADDR_INFO_LENTHG_V1_MAX ?
		      TXADDR_INFO_LENTHG_V1_MAX : remain;
		remain -= len;

		length_option = FIELD_PREP(B_PCIADDR_LEN_V1_MASK, len) |
				FIELD_PREP(B_PCIADDR_HIGH_SEL_V1_MASK, 0) |
				FIELD_PREP(B_PCIADDR_LS_V1_MASK, remain == 0);
		length_option |= u16_encode_bits(upper_32_bits(dma),
						 B_PCIADDR_HIGH_SEL_V1_MASK);
		txaddr_info->length_opt = cpu_to_le16(length_option);
		txaddr_info->dma_low_lsb = cpu_to_le16(FIELD_GET(GENMASK(15, 0), dma));
		txaddr_info->dma_low_msb = cpu_to_le16(FIELD_GET(GENMASK(31, 16), dma));

		dma += len;
		txaddr_info++;
	}

	WARN_ONCE(remain, "length overflow remain=%u total_len=%u",
		  remain, total_len);

	*add_info_nr = n;

	return n * sizeof(*txaddr_info);
}
EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info_v1);
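
/* TXWD page layout written by rtw89_pci_txwd_submit() below, in order: the
 * WD body, an optional WD info block (when en_wd_info is set), the
 * rtw89_pci_tx_wp_info sequence block, then the TX address-info entries;
 * txwd->len is the sum of all these parts.
 */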

static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_tx_ring *tx_ring,
				 struct rtw89_pci_tx_wd *txwd,
				 struct rtw89_core_tx_request *tx_req)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
	struct rtw89_pci_tx_wp_info *txwp_info;
	void *txaddr_info_addr;
	struct pci_dev *pdev = rtwpci->pdev;
	struct sk_buff *skb = tx_req->skb;
	struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
	struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
	bool en_wd_info = desc_info->en_wd_info;
	u32 txwd_len;
	u32 txwp_len;
	u32 txaddr_info_len;
	dma_addr_t dma;
	int ret;

	dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma)) {
		rtw89_err(rtwdev, "failed to map skb dma data\n");
		ret = -EBUSY;
		goto err;
	}

	tx_data->dma = dma;
	rcu_assign_pointer(skb_data->wait, NULL);

	txwp_len = sizeof(*txwp_info);
	txwd_len = chip->txwd_body_size;
	txwd_len += en_wd_info ? chip->txwd_info_size : 0;

#if defined(__linux__)
	txwp_info = txwd->vaddr + txwd_len;
#elif defined(__FreeBSD__)
	txwp_info = (struct rtw89_pci_tx_wp_info *)((u8 *)txwd->vaddr + txwd_len);
#endif
	txwp_info->seq0 = cpu_to_le16(txwd->seq | RTW89_PCI_TXWP_VALID);
	txwp_info->seq1 = 0;
	txwp_info->seq2 = 0;
	txwp_info->seq3 = 0;

	tx_ring->tx_cnt++;
#if defined(__linux__)
	txaddr_info_addr = txwd->vaddr + txwd_len + txwp_len;
#elif defined(__FreeBSD__)
	txaddr_info_addr = (u8 *)txwd->vaddr + txwd_len + txwp_len;
#endif
	txaddr_info_len =
		rtw89_chip_fill_txaddr_info(rtwdev, txaddr_info_addr, skb->len,
					    dma, &desc_info->addr_info_nr);

	txwd->len = txwd_len + txwp_len + txaddr_info_len;

	rtw89_chip_fill_txdesc(rtwdev, desc_info, txwd->vaddr);

	skb_queue_tail(&txwd->queue, skb);

	return 0;

err:
	return ret;
}

static int rtw89_pci_fwcmd_submit(struct rtw89_dev *rtwdev,
				  struct rtw89_pci_tx_ring *tx_ring,
				  struct rtw89_pci_tx_bd_32 *txbd,
				  struct rtw89_core_tx_request *tx_req)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
	void *txdesc;
	int txdesc_size = chip->h2c_desc_size;
	struct pci_dev *pdev = rtwpci->pdev;
	struct sk_buff *skb = tx_req->skb;
	struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
	dma_addr_t dma;
	__le16 opt;

	txdesc = skb_push(skb, txdesc_size);
	memset(txdesc, 0, txdesc_size);
	rtw89_chip_fill_txdesc_fwcmd(rtwdev, desc_info, txdesc);

	dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma)) {
		rtw89_err(rtwdev, "failed to map fwcmd dma data\n");
		return -EBUSY;
	}

	tx_data->dma = dma;
	opt = cpu_to_le16(RTW89_PCI_TXBD_OPT_LS);
	opt |= le16_encode_bits(upper_32_bits(dma), RTW89_PCI_TXBD_OPT_DMA_HI);
	txbd->opt = opt;
	txbd->length = cpu_to_le16(skb->len);
	txbd->dma = cpu_to_le32(tx_data->dma);
	skb_queue_tail(&rtwpci->h2c_queue, skb);

	rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1);

	return 0;
}

static int rtw89_pci_txbd_submit(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_tx_ring *tx_ring,
				 struct rtw89_pci_tx_bd_32 *txbd,
				 struct rtw89_core_tx_request *tx_req)
{
	struct rtw89_pci_tx_wd *txwd;
	__le16 opt;
	int ret;

	/* FWCMD queue doesn't have wd pages. Instead, it submits the CMD
	 * buffer with WD BODY only. So here we don't need to check the free
	 * pages of the wd ring.
	 */
	if (tx_ring->txch == RTW89_TXCH_CH12)
		return rtw89_pci_fwcmd_submit(rtwdev, tx_ring, txbd, tx_req);

	txwd = rtw89_pci_dequeue_txwd(tx_ring);
	if (!txwd) {
		rtw89_err(rtwdev, "no available TXWD\n");
		ret = -ENOSPC;
		goto err;
	}

	ret = rtw89_pci_txwd_submit(rtwdev, tx_ring, txwd, tx_req);
	if (ret) {
		rtw89_err(rtwdev, "failed to submit TXWD %d\n", txwd->seq);
		goto err_enqueue_wd;
	}

	list_add_tail(&txwd->list, &tx_ring->busy_pages);

	opt = cpu_to_le16(RTW89_PCI_TXBD_OPT_LS);
	opt |= le16_encode_bits(upper_32_bits(txwd->paddr), RTW89_PCI_TXBD_OPT_DMA_HI);
	txbd->opt = opt;
	txbd->length = cpu_to_le16(txwd->len);
	txbd->dma = cpu_to_le32(txwd->paddr);

	rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1);

	return 0;

err_enqueue_wd:
	rtw89_pci_enqueue_txwd(tx_ring, txwd);
err:
	return ret;
}

static int rtw89_pci_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req,
			      u8 txch)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring;
	struct rtw89_pci_tx_bd_32 *txbd;
	u32 n_avail_txbd;
	int ret = 0;

	/* check the tx type and dma channel for fw cmd queue */
	if ((txch == RTW89_TXCH_CH12 ||
	     tx_req->tx_type == RTW89_CORE_TX_TYPE_FWCMD) &&
	    (txch != RTW89_TXCH_CH12 ||
	     tx_req->tx_type != RTW89_CORE_TX_TYPE_FWCMD)) {
		rtw89_err(rtwdev, "only fw cmd uses dma channel 12\n");
		return -EINVAL;
	}

	tx_ring = &rtwpci->tx_rings[txch];
	spin_lock_bh(&rtwpci->trx_lock);

	n_avail_txbd = rtw89_pci_get_avail_txbd_num(tx_ring);
	if (n_avail_txbd == 0) {
		rtw89_err(rtwdev, "no available TXBD\n");
		ret = -ENOSPC;
		goto err_unlock;
	}

	txbd = rtw89_pci_get_next_txbd(tx_ring);
	ret = rtw89_pci_txbd_submit(rtwdev, tx_ring, txbd, tx_req);
	if (ret) {
		rtw89_err(rtwdev, "failed to submit TXBD\n");
		goto err_unlock;
	}

	spin_unlock_bh(&rtwpci->trx_lock);
	return 0;

err_unlock:
	spin_unlock_bh(&rtwpci->trx_lock);
	return ret;
}

static int rtw89_pci_ops_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req)
{
	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
	int ret;

	ret = rtw89_pci_tx_write(rtwdev, tx_req, desc_info->ch_dma);
	if (ret) {
		rtw89_err(rtwdev, "failed to TX Queue %d\n", desc_info->ch_dma);
		return ret;
	}

	return 0;
}
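
/* The BD RAM tables below partition the shared descriptor RAM among TX
 * channels by start_idx/max_num/min_num. The dual-queue table spans units
 * 0..63 (12 channels * 5 units + 4 for CH12); the single-queue table spans
 * units 0..31.
 */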

const struct rtw89_pci_bd_ram rtw89_bd_ram_table_dual[RTW89_TXCH_NUM] = {
	[RTW89_TXCH_ACH0] = {.start_idx = 0,  .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH1] = {.start_idx = 5,  .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH4] = {.start_idx = 20, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH5] = {.start_idx = 25, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH6] = {.start_idx = 30, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH7] = {.start_idx = 35, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_CH8]  = {.start_idx = 40, .max_num = 5, .min_num = 1},
	[RTW89_TXCH_CH9]  = {.start_idx = 45, .max_num = 5, .min_num = 1},
	[RTW89_TXCH_CH10] = {.start_idx = 50, .max_num = 5, .min_num = 1},
	[RTW89_TXCH_CH11] = {.start_idx = 55, .max_num = 5, .min_num = 1},
	[RTW89_TXCH_CH12] = {.start_idx = 60, .max_num = 4, .min_num = 1},
};
EXPORT_SYMBOL(rtw89_bd_ram_table_dual);

const struct rtw89_pci_bd_ram rtw89_bd_ram_table_single[RTW89_TXCH_NUM] = {
	[RTW89_TXCH_ACH0] = {.start_idx = 0,  .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH1] = {.start_idx = 5,  .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_CH8]  = {.start_idx = 20, .max_num = 4, .min_num = 1},
	[RTW89_TXCH_CH9]  = {.start_idx = 24, .max_num = 4, .min_num = 1},
	[RTW89_TXCH_CH12] = {.start_idx = 28, .max_num = 4, .min_num = 1},
};
EXPORT_SYMBOL(rtw89_bd_ram_table_single);

static void rtw89_pci_init_wp_16sel(struct rtw89_dev *rtwdev)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	u32 addr = info->wp_sel_addr;
	u32 val;
	int i;

	if (!info->wp_sel_addr)
		return;

	for (i = 0; i < 16; i += 4) {
		val = u32_encode_bits(i + 0, MASKBYTE0) |
		      u32_encode_bits(i + 1, MASKBYTE1) |
		      u32_encode_bits(i + 2, MASKBYTE2) |
		      u32_encode_bits(i + 3, MASKBYTE3);
		rtw89_write32(rtwdev, addr + i, val);
	}
}
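
/* rtw89_pci_init_wp_16sel() above programs 16 byte-wide write-pointer
 * selectors, four per 32-bit register; e.g. the first write (i = 0) stores
 * 0x03020100 at wp_sel_addr.
 */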

static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	const struct rtw89_pci_bd_ram *bd_ram_table = *info->bd_ram_table;
	struct rtw89_pci_tx_ring *tx_ring;
	struct rtw89_pci_rx_ring *rx_ring;
	struct rtw89_pci_dma_ring *bd_ring;
	const struct rtw89_pci_bd_ram *bd_ram;
	u32 addr_num;
	u32 addr_idx;
	u32 addr_bdram;
	u32 addr_desa_l;
	u32 val32;
	int i;

	for (i = 0; i < RTW89_TXCH_NUM; i++) {
		if (info->tx_dma_ch_mask & BIT(i))
			continue;

		tx_ring = &rtwpci->tx_rings[i];
		bd_ring = &tx_ring->bd_ring;
		bd_ram = bd_ram_table ? &bd_ram_table[i] : NULL;
		addr_num = bd_ring->addr.num;
		addr_bdram = bd_ring->addr.bdram;
		addr_desa_l = bd_ring->addr.desa_l;
		bd_ring->wp = 0;
		bd_ring->rp = 0;

		rtw89_write16(rtwdev, addr_num, bd_ring->len);
		if (addr_bdram && bd_ram) {
			val32 = FIELD_PREP(BDRAM_SIDX_MASK, bd_ram->start_idx) |
				FIELD_PREP(BDRAM_MAX_MASK, bd_ram->max_num) |
				FIELD_PREP(BDRAM_MIN_MASK, bd_ram->min_num);

			rtw89_write32(rtwdev, addr_bdram, val32);
		}
		rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
		rtw89_write32(rtwdev, addr_desa_l + 4, upper_32_bits(bd_ring->dma));
	}

	for (i = 0; i < RTW89_RXCH_NUM; i++) {
		rx_ring = &rtwpci->rx_rings[i];
		bd_ring = &rx_ring->bd_ring;
		addr_num = bd_ring->addr.num;
		addr_idx = bd_ring->addr.idx;
		addr_desa_l = bd_ring->addr.desa_l;
		if (info->rx_ring_eq_is_full)
			bd_ring->wp = bd_ring->len - 1;
		else
			bd_ring->wp = 0;
		bd_ring->rp = 0;
		rx_ring->diliver_skb = NULL;
		rx_ring->diliver_desc.ready = false;
		rx_ring->target_rx_tag = 0;

		rtw89_write16(rtwdev, addr_num, bd_ring->len);
		rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
		rtw89_write32(rtwdev, addr_desa_l + 4, upper_32_bits(bd_ring->dma));

		if (info->rx_ring_eq_is_full)
			rtw89_write16(rtwdev, addr_idx, bd_ring->wp);
	}

	rtw89_pci_init_wp_16sel(rtwdev);
}

static void rtw89_pci_release_tx_ring(struct rtw89_dev *rtwdev,
				      struct rtw89_pci_tx_ring *tx_ring)
{
	rtw89_pci_release_busy_txwd(rtwdev, tx_ring);
	rtw89_pci_release_pending_txwd_skb(rtwdev, tx_ring);
}

void rtw89_pci_ops_reset(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	int txch;

	rtw89_pci_reset_trx_rings(rtwdev);

	spin_lock_bh(&rtwpci->trx_lock);
	for (txch = 0; txch < RTW89_TXCH_NUM; txch++) {
		if (info->tx_dma_ch_mask & BIT(txch))
			continue;
		if (txch == RTW89_TXCH_CH12) {
			rtw89_pci_release_fwcmd(rtwdev, rtwpci,
						skb_queue_len(&rtwpci->h2c_queue), true);
			continue;
		}
		rtw89_pci_release_tx_ring(rtwdev, &rtwpci->tx_rings[txch]);
	}
	spin_unlock_bh(&rtwpci->trx_lock);
}

static void rtw89_pci_enable_intr_lock(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtwpci->running = true;
	rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static void rtw89_pci_disable_intr_lock(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtwpci->running = false;
	rtw89_chip_disable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static int rtw89_pci_ops_start(struct rtw89_dev *rtwdev)
{
	rtw89_core_napi_start(rtwdev);
	rtw89_pci_enable_intr_lock(rtwdev);

	return 0;
}

static void rtw89_pci_ops_stop(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;

	rtw89_pci_disable_intr_lock(rtwdev);
	synchronize_irq(pdev->irq);
	rtw89_core_napi_stop(rtwdev);
}

static void rtw89_pci_ops_pause(struct rtw89_dev *rtwdev, bool pause)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;

	if (pause) {
		rtw89_pci_disable_intr_lock(rtwdev);
		synchronize_irq(pdev->irq);
		if (test_bit(RTW89_FLAG_NAPI_RUNNING, rtwdev->flags))
			napi_synchronize(&rtwdev->napi);
	} else {
		rtw89_pci_enable_intr_lock(rtwdev);
		rtw89_pci_tx_kick_off_pending(rtwdev);
	}
}

static
void rtw89_pci_switch_bd_idx_addr(struct rtw89_dev *rtwdev, bool low_power)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	const struct rtw89_pci_bd_idx_addr *bd_idx_addr = info->bd_idx_addr_low_power;
	const struct rtw89_pci_ch_dma_addr_set *dma_addr_set = info->dma_addr_set;
	struct rtw89_pci_tx_ring *tx_ring;
	struct rtw89_pci_rx_ring *rx_ring;
	int i;

	if (WARN(!bd_idx_addr, "only HCI with low power mode needs this\n"))
		return;

	for (i = 0; i < RTW89_TXCH_NUM; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		tx_ring->bd_ring.addr.idx = low_power ?
					    bd_idx_addr->tx_bd_addrs[i] :
					    dma_addr_set->tx[i].idx;
	}

	for (i = 0; i < RTW89_RXCH_NUM; i++) {
		rx_ring = &rtwpci->rx_rings[i];
		rx_ring->bd_ring.addr.idx = low_power ?
					    bd_idx_addr->rx_bd_addrs[i] :
					    dma_addr_set->rx[i].idx;
	}
}

static void rtw89_pci_ops_switch_mode(struct rtw89_dev *rtwdev, bool low_power)
{
	enum rtw89_pci_intr_mask_cfg cfg;

	WARN(!rtwdev->hci.paused, "HCI isn't paused\n");

	cfg = low_power ? RTW89_PCI_INTR_MASK_LOW_POWER : RTW89_PCI_INTR_MASK_NORMAL;
	rtw89_chip_config_intr_mask(rtwdev, cfg);
	rtw89_pci_switch_bd_idx_addr(rtwdev, low_power);
}

static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data);

static u32 rtw89_pci_ops_read32_cmac(struct rtw89_dev *rtwdev, u32 addr)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
#if defined(__linux__)
	u32 val = readl(rtwpci->mmap + addr);
#elif defined(__FreeBSD__)
	u32 val;

	val = bus_read_4((struct resource *)rtwpci->mmap, addr);
	rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "R32 (%#010x) -> %#010x\n", addr, val);
#endif
	int count;

	for (count = 0; ; count++) {
		if (val != RTW89_R32_DEAD)
			return val;
		if (count >= MAC_REG_POOL_COUNT) {
			rtw89_warn(rtwdev, "addr %#x = %#x\n", addr, val);
			return RTW89_R32_DEAD;
		}
		rtw89_pci_ops_write32(rtwdev, R_AX_CK_EN, B_AX_CMAC_ALLCKEN);
#if defined(__linux__)
		val = readl(rtwpci->mmap + addr);
#elif defined(__FreeBSD__)
		val = bus_read_4((struct resource *)rtwpci->mmap, addr);
		rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "R32 (%#010x) -> %#010x\n", addr, val);
#endif
	}

	return val;
}
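
/* Sub-word CMAC access below goes through an aligned 32-bit read; e.g. an
 * 8-bit read at addr = 0x1062 uses addr32 = 0x1060 and shift = 16, then
 * returns bits 23:16 of the recovered dword.
 */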
& ~0x3; 1870 shift = (addr & 0x3) * 8; 1871 val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32); 1872 return val32 >> shift; 1873 } 1874 1875 static u16 rtw89_pci_ops_read16(struct rtw89_dev *rtwdev, u32 addr) 1876 { 1877 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1878 u32 addr32, val32, shift; 1879 1880 if (!ACCESS_CMAC(addr)) 1881 #if defined(__linux__) 1882 return readw(rtwpci->mmap + addr); 1883 #elif defined(__FreeBSD__) 1884 { 1885 u16 val; 1886 1887 val = bus_read_2((struct resource *)rtwpci->mmap, addr); 1888 rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "R16 (%#010x) -> %#06x\n", addr, val); 1889 return (val); 1890 } 1891 #endif 1892 1893 addr32 = addr & ~0x3; 1894 shift = (addr & 0x3) * 8; 1895 val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32); 1896 return val32 >> shift; 1897 } 1898 1899 static u32 rtw89_pci_ops_read32(struct rtw89_dev *rtwdev, u32 addr) 1900 { 1901 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1902 1903 if (!ACCESS_CMAC(addr)) 1904 #if defined(__linux__) 1905 return readl(rtwpci->mmap + addr); 1906 #elif defined(__FreeBSD__) 1907 { 1908 u32 val; 1909 1910 val = bus_read_4((struct resource *)rtwpci->mmap, addr); 1911 rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "R32 (%#010x) -> %#010x\n", addr, val); 1912 return (val); 1913 } 1914 #endif 1915 1916 return rtw89_pci_ops_read32_cmac(rtwdev, addr); 1917 } 1918 1919 static void rtw89_pci_ops_write8(struct rtw89_dev *rtwdev, u32 addr, u8 data) 1920 { 1921 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1922 1923 #if defined(__linux__) 1924 writeb(data, rtwpci->mmap + addr); 1925 #elif defined(__FreeBSD__) 1926 rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "W08 (%#010x) <- %#04x\n", addr, data); 1927 return (bus_write_1((struct resource *)rtwpci->mmap, addr, data)); 1928 #endif 1929 } 1930 1931 static void rtw89_pci_ops_write16(struct rtw89_dev *rtwdev, u32 addr, u16 data) 1932 { 1933 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1934 1935 #if defined(__linux__) 1936 writew(data, rtwpci->mmap + addr); 1937 #elif defined(__FreeBSD__) 1938 rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "W16 (%#010x) <- %#06x\n", addr, data); 1939 return (bus_write_2((struct resource *)rtwpci->mmap, addr, data)); 1940 #endif 1941 } 1942 1943 static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data) 1944 { 1945 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1946 1947 #if defined(__linux__) 1948 writel(data, rtwpci->mmap + addr); 1949 #elif defined(__FreeBSD__) 1950 rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "W32 (%#010x) <- %#010x\n", addr, data); 1951 return (bus_write_4((struct resource *)rtwpci->mmap, addr, data)); 1952 #endif 1953 } 1954 1955 static void rtw89_pci_ctrl_dma_trx(struct rtw89_dev *rtwdev, bool enable) 1956 { 1957 const struct rtw89_pci_info *info = rtwdev->pci_info; 1958 1959 if (enable) 1960 rtw89_write32_set(rtwdev, info->init_cfg_reg, 1961 info->rxhci_en_bit | info->txhci_en_bit); 1962 else 1963 rtw89_write32_clr(rtwdev, info->init_cfg_reg, 1964 info->rxhci_en_bit | info->txhci_en_bit); 1965 } 1966 1967 static void rtw89_pci_ctrl_dma_io(struct rtw89_dev *rtwdev, bool enable) 1968 { 1969 const struct rtw89_pci_info *info = rtwdev->pci_info; 1970 const struct rtw89_reg_def *reg = &info->dma_io_stop; 1971 1972 if (enable) 1973 rtw89_write32_clr(rtwdev, reg->addr, reg->mask); 1974 else 1975 rtw89_write32_set(rtwdev, reg->addr, reg->mask); 1976 } 1977 1978 void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable) 1979 { 1980 rtw89_pci_ctrl_dma_io(rtwdev, enable); 
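/* also gate the TX/RX HCI enables, so a single call freezes or resumes all PCI DMA */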
1981 rtw89_pci_ctrl_dma_trx(rtwdev, enable); 1982 } 1983 1984 static int rtw89_pci_check_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 rw_bit) 1985 { 1986 u16 val; 1987 1988 rtw89_write8(rtwdev, R_AX_MDIO_CFG, addr & 0x1F); 1989 1990 val = rtw89_read16(rtwdev, R_AX_MDIO_CFG); 1991 switch (speed) { 1992 case PCIE_PHY_GEN1: 1993 if (addr < 0x20) 1994 val = u16_replace_bits(val, MDIO_PG0_G1, B_AX_MDIO_PHY_ADDR_MASK); 1995 else 1996 val = u16_replace_bits(val, MDIO_PG1_G1, B_AX_MDIO_PHY_ADDR_MASK); 1997 break; 1998 case PCIE_PHY_GEN2: 1999 if (addr < 0x20) 2000 val = u16_replace_bits(val, MDIO_PG0_G2, B_AX_MDIO_PHY_ADDR_MASK); 2001 else 2002 val = u16_replace_bits(val, MDIO_PG1_G2, B_AX_MDIO_PHY_ADDR_MASK); 2003 break; 2004 default: 2005 rtw89_err(rtwdev, "[ERR]Error Speed %d!\n", speed); 2006 return -EINVAL; 2007 } 2008 rtw89_write16(rtwdev, R_AX_MDIO_CFG, val); 2009 rtw89_write16_set(rtwdev, R_AX_MDIO_CFG, rw_bit); 2010 2011 return read_poll_timeout(rtw89_read16, val, !(val & rw_bit), 10, 2000, 2012 false, rtwdev, R_AX_MDIO_CFG); 2013 } 2014 2015 static int 2016 rtw89_read16_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 *val) 2017 { 2018 int ret; 2019 2020 ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_RFLAG); 2021 if (ret) { 2022 rtw89_err(rtwdev, "[ERR]MDIO R16 0x%X fail ret=%d!\n", addr, ret); 2023 return ret; 2024 } 2025 *val = rtw89_read16(rtwdev, R_AX_MDIO_RDATA); 2026 2027 return 0; 2028 } 2029 2030 static int 2031 rtw89_write16_mdio(struct rtw89_dev *rtwdev, u8 addr, u16 data, u8 speed) 2032 { 2033 int ret; 2034 2035 rtw89_write16(rtwdev, R_AX_MDIO_WDATA, data); 2036 ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_WFLAG); 2037 if (ret) { 2038 rtw89_err(rtwdev, "[ERR]MDIO W16 0x%X = %x fail ret=%d!\n", addr, data, ret); 2039 return ret; 2040 } 2041 2042 return 0; 2043 } 2044 2045 static int 2046 rtw89_write16_mdio_mask(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u16 data, u8 speed) 2047 { 2048 u32 shift; 2049 int ret; 2050 u16 val; 2051 2052 ret = rtw89_read16_mdio(rtwdev, addr, speed, &val); 2053 if (ret) 2054 return ret; 2055 2056 shift = __ffs(mask); 2057 val &= ~mask; 2058 val |= ((data << shift) & mask); 2059 2060 ret = rtw89_write16_mdio(rtwdev, addr, val, speed); 2061 if (ret) 2062 return ret; 2063 2064 return 0; 2065 } 2066 2067 static int rtw89_write16_mdio_set(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed) 2068 { 2069 int ret; 2070 u16 val; 2071 2072 ret = rtw89_read16_mdio(rtwdev, addr, speed, &val); 2073 if (ret) 2074 return ret; 2075 ret = rtw89_write16_mdio(rtwdev, addr, val | mask, speed); 2076 if (ret) 2077 return ret; 2078 2079 return 0; 2080 } 2081 2082 static int rtw89_write16_mdio_clr(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed) 2083 { 2084 int ret; 2085 u16 val; 2086 2087 ret = rtw89_read16_mdio(rtwdev, addr, speed, &val); 2088 if (ret) 2089 return ret; 2090 ret = rtw89_write16_mdio(rtwdev, addr, val & ~mask, speed); 2091 if (ret) 2092 return ret; 2093 2094 return 0; 2095 } 2096 2097 static int rtw89_dbi_write8(struct rtw89_dev *rtwdev, u16 addr, u8 data) 2098 { 2099 u16 addr_2lsb = addr & B_AX_DBI_2LSB; 2100 u16 write_addr; 2101 u8 flag; 2102 int ret; 2103 2104 write_addr = addr & B_AX_DBI_ADDR_MSK; 2105 write_addr |= u16_encode_bits(BIT(addr_2lsb), B_AX_DBI_WREN_MSK); 2106 rtw89_write8(rtwdev, R_AX_DBI_WDATA + addr_2lsb, data); 2107 rtw89_write16(rtwdev, R_AX_DBI_FLAG, write_addr); 2108 rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_WFLAG >> 16); 2109 2110 ret = read_poll_timeout_atomic(rtw89_read8, flag, 
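/* poll until HW clears the write-flag byte, i.e. the DBI write has landed */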
!flag, 10, 2111 10 * RTW89_PCI_WR_RETRY_CNT, false, 2112 rtwdev, R_AX_DBI_FLAG + 2); 2113 if (ret) 2114 rtw89_err(rtwdev, "failed to write DBI register, addr=0x%X\n", 2115 addr); 2116 2117 return ret; 2118 } 2119 2120 static int rtw89_dbi_read8(struct rtw89_dev *rtwdev, u16 addr, u8 *value) 2121 { 2122 u16 read_addr = addr & B_AX_DBI_ADDR_MSK; 2123 u8 flag; 2124 int ret; 2125 2126 rtw89_write16(rtwdev, R_AX_DBI_FLAG, read_addr); 2127 rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_RFLAG >> 16); 2128 2129 ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10, 2130 10 * RTW89_PCI_WR_RETRY_CNT, false, 2131 rtwdev, R_AX_DBI_FLAG + 2); 2132 if (ret) { 2133 rtw89_err(rtwdev, "failed to read DBI register, addr=0x%X\n", 2134 addr); 2135 return ret; 2136 } 2137 2138 read_addr = R_AX_DBI_RDATA + (addr & 3); 2139 *value = rtw89_read8(rtwdev, read_addr); 2140 2141 return 0; 2142 } 2143 2144 static int rtw89_pci_write_config_byte(struct rtw89_dev *rtwdev, u16 addr, 2145 u8 data) 2146 { 2147 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2148 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2149 struct pci_dev *pdev = rtwpci->pdev; 2150 int ret; 2151 2152 ret = pci_write_config_byte(pdev, addr, data); 2153 if (!ret) 2154 return 0; 2155 2156 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) 2157 ret = rtw89_dbi_write8(rtwdev, addr, data); 2158 2159 return ret; 2160 } 2161 2162 static int rtw89_pci_read_config_byte(struct rtw89_dev *rtwdev, u16 addr, 2163 u8 *value) 2164 { 2165 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2166 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2167 struct pci_dev *pdev = rtwpci->pdev; 2168 int ret; 2169 2170 ret = pci_read_config_byte(pdev, addr, value); 2171 if (!ret) 2172 return 0; 2173 2174 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) 2175 ret = rtw89_dbi_read8(rtwdev, addr, value); 2176 2177 return ret; 2178 } 2179 2180 static int rtw89_pci_config_byte_set(struct rtw89_dev *rtwdev, u16 addr, 2181 u8 bit) 2182 { 2183 u8 value; 2184 int ret; 2185 2186 ret = rtw89_pci_read_config_byte(rtwdev, addr, &value); 2187 if (ret) 2188 return ret; 2189 2190 value |= bit; 2191 ret = rtw89_pci_write_config_byte(rtwdev, addr, value); 2192 2193 return ret; 2194 } 2195 2196 static int rtw89_pci_config_byte_clr(struct rtw89_dev *rtwdev, u16 addr, 2197 u8 bit) 2198 { 2199 u8 value; 2200 int ret; 2201 2202 ret = rtw89_pci_read_config_byte(rtwdev, addr, &value); 2203 if (ret) 2204 return ret; 2205 2206 value &= ~bit; 2207 ret = rtw89_pci_write_config_byte(rtwdev, addr, value); 2208 2209 return ret; 2210 } 2211 2212 static int 2213 __get_target(struct rtw89_dev *rtwdev, u16 *target, enum rtw89_pcie_phy phy_rate) 2214 { 2215 u16 val, tar; 2216 int ret; 2217 2218 /* Enable counter */ 2219 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val); 2220 if (ret) 2221 return ret; 2222 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN, 2223 phy_rate); 2224 if (ret) 2225 return ret; 2226 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val | B_AX_CLK_CALIB_EN, 2227 phy_rate); 2228 if (ret) 2229 return ret; 2230 2231 fsleep(300); 2232 2233 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &tar); 2234 if (ret) 2235 return ret; 2236 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN, 2237 phy_rate); 2238 if (ret) 2239 return ret; 2240 2241 tar = tar & 0x0FFF; 2242 if (tar == 0 || tar == 0x0FFF) { 2243 rtw89_err(rtwdev, "[ERR]Get target failed.\n"); 2244 return -EINVAL; 
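/* the count is masked to 12 bits above; 0 and 0x0FFF are treated as an invalid calibration result (the counter never latched a usable value) */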
2245 } 2246 2247 *target = tar; 2248 2249 return 0; 2250 } 2251 2252 static int rtw89_pci_autok_x(struct rtw89_dev *rtwdev) 2253 { 2254 int ret; 2255 2256 if (!rtw89_is_rtl885xb(rtwdev)) 2257 return 0; 2258 2259 ret = rtw89_write16_mdio_mask(rtwdev, RAC_REG_FLD_0, BAC_AUTOK_N_MASK, 2260 PCIE_AUTOK_4, PCIE_PHY_GEN1); 2261 return ret; 2262 } 2263 2264 static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en) 2265 { 2266 enum rtw89_pcie_phy phy_rate; 2267 u16 val16, mgn_set, div_set, tar; 2268 u8 val8, bdr_ori; 2269 bool l1_flag = false; 2270 int ret = 0; 2271 2272 if (!rtw89_is_rtl885xb(rtwdev)) 2273 return 0; 2274 2275 ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_PHY_RATE, &val8); 2276 if (ret) { 2277 rtw89_err(rtwdev, "[ERR]pci config read %X\n", 2278 RTW89_PCIE_PHY_RATE); 2279 return ret; 2280 } 2281 2282 if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x1) { 2283 phy_rate = PCIE_PHY_GEN1; 2284 } else if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x2) { 2285 phy_rate = PCIE_PHY_GEN2; 2286 } else { 2287 rtw89_err(rtwdev, "[ERR]PCIe PHY rate %#x not support\n", val8); 2288 return -EOPNOTSUPP; 2289 } 2290 /* Disable L1BD */ 2291 ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, &bdr_ori); 2292 if (ret) { 2293 rtw89_err(rtwdev, "[ERR]pci config read %X\n", RTW89_PCIE_L1_CTRL); 2294 return ret; 2295 } 2296 2297 if (bdr_ori & RTW89_PCIE_BIT_L1) { 2298 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, 2299 bdr_ori & ~RTW89_PCIE_BIT_L1); 2300 if (ret) { 2301 rtw89_err(rtwdev, "[ERR]pci config write %X\n", 2302 RTW89_PCIE_L1_CTRL); 2303 return ret; 2304 } 2305 l1_flag = true; 2306 } 2307 2308 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16); 2309 if (ret) { 2310 rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1); 2311 goto end; 2312 } 2313 2314 if (val16 & B_AX_CALIB_EN) { 2315 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, 2316 val16 & ~B_AX_CALIB_EN, phy_rate); 2317 if (ret) { 2318 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 2319 goto end; 2320 } 2321 } 2322 2323 if (!autook_en) 2324 goto end; 2325 /* Set div */ 2326 ret = rtw89_write16_mdio_clr(rtwdev, RAC_CTRL_PPR_V1, B_AX_DIV, phy_rate); 2327 if (ret) { 2328 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 2329 goto end; 2330 } 2331 2332 /* Obtain div and margin */ 2333 ret = __get_target(rtwdev, &tar, phy_rate); 2334 if (ret) { 2335 rtw89_err(rtwdev, "[ERR]1st get target fail %d\n", ret); 2336 goto end; 2337 } 2338 2339 mgn_set = tar * INTF_INTGRA_HOSTREF_V1 / INTF_INTGRA_MINREF_V1 - tar; 2340 2341 if (mgn_set >= 128) { 2342 div_set = 0x0003; 2343 mgn_set = 0x000F; 2344 } else if (mgn_set >= 64) { 2345 div_set = 0x0003; 2346 mgn_set >>= 3; 2347 } else if (mgn_set >= 32) { 2348 div_set = 0x0002; 2349 mgn_set >>= 2; 2350 } else if (mgn_set >= 16) { 2351 div_set = 0x0001; 2352 mgn_set >>= 1; 2353 } else if (mgn_set == 0) { 2354 rtw89_err(rtwdev, "[ERR]cal mgn is 0,tar = %d\n", tar); 2355 goto end; 2356 } else { 2357 div_set = 0x0000; 2358 } 2359 2360 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16); 2361 if (ret) { 2362 rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1); 2363 goto end; 2364 } 2365 2366 val16 |= u16_encode_bits(div_set, B_AX_DIV); 2367 2368 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val16, phy_rate); 2369 if (ret) { 2370 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 2371 goto end; 2372 } 2373 2374 ret = __get_target(rtwdev, &tar, phy_rate); 2375 if (ret) { 
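/* the second __get_target() verifies the count with the divider applied.
 * For reference, the margin computed above is
 * mgn_set = tar * INTF_INTGRA_HOSTREF_V1 / INTF_INTGRA_MINREF_V1 - tar,
 * roughly tar / 9 assuming the usual 100/90 definitions in pci.h;
 * e.g. tar 0x800 gives mgn_set 227, which selects div_set 0x3 and
 * clamps mgn_set to 0xF.
 */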
2376 rtw89_err(rtwdev, "[ERR]2nd get target fail %d\n", ret); 2377 goto end; 2378 } 2379 2380 rtw89_debug(rtwdev, RTW89_DBG_HCI, "[TRACE]target = 0x%X, div = 0x%X, margin = 0x%X\n", 2381 tar, div_set, mgn_set); 2382 ret = rtw89_write16_mdio(rtwdev, RAC_SET_PPR_V1, 2383 (tar & 0x0FFF) | (mgn_set << 12), phy_rate); 2384 if (ret) { 2385 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_SET_PPR_V1); 2386 goto end; 2387 } 2388 2389 /* Enable function */ 2390 ret = rtw89_write16_mdio_set(rtwdev, RAC_CTRL_PPR_V1, B_AX_CALIB_EN, phy_rate); 2391 if (ret) { 2392 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 2393 goto end; 2394 } 2395 2396 /* CLK delay = 0 */ 2397 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL, 2398 PCIE_CLKDLY_HW_0); 2399 2400 end: 2401 /* Set L1BD to ori */ 2402 if (l1_flag) { 2403 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, 2404 bdr_ori); 2405 if (ret) { 2406 rtw89_err(rtwdev, "[ERR]pci config write %X\n", 2407 RTW89_PCIE_L1_CTRL); 2408 return ret; 2409 } 2410 } 2411 2412 return ret; 2413 } 2414 2415 static int rtw89_pci_deglitch_setting(struct rtw89_dev *rtwdev) 2416 { 2417 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2418 int ret; 2419 2420 if (chip_id == RTL8852A) { 2421 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH, 2422 PCIE_PHY_GEN1); 2423 if (ret) 2424 return ret; 2425 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH, 2426 PCIE_PHY_GEN2); 2427 if (ret) 2428 return ret; 2429 } else if (chip_id == RTL8852C) { 2430 rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA24 * 2, 2431 B_AX_DEGLITCH); 2432 rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA24 * 2, 2433 B_AX_DEGLITCH); 2434 } 2435 2436 return 0; 2437 } 2438 2439 static void rtw89_pci_disable_eq_ax(struct rtw89_dev *rtwdev) 2440 { 2441 u16 g1_oobs, g2_oobs; 2442 u32 backup_aspm; 2443 u32 phy_offset; 2444 u16 offset_cal; 2445 u16 oobs_val; 2446 int ret; 2447 u8 gen; 2448 2449 if (rtwdev->chip->chip_id != RTL8852C) 2450 return; 2451 2452 g1_oobs = rtw89_read16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G1 + 2453 RAC_ANA09 * RAC_MULT, BAC_OOBS_SEL); 2454 g2_oobs = rtw89_read16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G2 + 2455 RAC_ANA09 * RAC_MULT, BAC_OOBS_SEL); 2456 if (g1_oobs && g2_oobs) 2457 return; 2458 2459 backup_aspm = rtw89_read32(rtwdev, R_AX_PCIE_MIX_CFG_V1); 2460 rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK); 2461 2462 ret = rtw89_pci_get_phy_offset_by_link_speed(rtwdev, &phy_offset); 2463 if (ret) 2464 goto out; 2465 2466 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0D * RAC_MULT, BAC_RX_TEST_EN); 2467 rtw89_write16(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT, ADDR_SEL_PINOUT_DIS_VAL); 2468 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT, B_PCIE_BIT_RD_SEL); 2469 2470 oobs_val = rtw89_read16_mask(rtwdev, phy_offset + RAC_ANA1F * RAC_MULT, 2471 OOBS_LEVEL_MASK); 2472 2473 rtw89_write16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA03 * RAC_MULT, 2474 OOBS_SEN_MASK, oobs_val); 2475 rtw89_write16_set(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA09 * RAC_MULT, 2476 BAC_OOBS_SEL); 2477 2478 rtw89_write16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA03 * RAC_MULT, 2479 OOBS_SEN_MASK, oobs_val); 2480 rtw89_write16_set(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA09 * RAC_MULT, 2481 BAC_OOBS_SEL); 2482 2483 /* offset K */ 2484 for (gen = 1; gen <= 2; gen++) { 2485 phy_offset = gen == 1 ? 
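/* per-generation PHY register bank */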
R_RAC_DIRECT_OFFSET_G1 : 2486 R_RAC_DIRECT_OFFSET_G2; 2487 2488 rtw89_write16_clr(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT, 2489 B_PCIE_BIT_RD_SEL); 2490 } 2491 2492 offset_cal = rtw89_read16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G1 + 2493 RAC_ANA1F * RAC_MULT, OFFSET_CAL_MASK); 2494 2495 for (gen = 1; gen <= 2; gen++) { 2496 phy_offset = gen == 1 ? R_RAC_DIRECT_OFFSET_G1 : 2497 R_RAC_DIRECT_OFFSET_G2; 2498 2499 rtw89_write16_mask(rtwdev, phy_offset + RAC_ANA0B * RAC_MULT, 2500 MANUAL_LVL_MASK, offset_cal); 2501 rtw89_write16_clr(rtwdev, phy_offset + RAC_ANA0D * RAC_MULT, 2502 OFFSET_CAL_MODE); 2503 } 2504 2505 out: 2506 rtw89_write32(rtwdev, R_AX_PCIE_MIX_CFG_V1, backup_aspm); 2507 } 2508 2509 static void rtw89_pci_ber(struct rtw89_dev *rtwdev) 2510 { 2511 u32 phy_offset; 2512 2513 if (!test_bit(RTW89_QUIRK_PCI_BER, rtwdev->quirks)) 2514 return; 2515 2516 phy_offset = R_RAC_DIRECT_OFFSET_G1; 2517 rtw89_write16(rtwdev, phy_offset + RAC_ANA1E * RAC_MULT, RAC_ANA1E_G1_VAL); 2518 rtw89_write16(rtwdev, phy_offset + RAC_ANA2E * RAC_MULT, RAC_ANA2E_VAL); 2519 2520 phy_offset = R_RAC_DIRECT_OFFSET_G2; 2521 rtw89_write16(rtwdev, phy_offset + RAC_ANA1E * RAC_MULT, RAC_ANA1E_G2_VAL); 2522 rtw89_write16(rtwdev, phy_offset + RAC_ANA2E * RAC_MULT, RAC_ANA2E_VAL); 2523 } 2524 2525 static void rtw89_pci_rxdma_prefth(struct rtw89_dev *rtwdev) 2526 { 2527 if (rtwdev->chip->chip_id != RTL8852A) 2528 return; 2529 2530 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_DIS_RXDMA_PRE); 2531 } 2532 2533 static void rtw89_pci_l1off_pwroff(struct rtw89_dev *rtwdev) 2534 { 2535 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2536 2537 if (chip_id != RTL8852A && !rtw89_is_rtl885xb(rtwdev)) 2538 return; 2539 2540 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL, B_AX_L1OFF_PWR_OFF_EN); 2541 } 2542 2543 static u32 rtw89_pci_l2_rxen_lat(struct rtw89_dev *rtwdev) 2544 { 2545 int ret; 2546 2547 if (rtwdev->chip->chip_id != RTL8852A) 2548 return 0; 2549 2550 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN, 2551 PCIE_PHY_GEN1); 2552 if (ret) 2553 return ret; 2554 2555 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN, 2556 PCIE_PHY_GEN2); 2557 if (ret) 2558 return ret; 2559 2560 return 0; 2561 } 2562 2563 static void rtw89_pci_aphy_pwrcut(struct rtw89_dev *rtwdev) 2564 { 2565 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2566 2567 if (chip_id != RTL8852A && !rtw89_is_rtl885xb(rtwdev)) 2568 return; 2569 2570 rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_PSUS_OFF_CAPC_EN); 2571 } 2572 2573 static void rtw89_pci_hci_ldo(struct rtw89_dev *rtwdev) 2574 { 2575 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2576 2577 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 2578 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, 2579 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 2580 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, 2581 B_AX_PCIE_DIS_WLSUS_AFT_PDN); 2582 } else if (rtwdev->chip->chip_id == RTL8852C) { 2583 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, 2584 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 2585 } 2586 } 2587 2588 static int rtw89_pci_dphy_delay(struct rtw89_dev *rtwdev) 2589 { 2590 if (!rtw89_is_rtl885xb(rtwdev)) 2591 return 0; 2592 2593 return rtw89_write16_mdio_mask(rtwdev, RAC_REG_REV2, BAC_CMU_EN_DLY_MASK, 2594 PCIE_DPHY_DLY_25US, PCIE_PHY_GEN1); 2595 } 2596 2597 static void rtw89_pci_power_wake_ax(struct rtw89_dev *rtwdev, bool pwr_up) 2598 { 2599 if (pwr_up) 2600 rtw89_write32_set(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL); 2601 else 2602 rtw89_write32_clr(rtwdev, R_AX_HCI_OPT_CTRL, 
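/* drop the wake request asserted by the power-up path above */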
BIT_WAKE_CTRL); 2603 }
2604
2605 static void rtw89_pci_autoload_hang(struct rtw89_dev *rtwdev)
2606 {
2607 if (rtwdev->chip->chip_id != RTL8852C)
2608 return;
2609
2610 rtw89_write32_set(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3);
2611 rtw89_write32_clr(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3);
2612 }
2613
2614 static void rtw89_pci_l12_vmain(struct rtw89_dev *rtwdev)
2615 {
2616 if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV))
2617 return;
2618
2619 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_FORCE_PWR_NGAT);
2620 }
2621
2622 static void rtw89_pci_gen2_force_ib(struct rtw89_dev *rtwdev)
2623 {
2624 if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV))
2625 return;
2626
2627 rtw89_write32_set(rtwdev, R_AX_PMC_DBG_CTRL2,
2628 B_AX_SYSON_DIS_PMCR_AX_WRMSK);
2629 rtw89_write32_set(rtwdev, R_AX_HCI_BG_CTRL, B_AX_BG_CLR_ASYNC_M3);
2630 rtw89_write32_clr(rtwdev, R_AX_PMC_DBG_CTRL2,
2631 B_AX_SYSON_DIS_PMCR_AX_WRMSK);
2632 }
2633
2634 static void rtw89_pci_l1_ent_lat(struct rtw89_dev *rtwdev)
2635 {
2636 if (rtwdev->chip->chip_id != RTL8852C)
2637 return;
2638
2639 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_SEL_REQ_ENTR_L1);
2640 }
2641
2642 static void rtw89_pci_wd_exit_l1(struct rtw89_dev *rtwdev)
2643 {
2644 if (rtwdev->chip->chip_id != RTL8852C)
2645 return;
2646
2647 rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_DMAC0_EXIT_L1_EN);
2648 }
2649
2650 static void rtw89_pci_set_sic(struct rtw89_dev *rtwdev)
2651 {
2652 if (rtwdev->chip->chip_id == RTL8852C)
2653 return;
2654
2655 rtw89_write32_clr(rtwdev, R_AX_PCIE_EXP_CTRL,
2656 B_AX_SIC_EN_FORCE_CLKREQ);
2657 }
2658
2659 static void rtw89_pci_set_lbc(struct rtw89_dev *rtwdev)
2660 {
2661 const struct rtw89_pci_info *info = rtwdev->pci_info;
2662 u32 lbc;
2663
2664 if (rtwdev->chip->chip_id == RTL8852C)
2665 return;
2666
2667 lbc = rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG);
2668 if (info->lbc_en == MAC_AX_PCIE_ENABLE) {
2669 lbc = u32_replace_bits(lbc, info->lbc_tmr, B_AX_LBC_TIMER);
2670 lbc |= B_AX_LBC_FLAG | B_AX_LBC_EN;
2671 } else {
2672 lbc &= ~B_AX_LBC_EN;
2673 }
2674 rtw89_write32(rtwdev, R_AX_LBC_WATCHDOG, lbc);
2675 }
2676
2677
2678 static void rtw89_pci_set_io_rcy(struct rtw89_dev *rtwdev)
2679 {
2680 const struct rtw89_pci_info *info = rtwdev->pci_info;
2681 u32 val32;
2682
2683 if (rtwdev->chip->chip_id != RTL8852C)
2684 return;
2685
2686 if (info->io_rcy_en == MAC_AX_PCIE_ENABLE) {
2687 val32 = FIELD_PREP(B_AX_PCIE_WDT_TIMER_M1_MASK,
2688 info->io_rcy_tmr);
2689 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M1, val32);
2690 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M2, val32);
2691 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_E0, val32);
2692
2693 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1);
2694 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2);
2695 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0);
2696 } else {
2697 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1);
2698 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2);
2699 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0);
2700 }
2701
2702 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_S1, B_AX_PCIE_IO_RCY_WDT_MODE_S1);
2703 }
2704
2705 static void rtw89_pci_set_dbg(struct rtw89_dev *rtwdev)
2706 {
2707 if (rtwdev->chip->chip_id == RTL8852C)
2708 return;
2709
2710 rtw89_write32_set(rtwdev,
R_AX_PCIE_DBG_CTRL, 2711 B_AX_ASFF_FULL_NO_STK | B_AX_EN_STUCK_DBG); 2712 2713 if (rtwdev->chip->chip_id == RTL8852A) 2714 rtw89_write32_set(rtwdev, R_AX_PCIE_EXP_CTRL, 2715 B_AX_EN_CHKDSC_NO_RX_STUCK); 2716 } 2717 2718 static void rtw89_pci_set_keep_reg(struct rtw89_dev *rtwdev) 2719 { 2720 if (rtwdev->chip->chip_id == RTL8852C) 2721 return; 2722 2723 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, 2724 B_AX_PCIE_TXRST_KEEP_REG | B_AX_PCIE_RXRST_KEEP_REG); 2725 } 2726 2727 static void rtw89_pci_clr_idx_all_ax(struct rtw89_dev *rtwdev) 2728 { 2729 const struct rtw89_pci_info *info = rtwdev->pci_info; 2730 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2731 u32 val = B_AX_CLR_ACH0_IDX | B_AX_CLR_ACH1_IDX | B_AX_CLR_ACH2_IDX | 2732 B_AX_CLR_ACH3_IDX | B_AX_CLR_CH8_IDX | B_AX_CLR_CH9_IDX | 2733 B_AX_CLR_CH12_IDX; 2734 u32 rxbd_rwptr_clr = info->rxbd_rwptr_clr_reg; 2735 u32 txbd_rwptr_clr2 = info->txbd_rwptr_clr2_reg; 2736 2737 if (chip_id == RTL8852A || chip_id == RTL8852C) 2738 val |= B_AX_CLR_ACH4_IDX | B_AX_CLR_ACH5_IDX | 2739 B_AX_CLR_ACH6_IDX | B_AX_CLR_ACH7_IDX; 2740 /* clear DMA indexes */ 2741 rtw89_write32_set(rtwdev, R_AX_TXBD_RWPTR_CLR1, val); 2742 if (chip_id == RTL8852A || chip_id == RTL8852C) 2743 rtw89_write32_set(rtwdev, txbd_rwptr_clr2, 2744 B_AX_CLR_CH10_IDX | B_AX_CLR_CH11_IDX); 2745 rtw89_write32_set(rtwdev, rxbd_rwptr_clr, 2746 B_AX_CLR_RXQ_IDX | B_AX_CLR_RPQ_IDX); 2747 } 2748 2749 static int rtw89_pci_poll_txdma_ch_idle_ax(struct rtw89_dev *rtwdev) 2750 { 2751 const struct rtw89_pci_info *info = rtwdev->pci_info; 2752 u32 dma_busy1 = info->dma_busy1.addr; 2753 u32 dma_busy2 = info->dma_busy2_reg; 2754 u32 check, dma_busy; 2755 int ret; 2756 2757 check = info->dma_busy1.mask; 2758 2759 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, 2760 10, 100, false, rtwdev, dma_busy1); 2761 if (ret) 2762 return ret; 2763 2764 if (!dma_busy2) 2765 return 0; 2766 2767 check = B_AX_CH10_BUSY | B_AX_CH11_BUSY; 2768 2769 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, 2770 10, 100, false, rtwdev, dma_busy2); 2771 if (ret) 2772 return ret; 2773 2774 return 0; 2775 } 2776 2777 static int rtw89_pci_poll_rxdma_ch_idle_ax(struct rtw89_dev *rtwdev) 2778 { 2779 const struct rtw89_pci_info *info = rtwdev->pci_info; 2780 u32 dma_busy3 = info->dma_busy3_reg; 2781 u32 check, dma_busy; 2782 int ret; 2783 2784 check = B_AX_RXQ_BUSY | B_AX_RPQ_BUSY; 2785 2786 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, 2787 10, 100, false, rtwdev, dma_busy3); 2788 if (ret) 2789 return ret; 2790 2791 return 0; 2792 } 2793 2794 static int rtw89_pci_poll_dma_all_idle(struct rtw89_dev *rtwdev) 2795 { 2796 u32 ret; 2797 2798 ret = rtw89_pci_poll_txdma_ch_idle_ax(rtwdev); 2799 if (ret) { 2800 rtw89_err(rtwdev, "txdma ch busy\n"); 2801 return ret; 2802 } 2803 2804 ret = rtw89_pci_poll_rxdma_ch_idle_ax(rtwdev); 2805 if (ret) { 2806 rtw89_err(rtwdev, "rxdma ch busy\n"); 2807 return ret; 2808 } 2809 2810 return 0; 2811 } 2812 2813 static int rtw89_pci_mode_op(struct rtw89_dev *rtwdev) 2814 { 2815 const struct rtw89_pci_info *info = rtwdev->pci_info; 2816 enum mac_ax_bd_trunc_mode txbd_trunc_mode = info->txbd_trunc_mode; 2817 enum mac_ax_bd_trunc_mode rxbd_trunc_mode = info->rxbd_trunc_mode; 2818 enum mac_ax_rxbd_mode rxbd_mode = info->rxbd_mode; 2819 enum mac_ax_tag_mode tag_mode = info->tag_mode; 2820 enum mac_ax_wd_dma_intvl wd_dma_idle_intvl = info->wd_dma_idle_intvl; 2821 enum mac_ax_wd_dma_intvl wd_dma_act_intvl = info->wd_dma_act_intvl; 
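/* the per-chip info table drives everything below: BD truncation, RXBD layout, DMA burst sizes, tag mode and WD DMA intervals */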
2822 enum mac_ax_tx_burst tx_burst = info->tx_burst; 2823 enum mac_ax_rx_burst rx_burst = info->rx_burst; 2824 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2825 u8 cv = rtwdev->hal.cv; 2826 u32 val32; 2827 2828 if (txbd_trunc_mode == MAC_AX_BD_TRUNC) { 2829 if (chip_id == RTL8852A && cv == CHIP_CBV) 2830 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE); 2831 } else if (txbd_trunc_mode == MAC_AX_BD_NORM) { 2832 if (chip_id == RTL8852A || chip_id == RTL8852B) 2833 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE); 2834 } 2835 2836 if (rxbd_trunc_mode == MAC_AX_BD_TRUNC) { 2837 if (chip_id == RTL8852A && cv == CHIP_CBV) 2838 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE); 2839 } else if (rxbd_trunc_mode == MAC_AX_BD_NORM) { 2840 if (chip_id == RTL8852A || chip_id == RTL8852B) 2841 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE); 2842 } 2843 2844 if (rxbd_mode == MAC_AX_RXBD_PKT) { 2845 rtw89_write32_clr(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit); 2846 } else if (rxbd_mode == MAC_AX_RXBD_SEP) { 2847 rtw89_write32_set(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit); 2848 2849 if (chip_id == RTL8852A || chip_id == RTL8852B) 2850 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, 2851 B_AX_PCIE_RX_APPLEN_MASK, 0); 2852 } 2853 2854 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 2855 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_TXDMA_MASK, tx_burst); 2856 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_RXDMA_MASK, rx_burst); 2857 } else if (chip_id == RTL8852C) { 2858 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_TXDMA_MASK, tx_burst); 2859 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_RXDMA_MASK, rx_burst); 2860 } 2861 2862 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 2863 if (tag_mode == MAC_AX_TAG_SGL) { 2864 val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) & 2865 ~B_AX_LATENCY_CONTROL; 2866 rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 2867 } else if (tag_mode == MAC_AX_TAG_MULTI) { 2868 val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) | 2869 B_AX_LATENCY_CONTROL; 2870 rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 2871 } 2872 } 2873 2874 rtw89_write32_mask(rtwdev, info->exp_ctrl_reg, info->max_tag_num_mask, 2875 info->multi_tag_num); 2876 2877 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 2878 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_IDLE, 2879 wd_dma_idle_intvl); 2880 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_ACT, 2881 wd_dma_act_intvl); 2882 } else if (chip_id == RTL8852C) { 2883 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_IDLE_V1_MASK, 2884 wd_dma_idle_intvl); 2885 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_ACT_V1_MASK, 2886 wd_dma_act_intvl); 2887 } 2888 2889 if (txbd_trunc_mode == MAC_AX_BD_TRUNC) { 2890 rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, 2891 B_AX_HOST_ADDR_INFO_8B_SEL); 2892 rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); 2893 } else if (txbd_trunc_mode == MAC_AX_BD_NORM) { 2894 rtw89_write32_clr(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, 2895 B_AX_HOST_ADDR_INFO_8B_SEL); 2896 rtw89_write32_set(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); 2897 } 2898 2899 return 0; 2900 } 2901 2902 static int rtw89_pci_ops_deinit(struct rtw89_dev *rtwdev) 2903 { 2904 const struct rtw89_pci_info *info = rtwdev->pci_info; 2905 2906 rtw89_pci_power_wake(rtwdev, false); 2907 2908 if 
(rtwdev->chip->chip_id == RTL8852A) { 2909 /* ltr sw trigger */ 2910 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_IDLE); 2911 } 2912 info->ltr_set(rtwdev, false); 2913 rtw89_pci_ctrl_dma_all(rtwdev, false); 2914 rtw89_pci_clr_idx_all(rtwdev); 2915 2916 return 0; 2917 } 2918 2919 static int rtw89_pci_ops_mac_pre_init_ax(struct rtw89_dev *rtwdev) 2920 { 2921 const struct rtw89_pci_info *info = rtwdev->pci_info; 2922 int ret; 2923 2924 rtw89_pci_ber(rtwdev); 2925 rtw89_pci_rxdma_prefth(rtwdev); 2926 rtw89_pci_l1off_pwroff(rtwdev); 2927 rtw89_pci_deglitch_setting(rtwdev); 2928 ret = rtw89_pci_l2_rxen_lat(rtwdev); 2929 if (ret) { 2930 rtw89_err(rtwdev, "[ERR] pcie l2 rxen lat %d\n", ret); 2931 return ret; 2932 } 2933 2934 rtw89_pci_aphy_pwrcut(rtwdev); 2935 rtw89_pci_hci_ldo(rtwdev); 2936 rtw89_pci_dphy_delay(rtwdev); 2937 2938 ret = rtw89_pci_autok_x(rtwdev); 2939 if (ret) { 2940 rtw89_err(rtwdev, "[ERR] pcie autok_x fail %d\n", ret); 2941 return ret; 2942 } 2943 2944 ret = rtw89_pci_auto_refclk_cal(rtwdev, false); 2945 if (ret) { 2946 rtw89_err(rtwdev, "[ERR] pcie autok fail %d\n", ret); 2947 return ret; 2948 } 2949 2950 rtw89_pci_power_wake_ax(rtwdev, true); 2951 rtw89_pci_autoload_hang(rtwdev); 2952 rtw89_pci_l12_vmain(rtwdev); 2953 rtw89_pci_gen2_force_ib(rtwdev); 2954 rtw89_pci_l1_ent_lat(rtwdev); 2955 rtw89_pci_wd_exit_l1(rtwdev); 2956 rtw89_pci_set_sic(rtwdev); 2957 rtw89_pci_set_lbc(rtwdev); 2958 rtw89_pci_set_io_rcy(rtwdev); 2959 rtw89_pci_set_dbg(rtwdev); 2960 rtw89_pci_set_keep_reg(rtwdev); 2961 2962 rtw89_write32_set(rtwdev, info->dma_stop1.addr, B_AX_STOP_WPDMA); 2963 2964 /* stop DMA activities */ 2965 rtw89_pci_ctrl_dma_all(rtwdev, false); 2966 2967 ret = rtw89_pci_poll_dma_all_idle(rtwdev); 2968 if (ret) { 2969 rtw89_err(rtwdev, "[ERR] poll pcie dma all idle\n"); 2970 return ret; 2971 } 2972 2973 rtw89_pci_clr_idx_all(rtwdev); 2974 rtw89_pci_mode_op(rtwdev); 2975 2976 /* fill TRX BD indexes */ 2977 rtw89_pci_ops_reset(rtwdev); 2978 2979 ret = rtw89_pci_rst_bdram_ax(rtwdev); 2980 if (ret) { 2981 rtw89_warn(rtwdev, "reset bdram busy\n"); 2982 return ret; 2983 } 2984 2985 /* disable all channels except to FW CMD channel to download firmware */ 2986 rtw89_pci_ctrl_txdma_ch_ax(rtwdev, false); 2987 rtw89_pci_ctrl_txdma_fw_ch_ax(rtwdev, true); 2988 2989 /* start DMA activities */ 2990 rtw89_pci_ctrl_dma_all(rtwdev, true); 2991 2992 return 0; 2993 } 2994 2995 static int rtw89_pci_ops_mac_pre_deinit_ax(struct rtw89_dev *rtwdev) 2996 { 2997 rtw89_pci_power_wake_ax(rtwdev, false); 2998 2999 return 0; 3000 } 3001 3002 int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev, bool en) 3003 { 3004 u32 val; 3005 3006 if (!en) 3007 return 0; 3008 3009 val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0); 3010 if (rtw89_pci_ltr_is_err_reg_val(val)) 3011 return -EINVAL; 3012 val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1); 3013 if (rtw89_pci_ltr_is_err_reg_val(val)) 3014 return -EINVAL; 3015 val = rtw89_read32(rtwdev, R_AX_LTR_IDLE_LATENCY); 3016 if (rtw89_pci_ltr_is_err_reg_val(val)) 3017 return -EINVAL; 3018 val = rtw89_read32(rtwdev, R_AX_LTR_ACTIVE_LATENCY); 3019 if (rtw89_pci_ltr_is_err_reg_val(val)) 3020 return -EINVAL; 3021 3022 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_HW_EN | B_AX_LTR_EN | 3023 B_AX_LTR_WD_NOEMP_CHK); 3024 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_SPACE_IDX_MASK, 3025 PCI_LTR_SPC_500US); 3026 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK, 3027 PCI_LTR_IDLE_TIMER_3_2MS); 3028 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, 
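/* RX0/RX1 BD-count thresholds (0x28 here) for the HW LTR state decision */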
B_AX_LTR_RX0_TH_MASK, 0x28); 3029 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28); 3030 rtw89_write32(rtwdev, R_AX_LTR_IDLE_LATENCY, 0x90039003); 3031 rtw89_write32(rtwdev, R_AX_LTR_ACTIVE_LATENCY, 0x880b880b); 3032 3033 return 0; 3034 } 3035 EXPORT_SYMBOL(rtw89_pci_ltr_set); 3036 3037 int rtw89_pci_ltr_set_v1(struct rtw89_dev *rtwdev, bool en) 3038 { 3039 u32 dec_ctrl; 3040 u32 val32; 3041 3042 val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0); 3043 if (rtw89_pci_ltr_is_err_reg_val(val32)) 3044 return -EINVAL; 3045 val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1); 3046 if (rtw89_pci_ltr_is_err_reg_val(val32)) 3047 return -EINVAL; 3048 dec_ctrl = rtw89_read32(rtwdev, R_AX_LTR_DEC_CTRL); 3049 if (rtw89_pci_ltr_is_err_reg_val(dec_ctrl)) 3050 return -EINVAL; 3051 val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX3); 3052 if (rtw89_pci_ltr_is_err_reg_val(val32)) 3053 return -EINVAL; 3054 val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX0); 3055 if (rtw89_pci_ltr_is_err_reg_val(val32)) 3056 return -EINVAL; 3057 3058 if (!en) { 3059 dec_ctrl &= ~(LTR_EN_BITS | B_AX_LTR_IDX_DRV_MASK | B_AX_LTR_HW_DEC_EN); 3060 dec_ctrl |= FIELD_PREP(B_AX_LTR_IDX_DRV_MASK, PCIE_LTR_IDX_IDLE) | 3061 B_AX_LTR_REQ_DRV; 3062 } else { 3063 dec_ctrl |= B_AX_LTR_HW_DEC_EN; 3064 } 3065 3066 dec_ctrl &= ~B_AX_LTR_SPACE_IDX_V1_MASK; 3067 dec_ctrl |= FIELD_PREP(B_AX_LTR_SPACE_IDX_V1_MASK, PCI_LTR_SPC_500US); 3068 3069 if (en) 3070 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, 3071 B_AX_LTR_WD_NOEMP_CHK_V1 | B_AX_LTR_HW_EN); 3072 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK, 3073 PCI_LTR_IDLE_TIMER_3_2MS); 3074 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28); 3075 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28); 3076 rtw89_write32(rtwdev, R_AX_LTR_DEC_CTRL, dec_ctrl); 3077 rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX3, 0x90039003); 3078 rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX0, 0x880b880b); 3079 3080 return 0; 3081 } 3082 EXPORT_SYMBOL(rtw89_pci_ltr_set_v1); 3083 3084 static int rtw89_pci_ops_mac_post_init_ax(struct rtw89_dev *rtwdev) 3085 { 3086 const struct rtw89_pci_info *info = rtwdev->pci_info; 3087 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 3088 int ret; 3089 3090 ret = info->ltr_set(rtwdev, true); 3091 if (ret) { 3092 rtw89_err(rtwdev, "pci ltr set fail\n"); 3093 return ret; 3094 } 3095 if (chip_id == RTL8852A) { 3096 /* ltr sw trigger */ 3097 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_ACT); 3098 } 3099 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 3100 /* ADDR info 8-byte mode */ 3101 rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, 3102 B_AX_HOST_ADDR_INFO_8B_SEL); 3103 rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); 3104 } 3105 3106 /* enable DMA for all queues */ 3107 rtw89_pci_ctrl_txdma_ch_ax(rtwdev, true); 3108 3109 /* Release PCI IO */ 3110 rtw89_write32_clr(rtwdev, info->dma_stop1.addr, 3111 B_AX_STOP_WPDMA | B_AX_STOP_PCIEIO); 3112 3113 return 0; 3114 } 3115 3116 static int rtw89_pci_claim_device(struct rtw89_dev *rtwdev, 3117 struct pci_dev *pdev) 3118 { 3119 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3120 int ret; 3121 3122 ret = pci_enable_device(pdev); 3123 if (ret) { 3124 rtw89_err(rtwdev, "failed to enable pci device\n"); 3125 return ret; 3126 } 3127 3128 pci_set_master(pdev); 3129 pci_set_drvdata(pdev, rtwdev->hw); 3130 3131 rtwpci->pdev = pdev; 3132 3133 return 0; 3134 } 3135 3136 static void 
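/* counterpart of rtw89_pci_claim_device() above */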
rtw89_pci_declaim_device(struct rtw89_dev *rtwdev, 3137 struct pci_dev *pdev) 3138 { 3139 pci_disable_device(pdev); 3140 } 3141 3142 static bool rtw89_pci_chip_is_manual_dac(struct rtw89_dev *rtwdev) 3143 { 3144 const struct rtw89_chip_info *chip = rtwdev->chip; 3145 3146 switch (chip->chip_id) { 3147 case RTL8852A: 3148 case RTL8852B: 3149 case RTL8851B: 3150 case RTL8852BT: 3151 return true; 3152 default: 3153 return false; 3154 } 3155 } 3156 3157 static bool rtw89_pci_is_dac_compatible_bridge(struct rtw89_dev *rtwdev) 3158 { 3159 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3160 struct pci_dev *bridge = pci_upstream_bridge(rtwpci->pdev); 3161 3162 if (!rtw89_pci_chip_is_manual_dac(rtwdev)) 3163 return true; 3164 3165 if (!bridge) 3166 return false; 3167 3168 switch (bridge->vendor) { 3169 case PCI_VENDOR_ID_INTEL: 3170 return true; 3171 case PCI_VENDOR_ID_ASMEDIA: 3172 if (bridge->device == 0x2806) 3173 return true; 3174 break; 3175 } 3176 3177 return false; 3178 } 3179 3180 static void rtw89_pci_cfg_dac(struct rtw89_dev *rtwdev) 3181 { 3182 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3183 3184 if (!rtwpci->enable_dac) 3185 return; 3186 3187 if (!rtw89_pci_chip_is_manual_dac(rtwdev)) 3188 return; 3189 3190 rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_L1_CTRL, RTW89_PCIE_BIT_EN_64BITS); 3191 } 3192 3193 static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev, 3194 struct pci_dev *pdev) 3195 { 3196 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3197 unsigned long resource_len; 3198 u8 bar_id = 2; 3199 int ret; 3200 3201 ret = pci_request_regions(pdev, KBUILD_MODNAME); 3202 if (ret) { 3203 rtw89_err(rtwdev, "failed to request pci regions\n"); 3204 goto err; 3205 } 3206 3207 if (!rtw89_pci_is_dac_compatible_bridge(rtwdev)) 3208 goto no_dac; 3209 3210 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36)); 3211 if (!ret) { 3212 rtwpci->enable_dac = true; 3213 rtw89_pci_cfg_dac(rtwdev); 3214 } else { 3215 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 3216 if (ret) { 3217 rtw89_err(rtwdev, 3218 "failed to set dma and consistent mask to 32/36-bit\n"); 3219 goto err_release_regions; 3220 } 3221 } 3222 no_dac: 3223 3224 #if defined(__FreeBSD__) 3225 linuxkpi_pcim_want_to_use_bus_functions(pdev); 3226 #endif 3227 resource_len = pci_resource_len(pdev, bar_id); 3228 rtwpci->mmap = pci_iomap(pdev, bar_id, resource_len); 3229 if (!rtwpci->mmap) { 3230 rtw89_err(rtwdev, "failed to map pci io\n"); 3231 ret = -EIO; 3232 goto err_release_regions; 3233 } 3234 3235 return 0; 3236 3237 err_release_regions: 3238 pci_release_regions(pdev); 3239 err: 3240 return ret; 3241 } 3242 3243 static void rtw89_pci_clear_mapping(struct rtw89_dev *rtwdev, 3244 struct pci_dev *pdev) 3245 { 3246 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3247 3248 if (rtwpci->mmap) { 3249 pci_iounmap(pdev, rtwpci->mmap); 3250 pci_release_regions(pdev); 3251 } 3252 } 3253 3254 static void rtw89_pci_free_tx_wd_ring(struct rtw89_dev *rtwdev, 3255 struct pci_dev *pdev, 3256 struct rtw89_pci_tx_ring *tx_ring) 3257 { 3258 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 3259 u8 *head = wd_ring->head; 3260 dma_addr_t dma = wd_ring->dma; 3261 u32 page_size = wd_ring->page_size; 3262 u32 page_num = wd_ring->page_num; 3263 u32 ring_sz = page_size * page_num; 3264 3265 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 3266 wd_ring->head = NULL; 3267 } 3268 3269 static void rtw89_pci_free_tx_ring(struct rtw89_dev *rtwdev, 3270 struct pci_dev *pdev, 
3271 struct rtw89_pci_tx_ring *tx_ring) 3272 { 3273 int ring_sz; 3274 u8 *head; 3275 dma_addr_t dma; 3276 3277 head = tx_ring->bd_ring.head; 3278 dma = tx_ring->bd_ring.dma; 3279 ring_sz = tx_ring->bd_ring.desc_size * tx_ring->bd_ring.len; 3280 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 3281 3282 tx_ring->bd_ring.head = NULL; 3283 } 3284 3285 static void rtw89_pci_free_tx_rings(struct rtw89_dev *rtwdev, 3286 struct pci_dev *pdev) 3287 { 3288 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3289 const struct rtw89_pci_info *info = rtwdev->pci_info; 3290 struct rtw89_pci_tx_ring *tx_ring; 3291 int i; 3292 3293 for (i = 0; i < RTW89_TXCH_NUM; i++) { 3294 if (info->tx_dma_ch_mask & BIT(i)) 3295 continue; 3296 tx_ring = &rtwpci->tx_rings[i]; 3297 rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring); 3298 rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring); 3299 } 3300 } 3301 3302 static void rtw89_pci_free_rx_ring(struct rtw89_dev *rtwdev, 3303 struct pci_dev *pdev, 3304 struct rtw89_pci_rx_ring *rx_ring) 3305 { 3306 struct rtw89_pci_rx_info *rx_info; 3307 struct sk_buff *skb; 3308 dma_addr_t dma; 3309 u32 buf_sz; 3310 u8 *head; 3311 int ring_sz = rx_ring->bd_ring.desc_size * rx_ring->bd_ring.len; 3312 int i; 3313 3314 buf_sz = rx_ring->buf_sz; 3315 for (i = 0; i < rx_ring->bd_ring.len; i++) { 3316 skb = rx_ring->buf[i]; 3317 if (!skb) 3318 continue; 3319 3320 rx_info = RTW89_PCI_RX_SKB_CB(skb); 3321 dma = rx_info->dma; 3322 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE); 3323 dev_kfree_skb(skb); 3324 rx_ring->buf[i] = NULL; 3325 } 3326 3327 head = rx_ring->bd_ring.head; 3328 dma = rx_ring->bd_ring.dma; 3329 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 3330 3331 rx_ring->bd_ring.head = NULL; 3332 } 3333 3334 static void rtw89_pci_free_rx_rings(struct rtw89_dev *rtwdev, 3335 struct pci_dev *pdev) 3336 { 3337 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3338 struct rtw89_pci_rx_ring *rx_ring; 3339 int i; 3340 3341 for (i = 0; i < RTW89_RXCH_NUM; i++) { 3342 rx_ring = &rtwpci->rx_rings[i]; 3343 rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring); 3344 } 3345 } 3346 3347 static void rtw89_pci_free_trx_rings(struct rtw89_dev *rtwdev, 3348 struct pci_dev *pdev) 3349 { 3350 rtw89_pci_free_rx_rings(rtwdev, pdev); 3351 rtw89_pci_free_tx_rings(rtwdev, pdev); 3352 } 3353 3354 static int rtw89_pci_init_rx_bd(struct rtw89_dev *rtwdev, struct pci_dev *pdev, 3355 struct rtw89_pci_rx_ring *rx_ring, 3356 struct sk_buff *skb, int buf_sz, u32 idx) 3357 { 3358 struct rtw89_pci_rx_info *rx_info; 3359 struct rtw89_pci_rx_bd_32 *rx_bd; 3360 dma_addr_t dma; 3361 3362 if (!skb) 3363 return -EINVAL; 3364 3365 dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE); 3366 if (dma_mapping_error(&pdev->dev, dma)) 3367 return -EBUSY; 3368 3369 rx_info = RTW89_PCI_RX_SKB_CB(skb); 3370 rx_bd = RTW89_PCI_RX_BD(rx_ring, idx); 3371 3372 memset(rx_bd, 0, sizeof(*rx_bd)); 3373 rx_bd->buf_size = cpu_to_le16(buf_sz); 3374 rx_bd->dma = cpu_to_le32(dma); 3375 rx_bd->opt = le16_encode_bits(upper_32_bits(dma), RTW89_PCI_RXBD_OPT_DMA_HI); 3376 rx_info->dma = dma; 3377 3378 return 0; 3379 } 3380 3381 static int rtw89_pci_alloc_tx_wd_ring(struct rtw89_dev *rtwdev, 3382 struct pci_dev *pdev, 3383 struct rtw89_pci_tx_ring *tx_ring, 3384 enum rtw89_tx_channel txch) 3385 { 3386 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 3387 struct rtw89_pci_tx_wd *txwd; 3388 dma_addr_t dma; 3389 dma_addr_t cur_paddr; 3390 u8 *head; 3391 u8 *cur_vaddr; 3392 u32 page_size = 
RTW89_PCI_TXWD_PAGE_SIZE; 3393 u32 page_num = RTW89_PCI_TXWD_NUM_MAX; 3394 u32 ring_sz = page_size * page_num; 3395 u32 page_offset; 3396 int i; 3397 3398 /* FWCMD queue doesn't use txwd as pages */ 3399 if (txch == RTW89_TXCH_CH12) 3400 return 0; 3401 3402 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 3403 if (!head) 3404 return -ENOMEM; 3405 3406 INIT_LIST_HEAD(&wd_ring->free_pages); 3407 wd_ring->head = head; 3408 wd_ring->dma = dma; 3409 wd_ring->page_size = page_size; 3410 wd_ring->page_num = page_num; 3411 3412 page_offset = 0; 3413 for (i = 0; i < page_num; i++) { 3414 txwd = &wd_ring->pages[i]; 3415 cur_paddr = dma + page_offset; 3416 cur_vaddr = head + page_offset; 3417 3418 skb_queue_head_init(&txwd->queue); 3419 INIT_LIST_HEAD(&txwd->list); 3420 txwd->paddr = cur_paddr; 3421 txwd->vaddr = cur_vaddr; 3422 txwd->len = page_size; 3423 txwd->seq = i; 3424 rtw89_pci_enqueue_txwd(tx_ring, txwd); 3425 3426 page_offset += page_size; 3427 } 3428 3429 return 0; 3430 } 3431 3432 static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev, 3433 struct pci_dev *pdev, 3434 struct rtw89_pci_tx_ring *tx_ring, 3435 u32 desc_size, u32 len, 3436 enum rtw89_tx_channel txch) 3437 { 3438 const struct rtw89_pci_ch_dma_addr *txch_addr; 3439 int ring_sz = desc_size * len; 3440 u8 *head; 3441 dma_addr_t dma; 3442 int ret; 3443 3444 ret = rtw89_pci_alloc_tx_wd_ring(rtwdev, pdev, tx_ring, txch); 3445 if (ret) { 3446 rtw89_err(rtwdev, "failed to alloc txwd ring of txch %d\n", txch); 3447 goto err; 3448 } 3449 3450 ret = rtw89_pci_get_txch_addrs(rtwdev, txch, &txch_addr); 3451 if (ret) { 3452 rtw89_err(rtwdev, "failed to get address of txch %d", txch); 3453 goto err_free_wd_ring; 3454 } 3455 3456 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 3457 if (!head) { 3458 ret = -ENOMEM; 3459 goto err_free_wd_ring; 3460 } 3461 3462 INIT_LIST_HEAD(&tx_ring->busy_pages); 3463 tx_ring->bd_ring.head = head; 3464 tx_ring->bd_ring.dma = dma; 3465 tx_ring->bd_ring.len = len; 3466 tx_ring->bd_ring.desc_size = desc_size; 3467 tx_ring->bd_ring.addr = *txch_addr; 3468 tx_ring->bd_ring.wp = 0; 3469 tx_ring->bd_ring.rp = 0; 3470 tx_ring->txch = txch; 3471 3472 return 0; 3473 3474 err_free_wd_ring: 3475 rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring); 3476 err: 3477 return ret; 3478 } 3479 3480 static int rtw89_pci_alloc_tx_rings(struct rtw89_dev *rtwdev, 3481 struct pci_dev *pdev) 3482 { 3483 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3484 const struct rtw89_pci_info *info = rtwdev->pci_info; 3485 struct rtw89_pci_tx_ring *tx_ring; 3486 u32 desc_size; 3487 u32 len; 3488 u32 i, tx_allocated; 3489 int ret; 3490 3491 for (i = 0; i < RTW89_TXCH_NUM; i++) { 3492 if (info->tx_dma_ch_mask & BIT(i)) 3493 continue; 3494 tx_ring = &rtwpci->tx_rings[i]; 3495 desc_size = sizeof(struct rtw89_pci_tx_bd_32); 3496 len = RTW89_PCI_TXBD_NUM_MAX; 3497 ret = rtw89_pci_alloc_tx_ring(rtwdev, pdev, tx_ring, 3498 desc_size, len, i); 3499 if (ret) { 3500 #if defined(__linux__) 3501 rtw89_err(rtwdev, "failed to alloc tx ring %d\n", i); 3502 #elif defined(__FreeBSD__) 3503 rtw89_err(rtwdev, "failed to alloc tx ring %d: ret=%d\n", i, ret); 3504 #endif 3505 goto err_free; 3506 } 3507 } 3508 3509 return 0; 3510 3511 err_free: 3512 tx_allocated = i; 3513 for (i = 0; i < tx_allocated; i++) { 3514 tx_ring = &rtwpci->tx_rings[i]; 3515 rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring); 3516 } 3517 3518 return ret; 3519 } 3520 3521 static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev, 3522 struct pci_dev 
*pdev, 3523 struct rtw89_pci_rx_ring *rx_ring, 3524 u32 desc_size, u32 len, u32 rxch) 3525 { 3526 const struct rtw89_pci_info *info = rtwdev->pci_info; 3527 const struct rtw89_pci_ch_dma_addr *rxch_addr; 3528 struct sk_buff *skb; 3529 u8 *head; 3530 dma_addr_t dma; 3531 int ring_sz = desc_size * len; 3532 int buf_sz = RTW89_PCI_RX_BUF_SIZE; 3533 int i, allocated; 3534 int ret; 3535 3536 ret = rtw89_pci_get_rxch_addrs(rtwdev, rxch, &rxch_addr); 3537 if (ret) { 3538 rtw89_err(rtwdev, "failed to get address of rxch %d", rxch); 3539 return ret; 3540 } 3541 3542 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 3543 if (!head) { 3544 ret = -ENOMEM; 3545 goto err; 3546 } 3547 3548 rx_ring->bd_ring.head = head; 3549 rx_ring->bd_ring.dma = dma; 3550 rx_ring->bd_ring.len = len; 3551 rx_ring->bd_ring.desc_size = desc_size; 3552 rx_ring->bd_ring.addr = *rxch_addr; 3553 if (info->rx_ring_eq_is_full) 3554 rx_ring->bd_ring.wp = len - 1; 3555 else 3556 rx_ring->bd_ring.wp = 0; 3557 rx_ring->bd_ring.rp = 0; 3558 rx_ring->buf_sz = buf_sz; 3559 rx_ring->diliver_skb = NULL; 3560 rx_ring->diliver_desc.ready = false; 3561 rx_ring->target_rx_tag = 0; 3562 3563 for (i = 0; i < len; i++) { 3564 skb = dev_alloc_skb(buf_sz); 3565 if (!skb) { 3566 ret = -ENOMEM; 3567 goto err_free; 3568 } 3569 3570 memset(skb->data, 0, buf_sz); 3571 rx_ring->buf[i] = skb; 3572 ret = rtw89_pci_init_rx_bd(rtwdev, pdev, rx_ring, skb, 3573 buf_sz, i); 3574 if (ret) { 3575 #if defined(__linux__) 3576 rtw89_err(rtwdev, "failed to init rx buf %d\n", i); 3577 #elif defined(__FreeBSD__) 3578 rtw89_err(rtwdev, "failed to init rx buf %d ret=%d\n", i, ret); 3579 #endif 3580 dev_kfree_skb_any(skb); 3581 rx_ring->buf[i] = NULL; 3582 goto err_free; 3583 } 3584 } 3585 3586 return 0; 3587 3588 err_free: 3589 allocated = i; 3590 for (i = 0; i < allocated; i++) { 3591 skb = rx_ring->buf[i]; 3592 if (!skb) 3593 continue; 3594 dma = *((dma_addr_t *)skb->cb); 3595 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE); 3596 dev_kfree_skb(skb); 3597 rx_ring->buf[i] = NULL; 3598 } 3599 3600 head = rx_ring->bd_ring.head; 3601 dma = rx_ring->bd_ring.dma; 3602 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 3603 3604 rx_ring->bd_ring.head = NULL; 3605 err: 3606 return ret; 3607 } 3608 3609 static int rtw89_pci_alloc_rx_rings(struct rtw89_dev *rtwdev, 3610 struct pci_dev *pdev) 3611 { 3612 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3613 struct rtw89_pci_rx_ring *rx_ring; 3614 u32 desc_size; 3615 u32 len; 3616 int i, rx_allocated; 3617 int ret; 3618 3619 for (i = 0; i < RTW89_RXCH_NUM; i++) { 3620 rx_ring = &rtwpci->rx_rings[i]; 3621 desc_size = sizeof(struct rtw89_pci_rx_bd_32); 3622 len = RTW89_PCI_RXBD_NUM_MAX; 3623 ret = rtw89_pci_alloc_rx_ring(rtwdev, pdev, rx_ring, 3624 desc_size, len, i); 3625 if (ret) { 3626 rtw89_err(rtwdev, "failed to alloc rx ring %d\n", i); 3627 goto err_free; 3628 } 3629 } 3630 3631 return 0; 3632 3633 err_free: 3634 rx_allocated = i; 3635 for (i = 0; i < rx_allocated; i++) { 3636 rx_ring = &rtwpci->rx_rings[i]; 3637 rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring); 3638 } 3639 3640 return ret; 3641 } 3642 3643 static int rtw89_pci_alloc_trx_rings(struct rtw89_dev *rtwdev, 3644 struct pci_dev *pdev) 3645 { 3646 int ret; 3647 3648 ret = rtw89_pci_alloc_tx_rings(rtwdev, pdev); 3649 if (ret) { 3650 rtw89_err(rtwdev, "failed to alloc dma tx rings\n"); 3651 goto err; 3652 } 3653 3654 ret = rtw89_pci_alloc_rx_rings(rtwdev, pdev); 3655 if (ret) { 3656 rtw89_err(rtwdev, "failed to alloc dma rx 
rings\n"); 3657 goto err_free_tx_rings; 3658 } 3659 3660 return 0; 3661 3662 err_free_tx_rings: 3663 rtw89_pci_free_tx_rings(rtwdev, pdev); 3664 err: 3665 return ret; 3666 } 3667 3668 static void rtw89_pci_h2c_init(struct rtw89_dev *rtwdev, 3669 struct rtw89_pci *rtwpci) 3670 { 3671 skb_queue_head_init(&rtwpci->h2c_queue); 3672 skb_queue_head_init(&rtwpci->h2c_release_queue); 3673 } 3674 3675 static int rtw89_pci_setup_resource(struct rtw89_dev *rtwdev, 3676 struct pci_dev *pdev) 3677 { 3678 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3679 int ret; 3680 3681 ret = rtw89_pci_setup_mapping(rtwdev, pdev); 3682 if (ret) { 3683 rtw89_err(rtwdev, "failed to setup pci mapping\n"); 3684 goto err; 3685 } 3686 3687 ret = rtw89_pci_alloc_trx_rings(rtwdev, pdev); 3688 if (ret) { 3689 rtw89_err(rtwdev, "failed to alloc pci trx rings\n"); 3690 goto err_pci_unmap; 3691 } 3692 3693 rtw89_pci_h2c_init(rtwdev, rtwpci); 3694 3695 spin_lock_init(&rtwpci->irq_lock); 3696 spin_lock_init(&rtwpci->trx_lock); 3697 3698 return 0; 3699 3700 err_pci_unmap: 3701 rtw89_pci_clear_mapping(rtwdev, pdev); 3702 err: 3703 return ret; 3704 } 3705 3706 static void rtw89_pci_clear_resource(struct rtw89_dev *rtwdev, 3707 struct pci_dev *pdev) 3708 { 3709 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3710 3711 rtw89_pci_free_trx_rings(rtwdev, pdev); 3712 rtw89_pci_clear_mapping(rtwdev, pdev); 3713 rtw89_pci_release_fwcmd(rtwdev, rtwpci, 3714 skb_queue_len(&rtwpci->h2c_queue), true); 3715 } 3716 3717 void rtw89_pci_config_intr_mask(struct rtw89_dev *rtwdev) 3718 { 3719 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3720 const struct rtw89_chip_info *chip = rtwdev->chip; 3721 u32 hs0isr_ind_int_en = B_AX_HS0ISR_IND_INT_EN; 3722 3723 if (chip->chip_id == RTL8851B) 3724 hs0isr_ind_int_en = B_AX_HS0ISR_IND_INT_EN_WKARND; 3725 3726 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | 0; 3727 3728 if (rtwpci->under_recovery) { 3729 rtwpci->intrs[0] = hs0isr_ind_int_en; 3730 rtwpci->intrs[1] = 0; 3731 } else { 3732 rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN | 3733 B_AX_RXDMA_INT_EN | 3734 B_AX_RXP1DMA_INT_EN | 3735 B_AX_RPQDMA_INT_EN | 3736 B_AX_RXDMA_STUCK_INT_EN | 3737 B_AX_RDU_INT_EN | 3738 B_AX_RPQBD_FULL_INT_EN | 3739 hs0isr_ind_int_en; 3740 3741 rtwpci->intrs[1] = B_AX_HC10ISR_IND_INT_EN; 3742 } 3743 } 3744 EXPORT_SYMBOL(rtw89_pci_config_intr_mask); 3745 3746 static void rtw89_pci_recovery_intr_mask_v1(struct rtw89_dev *rtwdev) 3747 { 3748 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3749 3750 rtwpci->ind_intrs = B_AX_HS0ISR_IND_INT_EN; 3751 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; 3752 rtwpci->intrs[0] = 0; 3753 rtwpci->intrs[1] = 0; 3754 } 3755 3756 static void rtw89_pci_default_intr_mask_v1(struct rtw89_dev *rtwdev) 3757 { 3758 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3759 3760 rtwpci->ind_intrs = B_AX_HCI_AXIDMA_INT_EN | 3761 B_AX_HS1ISR_IND_INT_EN | 3762 B_AX_HS0ISR_IND_INT_EN; 3763 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; 3764 rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN | 3765 B_AX_RXDMA_INT_EN | 3766 B_AX_RXP1DMA_INT_EN | 3767 B_AX_RPQDMA_INT_EN | 3768 B_AX_RXDMA_STUCK_INT_EN | 3769 B_AX_RDU_INT_EN | 3770 B_AX_RPQBD_FULL_INT_EN; 3771 rtwpci->intrs[1] = B_AX_GPIO18_INT_EN; 3772 } 3773 3774 static void rtw89_pci_low_power_intr_mask_v1(struct rtw89_dev *rtwdev) 3775 { 3776 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3777 3778 rtwpci->ind_intrs = B_AX_HS1ISR_IND_INT_EN | 
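/* low power: only the indirect HS0/HS1 indications plus GPIO18 stay enabled; intrs[0] below masks all DMA interrupts */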
3779 B_AX_HS0ISR_IND_INT_EN; 3780 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; 3781 rtwpci->intrs[0] = 0; 3782 rtwpci->intrs[1] = B_AX_GPIO18_INT_EN; 3783 } 3784 3785 void rtw89_pci_config_intr_mask_v1(struct rtw89_dev *rtwdev) 3786 { 3787 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3788 3789 if (rtwpci->under_recovery) 3790 rtw89_pci_recovery_intr_mask_v1(rtwdev); 3791 else if (rtwpci->low_power) 3792 rtw89_pci_low_power_intr_mask_v1(rtwdev); 3793 else 3794 rtw89_pci_default_intr_mask_v1(rtwdev); 3795 } 3796 EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v1); 3797 3798 static void rtw89_pci_recovery_intr_mask_v2(struct rtw89_dev *rtwdev) 3799 { 3800 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3801 3802 rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0; 3803 rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN; 3804 rtwpci->intrs[0] = 0; 3805 rtwpci->intrs[1] = 0; 3806 } 3807 3808 static void rtw89_pci_default_intr_mask_v2(struct rtw89_dev *rtwdev) 3809 { 3810 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3811 3812 rtwpci->ind_intrs = B_BE_HCI_AXIDMA_INT_EN0 | 3813 B_BE_HS0_IND_INT_EN0; 3814 rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN; 3815 rtwpci->intrs[0] = B_BE_RDU_CH1_INT_IMR_V1 | 3816 B_BE_RDU_CH0_INT_IMR_V1; 3817 rtwpci->intrs[1] = B_BE_PCIE_RX_RX0P2_IMR0_V1 | 3818 B_BE_PCIE_RX_RPQ0_IMR0_V1; 3819 } 3820 3821 static void rtw89_pci_low_power_intr_mask_v2(struct rtw89_dev *rtwdev) 3822 { 3823 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3824 3825 rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0 | 3826 B_BE_HS1_IND_INT_EN0; 3827 rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN; 3828 rtwpci->intrs[0] = 0; 3829 rtwpci->intrs[1] = B_BE_PCIE_RX_RX0P2_IMR0_V1 | 3830 B_BE_PCIE_RX_RPQ0_IMR0_V1; 3831 } 3832 3833 void rtw89_pci_config_intr_mask_v2(struct rtw89_dev *rtwdev) 3834 { 3835 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3836 3837 if (rtwpci->under_recovery) 3838 rtw89_pci_recovery_intr_mask_v2(rtwdev); 3839 else if (rtwpci->low_power) 3840 rtw89_pci_low_power_intr_mask_v2(rtwdev); 3841 else 3842 rtw89_pci_default_intr_mask_v2(rtwdev); 3843 } 3844 EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v2); 3845 3846 static int rtw89_pci_request_irq(struct rtw89_dev *rtwdev, 3847 struct pci_dev *pdev) 3848 { 3849 unsigned long flags = 0; 3850 int ret; 3851 3852 flags |= PCI_IRQ_INTX | PCI_IRQ_MSI; 3853 ret = pci_alloc_irq_vectors(pdev, 1, 1, flags); 3854 if (ret < 0) { 3855 rtw89_err(rtwdev, "failed to alloc irq vectors, ret %d\n", ret); 3856 goto err; 3857 } 3858 3859 ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq, 3860 rtw89_pci_interrupt_handler, 3861 rtw89_pci_interrupt_threadfn, 3862 IRQF_SHARED, KBUILD_MODNAME, rtwdev); 3863 if (ret) { 3864 rtw89_err(rtwdev, "failed to request threaded irq\n"); 3865 goto err_free_vector; 3866 } 3867 3868 rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RESET); 3869 3870 return 0; 3871 3872 err_free_vector: 3873 pci_free_irq_vectors(pdev); 3874 err: 3875 return ret; 3876 } 3877 3878 static void rtw89_pci_free_irq(struct rtw89_dev *rtwdev, 3879 struct pci_dev *pdev) 3880 { 3881 devm_free_irq(rtwdev->dev, pdev->irq, rtwdev); 3882 pci_free_irq_vectors(pdev); 3883 } 3884 3885 static u16 gray_code_to_bin(u16 gray_code) 3886 { 3887 u16 binary = gray_code; 3888 3889 while (gray_code) { 3890 gray_code >>= 1; 3891 binary ^= gray_code; 3892 } 3893 3894 return binary; 3895 } 3896 3897 static int 
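/* gray_code_to_bin() above converts the Gray-coded EQ value (read from
 * RAC_ANA1F below) to plain binary by XORing successive right shifts.
 * Worked example: gray 0b1011 -> 1011^0101=1110, 1110^0010=1100,
 * 1100^0001=1101, i.e. binary 0b1101 (13).
 */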
static u16 gray_code_to_bin(u16 gray_code)
{
        u16 binary = gray_code;

        while (gray_code) {
                gray_code >>= 1;
                binary ^= gray_code;
        }

        return binary;
}

static int rtw89_pci_filter_out(struct rtw89_dev *rtwdev)
{
        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
        struct pci_dev *pdev = rtwpci->pdev;
        u16 val16, filter_out_val;
        u32 val, phy_offset;
        int ret;

        if (rtwdev->chip->chip_id != RTL8852C)
                return 0;

        val = rtw89_read32_mask(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK);
        if (val == B_AX_ASPM_CTRL_L1)
                return 0;

        ret = pci_read_config_dword(pdev, RTW89_PCIE_L1_STS_V1, &val);
        if (ret)
                return ret;

        val = FIELD_GET(RTW89_BCFG_LINK_SPEED_MASK, val);
        if (val == RTW89_PCIE_GEN1_SPEED) {
                phy_offset = R_RAC_DIRECT_OFFSET_G1;
        } else if (val == RTW89_PCIE_GEN2_SPEED) {
                phy_offset = R_RAC_DIRECT_OFFSET_G2;
                val16 = rtw89_read16(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT);
                rtw89_write16_set(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT,
                                  val16 | B_PCIE_BIT_PINOUT_DIS);
                rtw89_write16_set(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT,
                                  val16 & ~B_PCIE_BIT_RD_SEL);

                val16 = rtw89_read16_mask(rtwdev,
                                          phy_offset + RAC_ANA1F * RAC_MULT,
                                          FILTER_OUT_EQ_MASK);
                val16 = gray_code_to_bin(val16);
                filter_out_val = rtw89_read16(rtwdev, phy_offset + RAC_ANA24 *
                                              RAC_MULT);
                filter_out_val &= ~REG_FILTER_OUT_MASK;
                filter_out_val |= FIELD_PREP(REG_FILTER_OUT_MASK, val16);

                rtw89_write16(rtwdev, phy_offset + RAC_ANA24 * RAC_MULT,
                              filter_out_val);
                rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0A * RAC_MULT,
                                  B_BAC_EQ_SEL);
                rtw89_write16_set(rtwdev,
                                  R_RAC_DIRECT_OFFSET_G1 + RAC_ANA0C * RAC_MULT,
                                  B_PCIE_BIT_PSAVE);
        } else {
                return -EOPNOTSUPP;
        }
        rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0C * RAC_MULT,
                          B_PCIE_BIT_PSAVE);

        return 0;
}
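/* rtw89_pci_clkreq_set(), rtw89_pci_aspm_set() and rtw89_pci_l1ss_set()
 * below only honour the corresponding disable_* module parameters and
 * then dispatch to the generation-specific implementation in gen_def.
 */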
"set" : "unset", ret); 3985 } else if (chip_id == RTL8852C) { 3986 rtw89_write32_set(rtwdev, R_AX_PCIE_LAT_CTRL, 3987 B_AX_CLK_REQ_SEL_OPT | B_AX_CLK_REQ_SEL); 3988 if (enable) 3989 rtw89_write32_set(rtwdev, R_AX_L1_CLK_CTRL, 3990 B_AX_CLK_REQ_N); 3991 else 3992 rtw89_write32_clr(rtwdev, R_AX_L1_CLK_CTRL, 3993 B_AX_CLK_REQ_N); 3994 } 3995 } 3996 3997 static void rtw89_pci_aspm_set(struct rtw89_dev *rtwdev, bool enable) 3998 { 3999 const struct rtw89_pci_info *info = rtwdev->pci_info; 4000 const struct rtw89_pci_gen_def *gen_def = info->gen_def; 4001 4002 if (rtw89_pci_disable_aspm_l1) 4003 return; 4004 4005 gen_def->aspm_set(rtwdev, enable); 4006 } 4007 4008 static void rtw89_pci_aspm_set_ax(struct rtw89_dev *rtwdev, bool enable) 4009 { 4010 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 4011 u8 value = 0; 4012 int ret; 4013 4014 ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, &value); 4015 if (ret) 4016 rtw89_warn(rtwdev, "failed to read ASPM Delay\n"); 4017 4018 u8p_replace_bits(&value, PCIE_L1DLY_16US, RTW89_L1DLY_MASK); 4019 u8p_replace_bits(&value, PCIE_L0SDLY_4US, RTW89_L0DLY_MASK); 4020 4021 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, value); 4022 if (ret) 4023 rtw89_warn(rtwdev, "failed to read ASPM Delay\n"); 4024 4025 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 4026 if (enable) 4027 ret = rtw89_pci_config_byte_set(rtwdev, 4028 RTW89_PCIE_L1_CTRL, 4029 RTW89_PCIE_BIT_L1); 4030 else 4031 ret = rtw89_pci_config_byte_clr(rtwdev, 4032 RTW89_PCIE_L1_CTRL, 4033 RTW89_PCIE_BIT_L1); 4034 } else if (chip_id == RTL8852C) { 4035 if (enable) 4036 rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1, 4037 B_AX_ASPM_CTRL_L1); 4038 else 4039 rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1, 4040 B_AX_ASPM_CTRL_L1); 4041 } 4042 if (ret) 4043 rtw89_err(rtwdev, "failed to %s ASPM L1, ret=%d", 4044 enable ? "set" : "unset", ret); 4045 } 4046 4047 static void rtw89_pci_recalc_int_mit(struct rtw89_dev *rtwdev) 4048 { 4049 enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen; 4050 const struct rtw89_pci_info *info = rtwdev->pci_info; 4051 struct rtw89_traffic_stats *stats = &rtwdev->stats; 4052 enum rtw89_tfc_lv tx_tfc_lv = stats->tx_tfc_lv; 4053 enum rtw89_tfc_lv rx_tfc_lv = stats->rx_tfc_lv; 4054 u32 val = 0; 4055 4056 if (rtwdev->scanning || 4057 (tx_tfc_lv < RTW89_TFC_HIGH && rx_tfc_lv < RTW89_TFC_HIGH)) 4058 goto out; 4059 4060 if (chip_gen == RTW89_CHIP_BE) 4061 val = B_BE_PCIE_MIT_RX0P2_EN | B_BE_PCIE_MIT_RX0P1_EN; 4062 else 4063 val = B_AX_RXMIT_RXP2_SEL | B_AX_RXMIT_RXP1_SEL | 4064 FIELD_PREP(B_AX_RXCOUNTER_MATCH_MASK, RTW89_PCI_RXBD_NUM_MAX / 2) | 4065 FIELD_PREP(B_AX_RXTIMER_UNIT_MASK, AX_RXTIMER_UNIT_64US) | 4066 FIELD_PREP(B_AX_RXTIMER_MATCH_MASK, 2048 / 64); 4067 4068 out: 4069 rtw89_write32(rtwdev, info->mit_addr, val); 4070 } 4071 4072 static void rtw89_pci_link_cfg(struct rtw89_dev *rtwdev) 4073 { 4074 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 4075 struct pci_dev *pdev = rtwpci->pdev; 4076 u16 link_ctrl; 4077 int ret; 4078 4079 /* Though there is standard PCIE configuration space to set the 4080 * link control register, but by Realtek's design, driver should 4081 * check if host supports CLKREQ/ASPM to enable the HW module. 4082 * 4083 * These functions are implemented by two HW modules associated, 4084 * one is responsible to access PCIE configuration space to 4085 * follow the host settings, and another is in charge of doing 4086 * CLKREQ/ASPM mechanisms, it is default disabled. 
static void rtw89_pci_recalc_int_mit(struct rtw89_dev *rtwdev)
{
        enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
        const struct rtw89_pci_info *info = rtwdev->pci_info;
        struct rtw89_traffic_stats *stats = &rtwdev->stats;
        enum rtw89_tfc_lv tx_tfc_lv = stats->tx_tfc_lv;
        enum rtw89_tfc_lv rx_tfc_lv = stats->rx_tfc_lv;
        u32 val = 0;

        if (rtwdev->scanning ||
            (tx_tfc_lv < RTW89_TFC_HIGH && rx_tfc_lv < RTW89_TFC_HIGH))
                goto out;

        if (chip_gen == RTW89_CHIP_BE)
                val = B_BE_PCIE_MIT_RX0P2_EN | B_BE_PCIE_MIT_RX0P1_EN;
        else
                val = B_AX_RXMIT_RXP2_SEL | B_AX_RXMIT_RXP1_SEL |
                      FIELD_PREP(B_AX_RXCOUNTER_MATCH_MASK, RTW89_PCI_RXBD_NUM_MAX / 2) |
                      FIELD_PREP(B_AX_RXTIMER_UNIT_MASK, AX_RXTIMER_UNIT_64US) |
                      FIELD_PREP(B_AX_RXTIMER_MATCH_MASK, 2048 / 64);

out:
        rtw89_write32(rtwdev, info->mit_addr, val);
}

static void rtw89_pci_link_cfg(struct rtw89_dev *rtwdev)
{
        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
        struct pci_dev *pdev = rtwpci->pdev;
        u16 link_ctrl;
        int ret;

        /* Although the standard PCIe configuration space contains a link
         * control register, by Realtek's design the driver must also check
         * whether the host enabled CLKREQ/ASPM before turning on the HW
         * module.
         *
         * Two associated HW modules implement these features: one accesses
         * the PCIe configuration space to follow the host settings, while
         * the other performs the actual CLKREQ/ASPM mechanisms and is
         * disabled by default, because a host that does not support them,
         * or a wrong setting (e.g. CLKREQ# not bi-directional), could make
         * the device drop off the bus if the HW misbehaves on the link.
         *
         * Hence the driver first checks that the PCIe configuration space
         * is synced and enabled, and only then turns on the module that
         * actually drives the mechanism.
         */
        ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl);
        if (ret) {
                rtw89_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret);
                return;
        }

        if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN)
                rtw89_pci_clkreq_set(rtwdev, true);

        if (link_ctrl & PCI_EXP_LNKCTL_ASPM_L1)
                rtw89_pci_aspm_set(rtwdev, true);
}

static void rtw89_pci_l1ss_set(struct rtw89_dev *rtwdev, bool enable)
{
        const struct rtw89_pci_info *info = rtwdev->pci_info;
        const struct rtw89_pci_gen_def *gen_def = info->gen_def;

        if (rtw89_pci_disable_l1ss)
                return;

        gen_def->l1ss_set(rtwdev, enable);
}

static void rtw89_pci_l1ss_set_ax(struct rtw89_dev *rtwdev, bool enable)
{
        enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
        int ret;

        if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
                if (enable)
                        ret = rtw89_pci_config_byte_set(rtwdev,
                                                        RTW89_PCIE_TIMER_CTRL,
                                                        RTW89_PCIE_BIT_L1SUB);
                else
                        ret = rtw89_pci_config_byte_clr(rtwdev,
                                                        RTW89_PCIE_TIMER_CTRL,
                                                        RTW89_PCIE_BIT_L1SUB);
                if (ret)
                        rtw89_err(rtwdev, "failed to %s L1SS, ret=%d\n",
                                  enable ? "set" : "unset", ret);
        } else if (chip_id == RTL8852C) {
                ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_L1SS_STS_V1,
                                                RTW89_PCIE_BIT_ASPM_L11 |
                                                RTW89_PCIE_BIT_PCI_L11);
                if (ret)
                        rtw89_warn(rtwdev, "failed to unset ASPM L1.1, ret=%d\n", ret);
                if (enable)
                        rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1,
                                          B_AX_L1SUB_DISABLE);
                else
                        rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1,
                                          B_AX_L1SUB_DISABLE);
        }
}

static void rtw89_pci_l1ss_cfg(struct rtw89_dev *rtwdev)
{
        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
        struct pci_dev *pdev = rtwpci->pdev;
        u32 l1ss_cap_ptr, l1ss_ctrl;

        if (rtw89_pci_disable_l1ss)
                return;

        l1ss_cap_ptr = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
        if (!l1ss_cap_ptr)
                return;

        pci_read_config_dword(pdev, l1ss_cap_ptr + PCI_L1SS_CTL1, &l1ss_ctrl);

        if (l1ss_ctrl & PCI_L1SS_CTL1_L1SS_MASK)
                rtw89_pci_l1ss_set(rtwdev, true);
}

static void rtw89_pci_cpl_timeout_cfg(struct rtw89_dev *rtwdev)
{
        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
        struct pci_dev *pdev = rtwpci->pdev;

        pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
                                 PCI_EXP_DEVCTL2_COMP_TMOUT_DIS);
}

static int rtw89_pci_poll_io_idle_ax(struct rtw89_dev *rtwdev)
{
        int ret = 0;
        u32 sts;
        u32 busy = B_AX_PCIEIO_BUSY | B_AX_PCIEIO_TX_BUSY | B_AX_PCIEIO_RX_BUSY;

        ret = read_poll_timeout_atomic(rtw89_read32, sts, (sts & busy) == 0x0,
                                       10, 1000, false, rtwdev,
                                       R_AX_PCIE_DMA_BUSY1);
        if (ret) {
                rtw89_err(rtwdev, "pci dmach busy1 0x%X\n",
                          rtw89_read32(rtwdev, R_AX_PCIE_DMA_BUSY1));
                return -EINVAL;
        }
        return ret;
}
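/* Level-1 SER recovery helpers: lv1rst_stop_dma idles the PCIe DMA
 * engines (kicking stuck HCI TX/RX DMA if needed) before the MAC is
 * reset, and lv1rst_start_dma restores them afterwards. RTL8852C skips
 * both steps here.
 */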
static int rtw89_pci_lv1rst_stop_dma_ax(struct rtw89_dev *rtwdev)
{
        u32 val;
        int ret;

        if (rtwdev->chip->chip_id == RTL8852C)
                return 0;

        rtw89_pci_ctrl_dma_all(rtwdev, false);
        ret = rtw89_pci_poll_io_idle_ax(rtwdev);
        if (ret) {
                val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG);
                rtw89_debug(rtwdev, RTW89_DBG_HCI,
                            "[PCIe] poll_io_idle fail, before 0x%08x: 0x%08x\n",
                            R_AX_DBG_ERR_FLAG, val);
                if (val & B_AX_TX_STUCK || val & B_AX_PCIE_TXBD_LEN0)
                        rtw89_mac_ctrl_hci_dma_tx(rtwdev, false);
                if (val & B_AX_RX_STUCK)
                        rtw89_mac_ctrl_hci_dma_rx(rtwdev, false);
                rtw89_mac_ctrl_hci_dma_trx(rtwdev, true);
                ret = rtw89_pci_poll_io_idle_ax(rtwdev);
                val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG);
                rtw89_debug(rtwdev, RTW89_DBG_HCI,
                            "[PCIe] poll_io_idle fail, after 0x%08x: 0x%08x\n",
                            R_AX_DBG_ERR_FLAG, val);
        }

        return ret;
}

static int rtw89_pci_lv1rst_start_dma_ax(struct rtw89_dev *rtwdev)
{
        int ret;

        if (rtwdev->chip->chip_id == RTL8852C)
                return 0;

        rtw89_mac_ctrl_hci_dma_trx(rtwdev, false);
        rtw89_mac_ctrl_hci_dma_trx(rtwdev, true);
        rtw89_pci_clr_idx_all(rtwdev);

        ret = rtw89_pci_rst_bdram_ax(rtwdev);
        if (ret)
                return ret;

        rtw89_pci_ctrl_dma_all(rtwdev, true);
        return ret;
}

static int rtw89_pci_ops_mac_lv1_recovery(struct rtw89_dev *rtwdev,
                                          enum rtw89_lv1_rcvy_step step)
{
        const struct rtw89_pci_info *info = rtwdev->pci_info;
        const struct rtw89_pci_gen_def *gen_def = info->gen_def;
        int ret;

        switch (step) {
        case RTW89_LV1_RCVY_STEP_1:
                ret = gen_def->lv1rst_stop_dma(rtwdev);
                if (ret)
                        rtw89_err(rtwdev, "lv1 rcvy pci stop dma fail\n");

                break;

        case RTW89_LV1_RCVY_STEP_2:
                ret = gen_def->lv1rst_start_dma(rtwdev);
                if (ret)
                        rtw89_err(rtwdev, "lv1 rcvy pci start dma fail\n");
                break;

        default:
                return -EINVAL;
        }

        return ret;
}

static void rtw89_pci_ops_dump_err_status(struct rtw89_dev *rtwdev)
{
        if (rtwdev->chip->chip_gen == RTW89_CHIP_BE)
                return;

        if (rtwdev->chip->chip_id == RTL8852C) {
                rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n",
                           rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG_V1));
                rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n",
                           rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG_V1));
        } else {
                rtw89_info(rtwdev, "R_AX_RPQ_RXBD_IDX=0x%08x\n",
                           rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX));
                rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n",
                           rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG));
                rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n",
                           rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG));
        }
}
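/* NAPI poll: returning the full budget keeps the poll scheduled; once
 * less than the budget is consumed, napi_complete_done() lets us
 * re-enable the hardware interrupt under irq_lock, but only while the
 * interface is still marked running.
 */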
static int rtw89_pci_napi_poll(struct napi_struct *napi, int budget)
{
        struct rtw89_dev *rtwdev = container_of(napi, struct rtw89_dev, napi);
        struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
        const struct rtw89_pci_info *info = rtwdev->pci_info;
        const struct rtw89_pci_gen_def *gen_def = info->gen_def;
        unsigned long flags;
        int work_done;

        rtwdev->napi_budget_countdown = budget;

        rtw89_write32(rtwdev, gen_def->isr_clear_rpq.addr, gen_def->isr_clear_rpq.data);
        work_done = rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown);
        if (work_done == budget)
                return budget;

        rtw89_write32(rtwdev, gen_def->isr_clear_rxq.addr, gen_def->isr_clear_rxq.data);
        work_done += rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown);
        if (work_done < budget && napi_complete_done(napi, work_done)) {
                spin_lock_irqsave(&rtwpci->irq_lock, flags);
                if (likely(rtwpci->running))
                        rtw89_chip_enable_intr(rtwdev, rtwpci);
                spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
        }

        return work_done;
}

static
void rtw89_check_pci_ssid_quirks(struct rtw89_dev *rtwdev,
                                 struct pci_dev *pdev,
                                 const struct rtw89_pci_ssid_quirk *ssid_quirks)
{
        int i;

        if (!ssid_quirks)
                return;

        /* The table is terminated by an all-zero vendor/device entry;
         * 200 iterations is only a safety bound.
         */
        for (i = 0; i < 200; i++, ssid_quirks++) {
                if (ssid_quirks->vendor == 0 && ssid_quirks->device == 0)
                        break;

                if (ssid_quirks->vendor != pdev->vendor ||
                    ssid_quirks->device != pdev->device ||
                    ssid_quirks->subsystem_vendor != pdev->subsystem_vendor ||
                    ssid_quirks->subsystem_device != pdev->subsystem_device)
                        continue;

                bitmap_or(rtwdev->quirks, rtwdev->quirks, &ssid_quirks->bitmap,
                          NUM_OF_RTW89_QUIRKS);
                rtwdev->custid = ssid_quirks->custid;
                break;
        }

        rtw89_debug(rtwdev, RTW89_DBG_HCI, "quirks=%*ph custid=%d\n",
                    (int)sizeof(rtwdev->quirks), rtwdev->quirks, rtwdev->custid);
}
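/* System PM hooks: suspend keeps PCIe PERST/training settings
 * (RTL8852A/885xB) or drops the L1-exit enables, and resume reverses
 * those writes before rerunning the basic PCI configuration.
 */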
static int __maybe_unused rtw89_pci_suspend(struct device *dev)
{
        struct ieee80211_hw *hw = dev_get_drvdata(dev);
        struct rtw89_dev *rtwdev = hw->priv;
        enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;

        rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
        rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST);
        rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
        if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
                rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
                                  B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
                rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
                                  B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG);
        } else {
                rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1,
                                  B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN);
        }

        return 0;
}

static void rtw89_pci_l2_hci_ldo(struct rtw89_dev *rtwdev)
{
        if (rtwdev->chip->chip_id == RTL8852C)
                return;

        /* The hardware requires writing this register twice for the
         * setting to take effect.
         */
        rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE,
                                    RTW89_PCIE_BIT_CFG_RST_MSTATE);
        rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE,
                                    RTW89_PCIE_BIT_CFG_RST_MSTATE);
}

void rtw89_pci_basic_cfg(struct rtw89_dev *rtwdev, bool resume)
{
        if (resume)
                rtw89_pci_cfg_dac(rtwdev);

        rtw89_pci_disable_eq(rtwdev);
        rtw89_pci_filter_out(rtwdev);
        rtw89_pci_cpl_timeout_cfg(rtwdev);
        rtw89_pci_link_cfg(rtwdev);
        rtw89_pci_l1ss_cfg(rtwdev);
}

static int __maybe_unused rtw89_pci_resume(struct device *dev)
{
        struct ieee80211_hw *hw = dev_get_drvdata(dev);
        struct rtw89_dev *rtwdev = hw->priv;
        enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;

        rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
        rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST);
        rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
        if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
                rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL,
                                  B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
                rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1,
                                  B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG);
        } else {
                rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1,
                                  B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN);
                rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1,
                                  B_AX_SEL_REQ_ENTR_L1);
        }
        rtw89_pci_l2_hci_ldo(rtwdev);

        rtw89_pci_basic_cfg(rtwdev, true);

        return 0;
}

SIMPLE_DEV_PM_OPS(rtw89_pm_ops, rtw89_pci_suspend, rtw89_pci_resume);
EXPORT_SYMBOL(rtw89_pm_ops);

const struct rtw89_pci_gen_def rtw89_pci_gen_ax = {
        .isr_rdu = B_AX_RDU_INT,
        .isr_halt_c2h = B_AX_HALT_C2H_INT_EN,
        .isr_wdt_timeout = B_AX_WDT_TIMEOUT_INT_EN,
        .isr_clear_rpq = {R_AX_PCIE_HISR00, B_AX_RPQDMA_INT | B_AX_RPQBD_FULL_INT},
        .isr_clear_rxq = {R_AX_PCIE_HISR00, B_AX_RXP1DMA_INT | B_AX_RXDMA_INT |
                                            B_AX_RDU_INT},

        .mac_pre_init = rtw89_pci_ops_mac_pre_init_ax,
        .mac_pre_deinit = rtw89_pci_ops_mac_pre_deinit_ax,
        .mac_post_init = rtw89_pci_ops_mac_post_init_ax,

        .clr_idx_all = rtw89_pci_clr_idx_all_ax,
        .rst_bdram = rtw89_pci_rst_bdram_ax,

        .lv1rst_stop_dma = rtw89_pci_lv1rst_stop_dma_ax,
        .lv1rst_start_dma = rtw89_pci_lv1rst_start_dma_ax,

        .ctrl_txdma_ch = rtw89_pci_ctrl_txdma_ch_ax,
        .ctrl_txdma_fw_ch = rtw89_pci_ctrl_txdma_fw_ch_ax,
        .poll_txdma_ch_idle = rtw89_pci_poll_txdma_ch_idle_ax,

        .aspm_set = rtw89_pci_aspm_set_ax,
        .clkreq_set = rtw89_pci_clkreq_set_ax,
        .l1ss_set = rtw89_pci_l1ss_set_ax,

        .disable_eq = rtw89_pci_disable_eq_ax,
        .power_wake = rtw89_pci_power_wake_ax,
};
EXPORT_SYMBOL(rtw89_pci_gen_ax);

static const struct rtw89_hci_ops rtw89_pci_ops = {
        .tx_write = rtw89_pci_ops_tx_write,
        .tx_kick_off = rtw89_pci_ops_tx_kick_off,
        .flush_queues = rtw89_pci_ops_flush_queues,
        .reset = rtw89_pci_ops_reset,
        .start = rtw89_pci_ops_start,
        .stop = rtw89_pci_ops_stop,
        .pause = rtw89_pci_ops_pause,
        .switch_mode = rtw89_pci_ops_switch_mode,
        .recalc_int_mit = rtw89_pci_recalc_int_mit,

        .read8 = rtw89_pci_ops_read8,
        .read16 = rtw89_pci_ops_read16,
        .read32 = rtw89_pci_ops_read32,
        .write8 = rtw89_pci_ops_write8,
        .write16 = rtw89_pci_ops_write16,
        .write32 = rtw89_pci_ops_write32,

        .mac_pre_init = rtw89_pci_ops_mac_pre_init,
        .mac_pre_deinit = rtw89_pci_ops_mac_pre_deinit,
        .mac_post_init = rtw89_pci_ops_mac_post_init,
        .deinit = rtw89_pci_ops_deinit,

        .check_and_reclaim_tx_resource = rtw89_pci_check_and_reclaim_tx_resource,
        .mac_lv1_rcvy = rtw89_pci_ops_mac_lv1_recovery,
        .dump_err_status = rtw89_pci_ops_dump_err_status,
        .napi_poll = rtw89_pci_napi_poll,

        .recovery_start = rtw89_pci_ops_recovery_start,
        .recovery_complete = rtw89_pci_ops_recovery_complete,

        .ctrl_txdma_ch = rtw89_pci_ctrl_txdma_ch,
        .ctrl_txdma_fw_ch = rtw89_pci_ctrl_txdma_fw_ch,
        .ctrl_trxhci = rtw89_pci_ctrl_dma_trx,
        .poll_txdma_ch_idle = rtw89_pci_poll_txdma_ch_idle,

        .clr_idx_all = rtw89_pci_clr_idx_all,
        .clear = rtw89_pci_clear_resource,
        .disable_intr = rtw89_pci_disable_intr_lock,
        .enable_intr = rtw89_pci_enable_intr_lock,
        .rst_bdram = rtw89_pci_reset_bdram,
};
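/* Probe order matters: core init -> claim PCI device -> DMA resources
 * -> chip info -> basic PCI config -> NAPI -> IRQ -> register with the
 * stack; the error labels unwind in exactly the reverse order.
 */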
int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct rtw89_dev *rtwdev;
        const struct rtw89_driver_info *info;
        const struct rtw89_pci_info *pci_info;
        int ret;

        info = (const struct rtw89_driver_info *)id->driver_data;

        rtwdev = rtw89_alloc_ieee80211_hw(&pdev->dev,
                                          sizeof(struct rtw89_pci),
                                          info->chip, info->variant);
        if (!rtwdev) {
                dev_err(&pdev->dev, "failed to allocate hw\n");
                return -ENOMEM;
        }

        pci_info = info->bus.pci;

        rtwdev->pci_info = info->bus.pci;
        rtwdev->hci.ops = &rtw89_pci_ops;
        rtwdev->hci.type = RTW89_HCI_TYPE_PCIE;
        rtwdev->hci.rpwm_addr = pci_info->rpwm_addr;
        rtwdev->hci.cpwm_addr = pci_info->cpwm_addr;

        rtw89_check_quirks(rtwdev, info->quirks);
        rtw89_check_pci_ssid_quirks(rtwdev, pdev, pci_info->ssid_quirks);

        SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);

        ret = rtw89_core_init(rtwdev);
        if (ret) {
                rtw89_err(rtwdev, "failed to initialise core\n");
                goto err_release_hw;
        }

        ret = rtw89_pci_claim_device(rtwdev, pdev);
        if (ret) {
                rtw89_err(rtwdev, "failed to claim pci device\n");
                goto err_core_deinit;
        }

        ret = rtw89_pci_setup_resource(rtwdev, pdev);
        if (ret) {
                rtw89_err(rtwdev, "failed to setup pci resource\n");
                goto err_declaim_pci;
        }

        ret = rtw89_chip_info_setup(rtwdev);
        if (ret) {
                rtw89_err(rtwdev, "failed to setup chip information\n");
                goto err_clear_resource;
        }

        rtw89_pci_basic_cfg(rtwdev, false);

        ret = rtw89_core_napi_init(rtwdev);
        if (ret) {
                rtw89_err(rtwdev, "failed to init napi\n");
                goto err_clear_resource;
        }

        ret = rtw89_pci_request_irq(rtwdev, pdev);
        if (ret) {
                rtw89_err(rtwdev, "failed to request pci irq\n");
                goto err_deinit_napi;
        }

        ret = rtw89_core_register(rtwdev);
        if (ret) {
                rtw89_err(rtwdev, "failed to register core\n");
                goto err_free_irq;
        }

        set_bit(RTW89_FLAG_PROBE_DONE, rtwdev->flags);

        return 0;

err_free_irq:
        rtw89_pci_free_irq(rtwdev, pdev);
err_deinit_napi:
        rtw89_core_napi_deinit(rtwdev);
err_clear_resource:
        rtw89_pci_clear_resource(rtwdev, pdev);
err_declaim_pci:
        rtw89_pci_declaim_device(rtwdev, pdev);
err_core_deinit:
        rtw89_core_deinit(rtwdev);
err_release_hw:
        rtw89_free_ieee80211_hw(rtwdev);

        return ret;
}
EXPORT_SYMBOL(rtw89_pci_probe);

void rtw89_pci_remove(struct pci_dev *pdev)
{
        struct ieee80211_hw *hw = pci_get_drvdata(pdev);
        struct rtw89_dev *rtwdev;

        rtwdev = hw->priv;

        rtw89_pci_free_irq(rtwdev, pdev);
        rtw89_core_napi_deinit(rtwdev);
        rtw89_core_unregister(rtwdev);
        rtw89_pci_clear_resource(rtwdev, pdev);
        rtw89_pci_declaim_device(rtwdev, pdev);
        rtw89_core_deinit(rtwdev);
        rtw89_free_ieee80211_hw(rtwdev);
}
EXPORT_SYMBOL(rtw89_pci_remove);

MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek PCI 802.11ax wireless driver");
MODULE_LICENSE("Dual BSD/GPL");
#if defined(__FreeBSD__)
MODULE_VERSION(rtw89_pci, 1);
MODULE_DEPEND(rtw89_pci, linuxkpi, 1, 1, 1);
MODULE_DEPEND(rtw89_pci, linuxkpi_wlan, 1, 1, 1);
#ifdef CONFIG_RTW89_DEBUGFS
MODULE_DEPEND(rtw89_pci, lindebugfs, 1, 1, 1);
#endif
#endif