1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* Copyright(c) 2020 Realtek Corporation 3 */ 4 5 #include <linux/pci.h> 6 7 #include "mac.h" 8 #include "pci.h" 9 #include "reg.h" 10 #include "ser.h" 11 12 static bool rtw89_pci_disable_clkreq; 13 static bool rtw89_pci_disable_aspm_l1; 14 static bool rtw89_pci_disable_l1ss; 15 module_param_named(disable_clkreq, rtw89_pci_disable_clkreq, bool, 0644); 16 module_param_named(disable_aspm_l1, rtw89_pci_disable_aspm_l1, bool, 0644); 17 module_param_named(disable_aspm_l1ss, rtw89_pci_disable_l1ss, bool, 0644); 18 MODULE_PARM_DESC(disable_clkreq, "Set Y to disable PCI clkreq support"); 19 MODULE_PARM_DESC(disable_aspm_l1, "Set Y to disable PCI ASPM L1 support"); 20 MODULE_PARM_DESC(disable_aspm_l1ss, "Set Y to disable PCI L1SS support"); 21 22 static int rtw89_pci_get_phy_offset_by_link_speed(struct rtw89_dev *rtwdev, 23 u32 *phy_offset) 24 { 25 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 26 struct pci_dev *pdev = rtwpci->pdev; 27 u32 val; 28 int ret; 29 30 ret = pci_read_config_dword(pdev, RTW89_PCIE_L1_STS_V1, &val); 31 if (ret) 32 return ret; 33 34 val = u32_get_bits(val, RTW89_BCFG_LINK_SPEED_MASK); 35 if (val == RTW89_PCIE_GEN1_SPEED) { 36 *phy_offset = R_RAC_DIRECT_OFFSET_G1; 37 } else if (val == RTW89_PCIE_GEN2_SPEED) { 38 *phy_offset = R_RAC_DIRECT_OFFSET_G2; 39 } else { 40 rtw89_warn(rtwdev, "Unknown PCI link speed %d\n", val); 41 return -EFAULT; 42 } 43 44 return 0; 45 } 46 47 static int rtw89_pci_rst_bdram_ax(struct rtw89_dev *rtwdev) 48 { 49 u32 val; 50 int ret; 51 52 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RST_BDRAM); 53 54 ret = read_poll_timeout_atomic(rtw89_read32, val, !(val & B_AX_RST_BDRAM), 55 1, RTW89_PCI_POLL_BDRAM_RST_CNT, false, 56 rtwdev, R_AX_PCIE_INIT_CFG1); 57 58 return ret; 59 } 60 61 static u32 rtw89_pci_dma_recalc(struct rtw89_dev *rtwdev, 62 struct rtw89_pci_dma_ring *bd_ring, 63 u32 cur_idx, bool tx) 64 { 65 const struct rtw89_pci_info *info = rtwdev->pci_info; 66 u32 cnt, cur_rp, wp, rp, len; 67 68 rp = bd_ring->rp; 69 wp = bd_ring->wp; 70 len = bd_ring->len; 71 72 cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx); 73 if (tx) { 74 cnt = cur_rp >= rp ? cur_rp - rp : len - (rp - cur_rp); 75 } else { 76 if (info->rx_ring_eq_is_full) 77 wp += 1; 78 79 cnt = cur_rp >= wp ? cur_rp - wp : len - (wp - cur_rp); 80 } 81 82 bd_ring->rp = cur_rp; 83 84 return cnt; 85 } 86 87 static u32 rtw89_pci_txbd_recalc(struct rtw89_dev *rtwdev, 88 struct rtw89_pci_tx_ring *tx_ring) 89 { 90 struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring; 91 u32 addr_idx = bd_ring->addr.idx; 92 u32 cnt, idx; 93 94 idx = rtw89_read32(rtwdev, addr_idx); 95 cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, true); 96 97 return cnt; 98 } 99 100 static void rtw89_pci_release_fwcmd(struct rtw89_dev *rtwdev, 101 struct rtw89_pci *rtwpci, 102 u32 cnt, bool release_all) 103 { 104 struct rtw89_pci_tx_data *tx_data; 105 struct sk_buff *skb; 106 u32 qlen; 107 108 while (cnt--) { 109 skb = skb_dequeue(&rtwpci->h2c_queue); 110 if (!skb) { 111 rtw89_err(rtwdev, "failed to pre-release fwcmd\n"); 112 return; 113 } 114 skb_queue_tail(&rtwpci->h2c_release_queue, skb); 115 } 116 117 qlen = skb_queue_len(&rtwpci->h2c_release_queue); 118 if (!release_all) 119 qlen = qlen > RTW89_PCI_MULTITAG ? qlen - RTW89_PCI_MULTITAG : 0; 120 121 while (qlen--) { 122 skb = skb_dequeue(&rtwpci->h2c_release_queue); 123 if (!skb) { 124 rtw89_err(rtwdev, "failed to release fwcmd\n"); 125 return; 126 } 127 tx_data = RTW89_PCI_TX_SKB_CB(skb); 128 dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len, 129 DMA_TO_DEVICE); 130 dev_kfree_skb_any(skb); 131 } 132 } 133 134 static void rtw89_pci_reclaim_tx_fwcmd(struct rtw89_dev *rtwdev, 135 struct rtw89_pci *rtwpci) 136 { 137 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx.rings[RTW89_TXCH_CH12]; 138 u32 cnt; 139 140 cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring); 141 if (!cnt) 142 return; 143 rtw89_pci_release_fwcmd(rtwdev, rtwpci, cnt, false); 144 } 145 146 static u32 rtw89_pci_rxbd_recalc(struct rtw89_dev *rtwdev, 147 struct rtw89_pci_rx_ring *rx_ring) 148 { 149 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; 150 u32 addr_idx = bd_ring->addr.idx; 151 u32 cnt, idx; 152 153 idx = rtw89_read32(rtwdev, addr_idx); 154 cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, false); 155 156 return cnt; 157 } 158 159 static void rtw89_pci_sync_skb_for_cpu(struct rtw89_dev *rtwdev, 160 struct sk_buff *skb) 161 { 162 struct rtw89_pci_rx_info *rx_info; 163 dma_addr_t dma; 164 165 rx_info = RTW89_PCI_RX_SKB_CB(skb); 166 dma = rx_info->dma; 167 dma_sync_single_for_cpu(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE, 168 DMA_FROM_DEVICE); 169 } 170 171 static void rtw89_pci_sync_skb_for_device(struct rtw89_dev *rtwdev, 172 struct sk_buff *skb) 173 { 174 struct rtw89_pci_rx_info *rx_info; 175 dma_addr_t dma; 176 177 rx_info = RTW89_PCI_RX_SKB_CB(skb); 178 dma = rx_info->dma; 179 dma_sync_single_for_device(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE, 180 DMA_FROM_DEVICE); 181 } 182 183 static void rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev, 184 struct sk_buff *skb) 185 { 186 struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb); 187 struct rtw89_pci_rxbd_info *rxbd_info; 188 __le32 info; 189 190 rxbd_info = (struct rtw89_pci_rxbd_info *)skb->data; 191 info = rxbd_info->dword; 192 193 rx_info->fs = le32_get_bits(info, RTW89_PCI_RXBD_FS); 194 rx_info->ls = le32_get_bits(info, RTW89_PCI_RXBD_LS); 195 rx_info->len = le32_get_bits(info, RTW89_PCI_RXBD_WRITE_SIZE); 196 rx_info->tag = le32_get_bits(info, RTW89_PCI_RXBD_TAG); 197 } 198 199 static int rtw89_pci_validate_rx_tag(struct rtw89_dev *rtwdev, 200 struct rtw89_pci_rx_ring *rx_ring, 201 struct sk_buff *skb) 202 { 203 struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb); 204 const struct rtw89_pci_info *info = rtwdev->pci_info; 205 u32 target_rx_tag; 206 207 if (!info->check_rx_tag) 208 return 0; 209 210 /* valid range is 1 ~ 0x1FFF */ 211 if (rx_ring->target_rx_tag == 0) 212 target_rx_tag = 1; 213 else 214 target_rx_tag = rx_ring->target_rx_tag; 215 216 if (rx_info->tag != target_rx_tag) { 217 rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "mismatch RX tag 0x%x 0x%x\n", 218 rx_info->tag, target_rx_tag); 219 return -EAGAIN; 220 } 221 222 return 0; 223 } 224 225 static 226 int rtw89_pci_sync_skb_for_device_and_validate_rx_info(struct rtw89_dev *rtwdev, 227 struct rtw89_pci_rx_ring *rx_ring, 228 struct sk_buff *skb) 229 { 230 struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb); 231 int rx_tag_retry = 1000; 232 int ret; 233 234 do { 235 rtw89_pci_sync_skb_for_cpu(rtwdev, skb); 236 rtw89_pci_rxbd_info_update(rtwdev, skb); 237 238 ret = rtw89_pci_validate_rx_tag(rtwdev, rx_ring, skb); 239 if (ret != -EAGAIN) 240 break; 241 } while (rx_tag_retry--); 242 243 /* update target rx_tag for next RX */ 244 rx_ring->target_rx_tag = rx_info->tag + 1; 245 246 return ret; 247 } 248 249 static void rtw89_pci_ctrl_txdma_ch_ax(struct rtw89_dev *rtwdev, bool enable) 250 { 251 const struct rtw89_pci_info *info = rtwdev->pci_info; 252 const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1; 253 const struct rtw89_reg_def *dma_stop2 = &info->dma_stop2; 254 255 if (enable) { 256 rtw89_write32_clr(rtwdev, dma_stop1->addr, dma_stop1->mask); 257 if (dma_stop2->addr) 258 rtw89_write32_clr(rtwdev, dma_stop2->addr, dma_stop2->mask); 259 } else { 260 rtw89_write32_set(rtwdev, dma_stop1->addr, dma_stop1->mask); 261 if (dma_stop2->addr) 262 rtw89_write32_set(rtwdev, dma_stop2->addr, dma_stop2->mask); 263 } 264 } 265 266 static void rtw89_pci_ctrl_txdma_fw_ch_ax(struct rtw89_dev *rtwdev, bool enable) 267 { 268 const struct rtw89_pci_info *info = rtwdev->pci_info; 269 const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1; 270 271 if (enable) 272 rtw89_write32_clr(rtwdev, dma_stop1->addr, B_AX_STOP_CH12); 273 else 274 rtw89_write32_set(rtwdev, dma_stop1->addr, B_AX_STOP_CH12); 275 } 276 277 static bool 278 rtw89_skb_put_rx_data(struct rtw89_dev *rtwdev, bool fs, bool ls, 279 struct sk_buff *new, 280 const struct sk_buff *skb, u32 offset, 281 const struct rtw89_pci_rx_info *rx_info, 282 const struct rtw89_rx_desc_info *desc_info) 283 { 284 u32 copy_len = rx_info->len - offset; 285 286 if (unlikely(skb_tailroom(new) < copy_len)) { 287 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 288 "invalid rx data length bd_len=%d desc_len=%d offset=%d (fs=%d ls=%d)\n", 289 rx_info->len, desc_info->pkt_size, offset, fs, ls); 290 rtw89_hex_dump(rtwdev, RTW89_DBG_TXRX, "rx_data: ", 291 skb->data, rx_info->len); 292 /* length of a single segment skb is desc_info->pkt_size */ 293 if (fs && ls) { 294 copy_len = desc_info->pkt_size; 295 } else { 296 rtw89_info(rtwdev, "drop rx data due to invalid length\n"); 297 return false; 298 } 299 } 300 301 skb_put_data(new, skb->data + offset, copy_len); 302 303 return true; 304 } 305 306 static u32 rtw89_pci_get_rx_skb_idx(struct rtw89_dev *rtwdev, 307 struct rtw89_pci_dma_ring *bd_ring) 308 { 309 const struct rtw89_pci_info *info = rtwdev->pci_info; 310 u32 wp = bd_ring->wp; 311 312 if (!info->rx_ring_eq_is_full) 313 return wp; 314 315 if (++wp >= bd_ring->len) 316 wp = 0; 317 318 return wp; 319 } 320 321 static u32 rtw89_pci_rxbd_deliver_skbs(struct rtw89_dev *rtwdev, 322 struct rtw89_pci_rx_ring *rx_ring) 323 { 324 struct rtw89_rx_desc_info *desc_info = &rx_ring->diliver_desc; 325 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; 326 const struct rtw89_pci_info *info = rtwdev->pci_info; 327 struct sk_buff *new = rx_ring->diliver_skb; 328 struct rtw89_pci_rx_info *rx_info; 329 struct sk_buff *skb; 330 u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info); 331 u32 skb_idx; 332 u32 offset; 333 u32 cnt = 1; 334 bool fs, ls; 335 int ret; 336 337 skb_idx = rtw89_pci_get_rx_skb_idx(rtwdev, bd_ring); 338 skb = rx_ring->buf[skb_idx]; 339 340 ret = rtw89_pci_sync_skb_for_device_and_validate_rx_info(rtwdev, rx_ring, skb); 341 if (ret) { 342 rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n", 343 bd_ring->wp, ret); 344 goto err_sync_device; 345 } 346 347 rx_info = RTW89_PCI_RX_SKB_CB(skb); 348 fs = info->no_rxbd_fs ? !new : rx_info->fs; 349 ls = rx_info->ls; 350 351 if (unlikely(!fs || !ls)) 352 rtw89_debug(rtwdev, RTW89_DBG_UNEXP, 353 "unexpected fs/ls=%d/%d tag=%u len=%u new->len=%u\n", 354 fs, ls, rx_info->tag, rx_info->len, new ? new->len : 0); 355 356 if (fs) { 357 if (new) { 358 rtw89_debug(rtwdev, RTW89_DBG_UNEXP, 359 "skb should not be ready before first segment start\n"); 360 goto err_sync_device; 361 } 362 if (desc_info->ready) { 363 rtw89_warn(rtwdev, "desc info should not be ready before first segment start\n"); 364 goto err_sync_device; 365 } 366 367 rtw89_chip_query_rxdesc(rtwdev, desc_info, skb->data, rxinfo_size); 368 369 new = rtw89_alloc_skb_for_rx(rtwdev, desc_info->pkt_size); 370 if (!new) 371 goto err_sync_device; 372 373 rx_ring->diliver_skb = new; 374 375 /* first segment has RX desc */ 376 offset = desc_info->offset + desc_info->rxd_len; 377 } else { 378 offset = sizeof(struct rtw89_pci_rxbd_info); 379 if (!new) { 380 rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "no last skb\n"); 381 goto err_sync_device; 382 } 383 } 384 if (!rtw89_skb_put_rx_data(rtwdev, fs, ls, new, skb, offset, rx_info, desc_info)) 385 goto err_sync_device; 386 rtw89_pci_sync_skb_for_device(rtwdev, skb); 387 rtw89_pci_rxbd_increase(rx_ring, 1); 388 389 if (!desc_info->ready) { 390 rtw89_warn(rtwdev, "no rx desc information\n"); 391 goto err_free_resource; 392 } 393 if (ls) { 394 rtw89_core_rx(rtwdev, desc_info, new); 395 rx_ring->diliver_skb = NULL; 396 desc_info->ready = false; 397 } 398 399 return cnt; 400 401 err_sync_device: 402 rtw89_pci_sync_skb_for_device(rtwdev, skb); 403 rtw89_pci_rxbd_increase(rx_ring, 1); 404 err_free_resource: 405 if (new) 406 dev_kfree_skb_any(new); 407 rx_ring->diliver_skb = NULL; 408 desc_info->ready = false; 409 410 return cnt; 411 } 412 413 static void rtw89_pci_rxbd_deliver(struct rtw89_dev *rtwdev, 414 struct rtw89_pci_rx_ring *rx_ring, 415 u32 cnt) 416 { 417 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; 418 u32 rx_cnt; 419 420 while (cnt && rtwdev->napi_budget_countdown > 0) { 421 rx_cnt = rtw89_pci_rxbd_deliver_skbs(rtwdev, rx_ring); 422 if (!rx_cnt) { 423 rtw89_err(rtwdev, "failed to deliver RXBD skb\n"); 424 425 /* skip the rest RXBD bufs */ 426 rtw89_pci_rxbd_increase(rx_ring, cnt); 427 break; 428 } 429 430 cnt -= rx_cnt; 431 } 432 433 rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp); 434 } 435 436 static int rtw89_pci_poll_rxq_dma(struct rtw89_dev *rtwdev, 437 struct rtw89_pci *rtwpci, int budget) 438 { 439 struct rtw89_pci_rx_ring *rx_ring; 440 int countdown = rtwdev->napi_budget_countdown; 441 u32 cnt; 442 443 rx_ring = &rtwpci->rx.rings[RTW89_RXCH_RXQ]; 444 445 cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring); 446 if (!cnt) 447 return 0; 448 449 cnt = min_t(u32, budget, cnt); 450 451 rtw89_pci_rxbd_deliver(rtwdev, rx_ring, cnt); 452 453 /* In case of flushing pending SKBs, the countdown may exceed. */ 454 if (rtwdev->napi_budget_countdown <= 0) 455 return budget; 456 457 return budget - countdown; 458 } 459 460 static void rtw89_pci_tx_status(struct rtw89_dev *rtwdev, 461 struct rtw89_pci_tx_ring *tx_ring, 462 struct sk_buff *skb, u8 tx_status) 463 { 464 struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb); 465 struct ieee80211_tx_info *info; 466 467 if (rtw89_core_tx_wait_complete(rtwdev, skb_data, tx_status == RTW89_TX_DONE)) 468 return; 469 470 info = IEEE80211_SKB_CB(skb); 471 ieee80211_tx_info_clear_status(info); 472 473 if (info->flags & IEEE80211_TX_CTL_NO_ACK) 474 info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; 475 if (tx_status == RTW89_TX_DONE) { 476 info->flags |= IEEE80211_TX_STAT_ACK; 477 tx_ring->tx_acked++; 478 } else { 479 if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) 480 rtw89_debug(rtwdev, RTW89_DBG_FW, 481 "failed to TX of status %x\n", tx_status); 482 switch (tx_status) { 483 case RTW89_TX_RETRY_LIMIT: 484 tx_ring->tx_retry_lmt++; 485 break; 486 case RTW89_TX_LIFE_TIME: 487 tx_ring->tx_life_time++; 488 break; 489 case RTW89_TX_MACID_DROP: 490 tx_ring->tx_mac_id_drop++; 491 break; 492 default: 493 rtw89_warn(rtwdev, "invalid TX status %x\n", tx_status); 494 break; 495 } 496 } 497 498 ieee80211_tx_status_ni(rtwdev->hw, skb); 499 } 500 501 static void rtw89_pci_reclaim_txbd(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring) 502 { 503 struct rtw89_pci_tx_wd *txwd; 504 u32 cnt; 505 506 cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring); 507 while (cnt--) { 508 txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list); 509 if (!txwd) { 510 rtw89_warn(rtwdev, "No busy txwd pages available\n"); 511 break; 512 } 513 514 list_del_init(&txwd->list); 515 516 /* this skb has been freed by RPP */ 517 if (skb_queue_len(&txwd->queue) == 0) 518 rtw89_pci_enqueue_txwd(tx_ring, txwd); 519 } 520 } 521 522 static void rtw89_pci_release_busy_txwd(struct rtw89_dev *rtwdev, 523 struct rtw89_pci_tx_ring *tx_ring) 524 { 525 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 526 struct rtw89_pci_tx_wd *txwd; 527 int i; 528 529 for (i = 0; i < wd_ring->page_num; i++) { 530 txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list); 531 if (!txwd) 532 break; 533 534 list_del_init(&txwd->list); 535 } 536 } 537 538 static void rtw89_pci_release_txwd_skb(struct rtw89_dev *rtwdev, 539 struct rtw89_pci_tx_ring *tx_ring, 540 struct rtw89_pci_tx_wd *txwd, u16 seq, 541 u8 tx_status) 542 { 543 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 544 struct rtw89_pci_tx_data *tx_data; 545 struct sk_buff *skb, *tmp; 546 u8 txch = tx_ring->txch; 547 548 if (!list_empty(&txwd->list)) { 549 rtw89_pci_reclaim_txbd(rtwdev, tx_ring); 550 /* In low power mode, RPP can receive before updating of TX BD. 551 * In normal mode, it should not happen so give it a warning. 552 */ 553 if (!rtwpci->low_power && !list_empty(&txwd->list)) 554 rtw89_warn(rtwdev, "queue %d txwd %d is not idle\n", 555 txch, seq); 556 } 557 558 skb_queue_walk_safe(&txwd->queue, skb, tmp) { 559 skb_unlink(skb, &txwd->queue); 560 561 tx_data = RTW89_PCI_TX_SKB_CB(skb); 562 dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len, 563 DMA_TO_DEVICE); 564 565 rtw89_pci_tx_status(rtwdev, tx_ring, skb, tx_status); 566 } 567 568 if (list_empty(&txwd->list)) 569 rtw89_pci_enqueue_txwd(tx_ring, txwd); 570 } 571 572 void rtw89_pci_parse_rpp(struct rtw89_dev *rtwdev, void *_rpp, 573 struct rtw89_pci_rpp_info *rpp_info) 574 { 575 const struct rtw89_pci_rpp_fmt *rpp = _rpp; 576 577 rpp_info->seq = le32_get_bits(rpp->dword, RTW89_PCI_RPP_SEQ); 578 rpp_info->qsel = le32_get_bits(rpp->dword, RTW89_PCI_RPP_QSEL); 579 rpp_info->tx_status = le32_get_bits(rpp->dword, RTW89_PCI_RPP_TX_STATUS); 580 rpp_info->txch = rtw89_chip_get_ch_dma(rtwdev, rpp_info->qsel); 581 } 582 EXPORT_SYMBOL(rtw89_pci_parse_rpp); 583 584 void rtw89_pci_parse_rpp_v1(struct rtw89_dev *rtwdev, void *_rpp, 585 struct rtw89_pci_rpp_info *rpp_info) 586 { 587 const struct rtw89_pci_rpp_fmt_v1 *rpp = _rpp; 588 589 rpp_info->seq = le32_get_bits(rpp->w0, RTW89_PCI_RPP_W0_PCIE_SEQ_V1_MASK); 590 rpp_info->qsel = le32_get_bits(rpp->w1, RTW89_PCI_RPP_W1_QSEL_V1_MASK); 591 rpp_info->tx_status = le32_get_bits(rpp->w0, RTW89_PCI_RPP_W0_TX_STATUS_V1_MASK); 592 rpp_info->txch = le32_get_bits(rpp->w0, RTW89_PCI_RPP_W0_DMA_CH_MASK); 593 } 594 EXPORT_SYMBOL(rtw89_pci_parse_rpp_v1); 595 596 static void rtw89_pci_release_rpp(struct rtw89_dev *rtwdev, void *rpp) 597 { 598 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 599 const struct rtw89_pci_info *info = rtwdev->pci_info; 600 struct rtw89_pci_rpp_info rpp_info = {}; 601 struct rtw89_pci_tx_wd_ring *wd_ring; 602 struct rtw89_pci_tx_ring *tx_ring; 603 struct rtw89_pci_tx_wd *txwd; 604 605 info->parse_rpp(rtwdev, rpp, &rpp_info); 606 607 if (rpp_info.txch == RTW89_TXCH_CH12) { 608 rtw89_warn(rtwdev, "should no fwcmd release report\n"); 609 return; 610 } 611 612 tx_ring = &rtwpci->tx.rings[rpp_info.txch]; 613 wd_ring = &tx_ring->wd_ring; 614 txwd = &wd_ring->pages[rpp_info.seq]; 615 616 rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, rpp_info.seq, 617 rpp_info.tx_status); 618 } 619 620 static void rtw89_pci_release_pending_txwd_skb(struct rtw89_dev *rtwdev, 621 struct rtw89_pci_tx_ring *tx_ring) 622 { 623 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 624 struct rtw89_pci_tx_wd *txwd; 625 int i; 626 627 for (i = 0; i < wd_ring->page_num; i++) { 628 txwd = &wd_ring->pages[i]; 629 630 if (!list_empty(&txwd->list)) 631 continue; 632 633 rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, i, RTW89_TX_MACID_DROP); 634 } 635 } 636 637 static u32 rtw89_pci_release_tx_skbs(struct rtw89_dev *rtwdev, 638 struct rtw89_pci_rx_ring *rx_ring, 639 u32 max_cnt) 640 { 641 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; 642 const struct rtw89_pci_info *info = rtwdev->pci_info; 643 struct rtw89_rx_desc_info desc_info = {}; 644 struct rtw89_pci_rx_info *rx_info; 645 struct sk_buff *skb; 646 void *rpp; 647 u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info); 648 u32 rpp_size = info->rpp_fmt_size; 649 u32 cnt = 0; 650 u32 skb_idx; 651 u32 offset; 652 int ret; 653 654 skb_idx = rtw89_pci_get_rx_skb_idx(rtwdev, bd_ring); 655 skb = rx_ring->buf[skb_idx]; 656 657 ret = rtw89_pci_sync_skb_for_device_and_validate_rx_info(rtwdev, rx_ring, skb); 658 if (ret) { 659 rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n", 660 bd_ring->wp, ret); 661 goto err_sync_device; 662 } 663 664 rx_info = RTW89_PCI_RX_SKB_CB(skb); 665 if (!rx_info->fs || !rx_info->ls) { 666 rtw89_err(rtwdev, "cannot process RP frame not set FS/LS\n"); 667 return cnt; 668 } 669 670 rtw89_chip_query_rxdesc(rtwdev, &desc_info, skb->data, rxinfo_size); 671 672 /* first segment has RX desc */ 673 offset = desc_info.offset + desc_info.rxd_len; 674 for (; offset + rpp_size <= rx_info->len; offset += rpp_size) { 675 rpp = skb->data + offset; 676 rtw89_pci_release_rpp(rtwdev, rpp); 677 } 678 679 rtw89_pci_sync_skb_for_device(rtwdev, skb); 680 rtw89_pci_rxbd_increase(rx_ring, 1); 681 cnt++; 682 683 return cnt; 684 685 err_sync_device: 686 rtw89_pci_sync_skb_for_device(rtwdev, skb); 687 return 0; 688 } 689 690 static void rtw89_pci_release_tx(struct rtw89_dev *rtwdev, 691 struct rtw89_pci_rx_ring *rx_ring, 692 u32 cnt) 693 { 694 struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; 695 u32 release_cnt; 696 697 while (cnt) { 698 release_cnt = rtw89_pci_release_tx_skbs(rtwdev, rx_ring, cnt); 699 if (!release_cnt) { 700 rtw89_err(rtwdev, "failed to release TX skbs\n"); 701 702 /* skip the rest RXBD bufs */ 703 rtw89_pci_rxbd_increase(rx_ring, cnt); 704 break; 705 } 706 707 cnt -= release_cnt; 708 } 709 710 rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp); 711 } 712 713 static int rtw89_pci_poll_rpq_dma(struct rtw89_dev *rtwdev, 714 struct rtw89_pci *rtwpci, int budget) 715 { 716 struct rtw89_pci_rx_ring *rx_ring; 717 u32 cnt; 718 int work_done; 719 720 rx_ring = &rtwpci->rx.rings[RTW89_RXCH_RPQ]; 721 722 spin_lock_bh(&rtwpci->trx_lock); 723 724 cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring); 725 if (cnt == 0) 726 goto out_unlock; 727 728 rtw89_pci_release_tx(rtwdev, rx_ring, cnt); 729 730 out_unlock: 731 spin_unlock_bh(&rtwpci->trx_lock); 732 733 /* always release all RPQ */ 734 work_done = min_t(int, cnt, budget); 735 rtwdev->napi_budget_countdown -= work_done; 736 737 return work_done; 738 } 739 740 static void rtw89_pci_isr_rxd_unavail(struct rtw89_dev *rtwdev, 741 struct rtw89_pci *rtwpci) 742 { 743 struct rtw89_pci_rx_ring *rx_ring; 744 struct rtw89_pci_dma_ring *bd_ring; 745 u32 reg_idx; 746 u16 hw_idx, hw_idx_next, host_idx; 747 int i; 748 749 for (i = 0; i < RTW89_RXCH_NUM; i++) { 750 rx_ring = &rtwpci->rx.rings[i]; 751 bd_ring = &rx_ring->bd_ring; 752 753 reg_idx = rtw89_read32(rtwdev, bd_ring->addr.idx); 754 hw_idx = FIELD_GET(TXBD_HW_IDX_MASK, reg_idx); 755 host_idx = FIELD_GET(TXBD_HOST_IDX_MASK, reg_idx); 756 hw_idx_next = (hw_idx + 1) % bd_ring->len; 757 758 if (hw_idx_next == host_idx) 759 rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "%d RXD unavailable\n", i); 760 761 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 762 "%d RXD unavailable, idx=0x%08x, len=%d\n", 763 i, reg_idx, bd_ring->len); 764 } 765 } 766 767 void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev, 768 struct rtw89_pci *rtwpci, 769 struct rtw89_pci_isrs *isrs) 770 { 771 isrs->halt_c2h_isrs = rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs; 772 isrs->isrs[0] = rtw89_read32(rtwdev, R_AX_PCIE_HISR00) & rtwpci->intrs[0]; 773 isrs->isrs[1] = rtw89_read32(rtwdev, R_AX_PCIE_HISR10) & rtwpci->intrs[1]; 774 775 rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs); 776 rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isrs->isrs[0]); 777 rtw89_write32(rtwdev, R_AX_PCIE_HISR10, isrs->isrs[1]); 778 } 779 EXPORT_SYMBOL(rtw89_pci_recognize_intrs); 780 781 void rtw89_pci_recognize_intrs_v1(struct rtw89_dev *rtwdev, 782 struct rtw89_pci *rtwpci, 783 struct rtw89_pci_isrs *isrs) 784 { 785 isrs->ind_isrs = rtw89_read32(rtwdev, R_AX_PCIE_HISR00_V1) & rtwpci->ind_intrs; 786 isrs->halt_c2h_isrs = isrs->ind_isrs & B_AX_HS0ISR_IND_INT_EN ? 787 rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs : 0; 788 isrs->isrs[0] = isrs->ind_isrs & B_AX_HCI_AXIDMA_INT_EN ? 789 rtw89_read32(rtwdev, R_AX_HAXI_HISR00) & rtwpci->intrs[0] : 0; 790 isrs->isrs[1] = isrs->ind_isrs & B_AX_HS1ISR_IND_INT_EN ? 791 rtw89_read32(rtwdev, R_AX_HISR1) & rtwpci->intrs[1] : 0; 792 793 if (isrs->halt_c2h_isrs) 794 rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs); 795 if (isrs->isrs[0]) 796 rtw89_write32(rtwdev, R_AX_HAXI_HISR00, isrs->isrs[0]); 797 if (isrs->isrs[1]) 798 rtw89_write32(rtwdev, R_AX_HISR1, isrs->isrs[1]); 799 } 800 EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v1); 801 802 void rtw89_pci_recognize_intrs_v2(struct rtw89_dev *rtwdev, 803 struct rtw89_pci *rtwpci, 804 struct rtw89_pci_isrs *isrs) 805 { 806 isrs->ind_isrs = rtw89_read32(rtwdev, R_BE_PCIE_HISR) & rtwpci->ind_intrs; 807 isrs->halt_c2h_isrs = isrs->ind_isrs & B_BE_HS0ISR_IND_INT ? 808 rtw89_read32(rtwdev, R_BE_HISR0) & rtwpci->halt_c2h_intrs : 0; 809 isrs->isrs[0] = isrs->ind_isrs & B_BE_HCI_AXIDMA_INT ? 810 rtw89_read32(rtwdev, R_BE_HAXI_HISR00) & rtwpci->intrs[0] : 0; 811 isrs->isrs[1] = rtw89_read32(rtwdev, R_BE_PCIE_DMA_ISR) & rtwpci->intrs[1]; 812 813 if (isrs->halt_c2h_isrs) 814 rtw89_write32(rtwdev, R_BE_HISR0, isrs->halt_c2h_isrs); 815 if (isrs->isrs[0]) 816 rtw89_write32(rtwdev, R_BE_HAXI_HISR00, isrs->isrs[0]); 817 if (isrs->isrs[1]) 818 rtw89_write32(rtwdev, R_BE_PCIE_DMA_ISR, isrs->isrs[1]); 819 rtw89_write32(rtwdev, R_BE_PCIE_HISR, isrs->ind_isrs); 820 } 821 EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v2); 822 823 void rtw89_pci_recognize_intrs_v3(struct rtw89_dev *rtwdev, 824 struct rtw89_pci *rtwpci, 825 struct rtw89_pci_isrs *isrs) 826 { 827 isrs->ind_isrs = rtw89_read32(rtwdev, R_BE_PCIE_HISR) & rtwpci->ind_intrs; 828 isrs->halt_c2h_isrs = isrs->ind_isrs & B_BE_HS0ISR_IND_INT ? 829 rtw89_read32(rtwdev, R_BE_HISR0) & rtwpci->halt_c2h_intrs : 0; 830 isrs->isrs[1] = rtw89_read32(rtwdev, R_BE_PCIE_DMA_ISR) & rtwpci->intrs[1]; 831 832 /* isrs[0] is not used, so borrow to store RDU status to share common 833 * flow in rtw89_pci_interrupt_threadfn(). 834 */ 835 isrs->isrs[0] = isrs->isrs[1] & (B_BE_PCIE_RDU_CH1_INT | 836 B_BE_PCIE_RDU_CH0_INT); 837 838 if (isrs->halt_c2h_isrs) 839 rtw89_write32(rtwdev, R_BE_HISR0, isrs->halt_c2h_isrs); 840 if (isrs->isrs[1]) 841 rtw89_write32(rtwdev, R_BE_PCIE_DMA_ISR, isrs->isrs[1]); 842 rtw89_write32(rtwdev, R_BE_PCIE_HISR, isrs->ind_isrs); 843 } 844 EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v3); 845 846 void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) 847 { 848 rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs); 849 rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, rtwpci->intrs[0]); 850 rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, rtwpci->intrs[1]); 851 } 852 EXPORT_SYMBOL(rtw89_pci_enable_intr); 853 854 void rtw89_pci_disable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) 855 { 856 rtw89_write32(rtwdev, R_AX_HIMR0, 0); 857 rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, 0); 858 rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, 0); 859 } 860 EXPORT_SYMBOL(rtw89_pci_disable_intr); 861 862 void rtw89_pci_enable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) 863 { 864 rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, rtwpci->ind_intrs); 865 rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs); 866 rtw89_write32(rtwdev, R_AX_HAXI_HIMR00, rtwpci->intrs[0]); 867 rtw89_write32(rtwdev, R_AX_HIMR1, rtwpci->intrs[1]); 868 } 869 EXPORT_SYMBOL(rtw89_pci_enable_intr_v1); 870 871 void rtw89_pci_disable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) 872 { 873 rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, 0); 874 } 875 EXPORT_SYMBOL(rtw89_pci_disable_intr_v1); 876 877 void rtw89_pci_enable_intr_v2(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) 878 { 879 rtw89_write32(rtwdev, R_BE_HIMR0, rtwpci->halt_c2h_intrs); 880 rtw89_write32(rtwdev, R_BE_HAXI_HIMR00, rtwpci->intrs[0]); 881 rtw89_write32(rtwdev, R_BE_PCIE_DMA_IMR_0_V1, rtwpci->intrs[1]); 882 rtw89_write32(rtwdev, R_BE_PCIE_HIMR0, rtwpci->ind_intrs); 883 } 884 EXPORT_SYMBOL(rtw89_pci_enable_intr_v2); 885 886 void rtw89_pci_disable_intr_v2(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) 887 { 888 rtw89_write32(rtwdev, R_BE_PCIE_HIMR0, 0); 889 rtw89_write32(rtwdev, R_BE_PCIE_DMA_IMR_0_V1, 0); 890 } 891 EXPORT_SYMBOL(rtw89_pci_disable_intr_v2); 892 893 void rtw89_pci_enable_intr_v3(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) 894 { 895 rtw89_write32(rtwdev, R_BE_HIMR0, rtwpci->halt_c2h_intrs); 896 rtw89_write32(rtwdev, R_BE_PCIE_DMA_IMR_0_V1, rtwpci->intrs[1]); 897 rtw89_write32(rtwdev, R_BE_PCIE_HIMR0, rtwpci->ind_intrs); 898 } 899 EXPORT_SYMBOL(rtw89_pci_enable_intr_v3); 900 901 void rtw89_pci_disable_intr_v3(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) 902 { 903 rtw89_write32(rtwdev, R_BE_PCIE_HIMR0, 0); 904 rtw89_write32(rtwdev, R_BE_PCIE_DMA_IMR_0_V1, 0); 905 } 906 EXPORT_SYMBOL(rtw89_pci_disable_intr_v3); 907 908 static void rtw89_pci_ops_recovery_start(struct rtw89_dev *rtwdev) 909 { 910 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 911 unsigned long flags; 912 913 spin_lock_irqsave(&rtwpci->irq_lock, flags); 914 rtw89_chip_disable_intr(rtwdev, rtwpci); 915 rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_START); 916 rtw89_chip_enable_intr(rtwdev, rtwpci); 917 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 918 } 919 920 static void rtw89_pci_ops_recovery_complete(struct rtw89_dev *rtwdev) 921 { 922 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 923 unsigned long flags; 924 925 spin_lock_irqsave(&rtwpci->irq_lock, flags); 926 rtw89_chip_disable_intr(rtwdev, rtwpci); 927 rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_COMPLETE); 928 rtw89_chip_enable_intr(rtwdev, rtwpci); 929 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 930 } 931 932 static void rtw89_pci_low_power_interrupt_handler(struct rtw89_dev *rtwdev) 933 { 934 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 935 int budget = NAPI_POLL_WEIGHT; 936 937 /* To prevent RXQ get stuck due to run out of budget. */ 938 rtwdev->napi_budget_countdown = budget; 939 940 rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, budget); 941 rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, budget); 942 } 943 944 static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev) 945 { 946 struct rtw89_dev *rtwdev = dev; 947 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 948 const struct rtw89_pci_info *info = rtwdev->pci_info; 949 const struct rtw89_pci_isr_def *isr_def = info->isr_def; 950 struct rtw89_pci_isrs isrs; 951 unsigned long flags; 952 953 spin_lock_irqsave(&rtwpci->irq_lock, flags); 954 rtw89_chip_recognize_intrs(rtwdev, rtwpci, &isrs); 955 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 956 957 if (unlikely(isrs.isrs[0] & isr_def->isr_rdu)) 958 rtw89_pci_isr_rxd_unavail(rtwdev, rtwpci); 959 960 if (unlikely(isrs.halt_c2h_isrs & isr_def->isr_halt_c2h)) 961 rtw89_ser_notify(rtwdev, rtw89_mac_get_err_status(rtwdev)); 962 963 if (unlikely(isrs.halt_c2h_isrs & isr_def->isr_wdt_timeout)) 964 rtw89_ser_notify(rtwdev, MAC_AX_ERR_L2_ERR_WDT_TIMEOUT_INT); 965 966 if (unlikely(rtwpci->under_recovery)) 967 goto enable_intr; 968 969 if (unlikely(rtwpci->low_power)) { 970 rtw89_pci_low_power_interrupt_handler(rtwdev); 971 goto enable_intr; 972 } 973 974 if (likely(rtwpci->running)) { 975 local_bh_disable(); 976 napi_schedule(&rtwdev->napi); 977 local_bh_enable(); 978 } 979 980 return IRQ_HANDLED; 981 982 enable_intr: 983 spin_lock_irqsave(&rtwpci->irq_lock, flags); 984 if (likely(rtwpci->running)) 985 rtw89_chip_enable_intr(rtwdev, rtwpci); 986 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 987 return IRQ_HANDLED; 988 } 989 990 static irqreturn_t rtw89_pci_interrupt_handler(int irq, void *dev) 991 { 992 struct rtw89_dev *rtwdev = dev; 993 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 994 unsigned long flags; 995 irqreturn_t irqret = IRQ_WAKE_THREAD; 996 997 spin_lock_irqsave(&rtwpci->irq_lock, flags); 998 999 /* If interrupt event is on the road, it is still trigger interrupt 1000 * even we have done pci_stop() to turn off IMR. 1001 */ 1002 if (unlikely(!rtwpci->running)) { 1003 irqret = IRQ_HANDLED; 1004 goto exit; 1005 } 1006 1007 rtw89_chip_disable_intr(rtwdev, rtwpci); 1008 exit: 1009 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 1010 1011 return irqret; 1012 } 1013 1014 #define DEF_TXCHADDRS_TYPE3(gen, ch_idx, txch, v...) \ 1015 [RTW89_TXCH_##ch_idx] = { \ 1016 .num = R_##gen##_##txch##_TXBD_CFG, \ 1017 .idx = R_##gen##_##txch##_TXBD_IDX ##v, \ 1018 .bdram = 0, \ 1019 .desa_l = 0, \ 1020 .desa_h = 0, \ 1021 } 1022 1023 #define DEF_TXCHADDRS_TYPE3_GRP_BASE(gen, ch_idx, txch, grp, v...) \ 1024 [RTW89_TXCH_##ch_idx] = { \ 1025 .num = R_##gen##_##txch##_TXBD_CFG, \ 1026 .idx = R_##gen##_##txch##_TXBD_IDX ##v, \ 1027 .bdram = 0, \ 1028 .desa_l = R_##gen##_##grp##_TXBD_DESA_L, \ 1029 .desa_h = R_##gen##_##grp##_TXBD_DESA_H, \ 1030 } 1031 1032 #define DEF_TXCHADDRS_TYPE2(gen, ch_idx, txch, v...) \ 1033 [RTW89_TXCH_##ch_idx] = { \ 1034 .num = R_##gen##_##txch##_TXBD_NUM ##v, \ 1035 .idx = R_##gen##_##txch##_TXBD_IDX ##v, \ 1036 .bdram = 0, \ 1037 .desa_l = R_##gen##_##txch##_TXBD_DESA_L ##v, \ 1038 .desa_h = R_##gen##_##txch##_TXBD_DESA_H ##v, \ 1039 } 1040 1041 #define DEF_TXCHADDRS_TYPE1(info, txch, v...) \ 1042 [RTW89_TXCH_##txch] = { \ 1043 .num = R_AX_##txch##_TXBD_NUM ##v, \ 1044 .idx = R_AX_##txch##_TXBD_IDX ##v, \ 1045 .bdram = R_AX_##txch##_BDRAM_CTRL ##v, \ 1046 .desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \ 1047 .desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \ 1048 } 1049 1050 #define DEF_TXCHADDRS(info, txch, v...) \ 1051 [RTW89_TXCH_##txch] = { \ 1052 .num = R_AX_##txch##_TXBD_NUM, \ 1053 .idx = R_AX_##txch##_TXBD_IDX, \ 1054 .bdram = R_AX_##txch##_BDRAM_CTRL ##v, \ 1055 .desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \ 1056 .desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \ 1057 } 1058 1059 #define DEF_RXCHADDRS_TYPE3(gen, ch_idx, rxch, v...) \ 1060 [RTW89_RXCH_##ch_idx] = { \ 1061 .num = R_##gen##_RX_##rxch##_RXBD_CONFIG, \ 1062 .idx = R_##gen##_##ch_idx##0_RXBD_IDX ##v, \ 1063 .desa_l = 0, \ 1064 .desa_h = 0, \ 1065 } 1066 1067 #define DEF_RXCHADDRS_TYPE3_GRP_BASE(gen, ch_idx, rxch, grp, v...) \ 1068 [RTW89_RXCH_##ch_idx] = { \ 1069 .num = R_##gen##_RX_##rxch##_RXBD_CONFIG, \ 1070 .idx = R_##gen##_##ch_idx##0_RXBD_IDX ##v, \ 1071 .desa_l = R_##gen##_##grp##_RXBD_DESA_L, \ 1072 .desa_h = R_##gen##_##grp##_RXBD_DESA_H, \ 1073 } 1074 1075 #define DEF_RXCHADDRS(gen, ch_idx, rxch, v...) \ 1076 [RTW89_RXCH_##ch_idx] = { \ 1077 .num = R_##gen##_##rxch##_RXBD_NUM ##v, \ 1078 .idx = R_##gen##_##rxch##_RXBD_IDX ##v, \ 1079 .desa_l = R_##gen##_##rxch##_RXBD_DESA_L ##v, \ 1080 .desa_h = R_##gen##_##rxch##_RXBD_DESA_H ##v, \ 1081 } 1082 1083 const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set = { 1084 .tx = { 1085 DEF_TXCHADDRS(info, ACH0), 1086 DEF_TXCHADDRS(info, ACH1), 1087 DEF_TXCHADDRS(info, ACH2), 1088 DEF_TXCHADDRS(info, ACH3), 1089 DEF_TXCHADDRS(info, ACH4), 1090 DEF_TXCHADDRS(info, ACH5), 1091 DEF_TXCHADDRS(info, ACH6), 1092 DEF_TXCHADDRS(info, ACH7), 1093 DEF_TXCHADDRS(info, CH8), 1094 DEF_TXCHADDRS(info, CH9), 1095 DEF_TXCHADDRS_TYPE1(info, CH10), 1096 DEF_TXCHADDRS_TYPE1(info, CH11), 1097 DEF_TXCHADDRS(info, CH12), 1098 }, 1099 .rx = { 1100 DEF_RXCHADDRS(AX, RXQ, RXQ), 1101 DEF_RXCHADDRS(AX, RPQ, RPQ), 1102 }, 1103 }; 1104 EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set); 1105 1106 const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1 = { 1107 .tx = { 1108 DEF_TXCHADDRS(info, ACH0, _V1), 1109 DEF_TXCHADDRS(info, ACH1, _V1), 1110 DEF_TXCHADDRS(info, ACH2, _V1), 1111 DEF_TXCHADDRS(info, ACH3, _V1), 1112 DEF_TXCHADDRS(info, ACH4, _V1), 1113 DEF_TXCHADDRS(info, ACH5, _V1), 1114 DEF_TXCHADDRS(info, ACH6, _V1), 1115 DEF_TXCHADDRS(info, ACH7, _V1), 1116 DEF_TXCHADDRS(info, CH8, _V1), 1117 DEF_TXCHADDRS(info, CH9, _V1), 1118 DEF_TXCHADDRS_TYPE1(info, CH10, _V1), 1119 DEF_TXCHADDRS_TYPE1(info, CH11, _V1), 1120 DEF_TXCHADDRS(info, CH12, _V1), 1121 }, 1122 .rx = { 1123 DEF_RXCHADDRS(AX, RXQ, RXQ, _V1), 1124 DEF_RXCHADDRS(AX, RPQ, RPQ, _V1), 1125 }, 1126 }; 1127 EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_v1); 1128 1129 const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_be = { 1130 .tx = { 1131 DEF_TXCHADDRS_TYPE2(BE, ACH0, CH0, _V1), 1132 DEF_TXCHADDRS_TYPE2(BE, ACH1, CH1, _V1), 1133 DEF_TXCHADDRS_TYPE2(BE, ACH2, CH2, _V1), 1134 DEF_TXCHADDRS_TYPE2(BE, ACH3, CH3, _V1), 1135 DEF_TXCHADDRS_TYPE2(BE, ACH4, CH4, _V1), 1136 DEF_TXCHADDRS_TYPE2(BE, ACH5, CH5, _V1), 1137 DEF_TXCHADDRS_TYPE2(BE, ACH6, CH6, _V1), 1138 DEF_TXCHADDRS_TYPE2(BE, ACH7, CH7, _V1), 1139 DEF_TXCHADDRS_TYPE2(BE, CH8, CH8, _V1), 1140 DEF_TXCHADDRS_TYPE2(BE, CH9, CH9, _V1), 1141 DEF_TXCHADDRS_TYPE2(BE, CH10, CH10, _V1), 1142 DEF_TXCHADDRS_TYPE2(BE, CH11, CH11, _V1), 1143 DEF_TXCHADDRS_TYPE2(BE, CH12, CH12, _V1), 1144 }, 1145 .rx = { 1146 DEF_RXCHADDRS(BE, RXQ, RXQ0, _V1), 1147 DEF_RXCHADDRS(BE, RPQ, RPQ0, _V1), 1148 }, 1149 }; 1150 EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_be); 1151 1152 const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_be_v1 = { 1153 .tx = { 1154 DEF_TXCHADDRS_TYPE3_GRP_BASE(BE, ACH0, CH0, ACQ, _V1), 1155 /* no CH1 */ 1156 DEF_TXCHADDRS_TYPE3(BE, ACH2, CH2, _V1), 1157 /* no CH3 */ 1158 DEF_TXCHADDRS_TYPE3(BE, ACH4, CH4, _V1), 1159 /* no CH5 */ 1160 DEF_TXCHADDRS_TYPE3(BE, ACH6, CH6, _V1), 1161 /* no CH7 */ 1162 DEF_TXCHADDRS_TYPE3_GRP_BASE(BE, CH8, CH8, NACQ, _V1), 1163 /* no CH9 */ 1164 DEF_TXCHADDRS_TYPE3(BE, CH10, CH10, _V1), 1165 /* no CH11 */ 1166 DEF_TXCHADDRS_TYPE3(BE, CH12, CH12, _V1), 1167 }, 1168 .rx = { 1169 DEF_RXCHADDRS_TYPE3_GRP_BASE(BE, RXQ, CH0, HOST0, _V1), 1170 DEF_RXCHADDRS_TYPE3(BE, RPQ, CH1, _V1), 1171 }, 1172 }; 1173 EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_be_v1); 1174 1175 #undef DEF_TXCHADDRS_TYPE3 1176 #undef DEF_TXCHADDRS_TYPE3_GRP_BASE 1177 #undef DEF_TXCHADDRS_TYPE2 1178 #undef DEF_TXCHADDRS_TYPE1 1179 #undef DEF_TXCHADDRS 1180 #undef DEF_RXCHADDRS_TYPE3 1181 #undef DEF_RXCHADDRS_TYPE3_GRP_BASE 1182 #undef DEF_RXCHADDRS 1183 1184 static int rtw89_pci_get_txch_addrs(struct rtw89_dev *rtwdev, 1185 enum rtw89_tx_channel txch, 1186 const struct rtw89_pci_ch_dma_addr **addr) 1187 { 1188 const struct rtw89_pci_info *info = rtwdev->pci_info; 1189 1190 if (txch >= RTW89_TXCH_NUM) 1191 return -EINVAL; 1192 1193 *addr = &info->dma_addr_set->tx[txch]; 1194 1195 return 0; 1196 } 1197 1198 static int rtw89_pci_get_rxch_addrs(struct rtw89_dev *rtwdev, 1199 enum rtw89_rx_channel rxch, 1200 const struct rtw89_pci_ch_dma_addr **addr) 1201 { 1202 const struct rtw89_pci_info *info = rtwdev->pci_info; 1203 1204 if (rxch >= RTW89_RXCH_NUM) 1205 return -EINVAL; 1206 1207 *addr = &info->dma_addr_set->rx[rxch]; 1208 1209 return 0; 1210 } 1211 1212 static u32 rtw89_pci_get_avail_txbd_num(struct rtw89_pci_tx_ring *ring) 1213 { 1214 struct rtw89_pci_dma_ring *bd_ring = &ring->bd_ring; 1215 1216 /* reserved 1 desc check ring is full or not */ 1217 if (bd_ring->rp > bd_ring->wp) 1218 return bd_ring->rp - bd_ring->wp - 1; 1219 1220 return bd_ring->len - (bd_ring->wp - bd_ring->rp) - 1; 1221 } 1222 1223 static 1224 u32 __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(struct rtw89_dev *rtwdev) 1225 { 1226 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1227 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx.rings[RTW89_TXCH_CH12]; 1228 u32 cnt; 1229 1230 spin_lock_bh(&rtwpci->trx_lock); 1231 rtw89_pci_reclaim_tx_fwcmd(rtwdev, rtwpci); 1232 cnt = rtw89_pci_get_avail_txbd_num(tx_ring); 1233 spin_unlock_bh(&rtwpci->trx_lock); 1234 1235 return cnt; 1236 } 1237 1238 static 1239 u32 __rtw89_pci_check_and_reclaim_tx_resource_noio(struct rtw89_dev *rtwdev, 1240 u8 txch) 1241 { 1242 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1243 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx.rings[txch]; 1244 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 1245 u32 cnt; 1246 1247 spin_lock_bh(&rtwpci->trx_lock); 1248 cnt = rtw89_pci_get_avail_txbd_num(tx_ring); 1249 if (txch != RTW89_TXCH_CH12) 1250 cnt = min(cnt, wd_ring->curr_num); 1251 spin_unlock_bh(&rtwpci->trx_lock); 1252 1253 return cnt; 1254 } 1255 1256 static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev, 1257 u8 txch) 1258 { 1259 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1260 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx.rings[txch]; 1261 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 1262 const struct rtw89_chip_info *chip = rtwdev->chip; 1263 u32 bd_cnt, wd_cnt, min_cnt = 0; 1264 struct rtw89_pci_rx_ring *rx_ring; 1265 enum rtw89_debug_mask debug_mask; 1266 u32 cnt; 1267 1268 rx_ring = &rtwpci->rx.rings[RTW89_RXCH_RPQ]; 1269 1270 spin_lock_bh(&rtwpci->trx_lock); 1271 bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring); 1272 wd_cnt = wd_ring->curr_num; 1273 1274 if (wd_cnt == 0 || bd_cnt == 0) { 1275 cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring); 1276 if (cnt) 1277 rtw89_pci_release_tx(rtwdev, rx_ring, cnt); 1278 else if (wd_cnt == 0) 1279 goto out_unlock; 1280 1281 bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring); 1282 if (bd_cnt == 0) 1283 rtw89_pci_reclaim_txbd(rtwdev, tx_ring); 1284 } 1285 1286 bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring); 1287 wd_cnt = wd_ring->curr_num; 1288 min_cnt = min(bd_cnt, wd_cnt); 1289 if (min_cnt == 0) { 1290 /* This message can be frequently shown in low power mode or 1291 * high traffic with small FIFO chips, and we have recognized it as normal 1292 * behavior, so print with mask RTW89_DBG_TXRX in these situations. 1293 */ 1294 if (rtwpci->low_power || chip->small_fifo_size) 1295 debug_mask = RTW89_DBG_TXRX; 1296 else 1297 debug_mask = RTW89_DBG_UNEXP; 1298 1299 rtw89_debug(rtwdev, debug_mask, 1300 "still no tx resource after reclaim: wd_cnt=%d bd_cnt=%d\n", 1301 wd_cnt, bd_cnt); 1302 } 1303 1304 out_unlock: 1305 spin_unlock_bh(&rtwpci->trx_lock); 1306 1307 return min_cnt; 1308 } 1309 1310 static u32 rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev, 1311 u8 txch) 1312 { 1313 if (rtwdev->hci.paused) 1314 return __rtw89_pci_check_and_reclaim_tx_resource_noio(rtwdev, txch); 1315 1316 if (txch == RTW89_TXCH_CH12) 1317 return __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(rtwdev); 1318 1319 return __rtw89_pci_check_and_reclaim_tx_resource(rtwdev, txch); 1320 } 1321 1322 static void __rtw89_pci_tx_kick_off(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring) 1323 { 1324 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1325 struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring; 1326 u32 host_idx, addr; 1327 1328 spin_lock_bh(&rtwpci->trx_lock); 1329 1330 addr = bd_ring->addr.idx; 1331 host_idx = bd_ring->wp; 1332 rtw89_write16(rtwdev, addr, host_idx); 1333 1334 spin_unlock_bh(&rtwpci->trx_lock); 1335 } 1336 1337 static void rtw89_pci_tx_bd_ring_update(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring, 1338 int n_txbd) 1339 { 1340 struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring; 1341 u32 host_idx, len; 1342 1343 len = bd_ring->len; 1344 host_idx = bd_ring->wp + n_txbd; 1345 host_idx = host_idx < len ? host_idx : host_idx - len; 1346 1347 bd_ring->wp = host_idx; 1348 } 1349 1350 static void rtw89_pci_ops_tx_kick_off(struct rtw89_dev *rtwdev, u8 txch) 1351 { 1352 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1353 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx.rings[txch]; 1354 1355 if (rtwdev->hci.paused) { 1356 set_bit(txch, rtwpci->kick_map); 1357 return; 1358 } 1359 1360 __rtw89_pci_tx_kick_off(rtwdev, tx_ring); 1361 } 1362 1363 static void rtw89_pci_tx_kick_off_pending(struct rtw89_dev *rtwdev) 1364 { 1365 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1366 struct rtw89_pci_tx_ring *tx_ring; 1367 int txch; 1368 1369 for (txch = 0; txch < RTW89_TXCH_NUM; txch++) { 1370 if (!test_and_clear_bit(txch, rtwpci->kick_map)) 1371 continue; 1372 1373 tx_ring = &rtwpci->tx.rings[txch]; 1374 __rtw89_pci_tx_kick_off(rtwdev, tx_ring); 1375 } 1376 } 1377 1378 static void __pci_flush_txch(struct rtw89_dev *rtwdev, u8 txch, bool drop) 1379 { 1380 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1381 struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx.rings[txch]; 1382 struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring; 1383 u32 cur_idx, cur_rp; 1384 u8 i; 1385 1386 /* Because the time taked by the I/O is a bit dynamic, it's hard to 1387 * define a reasonable fixed total timeout to use read_poll_timeout* 1388 * helper. Instead, we can ensure a reasonable polling times, so we 1389 * just use for loop with udelay here. 1390 */ 1391 for (i = 0; i < 60; i++) { 1392 cur_idx = rtw89_read32(rtwdev, bd_ring->addr.idx); 1393 cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx); 1394 if (cur_rp == bd_ring->wp) 1395 return; 1396 1397 udelay(1); 1398 } 1399 1400 if (!drop) 1401 rtw89_info(rtwdev, "timed out to flush pci txch: %d\n", txch); 1402 } 1403 1404 static void __rtw89_pci_ops_flush_txchs(struct rtw89_dev *rtwdev, u32 txchs, 1405 bool drop) 1406 { 1407 const struct rtw89_pci_info *info = rtwdev->pci_info; 1408 u8 i; 1409 1410 for (i = 0; i < RTW89_TXCH_NUM; i++) { 1411 /* It may be unnecessary to flush FWCMD queue. */ 1412 if (i == RTW89_TXCH_CH12) 1413 continue; 1414 if (info->tx_dma_ch_mask & BIT(i)) 1415 continue; 1416 1417 if (txchs & BIT(i)) 1418 __pci_flush_txch(rtwdev, i, drop); 1419 } 1420 } 1421 1422 static void rtw89_pci_ops_flush_queues(struct rtw89_dev *rtwdev, u32 queues, 1423 bool drop) 1424 { 1425 __rtw89_pci_ops_flush_txchs(rtwdev, BIT(RTW89_TXCH_NUM) - 1, drop); 1426 } 1427 1428 u32 rtw89_pci_fill_txaddr_info(struct rtw89_dev *rtwdev, 1429 void *txaddr_info_addr, u32 total_len, 1430 dma_addr_t dma, u8 *add_info_nr) 1431 { 1432 struct rtw89_pci_tx_addr_info_32 *txaddr_info = txaddr_info_addr; 1433 __le16 option; 1434 1435 txaddr_info->length = cpu_to_le16(total_len); 1436 option = cpu_to_le16(RTW89_PCI_ADDR_MSDU_LS | RTW89_PCI_ADDR_NUM(1)); 1437 option |= le16_encode_bits(upper_32_bits(dma), RTW89_PCI_ADDR_HIGH_MASK); 1438 txaddr_info->option = option; 1439 txaddr_info->dma = cpu_to_le32(dma); 1440 1441 *add_info_nr = 1; 1442 1443 return sizeof(*txaddr_info); 1444 } 1445 EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info); 1446 1447 u32 rtw89_pci_fill_txaddr_info_v1(struct rtw89_dev *rtwdev, 1448 void *txaddr_info_addr, u32 total_len, 1449 dma_addr_t dma, u8 *add_info_nr) 1450 { 1451 struct rtw89_pci_tx_addr_info_32_v1 *txaddr_info = txaddr_info_addr; 1452 u32 remain = total_len; 1453 u32 len; 1454 u16 length_option; 1455 int n; 1456 1457 for (n = 0; n < RTW89_TXADDR_INFO_NR_V1 && remain; n++) { 1458 len = remain >= TXADDR_INFO_LENTHG_V1_MAX ? 1459 TXADDR_INFO_LENTHG_V1_MAX : remain; 1460 remain -= len; 1461 1462 length_option = FIELD_PREP(B_PCIADDR_LEN_V1_MASK, len) | 1463 FIELD_PREP(B_PCIADDR_HIGH_SEL_V1_MASK, 0) | 1464 FIELD_PREP(B_PCIADDR_LS_V1_MASK, remain == 0); 1465 length_option |= u16_encode_bits(upper_32_bits(dma), 1466 B_PCIADDR_HIGH_SEL_V1_MASK); 1467 txaddr_info->length_opt = cpu_to_le16(length_option); 1468 txaddr_info->dma_low_lsb = cpu_to_le16(FIELD_GET(GENMASK(15, 0), dma)); 1469 txaddr_info->dma_low_msb = cpu_to_le16(FIELD_GET(GENMASK(31, 16), dma)); 1470 1471 dma += len; 1472 txaddr_info++; 1473 } 1474 1475 WARN_ONCE(remain, "length overflow remain=%u total_len=%u", 1476 remain, total_len); 1477 1478 *add_info_nr = n; 1479 1480 return n * sizeof(*txaddr_info); 1481 } 1482 EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info_v1); 1483 1484 static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev, 1485 struct rtw89_pci_tx_ring *tx_ring, 1486 struct rtw89_pci_tx_wd *txwd, 1487 struct rtw89_core_tx_request *tx_req) 1488 { 1489 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1490 const struct rtw89_chip_info *chip = rtwdev->chip; 1491 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; 1492 struct rtw89_pci_tx_wp_info *txwp_info; 1493 void *txaddr_info_addr; 1494 struct pci_dev *pdev = rtwpci->pdev; 1495 struct sk_buff *skb = tx_req->skb; 1496 struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb); 1497 bool en_wd_info = desc_info->en_wd_info; 1498 u32 txwd_len; 1499 u32 txwp_len; 1500 u32 txaddr_info_len; 1501 dma_addr_t dma; 1502 int ret; 1503 1504 dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE); 1505 if (dma_mapping_error(&pdev->dev, dma)) { 1506 rtw89_err(rtwdev, "failed to map skb dma data\n"); 1507 ret = -EBUSY; 1508 goto err; 1509 } 1510 1511 tx_data->dma = dma; 1512 1513 txwp_len = sizeof(*txwp_info); 1514 txwd_len = chip->txwd_body_size; 1515 txwd_len += en_wd_info ? chip->txwd_info_size : 0; 1516 1517 txwp_info = txwd->vaddr + txwd_len; 1518 txwp_info->seq0 = cpu_to_le16(txwd->seq | RTW89_PCI_TXWP_VALID); 1519 txwp_info->seq1 = 0; 1520 txwp_info->seq2 = 0; 1521 txwp_info->seq3 = 0; 1522 1523 tx_ring->tx_cnt++; 1524 txaddr_info_addr = txwd->vaddr + txwd_len + txwp_len; 1525 txaddr_info_len = 1526 rtw89_chip_fill_txaddr_info(rtwdev, txaddr_info_addr, skb->len, 1527 dma, &desc_info->addr_info_nr); 1528 1529 txwd->len = txwd_len + txwp_len + txaddr_info_len; 1530 1531 rtw89_chip_fill_txdesc(rtwdev, desc_info, txwd->vaddr); 1532 1533 skb_queue_tail(&txwd->queue, skb); 1534 1535 return 0; 1536 1537 err: 1538 return ret; 1539 } 1540 1541 static int rtw89_pci_fwcmd_submit(struct rtw89_dev *rtwdev, 1542 struct rtw89_pci_tx_ring *tx_ring, 1543 struct rtw89_pci_tx_bd_32 *txbd, 1544 struct rtw89_core_tx_request *tx_req) 1545 { 1546 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1547 const struct rtw89_chip_info *chip = rtwdev->chip; 1548 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; 1549 void *txdesc; 1550 int txdesc_size = chip->h2c_desc_size; 1551 struct pci_dev *pdev = rtwpci->pdev; 1552 struct sk_buff *skb = tx_req->skb; 1553 struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb); 1554 dma_addr_t dma; 1555 __le16 opt; 1556 1557 txdesc = skb_push(skb, txdesc_size); 1558 memset(txdesc, 0, txdesc_size); 1559 rtw89_chip_fill_txdesc_fwcmd(rtwdev, desc_info, txdesc); 1560 1561 dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE); 1562 if (dma_mapping_error(&pdev->dev, dma)) { 1563 rtw89_err(rtwdev, "failed to map fwcmd dma data\n"); 1564 return -EBUSY; 1565 } 1566 1567 tx_data->dma = dma; 1568 opt = cpu_to_le16(RTW89_PCI_TXBD_OPT_LS); 1569 opt |= le16_encode_bits(upper_32_bits(dma), RTW89_PCI_TXBD_OPT_DMA_HI); 1570 txbd->opt = opt; 1571 txbd->length = cpu_to_le16(skb->len); 1572 txbd->dma = cpu_to_le32(tx_data->dma); 1573 skb_queue_tail(&rtwpci->h2c_queue, skb); 1574 1575 rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1); 1576 1577 return 0; 1578 } 1579 1580 static int rtw89_pci_txbd_submit(struct rtw89_dev *rtwdev, 1581 struct rtw89_pci_tx_ring *tx_ring, 1582 struct rtw89_pci_tx_bd_32 *txbd, 1583 struct rtw89_core_tx_request *tx_req) 1584 { 1585 struct rtw89_pci_tx_wd *txwd; 1586 __le16 opt; 1587 int ret; 1588 1589 /* FWCMD queue doesn't have wd pages. Instead, it submits the CMD 1590 * buffer with WD BODY only. So here we don't need to check the free 1591 * pages of the wd ring. 1592 */ 1593 if (tx_ring->txch == RTW89_TXCH_CH12) 1594 return rtw89_pci_fwcmd_submit(rtwdev, tx_ring, txbd, tx_req); 1595 1596 txwd = rtw89_pci_dequeue_txwd(tx_ring); 1597 if (!txwd) { 1598 rtw89_err(rtwdev, "no available TXWD\n"); 1599 ret = -ENOSPC; 1600 goto err; 1601 } 1602 1603 ret = rtw89_pci_txwd_submit(rtwdev, tx_ring, txwd, tx_req); 1604 if (ret) { 1605 rtw89_err(rtwdev, "failed to submit TXWD %d\n", txwd->seq); 1606 goto err_enqueue_wd; 1607 } 1608 1609 list_add_tail(&txwd->list, &tx_ring->busy_pages); 1610 1611 opt = cpu_to_le16(RTW89_PCI_TXBD_OPT_LS); 1612 opt |= le16_encode_bits(upper_32_bits(txwd->paddr), RTW89_PCI_TXBD_OPT_DMA_HI); 1613 txbd->opt = opt; 1614 txbd->length = cpu_to_le16(txwd->len); 1615 txbd->dma = cpu_to_le32(txwd->paddr); 1616 1617 rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1); 1618 1619 return 0; 1620 1621 err_enqueue_wd: 1622 rtw89_pci_enqueue_txwd(tx_ring, txwd); 1623 err: 1624 return ret; 1625 } 1626 1627 static int rtw89_pci_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req, 1628 u8 txch) 1629 { 1630 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1631 struct rtw89_pci_tx_ring *tx_ring; 1632 struct rtw89_pci_tx_bd_32 *txbd; 1633 u32 n_avail_txbd; 1634 int ret = 0; 1635 1636 /* check the tx type and dma channel for fw cmd queue */ 1637 if ((txch == RTW89_TXCH_CH12 || 1638 tx_req->tx_type == RTW89_CORE_TX_TYPE_FWCMD) && 1639 (txch != RTW89_TXCH_CH12 || 1640 tx_req->tx_type != RTW89_CORE_TX_TYPE_FWCMD)) { 1641 rtw89_err(rtwdev, "only fw cmd uses dma channel 12\n"); 1642 return -EINVAL; 1643 } 1644 1645 tx_ring = &rtwpci->tx.rings[txch]; 1646 spin_lock_bh(&rtwpci->trx_lock); 1647 1648 n_avail_txbd = rtw89_pci_get_avail_txbd_num(tx_ring); 1649 if (n_avail_txbd == 0) { 1650 rtw89_err(rtwdev, "no available TXBD\n"); 1651 ret = -ENOSPC; 1652 goto err_unlock; 1653 } 1654 1655 txbd = rtw89_pci_get_next_txbd(tx_ring); 1656 ret = rtw89_pci_txbd_submit(rtwdev, tx_ring, txbd, tx_req); 1657 if (ret) { 1658 rtw89_err(rtwdev, "failed to submit TXBD\n"); 1659 goto err_unlock; 1660 } 1661 1662 spin_unlock_bh(&rtwpci->trx_lock); 1663 return 0; 1664 1665 err_unlock: 1666 spin_unlock_bh(&rtwpci->trx_lock); 1667 return ret; 1668 } 1669 1670 static int rtw89_pci_ops_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req) 1671 { 1672 struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; 1673 int ret; 1674 1675 ret = rtw89_pci_tx_write(rtwdev, tx_req, desc_info->ch_dma); 1676 if (ret) { 1677 rtw89_err(rtwdev, "failed to TX Queue %d\n", desc_info->ch_dma); 1678 return ret; 1679 } 1680 1681 return 0; 1682 } 1683 1684 const struct rtw89_pci_bd_ram rtw89_bd_ram_table_dual[RTW89_TXCH_NUM] = { 1685 [RTW89_TXCH_ACH0] = {.start_idx = 0, .max_num = 5, .min_num = 2}, 1686 [RTW89_TXCH_ACH1] = {.start_idx = 5, .max_num = 5, .min_num = 2}, 1687 [RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2}, 1688 [RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2}, 1689 [RTW89_TXCH_ACH4] = {.start_idx = 20, .max_num = 5, .min_num = 2}, 1690 [RTW89_TXCH_ACH5] = {.start_idx = 25, .max_num = 5, .min_num = 2}, 1691 [RTW89_TXCH_ACH6] = {.start_idx = 30, .max_num = 5, .min_num = 2}, 1692 [RTW89_TXCH_ACH7] = {.start_idx = 35, .max_num = 5, .min_num = 2}, 1693 [RTW89_TXCH_CH8] = {.start_idx = 40, .max_num = 5, .min_num = 1}, 1694 [RTW89_TXCH_CH9] = {.start_idx = 45, .max_num = 5, .min_num = 1}, 1695 [RTW89_TXCH_CH10] = {.start_idx = 50, .max_num = 5, .min_num = 1}, 1696 [RTW89_TXCH_CH11] = {.start_idx = 55, .max_num = 5, .min_num = 1}, 1697 [RTW89_TXCH_CH12] = {.start_idx = 60, .max_num = 4, .min_num = 1}, 1698 }; 1699 EXPORT_SYMBOL(rtw89_bd_ram_table_dual); 1700 1701 const struct rtw89_pci_bd_ram rtw89_bd_ram_table_single[RTW89_TXCH_NUM] = { 1702 [RTW89_TXCH_ACH0] = {.start_idx = 0, .max_num = 5, .min_num = 2}, 1703 [RTW89_TXCH_ACH1] = {.start_idx = 5, .max_num = 5, .min_num = 2}, 1704 [RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2}, 1705 [RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2}, 1706 [RTW89_TXCH_CH8] = {.start_idx = 20, .max_num = 4, .min_num = 1}, 1707 [RTW89_TXCH_CH9] = {.start_idx = 24, .max_num = 4, .min_num = 1}, 1708 [RTW89_TXCH_CH12] = {.start_idx = 28, .max_num = 4, .min_num = 1}, 1709 }; 1710 EXPORT_SYMBOL(rtw89_bd_ram_table_single); 1711 1712 static void rtw89_pci_init_wp_16sel(struct rtw89_dev *rtwdev) 1713 { 1714 const struct rtw89_pci_info *info = rtwdev->pci_info; 1715 u32 addr = info->wp_sel_addr; 1716 u32 val; 1717 int i; 1718 1719 if (!info->wp_sel_addr) 1720 return; 1721 1722 for (i = 0; i < 16; i += 4) { 1723 val = u32_encode_bits(i + 0, MASKBYTE0) | 1724 u32_encode_bits(i + 1, MASKBYTE1) | 1725 u32_encode_bits(i + 2, MASKBYTE2) | 1726 u32_encode_bits(i + 3, MASKBYTE3); 1727 rtw89_write32(rtwdev, addr + i, val); 1728 } 1729 } 1730 1731 static u16 rtw89_pci_enc_bd_cfg(struct rtw89_dev *rtwdev, u16 bd_num, 1732 u32 dma_offset) 1733 { 1734 u16 dma_offset_sel; 1735 u16 num_sel; 1736 1737 /* B_BE_TX_NUM_SEL_MASK, B_BE_RX_NUM_SEL_MASK: 1738 * 0 -> 0 1739 * 1 -> 64 = 2^6 1740 * 2 -> 128 = 2^7 1741 * ... 1742 * 7 -> 4096 = 2^12 1743 */ 1744 num_sel = ilog2(bd_num) - 5; 1745 1746 if (hweight16(bd_num) != 1) 1747 rtw89_warn(rtwdev, "bd_num %u is not power of 2\n", bd_num); 1748 1749 /* B_BE_TX_START_OFFSET_MASK, B_BE_RX_START_OFFSET_MASK: 1750 * 0 -> 0 = 0 * 2^9 1751 * 1 -> 512 = 1 * 2^9 1752 * 2 -> 1024 = 2 * 2^9 1753 * 3 -> 1536 = 3 * 2^9 1754 * ... 1755 * 255 -> 130560 = 255 * 2^9 1756 */ 1757 dma_offset_sel = dma_offset >> 9; 1758 1759 if (dma_offset % 512) 1760 rtw89_warn(rtwdev, "offset %u is not multiple of 512\n", dma_offset); 1761 1762 return u16_encode_bits(num_sel, B_BE_TX_NUM_SEL_MASK) | 1763 u16_encode_bits(dma_offset_sel, B_BE_TX_START_OFFSET_MASK); 1764 } 1765 1766 static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev) 1767 { 1768 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1769 const struct rtw89_pci_info *info = rtwdev->pci_info; 1770 const struct rtw89_pci_bd_ram *bd_ram_table = *info->bd_ram_table; 1771 struct rtw89_pci_tx_ring *tx_ring; 1772 struct rtw89_pci_rx_ring *rx_ring; 1773 struct rtw89_pci_dma_ring *bd_ring; 1774 const struct rtw89_pci_bd_ram *bd_ram; 1775 dma_addr_t group_dma_base = 0; 1776 u16 num_or_offset; 1777 u32 addr_desa_l; 1778 u32 addr_bdram; 1779 u32 addr_num; 1780 u32 addr_idx; 1781 u32 val32; 1782 int i; 1783 1784 for (i = 0; i < RTW89_TXCH_NUM; i++) { 1785 if (info->tx_dma_ch_mask & BIT(i)) 1786 continue; 1787 1788 tx_ring = &rtwpci->tx.rings[i]; 1789 bd_ring = &tx_ring->bd_ring; 1790 bd_ram = bd_ram_table ? &bd_ram_table[i] : NULL; 1791 addr_num = bd_ring->addr.num; 1792 addr_bdram = bd_ring->addr.bdram; 1793 addr_desa_l = bd_ring->addr.desa_l; 1794 bd_ring->wp = 0; 1795 bd_ring->rp = 0; 1796 1797 if (info->group_bd_addr) { 1798 if (addr_desa_l) 1799 group_dma_base = bd_ring->dma; 1800 1801 num_or_offset = 1802 rtw89_pci_enc_bd_cfg(rtwdev, bd_ring->len, 1803 bd_ring->dma - group_dma_base); 1804 } else { 1805 num_or_offset = bd_ring->len; 1806 } 1807 rtw89_write16(rtwdev, addr_num, num_or_offset); 1808 1809 if (addr_bdram && bd_ram) { 1810 val32 = FIELD_PREP(BDRAM_SIDX_MASK, bd_ram->start_idx) | 1811 FIELD_PREP(BDRAM_MAX_MASK, bd_ram->max_num) | 1812 FIELD_PREP(BDRAM_MIN_MASK, bd_ram->min_num); 1813 1814 rtw89_write32(rtwdev, addr_bdram, val32); 1815 } 1816 if (addr_desa_l) { 1817 rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma); 1818 rtw89_write32(rtwdev, addr_desa_l + 4, upper_32_bits(bd_ring->dma)); 1819 } 1820 } 1821 1822 for (i = 0; i < RTW89_RXCH_NUM; i++) { 1823 rx_ring = &rtwpci->rx.rings[i]; 1824 bd_ring = &rx_ring->bd_ring; 1825 addr_num = bd_ring->addr.num; 1826 addr_idx = bd_ring->addr.idx; 1827 addr_desa_l = bd_ring->addr.desa_l; 1828 if (info->rx_ring_eq_is_full) 1829 bd_ring->wp = bd_ring->len - 1; 1830 else 1831 bd_ring->wp = 0; 1832 bd_ring->rp = 0; 1833 rx_ring->diliver_skb = NULL; 1834 rx_ring->diliver_desc.ready = false; 1835 rx_ring->target_rx_tag = 0; 1836 1837 if (info->group_bd_addr) { 1838 if (addr_desa_l) 1839 group_dma_base = bd_ring->dma; 1840 1841 num_or_offset = 1842 rtw89_pci_enc_bd_cfg(rtwdev, bd_ring->len, 1843 bd_ring->dma - group_dma_base); 1844 } else { 1845 num_or_offset = bd_ring->len; 1846 } 1847 rtw89_write16(rtwdev, addr_num, num_or_offset); 1848 1849 if (addr_desa_l) { 1850 rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma); 1851 rtw89_write32(rtwdev, addr_desa_l + 4, upper_32_bits(bd_ring->dma)); 1852 } 1853 1854 if (info->rx_ring_eq_is_full) 1855 rtw89_write16(rtwdev, addr_idx, bd_ring->wp); 1856 } 1857 1858 rtw89_pci_init_wp_16sel(rtwdev); 1859 } 1860 1861 static void rtw89_pci_release_tx_ring(struct rtw89_dev *rtwdev, 1862 struct rtw89_pci_tx_ring *tx_ring) 1863 { 1864 rtw89_pci_release_busy_txwd(rtwdev, tx_ring); 1865 rtw89_pci_release_pending_txwd_skb(rtwdev, tx_ring); 1866 } 1867 1868 void rtw89_pci_ops_reset(struct rtw89_dev *rtwdev) 1869 { 1870 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1871 const struct rtw89_pci_info *info = rtwdev->pci_info; 1872 int txch; 1873 1874 rtw89_pci_reset_trx_rings(rtwdev); 1875 1876 spin_lock_bh(&rtwpci->trx_lock); 1877 for (txch = 0; txch < RTW89_TXCH_NUM; txch++) { 1878 if (info->tx_dma_ch_mask & BIT(txch)) 1879 continue; 1880 if (txch == RTW89_TXCH_CH12) { 1881 rtw89_pci_release_fwcmd(rtwdev, rtwpci, 1882 skb_queue_len(&rtwpci->h2c_queue), true); 1883 continue; 1884 } 1885 rtw89_pci_release_tx_ring(rtwdev, &rtwpci->tx.rings[txch]); 1886 } 1887 spin_unlock_bh(&rtwpci->trx_lock); 1888 } 1889 1890 static void rtw89_pci_enable_intr_lock(struct rtw89_dev *rtwdev) 1891 { 1892 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1893 unsigned long flags; 1894 1895 spin_lock_irqsave(&rtwpci->irq_lock, flags); 1896 rtwpci->running = true; 1897 rtw89_chip_enable_intr(rtwdev, rtwpci); 1898 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 1899 } 1900 1901 static void rtw89_pci_disable_intr_lock(struct rtw89_dev *rtwdev) 1902 { 1903 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1904 unsigned long flags; 1905 1906 spin_lock_irqsave(&rtwpci->irq_lock, flags); 1907 rtwpci->running = false; 1908 rtw89_chip_disable_intr(rtwdev, rtwpci); 1909 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 1910 } 1911 1912 static int rtw89_pci_ops_start(struct rtw89_dev *rtwdev) 1913 { 1914 rtw89_core_napi_start(rtwdev); 1915 rtw89_pci_enable_intr_lock(rtwdev); 1916 1917 return 0; 1918 } 1919 1920 static void rtw89_pci_ops_stop(struct rtw89_dev *rtwdev) 1921 { 1922 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1923 struct pci_dev *pdev = rtwpci->pdev; 1924 1925 rtw89_pci_disable_intr_lock(rtwdev); 1926 synchronize_irq(pdev->irq); 1927 rtw89_core_napi_stop(rtwdev); 1928 } 1929 1930 static void rtw89_pci_ops_pause(struct rtw89_dev *rtwdev, bool pause) 1931 { 1932 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1933 struct pci_dev *pdev = rtwpci->pdev; 1934 1935 if (pause) { 1936 rtw89_pci_disable_intr_lock(rtwdev); 1937 synchronize_irq(pdev->irq); 1938 if (test_bit(RTW89_FLAG_NAPI_RUNNING, rtwdev->flags)) 1939 napi_synchronize(&rtwdev->napi); 1940 } else { 1941 rtw89_pci_enable_intr_lock(rtwdev); 1942 rtw89_pci_tx_kick_off_pending(rtwdev); 1943 } 1944 } 1945 1946 static 1947 void rtw89_pci_switch_bd_idx_addr(struct rtw89_dev *rtwdev, bool low_power) 1948 { 1949 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1950 const struct rtw89_pci_info *info = rtwdev->pci_info; 1951 const struct rtw89_pci_bd_idx_addr *bd_idx_addr = info->bd_idx_addr_low_power; 1952 const struct rtw89_pci_ch_dma_addr_set *dma_addr_set = info->dma_addr_set; 1953 struct rtw89_pci_tx_ring *tx_ring; 1954 struct rtw89_pci_rx_ring *rx_ring; 1955 int i; 1956 1957 if (WARN(!bd_idx_addr, "only HCI with low power mode needs this\n")) 1958 return; 1959 1960 for (i = 0; i < RTW89_TXCH_NUM; i++) { 1961 tx_ring = &rtwpci->tx.rings[i]; 1962 tx_ring->bd_ring.addr.idx = low_power ? 1963 bd_idx_addr->tx_bd_addrs[i] : 1964 dma_addr_set->tx[i].idx; 1965 } 1966 1967 for (i = 0; i < RTW89_RXCH_NUM; i++) { 1968 rx_ring = &rtwpci->rx.rings[i]; 1969 rx_ring->bd_ring.addr.idx = low_power ? 1970 bd_idx_addr->rx_bd_addrs[i] : 1971 dma_addr_set->rx[i].idx; 1972 } 1973 } 1974 1975 static void rtw89_pci_ops_switch_mode(struct rtw89_dev *rtwdev, bool low_power) 1976 { 1977 enum rtw89_pci_intr_mask_cfg cfg; 1978 1979 WARN(!rtwdev->hci.paused, "HCI isn't paused\n"); 1980 1981 cfg = low_power ? RTW89_PCI_INTR_MASK_LOW_POWER : RTW89_PCI_INTR_MASK_NORMAL; 1982 rtw89_chip_config_intr_mask(rtwdev, cfg); 1983 rtw89_pci_switch_bd_idx_addr(rtwdev, low_power); 1984 } 1985 1986 static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data); 1987 1988 static u32 rtw89_pci_ops_read32_cmac(struct rtw89_dev *rtwdev, u32 addr) 1989 { 1990 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1991 u32 val = readl(rtwpci->mmap + addr); 1992 int count; 1993 1994 for (count = 0; ; count++) { 1995 if (val != RTW89_R32_DEAD) 1996 return val; 1997 if (count >= MAC_REG_POOL_COUNT) { 1998 rtw89_warn(rtwdev, "addr %#x = %#x\n", addr, val); 1999 return RTW89_R32_DEAD; 2000 } 2001 rtw89_pci_ops_write32(rtwdev, R_AX_CK_EN, B_AX_CMAC_ALLCKEN); 2002 val = readl(rtwpci->mmap + addr); 2003 } 2004 2005 return val; 2006 } 2007 2008 static u8 rtw89_pci_ops_read8(struct rtw89_dev *rtwdev, u32 addr) 2009 { 2010 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2011 u32 addr32, val32, shift; 2012 2013 if (!ACCESS_CMAC(addr)) 2014 return readb(rtwpci->mmap + addr); 2015 2016 addr32 = addr & ~0x3; 2017 shift = (addr & 0x3) * 8; 2018 val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32); 2019 return val32 >> shift; 2020 } 2021 2022 static u16 rtw89_pci_ops_read16(struct rtw89_dev *rtwdev, u32 addr) 2023 { 2024 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2025 u32 addr32, val32, shift; 2026 2027 if (!ACCESS_CMAC(addr)) 2028 return readw(rtwpci->mmap + addr); 2029 2030 addr32 = addr & ~0x3; 2031 shift = (addr & 0x3) * 8; 2032 val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32); 2033 return val32 >> shift; 2034 } 2035 2036 static u32 rtw89_pci_ops_read32(struct rtw89_dev *rtwdev, u32 addr) 2037 { 2038 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2039 2040 if (!ACCESS_CMAC(addr)) 2041 return readl(rtwpci->mmap + addr); 2042 2043 return rtw89_pci_ops_read32_cmac(rtwdev, addr); 2044 } 2045 2046 static void rtw89_pci_ops_write8(struct rtw89_dev *rtwdev, u32 addr, u8 data) 2047 { 2048 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2049 2050 writeb(data, rtwpci->mmap + addr); 2051 } 2052 2053 static void rtw89_pci_ops_write16(struct rtw89_dev *rtwdev, u32 addr, u16 data) 2054 { 2055 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2056 2057 writew(data, rtwpci->mmap + addr); 2058 } 2059 2060 static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data) 2061 { 2062 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2063 2064 writel(data, rtwpci->mmap + addr); 2065 } 2066 2067 static void rtw89_pci_ctrl_dma_trx(struct rtw89_dev *rtwdev, bool enable) 2068 { 2069 const struct rtw89_pci_info *info = rtwdev->pci_info; 2070 2071 if (enable) 2072 rtw89_write32_set(rtwdev, info->init_cfg_reg, 2073 info->rxhci_en_bit | info->txhci_en_bit); 2074 else 2075 rtw89_write32_clr(rtwdev, info->init_cfg_reg, 2076 info->rxhci_en_bit | info->txhci_en_bit); 2077 } 2078 2079 static void rtw89_pci_ctrl_dma_io(struct rtw89_dev *rtwdev, bool enable) 2080 { 2081 const struct rtw89_pci_info *info = rtwdev->pci_info; 2082 const struct rtw89_reg_def *reg = &info->dma_io_stop; 2083 2084 if (enable) 2085 rtw89_write32_clr(rtwdev, reg->addr, reg->mask); 2086 else 2087 rtw89_write32_set(rtwdev, reg->addr, reg->mask); 2088 } 2089 2090 void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable) 2091 { 2092 rtw89_pci_ctrl_dma_io(rtwdev, enable); 2093 rtw89_pci_ctrl_dma_trx(rtwdev, enable); 2094 } 2095 2096 static int rtw89_pci_check_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 rw_bit) 2097 { 2098 u16 val; 2099 2100 rtw89_write8(rtwdev, R_AX_MDIO_CFG, addr & 0x1F); 2101 2102 val = rtw89_read16(rtwdev, R_AX_MDIO_CFG); 2103 switch (speed) { 2104 case PCIE_PHY_GEN1: 2105 if (addr < 0x20) 2106 val = u16_replace_bits(val, MDIO_PG0_G1, B_AX_MDIO_PHY_ADDR_MASK); 2107 else 2108 val = u16_replace_bits(val, MDIO_PG1_G1, B_AX_MDIO_PHY_ADDR_MASK); 2109 break; 2110 case PCIE_PHY_GEN2: 2111 if (addr < 0x20) 2112 val = u16_replace_bits(val, MDIO_PG0_G2, B_AX_MDIO_PHY_ADDR_MASK); 2113 else 2114 val = u16_replace_bits(val, MDIO_PG1_G2, B_AX_MDIO_PHY_ADDR_MASK); 2115 break; 2116 default: 2117 rtw89_err(rtwdev, "[ERR]Error Speed %d!\n", speed); 2118 return -EINVAL; 2119 } 2120 rtw89_write16(rtwdev, R_AX_MDIO_CFG, val); 2121 rtw89_write16_set(rtwdev, R_AX_MDIO_CFG, rw_bit); 2122 2123 return read_poll_timeout(rtw89_read16, val, !(val & rw_bit), 10, 2000, 2124 false, rtwdev, R_AX_MDIO_CFG); 2125 } 2126 2127 static int 2128 rtw89_read16_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 *val) 2129 { 2130 int ret; 2131 2132 ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_RFLAG); 2133 if (ret) { 2134 rtw89_err(rtwdev, "[ERR]MDIO R16 0x%X fail ret=%d!\n", addr, ret); 2135 return ret; 2136 } 2137 *val = rtw89_read16(rtwdev, R_AX_MDIO_RDATA); 2138 2139 return 0; 2140 } 2141 2142 static int 2143 rtw89_write16_mdio(struct rtw89_dev *rtwdev, u8 addr, u16 data, u8 speed) 2144 { 2145 int ret; 2146 2147 rtw89_write16(rtwdev, R_AX_MDIO_WDATA, data); 2148 ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_WFLAG); 2149 if (ret) { 2150 rtw89_err(rtwdev, "[ERR]MDIO W16 0x%X = %x fail ret=%d!\n", addr, data, ret); 2151 return ret; 2152 } 2153 2154 return 0; 2155 } 2156 2157 static int 2158 rtw89_write16_mdio_mask(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u16 data, u8 speed) 2159 { 2160 u32 shift; 2161 int ret; 2162 u16 val; 2163 2164 ret = rtw89_read16_mdio(rtwdev, addr, speed, &val); 2165 if (ret) 2166 return ret; 2167 2168 shift = __ffs(mask); 2169 val &= ~mask; 2170 val |= ((data << shift) & mask); 2171 2172 ret = rtw89_write16_mdio(rtwdev, addr, val, speed); 2173 if (ret) 2174 return ret; 2175 2176 return 0; 2177 } 2178 2179 static int rtw89_write16_mdio_set(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed) 2180 { 2181 int ret; 2182 u16 val; 2183 2184 ret = rtw89_read16_mdio(rtwdev, addr, speed, &val); 2185 if (ret) 2186 return ret; 2187 ret = rtw89_write16_mdio(rtwdev, addr, val | mask, speed); 2188 if (ret) 2189 return ret; 2190 2191 return 0; 2192 } 2193 2194 static int rtw89_write16_mdio_clr(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed) 2195 { 2196 int ret; 2197 u16 val; 2198 2199 ret = rtw89_read16_mdio(rtwdev, addr, speed, &val); 2200 if (ret) 2201 return ret; 2202 ret = rtw89_write16_mdio(rtwdev, addr, val & ~mask, speed); 2203 if (ret) 2204 return ret; 2205 2206 return 0; 2207 } 2208 2209 static int rtw89_dbi_write8(struct rtw89_dev *rtwdev, u16 addr, u8 data) 2210 { 2211 u16 addr_2lsb = addr & B_AX_DBI_2LSB; 2212 u16 write_addr; 2213 u8 flag; 2214 int ret; 2215 2216 write_addr = addr & B_AX_DBI_ADDR_MSK; 2217 write_addr |= u16_encode_bits(BIT(addr_2lsb), B_AX_DBI_WREN_MSK); 2218 rtw89_write8(rtwdev, R_AX_DBI_WDATA + addr_2lsb, data); 2219 rtw89_write16(rtwdev, R_AX_DBI_FLAG, write_addr); 2220 rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_WFLAG >> 16); 2221 2222 ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10, 2223 10 * RTW89_PCI_WR_RETRY_CNT, false, 2224 rtwdev, R_AX_DBI_FLAG + 2); 2225 if (ret) 2226 rtw89_err(rtwdev, "failed to write DBI register, addr=0x%X\n", 2227 addr); 2228 2229 return ret; 2230 } 2231 2232 static int rtw89_dbi_read8(struct rtw89_dev *rtwdev, u16 addr, u8 *value) 2233 { 2234 u16 read_addr = addr & B_AX_DBI_ADDR_MSK; 2235 u8 flag; 2236 int ret; 2237 2238 rtw89_write16(rtwdev, R_AX_DBI_FLAG, read_addr); 2239 rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_RFLAG >> 16); 2240 2241 ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10, 2242 10 * RTW89_PCI_WR_RETRY_CNT, false, 2243 rtwdev, R_AX_DBI_FLAG + 2); 2244 if (ret) { 2245 rtw89_err(rtwdev, "failed to read DBI register, addr=0x%X\n", 2246 addr); 2247 return ret; 2248 } 2249 2250 read_addr = R_AX_DBI_RDATA + (addr & 3); 2251 *value = rtw89_read8(rtwdev, read_addr); 2252 2253 return 0; 2254 } 2255 2256 static int rtw89_pci_write_config_byte(struct rtw89_dev *rtwdev, u16 addr, 2257 u8 data) 2258 { 2259 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2260 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2261 struct pci_dev *pdev = rtwpci->pdev; 2262 int ret; 2263 2264 ret = pci_write_config_byte(pdev, addr, data); 2265 if (!ret) 2266 return 0; 2267 2268 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) 2269 ret = rtw89_dbi_write8(rtwdev, addr, data); 2270 2271 return ret; 2272 } 2273 2274 static int rtw89_pci_read_config_byte(struct rtw89_dev *rtwdev, u16 addr, 2275 u8 *value) 2276 { 2277 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2278 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2279 struct pci_dev *pdev = rtwpci->pdev; 2280 int ret; 2281 2282 ret = pci_read_config_byte(pdev, addr, value); 2283 if (!ret) 2284 return 0; 2285 2286 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) 2287 ret = rtw89_dbi_read8(rtwdev, addr, value); 2288 2289 return ret; 2290 } 2291 2292 static int rtw89_pci_config_byte_set(struct rtw89_dev *rtwdev, u16 addr, 2293 u8 bit) 2294 { 2295 u8 value; 2296 int ret; 2297 2298 ret = rtw89_pci_read_config_byte(rtwdev, addr, &value); 2299 if (ret) 2300 return ret; 2301 2302 value |= bit; 2303 ret = rtw89_pci_write_config_byte(rtwdev, addr, value); 2304 2305 return ret; 2306 } 2307 2308 static int rtw89_pci_config_byte_clr(struct rtw89_dev *rtwdev, u16 addr, 2309 u8 bit) 2310 { 2311 u8 value; 2312 int ret; 2313 2314 ret = rtw89_pci_read_config_byte(rtwdev, addr, &value); 2315 if (ret) 2316 return ret; 2317 2318 value &= ~bit; 2319 ret = rtw89_pci_write_config_byte(rtwdev, addr, value); 2320 2321 return ret; 2322 } 2323 2324 static int 2325 __get_target(struct rtw89_dev *rtwdev, u16 *target, enum rtw89_pcie_phy phy_rate) 2326 { 2327 u16 val, tar; 2328 int ret; 2329 2330 /* Enable counter */ 2331 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val); 2332 if (ret) 2333 return ret; 2334 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN, 2335 phy_rate); 2336 if (ret) 2337 return ret; 2338 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val | B_AX_CLK_CALIB_EN, 2339 phy_rate); 2340 if (ret) 2341 return ret; 2342 2343 fsleep(300); 2344 2345 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &tar); 2346 if (ret) 2347 return ret; 2348 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN, 2349 phy_rate); 2350 if (ret) 2351 return ret; 2352 2353 tar = tar & 0x0FFF; 2354 if (tar == 0 || tar == 0x0FFF) { 2355 rtw89_err(rtwdev, "[ERR]Get target failed.\n"); 2356 return -EINVAL; 2357 } 2358 2359 *target = tar; 2360 2361 return 0; 2362 } 2363 2364 static int rtw89_pci_autok_x(struct rtw89_dev *rtwdev) 2365 { 2366 int ret; 2367 2368 if (!rtw89_is_rtl885xb(rtwdev)) 2369 return 0; 2370 2371 ret = rtw89_write16_mdio_mask(rtwdev, RAC_REG_FLD_0, BAC_AUTOK_N_MASK, 2372 PCIE_AUTOK_4, PCIE_PHY_GEN1); 2373 return ret; 2374 } 2375 2376 static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en) 2377 { 2378 enum rtw89_pcie_phy phy_rate; 2379 u16 val16, mgn_set, div_set, tar; 2380 u8 val8, bdr_ori; 2381 bool l1_flag = false; 2382 int ret = 0; 2383 2384 if (!rtw89_is_rtl885xb(rtwdev)) 2385 return 0; 2386 2387 ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_PHY_RATE, &val8); 2388 if (ret) { 2389 rtw89_err(rtwdev, "[ERR]pci config read %X\n", 2390 RTW89_PCIE_PHY_RATE); 2391 return ret; 2392 } 2393 2394 if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x1) { 2395 phy_rate = PCIE_PHY_GEN1; 2396 } else if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x2) { 2397 phy_rate = PCIE_PHY_GEN2; 2398 } else { 2399 rtw89_err(rtwdev, "[ERR]PCIe PHY rate %#x not support\n", val8); 2400 return -EOPNOTSUPP; 2401 } 2402 /* Disable L1BD */ 2403 ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, &bdr_ori); 2404 if (ret) { 2405 rtw89_err(rtwdev, "[ERR]pci config read %X\n", RTW89_PCIE_L1_CTRL); 2406 return ret; 2407 } 2408 2409 if (bdr_ori & RTW89_PCIE_BIT_L1) { 2410 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, 2411 bdr_ori & ~RTW89_PCIE_BIT_L1); 2412 if (ret) { 2413 rtw89_err(rtwdev, "[ERR]pci config write %X\n", 2414 RTW89_PCIE_L1_CTRL); 2415 return ret; 2416 } 2417 l1_flag = true; 2418 } 2419 2420 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16); 2421 if (ret) { 2422 rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1); 2423 goto end; 2424 } 2425 2426 if (val16 & B_AX_CALIB_EN) { 2427 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, 2428 val16 & ~B_AX_CALIB_EN, phy_rate); 2429 if (ret) { 2430 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 2431 goto end; 2432 } 2433 } 2434 2435 if (!autook_en) 2436 goto end; 2437 /* Set div */ 2438 ret = rtw89_write16_mdio_clr(rtwdev, RAC_CTRL_PPR_V1, B_AX_DIV, phy_rate); 2439 if (ret) { 2440 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 2441 goto end; 2442 } 2443 2444 /* Obtain div and margin */ 2445 ret = __get_target(rtwdev, &tar, phy_rate); 2446 if (ret) { 2447 rtw89_err(rtwdev, "[ERR]1st get target fail %d\n", ret); 2448 goto end; 2449 } 2450 2451 mgn_set = tar * INTF_INTGRA_HOSTREF_V1 / INTF_INTGRA_MINREF_V1 - tar; 2452 2453 if (mgn_set >= 128) { 2454 div_set = 0x0003; 2455 mgn_set = 0x000F; 2456 } else if (mgn_set >= 64) { 2457 div_set = 0x0003; 2458 mgn_set >>= 3; 2459 } else if (mgn_set >= 32) { 2460 div_set = 0x0002; 2461 mgn_set >>= 2; 2462 } else if (mgn_set >= 16) { 2463 div_set = 0x0001; 2464 mgn_set >>= 1; 2465 } else if (mgn_set == 0) { 2466 rtw89_err(rtwdev, "[ERR]cal mgn is 0,tar = %d\n", tar); 2467 goto end; 2468 } else { 2469 div_set = 0x0000; 2470 } 2471 2472 ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16); 2473 if (ret) { 2474 rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1); 2475 goto end; 2476 } 2477 2478 val16 |= u16_encode_bits(div_set, B_AX_DIV); 2479 2480 ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val16, phy_rate); 2481 if (ret) { 2482 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 2483 goto end; 2484 } 2485 2486 ret = __get_target(rtwdev, &tar, phy_rate); 2487 if (ret) { 2488 rtw89_err(rtwdev, "[ERR]2nd get target fail %d\n", ret); 2489 goto end; 2490 } 2491 2492 rtw89_debug(rtwdev, RTW89_DBG_HCI, "[TRACE]target = 0x%X, div = 0x%X, margin = 0x%X\n", 2493 tar, div_set, mgn_set); 2494 ret = rtw89_write16_mdio(rtwdev, RAC_SET_PPR_V1, 2495 (tar & 0x0FFF) | (mgn_set << 12), phy_rate); 2496 if (ret) { 2497 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_SET_PPR_V1); 2498 goto end; 2499 } 2500 2501 /* Enable function */ 2502 ret = rtw89_write16_mdio_set(rtwdev, RAC_CTRL_PPR_V1, B_AX_CALIB_EN, phy_rate); 2503 if (ret) { 2504 rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 2505 goto end; 2506 } 2507 2508 /* CLK delay = 0 */ 2509 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL, 2510 PCIE_CLKDLY_HW_0); 2511 2512 end: 2513 /* Set L1BD to ori */ 2514 if (l1_flag) { 2515 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, 2516 bdr_ori); 2517 if (ret) { 2518 rtw89_err(rtwdev, "[ERR]pci config write %X\n", 2519 RTW89_PCIE_L1_CTRL); 2520 return ret; 2521 } 2522 } 2523 2524 return ret; 2525 } 2526 2527 static int rtw89_pci_deglitch_setting(struct rtw89_dev *rtwdev) 2528 { 2529 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2530 int ret; 2531 2532 if (chip_id == RTL8852A) { 2533 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH, 2534 PCIE_PHY_GEN1); 2535 if (ret) 2536 return ret; 2537 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH, 2538 PCIE_PHY_GEN2); 2539 if (ret) 2540 return ret; 2541 } else if (chip_id == RTL8852C) { 2542 rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA24 * 2, 2543 B_AX_DEGLITCH); 2544 rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA24 * 2, 2545 B_AX_DEGLITCH); 2546 } 2547 2548 return 0; 2549 } 2550 2551 static void rtw89_pci_disable_eq_ax(struct rtw89_dev *rtwdev) 2552 { 2553 u16 g1_oobs, g2_oobs; 2554 u32 backup_aspm; 2555 u32 phy_offset; 2556 u16 offset_cal; 2557 u16 oobs_val; 2558 int ret; 2559 u8 gen; 2560 2561 if (rtwdev->chip->chip_id != RTL8852C) 2562 return; 2563 2564 g1_oobs = rtw89_read16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G1 + 2565 RAC_ANA09 * RAC_MULT, BAC_OOBS_SEL); 2566 g2_oobs = rtw89_read16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G2 + 2567 RAC_ANA09 * RAC_MULT, BAC_OOBS_SEL); 2568 if (g1_oobs && g2_oobs) 2569 return; 2570 2571 backup_aspm = rtw89_read32(rtwdev, R_AX_PCIE_MIX_CFG_V1); 2572 rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK); 2573 2574 ret = rtw89_pci_get_phy_offset_by_link_speed(rtwdev, &phy_offset); 2575 if (ret) 2576 goto out; 2577 2578 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0D * RAC_MULT, BAC_RX_TEST_EN); 2579 rtw89_write16(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT, ADDR_SEL_PINOUT_DIS_VAL); 2580 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT, B_PCIE_BIT_RD_SEL); 2581 2582 oobs_val = rtw89_read16_mask(rtwdev, phy_offset + RAC_ANA1F * RAC_MULT, 2583 OOBS_LEVEL_MASK); 2584 2585 rtw89_write16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA03 * RAC_MULT, 2586 OOBS_SEN_MASK, oobs_val); 2587 rtw89_write16_set(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA09 * RAC_MULT, 2588 BAC_OOBS_SEL); 2589 2590 rtw89_write16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA03 * RAC_MULT, 2591 OOBS_SEN_MASK, oobs_val); 2592 rtw89_write16_set(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA09 * RAC_MULT, 2593 BAC_OOBS_SEL); 2594 2595 /* offset K */ 2596 for (gen = 1; gen <= 2; gen++) { 2597 phy_offset = gen == 1 ? R_RAC_DIRECT_OFFSET_G1 : 2598 R_RAC_DIRECT_OFFSET_G2; 2599 2600 rtw89_write16_clr(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT, 2601 B_PCIE_BIT_RD_SEL); 2602 } 2603 2604 offset_cal = rtw89_read16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G1 + 2605 RAC_ANA1F * RAC_MULT, OFFSET_CAL_MASK); 2606 2607 for (gen = 1; gen <= 2; gen++) { 2608 phy_offset = gen == 1 ? R_RAC_DIRECT_OFFSET_G1 : 2609 R_RAC_DIRECT_OFFSET_G2; 2610 2611 rtw89_write16_mask(rtwdev, phy_offset + RAC_ANA0B * RAC_MULT, 2612 MANUAL_LVL_MASK, offset_cal); 2613 rtw89_write16_clr(rtwdev, phy_offset + RAC_ANA0D * RAC_MULT, 2614 OFFSET_CAL_MODE); 2615 } 2616 2617 out: 2618 rtw89_write32(rtwdev, R_AX_PCIE_MIX_CFG_V1, backup_aspm); 2619 } 2620 2621 static void rtw89_pci_ber(struct rtw89_dev *rtwdev) 2622 { 2623 u32 phy_offset; 2624 2625 if (!test_bit(RTW89_QUIRK_PCI_BER, rtwdev->quirks)) 2626 return; 2627 2628 phy_offset = R_RAC_DIRECT_OFFSET_G1; 2629 rtw89_write16(rtwdev, phy_offset + RAC_ANA1E * RAC_MULT, RAC_ANA1E_G1_VAL); 2630 rtw89_write16(rtwdev, phy_offset + RAC_ANA2E * RAC_MULT, RAC_ANA2E_VAL); 2631 2632 phy_offset = R_RAC_DIRECT_OFFSET_G2; 2633 rtw89_write16(rtwdev, phy_offset + RAC_ANA1E * RAC_MULT, RAC_ANA1E_G2_VAL); 2634 rtw89_write16(rtwdev, phy_offset + RAC_ANA2E * RAC_MULT, RAC_ANA2E_VAL); 2635 } 2636 2637 static void rtw89_pci_rxdma_prefth(struct rtw89_dev *rtwdev) 2638 { 2639 if (rtwdev->chip->chip_id != RTL8852A) 2640 return; 2641 2642 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_DIS_RXDMA_PRE); 2643 } 2644 2645 static void rtw89_pci_l1off_pwroff(struct rtw89_dev *rtwdev) 2646 { 2647 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2648 2649 if (chip_id != RTL8852A && !rtw89_is_rtl885xb(rtwdev)) 2650 return; 2651 2652 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL, B_AX_L1OFF_PWR_OFF_EN); 2653 } 2654 2655 static u32 rtw89_pci_l2_rxen_lat(struct rtw89_dev *rtwdev) 2656 { 2657 int ret; 2658 2659 if (rtwdev->chip->chip_id != RTL8852A) 2660 return 0; 2661 2662 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN, 2663 PCIE_PHY_GEN1); 2664 if (ret) 2665 return ret; 2666 2667 ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN, 2668 PCIE_PHY_GEN2); 2669 if (ret) 2670 return ret; 2671 2672 return 0; 2673 } 2674 2675 static void rtw89_pci_aphy_pwrcut(struct rtw89_dev *rtwdev) 2676 { 2677 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2678 2679 if (chip_id != RTL8852A && !rtw89_is_rtl885xb(rtwdev)) 2680 return; 2681 2682 rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_PSUS_OFF_CAPC_EN); 2683 } 2684 2685 static void rtw89_pci_hci_ldo(struct rtw89_dev *rtwdev) 2686 { 2687 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2688 2689 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 2690 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, 2691 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 2692 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, 2693 B_AX_PCIE_DIS_WLSUS_AFT_PDN); 2694 } else if (rtwdev->chip->chip_id == RTL8852C) { 2695 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, 2696 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 2697 } 2698 } 2699 2700 static int rtw89_pci_dphy_delay(struct rtw89_dev *rtwdev) 2701 { 2702 if (!rtw89_is_rtl885xb(rtwdev)) 2703 return 0; 2704 2705 return rtw89_write16_mdio_mask(rtwdev, RAC_REG_REV2, BAC_CMU_EN_DLY_MASK, 2706 PCIE_DPHY_DLY_25US, PCIE_PHY_GEN1); 2707 } 2708 2709 static void rtw89_pci_power_wake_ax(struct rtw89_dev *rtwdev, bool pwr_up) 2710 { 2711 if (pwr_up) 2712 rtw89_write32_set(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL); 2713 else 2714 rtw89_write32_clr(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL); 2715 } 2716 2717 static void rtw89_pci_autoload_hang(struct rtw89_dev *rtwdev) 2718 { 2719 if (rtwdev->chip->chip_id != RTL8852C) 2720 return; 2721 2722 rtw89_write32_set(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3); 2723 rtw89_write32_clr(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3); 2724 } 2725 2726 static void rtw89_pci_l12_vmain(struct rtw89_dev *rtwdev) 2727 { 2728 if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV)) 2729 return; 2730 2731 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_FORCE_PWR_NGAT); 2732 } 2733 2734 static void rtw89_pci_gen2_force_ib(struct rtw89_dev *rtwdev) 2735 { 2736 if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV)) 2737 return; 2738 2739 rtw89_write32_set(rtwdev, R_AX_PMC_DBG_CTRL2, 2740 B_AX_SYSON_DIS_PMCR_AX_WRMSK); 2741 rtw89_write32_set(rtwdev, R_AX_HCI_BG_CTRL, B_AX_BG_CLR_ASYNC_M3); 2742 rtw89_write32_clr(rtwdev, R_AX_PMC_DBG_CTRL2, 2743 B_AX_SYSON_DIS_PMCR_AX_WRMSK); 2744 } 2745 2746 static void rtw89_pci_l1_ent_lat(struct rtw89_dev *rtwdev) 2747 { 2748 if (rtwdev->chip->chip_id != RTL8852C) 2749 return; 2750 2751 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_SEL_REQ_ENTR_L1); 2752 } 2753 2754 static void rtw89_pci_wd_exit_l1(struct rtw89_dev *rtwdev) 2755 { 2756 if (rtwdev->chip->chip_id != RTL8852C) 2757 return; 2758 2759 rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_DMAC0_EXIT_L1_EN); 2760 } 2761 2762 static void rtw89_pci_set_sic(struct rtw89_dev *rtwdev) 2763 { 2764 if (rtwdev->chip->chip_id == RTL8852C) 2765 return; 2766 2767 rtw89_write32_clr(rtwdev, R_AX_PCIE_EXP_CTRL, 2768 B_AX_SIC_EN_FORCE_CLKREQ); 2769 } 2770 2771 static void rtw89_pci_set_lbc(struct rtw89_dev *rtwdev) 2772 { 2773 const struct rtw89_pci_info *info = rtwdev->pci_info; 2774 u32 lbc; 2775 2776 if (rtwdev->chip->chip_id == RTL8852C) 2777 return; 2778 2779 lbc = rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG); 2780 if (info->lbc_en == MAC_AX_PCIE_ENABLE) { 2781 lbc = u32_replace_bits(lbc, info->lbc_tmr, B_AX_LBC_TIMER); 2782 lbc |= B_AX_LBC_FLAG | B_AX_LBC_EN; 2783 rtw89_write32(rtwdev, R_AX_LBC_WATCHDOG, lbc); 2784 } else { 2785 lbc &= ~B_AX_LBC_EN; 2786 } 2787 rtw89_write32_set(rtwdev, R_AX_LBC_WATCHDOG, lbc); 2788 } 2789 2790 static void rtw89_pci_set_io_rcy(struct rtw89_dev *rtwdev) 2791 { 2792 const struct rtw89_pci_info *info = rtwdev->pci_info; 2793 u32 val32; 2794 2795 if (rtwdev->chip->chip_id != RTL8852C) 2796 return; 2797 2798 if (info->io_rcy_en == MAC_AX_PCIE_ENABLE) { 2799 val32 = FIELD_PREP(B_AX_PCIE_WDT_TIMER_M1_MASK, 2800 info->io_rcy_tmr); 2801 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M1, val32); 2802 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M2, val32); 2803 rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_E0, val32); 2804 2805 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1); 2806 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2); 2807 rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0); 2808 } else { 2809 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1); 2810 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2); 2811 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0); 2812 } 2813 2814 rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_S1, B_AX_PCIE_IO_RCY_WDT_MODE_S1); 2815 } 2816 2817 static void rtw89_pci_set_dbg(struct rtw89_dev *rtwdev) 2818 { 2819 if (rtwdev->chip->chip_id == RTL8852C) 2820 return; 2821 2822 rtw89_write32_set(rtwdev, R_AX_PCIE_DBG_CTRL, 2823 B_AX_ASFF_FULL_NO_STK | B_AX_EN_STUCK_DBG); 2824 2825 rtw89_write32_mask(rtwdev, R_AX_PCIE_EXP_CTRL, 2826 B_AX_EN_STUCK_DBG | B_AX_ASFF_FULL_NO_STK, 2827 B_AX_EN_STUCK_DBG); 2828 2829 if (rtwdev->chip->chip_id == RTL8852A) 2830 rtw89_write32_set(rtwdev, R_AX_PCIE_EXP_CTRL, 2831 B_AX_EN_CHKDSC_NO_RX_STUCK); 2832 } 2833 2834 static void rtw89_pci_set_keep_reg(struct rtw89_dev *rtwdev) 2835 { 2836 if (rtwdev->chip->chip_id == RTL8852C) 2837 return; 2838 2839 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, 2840 B_AX_PCIE_TXRST_KEEP_REG | B_AX_PCIE_RXRST_KEEP_REG); 2841 } 2842 2843 static void rtw89_pci_clr_idx_all_ax(struct rtw89_dev *rtwdev) 2844 { 2845 const struct rtw89_pci_info *info = rtwdev->pci_info; 2846 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2847 u32 val = B_AX_CLR_ACH0_IDX | B_AX_CLR_ACH1_IDX | B_AX_CLR_ACH2_IDX | 2848 B_AX_CLR_ACH3_IDX | B_AX_CLR_CH8_IDX | B_AX_CLR_CH9_IDX | 2849 B_AX_CLR_CH12_IDX; 2850 u32 rxbd_rwptr_clr = info->rxbd_rwptr_clr_reg; 2851 u32 txbd_rwptr_clr2 = info->txbd_rwptr_clr2_reg; 2852 2853 if (chip_id == RTL8852A || chip_id == RTL8852C) 2854 val |= B_AX_CLR_ACH4_IDX | B_AX_CLR_ACH5_IDX | 2855 B_AX_CLR_ACH6_IDX | B_AX_CLR_ACH7_IDX; 2856 /* clear DMA indexes */ 2857 rtw89_write32_set(rtwdev, R_AX_TXBD_RWPTR_CLR1, val); 2858 if (chip_id == RTL8852A || chip_id == RTL8852C) 2859 rtw89_write32_set(rtwdev, txbd_rwptr_clr2, 2860 B_AX_CLR_CH10_IDX | B_AX_CLR_CH11_IDX); 2861 rtw89_write32_set(rtwdev, rxbd_rwptr_clr, 2862 B_AX_CLR_RXQ_IDX | B_AX_CLR_RPQ_IDX); 2863 } 2864 2865 static int rtw89_pci_poll_txdma_ch_idle_ax(struct rtw89_dev *rtwdev) 2866 { 2867 const struct rtw89_pci_info *info = rtwdev->pci_info; 2868 u32 dma_busy1 = info->dma_busy1.addr; 2869 u32 dma_busy2 = info->dma_busy2_reg; 2870 u32 check, dma_busy; 2871 int ret; 2872 2873 check = info->dma_busy1.mask; 2874 2875 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, 2876 10, 100, false, rtwdev, dma_busy1); 2877 if (ret) 2878 return ret; 2879 2880 if (!dma_busy2) 2881 return 0; 2882 2883 check = B_AX_CH10_BUSY | B_AX_CH11_BUSY; 2884 2885 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, 2886 10, 100, false, rtwdev, dma_busy2); 2887 if (ret) 2888 return ret; 2889 2890 return 0; 2891 } 2892 2893 static int rtw89_pci_poll_rxdma_ch_idle_ax(struct rtw89_dev *rtwdev) 2894 { 2895 const struct rtw89_pci_info *info = rtwdev->pci_info; 2896 u32 dma_busy3 = info->dma_busy3_reg; 2897 u32 check, dma_busy; 2898 int ret; 2899 2900 check = B_AX_RXQ_BUSY | B_AX_RPQ_BUSY; 2901 2902 ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, 2903 10, 100, false, rtwdev, dma_busy3); 2904 if (ret) 2905 return ret; 2906 2907 return 0; 2908 } 2909 2910 static int rtw89_pci_poll_dma_all_idle(struct rtw89_dev *rtwdev) 2911 { 2912 int ret; 2913 2914 ret = rtw89_pci_poll_txdma_ch_idle_ax(rtwdev); 2915 if (ret) { 2916 rtw89_err(rtwdev, "txdma ch busy\n"); 2917 return ret; 2918 } 2919 2920 ret = rtw89_pci_poll_rxdma_ch_idle_ax(rtwdev); 2921 if (ret) { 2922 rtw89_err(rtwdev, "rxdma ch busy\n"); 2923 return ret; 2924 } 2925 2926 return 0; 2927 } 2928 2929 static int rtw89_pci_mode_op(struct rtw89_dev *rtwdev) 2930 { 2931 const struct rtw89_pci_info *info = rtwdev->pci_info; 2932 enum mac_ax_bd_trunc_mode txbd_trunc_mode = info->txbd_trunc_mode; 2933 enum mac_ax_bd_trunc_mode rxbd_trunc_mode = info->rxbd_trunc_mode; 2934 enum mac_ax_rxbd_mode rxbd_mode = info->rxbd_mode; 2935 enum mac_ax_tag_mode tag_mode = info->tag_mode; 2936 enum mac_ax_wd_dma_intvl wd_dma_idle_intvl = info->wd_dma_idle_intvl; 2937 enum mac_ax_wd_dma_intvl wd_dma_act_intvl = info->wd_dma_act_intvl; 2938 enum mac_ax_tx_burst tx_burst = info->tx_burst; 2939 enum mac_ax_rx_burst rx_burst = info->rx_burst; 2940 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2941 u8 cv = rtwdev->hal.cv; 2942 u32 val32; 2943 2944 if (txbd_trunc_mode == MAC_AX_BD_TRUNC) { 2945 if (chip_id == RTL8852A && cv == CHIP_CBV) 2946 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE); 2947 } else if (txbd_trunc_mode == MAC_AX_BD_NORM) { 2948 if (chip_id == RTL8852A || chip_id == RTL8852B) 2949 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE); 2950 } 2951 2952 if (rxbd_trunc_mode == MAC_AX_BD_TRUNC) { 2953 if (chip_id == RTL8852A && cv == CHIP_CBV) 2954 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE); 2955 } else if (rxbd_trunc_mode == MAC_AX_BD_NORM) { 2956 if (chip_id == RTL8852A || chip_id == RTL8852B) 2957 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE); 2958 } 2959 2960 if (rxbd_mode == MAC_AX_RXBD_PKT) { 2961 rtw89_write32_clr(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit); 2962 } else if (rxbd_mode == MAC_AX_RXBD_SEP) { 2963 rtw89_write32_set(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit); 2964 2965 if (chip_id == RTL8852A || chip_id == RTL8852B) 2966 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, 2967 B_AX_PCIE_RX_APPLEN_MASK, 0); 2968 } 2969 2970 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 2971 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_TXDMA_MASK, tx_burst); 2972 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_RXDMA_MASK, rx_burst); 2973 } else if (chip_id == RTL8852C) { 2974 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_TXDMA_MASK, tx_burst); 2975 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_RXDMA_MASK, rx_burst); 2976 } 2977 2978 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 2979 if (tag_mode == MAC_AX_TAG_SGL) { 2980 val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) & 2981 ~B_AX_LATENCY_CONTROL; 2982 rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 2983 } else if (tag_mode == MAC_AX_TAG_MULTI) { 2984 val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) | 2985 B_AX_LATENCY_CONTROL; 2986 rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 2987 } 2988 } 2989 2990 rtw89_write32_mask(rtwdev, info->exp_ctrl_reg, info->max_tag_num_mask, 2991 info->multi_tag_num); 2992 2993 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 2994 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_IDLE, 2995 wd_dma_idle_intvl); 2996 rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_ACT, 2997 wd_dma_act_intvl); 2998 } else if (chip_id == RTL8852C) { 2999 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_IDLE_V1_MASK, 3000 wd_dma_idle_intvl); 3001 rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_ACT_V1_MASK, 3002 wd_dma_act_intvl); 3003 } 3004 3005 if (txbd_trunc_mode == MAC_AX_BD_TRUNC) { 3006 rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, 3007 B_AX_HOST_ADDR_INFO_8B_SEL); 3008 rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); 3009 } else if (txbd_trunc_mode == MAC_AX_BD_NORM) { 3010 rtw89_write32_clr(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, 3011 B_AX_HOST_ADDR_INFO_8B_SEL); 3012 rtw89_write32_set(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); 3013 } 3014 3015 return 0; 3016 } 3017 3018 static int rtw89_pci_ops_deinit(struct rtw89_dev *rtwdev) 3019 { 3020 const struct rtw89_pci_info *info = rtwdev->pci_info; 3021 3022 rtw89_pci_power_wake(rtwdev, false); 3023 3024 if (rtwdev->chip->chip_id == RTL8852A) { 3025 /* ltr sw trigger */ 3026 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_IDLE); 3027 } 3028 info->ltr_set(rtwdev, false); 3029 rtw89_pci_ctrl_dma_all(rtwdev, false); 3030 rtw89_pci_clr_idx_all(rtwdev); 3031 3032 return 0; 3033 } 3034 3035 static int rtw89_pci_ops_mac_pre_init_ax(struct rtw89_dev *rtwdev) 3036 { 3037 const struct rtw89_pci_info *info = rtwdev->pci_info; 3038 int ret; 3039 3040 rtw89_pci_ber(rtwdev); 3041 rtw89_pci_rxdma_prefth(rtwdev); 3042 rtw89_pci_l1off_pwroff(rtwdev); 3043 rtw89_pci_deglitch_setting(rtwdev); 3044 ret = rtw89_pci_l2_rxen_lat(rtwdev); 3045 if (ret) { 3046 rtw89_err(rtwdev, "[ERR] pcie l2 rxen lat %d\n", ret); 3047 return ret; 3048 } 3049 3050 rtw89_pci_aphy_pwrcut(rtwdev); 3051 rtw89_pci_hci_ldo(rtwdev); 3052 rtw89_pci_dphy_delay(rtwdev); 3053 3054 ret = rtw89_pci_autok_x(rtwdev); 3055 if (ret) { 3056 rtw89_err(rtwdev, "[ERR] pcie autok_x fail %d\n", ret); 3057 return ret; 3058 } 3059 3060 ret = rtw89_pci_auto_refclk_cal(rtwdev, false); 3061 if (ret) { 3062 rtw89_err(rtwdev, "[ERR] pcie autok fail %d\n", ret); 3063 return ret; 3064 } 3065 3066 rtw89_pci_power_wake_ax(rtwdev, true); 3067 rtw89_pci_autoload_hang(rtwdev); 3068 rtw89_pci_l12_vmain(rtwdev); 3069 rtw89_pci_gen2_force_ib(rtwdev); 3070 rtw89_pci_l1_ent_lat(rtwdev); 3071 rtw89_pci_wd_exit_l1(rtwdev); 3072 rtw89_pci_set_sic(rtwdev); 3073 rtw89_pci_set_lbc(rtwdev); 3074 rtw89_pci_set_io_rcy(rtwdev); 3075 rtw89_pci_set_dbg(rtwdev); 3076 rtw89_pci_set_keep_reg(rtwdev); 3077 3078 rtw89_write32_set(rtwdev, info->dma_stop1.addr, B_AX_STOP_WPDMA); 3079 3080 /* stop DMA activities */ 3081 rtw89_pci_ctrl_dma_all(rtwdev, false); 3082 3083 ret = rtw89_pci_poll_dma_all_idle(rtwdev); 3084 if (ret) { 3085 rtw89_err(rtwdev, "[ERR] poll pcie dma all idle\n"); 3086 return ret; 3087 } 3088 3089 rtw89_pci_clr_idx_all(rtwdev); 3090 rtw89_pci_mode_op(rtwdev); 3091 3092 /* fill TRX BD indexes */ 3093 rtw89_pci_ops_reset(rtwdev); 3094 3095 ret = rtw89_pci_rst_bdram_ax(rtwdev); 3096 if (ret) { 3097 rtw89_warn(rtwdev, "reset bdram busy\n"); 3098 return ret; 3099 } 3100 3101 /* disable all channels except to FW CMD channel to download firmware */ 3102 rtw89_pci_ctrl_txdma_ch_ax(rtwdev, false); 3103 rtw89_pci_ctrl_txdma_fw_ch_ax(rtwdev, true); 3104 3105 /* start DMA activities */ 3106 rtw89_pci_ctrl_dma_all(rtwdev, true); 3107 3108 return 0; 3109 } 3110 3111 static int rtw89_pci_ops_mac_pre_deinit_ax(struct rtw89_dev *rtwdev) 3112 { 3113 rtw89_pci_power_wake_ax(rtwdev, false); 3114 3115 return 0; 3116 } 3117 3118 int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev, bool en) 3119 { 3120 u32 val; 3121 3122 if (!en) 3123 return 0; 3124 3125 val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0); 3126 if (rtw89_pci_ltr_is_err_reg_val(val)) 3127 return -EINVAL; 3128 val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1); 3129 if (rtw89_pci_ltr_is_err_reg_val(val)) 3130 return -EINVAL; 3131 val = rtw89_read32(rtwdev, R_AX_LTR_IDLE_LATENCY); 3132 if (rtw89_pci_ltr_is_err_reg_val(val)) 3133 return -EINVAL; 3134 val = rtw89_read32(rtwdev, R_AX_LTR_ACTIVE_LATENCY); 3135 if (rtw89_pci_ltr_is_err_reg_val(val)) 3136 return -EINVAL; 3137 3138 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_HW_EN | B_AX_LTR_EN | 3139 B_AX_LTR_WD_NOEMP_CHK); 3140 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_SPACE_IDX_MASK, 3141 PCI_LTR_SPC_500US); 3142 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK, 3143 PCI_LTR_IDLE_TIMER_3_2MS); 3144 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28); 3145 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28); 3146 rtw89_write32(rtwdev, R_AX_LTR_IDLE_LATENCY, 0x90039003); 3147 rtw89_write32(rtwdev, R_AX_LTR_ACTIVE_LATENCY, 0x880b880b); 3148 3149 return 0; 3150 } 3151 EXPORT_SYMBOL(rtw89_pci_ltr_set); 3152 3153 int rtw89_pci_ltr_set_v1(struct rtw89_dev *rtwdev, bool en) 3154 { 3155 u32 dec_ctrl; 3156 u32 val32; 3157 3158 val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0); 3159 if (rtw89_pci_ltr_is_err_reg_val(val32)) 3160 return -EINVAL; 3161 val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1); 3162 if (rtw89_pci_ltr_is_err_reg_val(val32)) 3163 return -EINVAL; 3164 dec_ctrl = rtw89_read32(rtwdev, R_AX_LTR_DEC_CTRL); 3165 if (rtw89_pci_ltr_is_err_reg_val(dec_ctrl)) 3166 return -EINVAL; 3167 val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX3); 3168 if (rtw89_pci_ltr_is_err_reg_val(val32)) 3169 return -EINVAL; 3170 val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX0); 3171 if (rtw89_pci_ltr_is_err_reg_val(val32)) 3172 return -EINVAL; 3173 3174 if (!en) { 3175 dec_ctrl &= ~(LTR_EN_BITS | B_AX_LTR_IDX_DRV_MASK | B_AX_LTR_HW_DEC_EN); 3176 dec_ctrl |= FIELD_PREP(B_AX_LTR_IDX_DRV_MASK, PCIE_LTR_IDX_IDLE) | 3177 B_AX_LTR_REQ_DRV; 3178 } else { 3179 dec_ctrl |= B_AX_LTR_HW_DEC_EN; 3180 } 3181 3182 dec_ctrl &= ~B_AX_LTR_SPACE_IDX_V1_MASK; 3183 dec_ctrl |= FIELD_PREP(B_AX_LTR_SPACE_IDX_V1_MASK, PCI_LTR_SPC_500US); 3184 3185 if (en) 3186 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, 3187 B_AX_LTR_WD_NOEMP_CHK_V1 | B_AX_LTR_HW_EN); 3188 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK, 3189 PCI_LTR_IDLE_TIMER_3_2MS); 3190 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28); 3191 rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28); 3192 rtw89_write32(rtwdev, R_AX_LTR_DEC_CTRL, dec_ctrl); 3193 rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX3, 0x90039003); 3194 rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX0, 0x880b880b); 3195 3196 return 0; 3197 } 3198 EXPORT_SYMBOL(rtw89_pci_ltr_set_v1); 3199 3200 static int rtw89_pci_ops_mac_post_init_ax(struct rtw89_dev *rtwdev) 3201 { 3202 const struct rtw89_pci_info *info = rtwdev->pci_info; 3203 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 3204 int ret; 3205 3206 ret = info->ltr_set(rtwdev, true); 3207 if (ret) { 3208 rtw89_err(rtwdev, "pci ltr set fail\n"); 3209 return ret; 3210 } 3211 if (chip_id == RTL8852A) { 3212 /* ltr sw trigger */ 3213 rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_ACT); 3214 } 3215 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 3216 /* ADDR info 8-byte mode */ 3217 rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, 3218 B_AX_HOST_ADDR_INFO_8B_SEL); 3219 rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); 3220 } 3221 3222 /* enable DMA for all queues */ 3223 rtw89_pci_ctrl_txdma_ch_ax(rtwdev, true); 3224 3225 /* Release PCI IO */ 3226 rtw89_write32_clr(rtwdev, info->dma_stop1.addr, 3227 B_AX_STOP_WPDMA | B_AX_STOP_PCIEIO); 3228 3229 return 0; 3230 } 3231 3232 static int rtw89_pci_claim_device(struct rtw89_dev *rtwdev, 3233 struct pci_dev *pdev) 3234 { 3235 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3236 int ret; 3237 3238 ret = pci_enable_device(pdev); 3239 if (ret) { 3240 rtw89_err(rtwdev, "failed to enable pci device\n"); 3241 return ret; 3242 } 3243 3244 pci_set_master(pdev); 3245 pci_set_drvdata(pdev, rtwdev->hw); 3246 3247 rtwpci->pdev = pdev; 3248 3249 return 0; 3250 } 3251 3252 static void rtw89_pci_declaim_device(struct rtw89_dev *rtwdev, 3253 struct pci_dev *pdev) 3254 { 3255 pci_disable_device(pdev); 3256 } 3257 3258 static bool rtw89_pci_chip_is_manual_dac(struct rtw89_dev *rtwdev) 3259 { 3260 const struct rtw89_chip_info *chip = rtwdev->chip; 3261 3262 switch (chip->chip_id) { 3263 case RTL8852A: 3264 case RTL8852B: 3265 case RTL8851B: 3266 case RTL8852BT: 3267 return true; 3268 default: 3269 return false; 3270 } 3271 } 3272 3273 static bool rtw89_pci_is_dac_compatible_bridge(struct rtw89_dev *rtwdev) 3274 { 3275 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3276 struct pci_dev *bridge = pci_upstream_bridge(rtwpci->pdev); 3277 3278 if (!rtw89_pci_chip_is_manual_dac(rtwdev)) 3279 return true; 3280 3281 if (!bridge) 3282 return false; 3283 3284 switch (bridge->vendor) { 3285 case PCI_VENDOR_ID_INTEL: 3286 return true; 3287 case PCI_VENDOR_ID_ASMEDIA: 3288 if (bridge->device == 0x2806) 3289 return true; 3290 break; 3291 } 3292 3293 return false; 3294 } 3295 3296 static int rtw89_pci_cfg_dac(struct rtw89_dev *rtwdev, bool force) 3297 { 3298 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3299 struct pci_dev *pdev = rtwpci->pdev; 3300 int ret; 3301 u8 val; 3302 3303 if (!rtwpci->enable_dac && !force) 3304 return 0; 3305 3306 if (!rtw89_pci_chip_is_manual_dac(rtwdev)) 3307 return 0; 3308 3309 /* Configure DAC only via PCI config API, not DBI interfaces */ 3310 ret = pci_read_config_byte(pdev, RTW89_PCIE_L1_CTRL, &val); 3311 if (ret) 3312 return ret; 3313 3314 val |= RTW89_PCIE_BIT_EN_64BITS; 3315 return pci_write_config_byte(pdev, RTW89_PCIE_L1_CTRL, val); 3316 } 3317 3318 static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev, 3319 struct pci_dev *pdev) 3320 { 3321 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3322 unsigned long resource_len; 3323 u8 bar_id = 2; 3324 int ret; 3325 3326 ret = pci_request_regions(pdev, KBUILD_MODNAME); 3327 if (ret) { 3328 rtw89_err(rtwdev, "failed to request pci regions\n"); 3329 goto err; 3330 } 3331 3332 if (!rtw89_pci_is_dac_compatible_bridge(rtwdev)) 3333 goto try_dac_done; 3334 3335 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36)); 3336 if (!ret) { 3337 ret = rtw89_pci_cfg_dac(rtwdev, true); 3338 if (!ret) { 3339 rtwpci->enable_dac = true; 3340 goto try_dac_done; 3341 } 3342 3343 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 3344 if (ret) { 3345 rtw89_err(rtwdev, 3346 "failed to set dma and consistent mask to 32/36-bit\n"); 3347 goto err_release_regions; 3348 } 3349 } 3350 try_dac_done: 3351 3352 resource_len = pci_resource_len(pdev, bar_id); 3353 rtwpci->mmap = pci_iomap(pdev, bar_id, resource_len); 3354 if (!rtwpci->mmap) { 3355 rtw89_err(rtwdev, "failed to map pci io\n"); 3356 ret = -EIO; 3357 goto err_release_regions; 3358 } 3359 3360 return 0; 3361 3362 err_release_regions: 3363 pci_release_regions(pdev); 3364 err: 3365 return ret; 3366 } 3367 3368 static void rtw89_pci_clear_mapping(struct rtw89_dev *rtwdev, 3369 struct pci_dev *pdev) 3370 { 3371 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3372 3373 if (rtwpci->mmap) { 3374 pci_iounmap(pdev, rtwpci->mmap); 3375 pci_release_regions(pdev); 3376 } 3377 } 3378 3379 static void rtw89_pci_free_tx_wd_ring(struct rtw89_dev *rtwdev, 3380 struct pci_dev *pdev, 3381 struct rtw89_pci_tx_ring *tx_ring) 3382 { 3383 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 3384 u8 *head = wd_ring->head; 3385 dma_addr_t dma = wd_ring->dma; 3386 u32 page_size = wd_ring->page_size; 3387 u32 page_num = wd_ring->page_num; 3388 u32 ring_sz = page_size * page_num; 3389 3390 dma_free_coherent(&pdev->dev, ring_sz, head, dma); 3391 wd_ring->head = NULL; 3392 } 3393 3394 static void rtw89_pci_free_tx_ring(struct rtw89_dev *rtwdev, 3395 struct pci_dev *pdev, 3396 struct rtw89_pci_tx_ring *tx_ring) 3397 { 3398 tx_ring->bd_ring.head = NULL; 3399 } 3400 3401 static void rtw89_pci_free_tx_rings(struct rtw89_dev *rtwdev, 3402 struct pci_dev *pdev) 3403 { 3404 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3405 struct rtw89_pci_dma_pool *bd_pool = &rtwpci->tx.bd_pool; 3406 const struct rtw89_pci_info *info = rtwdev->pci_info; 3407 struct rtw89_pci_tx_ring *tx_ring; 3408 int i; 3409 3410 for (i = 0; i < RTW89_TXCH_NUM; i++) { 3411 if (info->tx_dma_ch_mask & BIT(i)) 3412 continue; 3413 tx_ring = &rtwpci->tx.rings[i]; 3414 rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring); 3415 rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring); 3416 } 3417 3418 dma_free_coherent(&pdev->dev, bd_pool->size, bd_pool->head, bd_pool->dma); 3419 } 3420 3421 static void rtw89_pci_free_rx_ring(struct rtw89_dev *rtwdev, 3422 struct pci_dev *pdev, 3423 struct rtw89_pci_rx_ring *rx_ring) 3424 { 3425 struct rtw89_pci_rx_info *rx_info; 3426 struct sk_buff *skb; 3427 dma_addr_t dma; 3428 u32 buf_sz; 3429 int i; 3430 3431 buf_sz = rx_ring->buf_sz; 3432 for (i = 0; i < rx_ring->bd_ring.len; i++) { 3433 skb = rx_ring->buf[i]; 3434 if (!skb) 3435 continue; 3436 3437 rx_info = RTW89_PCI_RX_SKB_CB(skb); 3438 dma = rx_info->dma; 3439 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE); 3440 dev_kfree_skb(skb); 3441 rx_ring->buf[i] = NULL; 3442 } 3443 3444 rx_ring->bd_ring.head = NULL; 3445 } 3446 3447 static void rtw89_pci_free_rx_rings(struct rtw89_dev *rtwdev, 3448 struct pci_dev *pdev) 3449 { 3450 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3451 struct rtw89_pci_dma_pool *bd_pool = &rtwpci->rx.bd_pool; 3452 struct rtw89_pci_rx_ring *rx_ring; 3453 int i; 3454 3455 for (i = 0; i < RTW89_RXCH_NUM; i++) { 3456 rx_ring = &rtwpci->rx.rings[i]; 3457 rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring); 3458 } 3459 3460 dma_free_coherent(&pdev->dev, bd_pool->size, bd_pool->head, bd_pool->dma); 3461 } 3462 3463 static void rtw89_pci_free_trx_rings(struct rtw89_dev *rtwdev, 3464 struct pci_dev *pdev) 3465 { 3466 rtw89_pci_free_rx_rings(rtwdev, pdev); 3467 rtw89_pci_free_tx_rings(rtwdev, pdev); 3468 } 3469 3470 static int rtw89_pci_init_rx_bd(struct rtw89_dev *rtwdev, struct pci_dev *pdev, 3471 struct rtw89_pci_rx_ring *rx_ring, 3472 struct sk_buff *skb, int buf_sz, u32 idx) 3473 { 3474 struct rtw89_pci_rx_info *rx_info; 3475 struct rtw89_pci_rx_bd_32 *rx_bd; 3476 dma_addr_t dma; 3477 3478 if (!skb) 3479 return -EINVAL; 3480 3481 dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE); 3482 if (dma_mapping_error(&pdev->dev, dma)) 3483 return -EBUSY; 3484 3485 rx_info = RTW89_PCI_RX_SKB_CB(skb); 3486 rx_bd = RTW89_PCI_RX_BD(rx_ring, idx); 3487 3488 memset(rx_bd, 0, sizeof(*rx_bd)); 3489 rx_bd->buf_size = cpu_to_le16(buf_sz); 3490 rx_bd->dma = cpu_to_le32(dma); 3491 rx_bd->opt = le16_encode_bits(upper_32_bits(dma), RTW89_PCI_RXBD_OPT_DMA_HI); 3492 rx_info->dma = dma; 3493 3494 return 0; 3495 } 3496 3497 static int rtw89_pci_alloc_tx_wd_ring(struct rtw89_dev *rtwdev, 3498 struct pci_dev *pdev, 3499 struct rtw89_pci_tx_ring *tx_ring, 3500 enum rtw89_tx_channel txch) 3501 { 3502 struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 3503 struct rtw89_pci_tx_wd *txwd; 3504 dma_addr_t dma; 3505 dma_addr_t cur_paddr; 3506 u8 *head; 3507 u8 *cur_vaddr; 3508 u32 page_size = RTW89_PCI_TXWD_PAGE_SIZE; 3509 u32 page_num = RTW89_PCI_TXWD_NUM_MAX; 3510 u32 ring_sz = page_size * page_num; 3511 u32 page_offset; 3512 int i; 3513 3514 /* FWCMD queue doesn't use txwd as pages */ 3515 if (txch == RTW89_TXCH_CH12) 3516 return 0; 3517 3518 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 3519 if (!head) 3520 return -ENOMEM; 3521 3522 INIT_LIST_HEAD(&wd_ring->free_pages); 3523 wd_ring->head = head; 3524 wd_ring->dma = dma; 3525 wd_ring->page_size = page_size; 3526 wd_ring->page_num = page_num; 3527 3528 page_offset = 0; 3529 for (i = 0; i < page_num; i++) { 3530 txwd = &wd_ring->pages[i]; 3531 cur_paddr = dma + page_offset; 3532 cur_vaddr = head + page_offset; 3533 3534 skb_queue_head_init(&txwd->queue); 3535 INIT_LIST_HEAD(&txwd->list); 3536 txwd->paddr = cur_paddr; 3537 txwd->vaddr = cur_vaddr; 3538 txwd->len = page_size; 3539 txwd->seq = i; 3540 rtw89_pci_enqueue_txwd(tx_ring, txwd); 3541 3542 page_offset += page_size; 3543 } 3544 3545 return 0; 3546 } 3547 3548 static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev, 3549 struct pci_dev *pdev, 3550 struct rtw89_pci_tx_ring *tx_ring, 3551 u32 desc_size, u32 len, 3552 enum rtw89_tx_channel txch, 3553 void *head, dma_addr_t dma) 3554 { 3555 const struct rtw89_pci_ch_dma_addr *txch_addr; 3556 int ret; 3557 3558 ret = rtw89_pci_alloc_tx_wd_ring(rtwdev, pdev, tx_ring, txch); 3559 if (ret) { 3560 rtw89_err(rtwdev, "failed to alloc txwd ring of txch %d\n", txch); 3561 goto err; 3562 } 3563 3564 ret = rtw89_pci_get_txch_addrs(rtwdev, txch, &txch_addr); 3565 if (ret) { 3566 rtw89_err(rtwdev, "failed to get address of txch %d", txch); 3567 goto err_free_wd_ring; 3568 } 3569 3570 INIT_LIST_HEAD(&tx_ring->busy_pages); 3571 tx_ring->bd_ring.head = head; 3572 tx_ring->bd_ring.dma = dma; 3573 tx_ring->bd_ring.len = len; 3574 tx_ring->bd_ring.desc_size = desc_size; 3575 tx_ring->bd_ring.addr = *txch_addr; 3576 tx_ring->bd_ring.wp = 0; 3577 tx_ring->bd_ring.rp = 0; 3578 tx_ring->txch = txch; 3579 3580 return 0; 3581 3582 err_free_wd_ring: 3583 rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring); 3584 err: 3585 return ret; 3586 } 3587 3588 static int rtw89_pci_alloc_tx_rings(struct rtw89_dev *rtwdev, 3589 struct pci_dev *pdev) 3590 { 3591 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3592 struct rtw89_pci_dma_pool *bd_pool = &rtwpci->tx.bd_pool; 3593 const struct rtw89_pci_info *info = rtwdev->pci_info; 3594 struct rtw89_pci_tx_ring *tx_ring; 3595 u32 i, tx_allocated; 3596 dma_addr_t dma; 3597 u32 desc_size; 3598 u32 ring_sz; 3599 u32 pool_sz; 3600 u32 ch_num; 3601 void *head; 3602 u32 len; 3603 int ret; 3604 3605 BUILD_BUG_ON(RTW89_PCI_TXBD_NUM_MAX % 16); 3606 3607 desc_size = sizeof(struct rtw89_pci_tx_bd_32); 3608 len = RTW89_PCI_TXBD_NUM_MAX; 3609 ch_num = RTW89_TXCH_NUM - hweight32(info->tx_dma_ch_mask); 3610 ring_sz = desc_size * len; 3611 pool_sz = ring_sz * ch_num; 3612 3613 head = dma_alloc_coherent(&pdev->dev, pool_sz, &dma, GFP_KERNEL); 3614 if (!head) 3615 return -ENOMEM; 3616 3617 bd_pool->head = head; 3618 bd_pool->dma = dma; 3619 bd_pool->size = pool_sz; 3620 3621 for (i = 0; i < RTW89_TXCH_NUM; i++) { 3622 if (info->tx_dma_ch_mask & BIT(i)) 3623 continue; 3624 tx_ring = &rtwpci->tx.rings[i]; 3625 ret = rtw89_pci_alloc_tx_ring(rtwdev, pdev, tx_ring, 3626 desc_size, len, i, head, dma); 3627 if (ret) { 3628 rtw89_err(rtwdev, "failed to alloc tx ring %d\n", i); 3629 goto err_free; 3630 } 3631 3632 head += ring_sz; 3633 dma += ring_sz; 3634 } 3635 3636 return 0; 3637 3638 err_free: 3639 tx_allocated = i; 3640 for (i = 0; i < tx_allocated; i++) { 3641 tx_ring = &rtwpci->tx.rings[i]; 3642 rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring); 3643 } 3644 3645 dma_free_coherent(&pdev->dev, bd_pool->size, bd_pool->head, bd_pool->dma); 3646 3647 return ret; 3648 } 3649 3650 static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev, 3651 struct pci_dev *pdev, 3652 struct rtw89_pci_rx_ring *rx_ring, 3653 u32 desc_size, u32 len, u32 rxch, 3654 void *head, dma_addr_t dma) 3655 { 3656 const struct rtw89_pci_info *info = rtwdev->pci_info; 3657 const struct rtw89_pci_ch_dma_addr *rxch_addr; 3658 struct sk_buff *skb; 3659 int buf_sz = RTW89_PCI_RX_BUF_SIZE; 3660 int i, allocated; 3661 int ret; 3662 3663 ret = rtw89_pci_get_rxch_addrs(rtwdev, rxch, &rxch_addr); 3664 if (ret) { 3665 rtw89_err(rtwdev, "failed to get address of rxch %d", rxch); 3666 return ret; 3667 } 3668 3669 rx_ring->bd_ring.head = head; 3670 rx_ring->bd_ring.dma = dma; 3671 rx_ring->bd_ring.len = len; 3672 rx_ring->bd_ring.desc_size = desc_size; 3673 rx_ring->bd_ring.addr = *rxch_addr; 3674 if (info->rx_ring_eq_is_full) 3675 rx_ring->bd_ring.wp = len - 1; 3676 else 3677 rx_ring->bd_ring.wp = 0; 3678 rx_ring->bd_ring.rp = 0; 3679 rx_ring->buf_sz = buf_sz; 3680 rx_ring->diliver_skb = NULL; 3681 rx_ring->diliver_desc.ready = false; 3682 rx_ring->target_rx_tag = 0; 3683 3684 for (i = 0; i < len; i++) { 3685 skb = dev_alloc_skb(buf_sz); 3686 if (!skb) { 3687 ret = -ENOMEM; 3688 goto err_free; 3689 } 3690 3691 memset(skb->data, 0, buf_sz); 3692 rx_ring->buf[i] = skb; 3693 ret = rtw89_pci_init_rx_bd(rtwdev, pdev, rx_ring, skb, 3694 buf_sz, i); 3695 if (ret) { 3696 rtw89_err(rtwdev, "failed to init rx buf %d\n", i); 3697 dev_kfree_skb_any(skb); 3698 rx_ring->buf[i] = NULL; 3699 goto err_free; 3700 } 3701 } 3702 3703 return 0; 3704 3705 err_free: 3706 allocated = i; 3707 for (i = 0; i < allocated; i++) { 3708 skb = rx_ring->buf[i]; 3709 if (!skb) 3710 continue; 3711 dma = *((dma_addr_t *)skb->cb); 3712 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE); 3713 dev_kfree_skb(skb); 3714 rx_ring->buf[i] = NULL; 3715 } 3716 3717 rx_ring->bd_ring.head = NULL; 3718 3719 return ret; 3720 } 3721 3722 static int rtw89_pci_alloc_rx_rings(struct rtw89_dev *rtwdev, 3723 struct pci_dev *pdev) 3724 { 3725 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3726 struct rtw89_pci_dma_pool *bd_pool = &rtwpci->rx.bd_pool; 3727 struct rtw89_pci_rx_ring *rx_ring; 3728 int i, rx_allocated; 3729 dma_addr_t dma; 3730 u32 desc_size; 3731 u32 ring_sz; 3732 u32 pool_sz; 3733 void *head; 3734 u32 len; 3735 int ret; 3736 3737 desc_size = sizeof(struct rtw89_pci_rx_bd_32); 3738 len = RTW89_PCI_RXBD_NUM_MAX; 3739 ring_sz = desc_size * len; 3740 pool_sz = ring_sz * RTW89_RXCH_NUM; 3741 3742 head = dma_alloc_coherent(&pdev->dev, pool_sz, &dma, GFP_KERNEL); 3743 if (!head) 3744 return -ENOMEM; 3745 3746 bd_pool->head = head; 3747 bd_pool->dma = dma; 3748 bd_pool->size = pool_sz; 3749 3750 for (i = 0; i < RTW89_RXCH_NUM; i++) { 3751 rx_ring = &rtwpci->rx.rings[i]; 3752 3753 ret = rtw89_pci_alloc_rx_ring(rtwdev, pdev, rx_ring, 3754 desc_size, len, i, 3755 head, dma); 3756 if (ret) { 3757 rtw89_err(rtwdev, "failed to alloc rx ring %d\n", i); 3758 goto err_free; 3759 } 3760 3761 head += ring_sz; 3762 dma += ring_sz; 3763 } 3764 3765 return 0; 3766 3767 err_free: 3768 rx_allocated = i; 3769 for (i = 0; i < rx_allocated; i++) { 3770 rx_ring = &rtwpci->rx.rings[i]; 3771 rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring); 3772 } 3773 3774 dma_free_coherent(&pdev->dev, bd_pool->size, bd_pool->head, bd_pool->dma); 3775 3776 return ret; 3777 } 3778 3779 static int rtw89_pci_alloc_trx_rings(struct rtw89_dev *rtwdev, 3780 struct pci_dev *pdev) 3781 { 3782 int ret; 3783 3784 ret = rtw89_pci_alloc_tx_rings(rtwdev, pdev); 3785 if (ret) { 3786 rtw89_err(rtwdev, "failed to alloc dma tx rings\n"); 3787 goto err; 3788 } 3789 3790 ret = rtw89_pci_alloc_rx_rings(rtwdev, pdev); 3791 if (ret) { 3792 rtw89_err(rtwdev, "failed to alloc dma rx rings\n"); 3793 goto err_free_tx_rings; 3794 } 3795 3796 return 0; 3797 3798 err_free_tx_rings: 3799 rtw89_pci_free_tx_rings(rtwdev, pdev); 3800 err: 3801 return ret; 3802 } 3803 3804 static void rtw89_pci_h2c_init(struct rtw89_dev *rtwdev, 3805 struct rtw89_pci *rtwpci) 3806 { 3807 skb_queue_head_init(&rtwpci->h2c_queue); 3808 skb_queue_head_init(&rtwpci->h2c_release_queue); 3809 } 3810 3811 static int rtw89_pci_setup_resource(struct rtw89_dev *rtwdev, 3812 struct pci_dev *pdev) 3813 { 3814 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3815 int ret; 3816 3817 ret = rtw89_pci_setup_mapping(rtwdev, pdev); 3818 if (ret) { 3819 rtw89_err(rtwdev, "failed to setup pci mapping\n"); 3820 goto err; 3821 } 3822 3823 ret = rtw89_pci_alloc_trx_rings(rtwdev, pdev); 3824 if (ret) { 3825 rtw89_err(rtwdev, "failed to alloc pci trx rings\n"); 3826 goto err_pci_unmap; 3827 } 3828 3829 rtw89_pci_h2c_init(rtwdev, rtwpci); 3830 3831 spin_lock_init(&rtwpci->irq_lock); 3832 spin_lock_init(&rtwpci->trx_lock); 3833 3834 return 0; 3835 3836 err_pci_unmap: 3837 rtw89_pci_clear_mapping(rtwdev, pdev); 3838 err: 3839 return ret; 3840 } 3841 3842 static void rtw89_pci_clear_resource(struct rtw89_dev *rtwdev, 3843 struct pci_dev *pdev) 3844 { 3845 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3846 3847 rtw89_pci_free_trx_rings(rtwdev, pdev); 3848 rtw89_pci_clear_mapping(rtwdev, pdev); 3849 rtw89_pci_release_fwcmd(rtwdev, rtwpci, 3850 skb_queue_len(&rtwpci->h2c_queue), true); 3851 } 3852 3853 void rtw89_pci_config_intr_mask(struct rtw89_dev *rtwdev) 3854 { 3855 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3856 const struct rtw89_chip_info *chip = rtwdev->chip; 3857 u32 hs0isr_ind_int_en = B_AX_HS0ISR_IND_INT_EN; 3858 3859 if (chip->chip_id == RTL8851B) 3860 hs0isr_ind_int_en = B_AX_HS0ISR_IND_INT_EN_WKARND; 3861 3862 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | 0; 3863 3864 if (rtwpci->under_recovery) { 3865 rtwpci->intrs[0] = hs0isr_ind_int_en; 3866 rtwpci->intrs[1] = 0; 3867 } else { 3868 rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN | 3869 B_AX_RXDMA_INT_EN | 3870 B_AX_RXP1DMA_INT_EN | 3871 B_AX_RPQDMA_INT_EN | 3872 B_AX_RXDMA_STUCK_INT_EN | 3873 B_AX_RDU_INT_EN | 3874 B_AX_RPQBD_FULL_INT_EN | 3875 hs0isr_ind_int_en; 3876 3877 rtwpci->intrs[1] = B_AX_HC10ISR_IND_INT_EN; 3878 } 3879 } 3880 EXPORT_SYMBOL(rtw89_pci_config_intr_mask); 3881 3882 static void rtw89_pci_recovery_intr_mask_v1(struct rtw89_dev *rtwdev) 3883 { 3884 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3885 3886 rtwpci->ind_intrs = B_AX_HS0ISR_IND_INT_EN; 3887 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; 3888 rtwpci->intrs[0] = 0; 3889 rtwpci->intrs[1] = 0; 3890 } 3891 3892 static void rtw89_pci_default_intr_mask_v1(struct rtw89_dev *rtwdev) 3893 { 3894 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3895 3896 rtwpci->ind_intrs = B_AX_HCI_AXIDMA_INT_EN | 3897 B_AX_HS1ISR_IND_INT_EN | 3898 B_AX_HS0ISR_IND_INT_EN; 3899 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; 3900 rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN | 3901 B_AX_RXDMA_INT_EN | 3902 B_AX_RXP1DMA_INT_EN | 3903 B_AX_RPQDMA_INT_EN | 3904 B_AX_RXDMA_STUCK_INT_EN | 3905 B_AX_RDU_INT_EN | 3906 B_AX_RPQBD_FULL_INT_EN; 3907 rtwpci->intrs[1] = B_AX_GPIO18_INT_EN; 3908 } 3909 3910 static void rtw89_pci_low_power_intr_mask_v1(struct rtw89_dev *rtwdev) 3911 { 3912 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3913 3914 rtwpci->ind_intrs = B_AX_HS1ISR_IND_INT_EN | 3915 B_AX_HS0ISR_IND_INT_EN; 3916 rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; 3917 rtwpci->intrs[0] = 0; 3918 rtwpci->intrs[1] = B_AX_GPIO18_INT_EN; 3919 } 3920 3921 void rtw89_pci_config_intr_mask_v1(struct rtw89_dev *rtwdev) 3922 { 3923 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3924 3925 if (rtwpci->under_recovery) 3926 rtw89_pci_recovery_intr_mask_v1(rtwdev); 3927 else if (rtwpci->low_power) 3928 rtw89_pci_low_power_intr_mask_v1(rtwdev); 3929 else 3930 rtw89_pci_default_intr_mask_v1(rtwdev); 3931 } 3932 EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v1); 3933 3934 static void rtw89_pci_recovery_intr_mask_v2(struct rtw89_dev *rtwdev) 3935 { 3936 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3937 3938 rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0; 3939 rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN; 3940 rtwpci->intrs[0] = 0; 3941 rtwpci->intrs[1] = 0; 3942 } 3943 3944 static void rtw89_pci_default_intr_mask_v2(struct rtw89_dev *rtwdev) 3945 { 3946 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3947 3948 rtwpci->ind_intrs = B_BE_HCI_AXIDMA_INT_EN0 | 3949 B_BE_HS0_IND_INT_EN0; 3950 rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN; 3951 rtwpci->intrs[0] = B_BE_RDU_CH1_INT_IMR_V1 | 3952 B_BE_RDU_CH0_INT_IMR_V1; 3953 rtwpci->intrs[1] = B_BE_PCIE_RX_RX0P2_IMR0_V1 | 3954 B_BE_PCIE_RX_RPQ0_IMR0_V1; 3955 } 3956 3957 static void rtw89_pci_low_power_intr_mask_v2(struct rtw89_dev *rtwdev) 3958 { 3959 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3960 3961 rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0 | 3962 B_BE_HS1_IND_INT_EN0; 3963 rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN; 3964 rtwpci->intrs[0] = 0; 3965 rtwpci->intrs[1] = B_BE_PCIE_RX_RX0P2_IMR0_V1 | 3966 B_BE_PCIE_RX_RPQ0_IMR0_V1; 3967 } 3968 3969 void rtw89_pci_config_intr_mask_v2(struct rtw89_dev *rtwdev) 3970 { 3971 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3972 3973 if (rtwpci->under_recovery) 3974 rtw89_pci_recovery_intr_mask_v2(rtwdev); 3975 else if (rtwpci->low_power) 3976 rtw89_pci_low_power_intr_mask_v2(rtwdev); 3977 else 3978 rtw89_pci_default_intr_mask_v2(rtwdev); 3979 } 3980 EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v2); 3981 3982 static void rtw89_pci_recovery_intr_mask_v3(struct rtw89_dev *rtwdev) 3983 { 3984 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3985 3986 rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0; 3987 rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN; 3988 rtwpci->intrs[0] = 0; 3989 rtwpci->intrs[1] = 0; 3990 } 3991 3992 static void rtw89_pci_default_intr_mask_v3(struct rtw89_dev *rtwdev) 3993 { 3994 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3995 3996 rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0; 3997 rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN; 3998 rtwpci->intrs[0] = 0; 3999 rtwpci->intrs[1] = B_BE_PCIE_RDU_CH1_IMR | 4000 B_BE_PCIE_RDU_CH0_IMR | 4001 B_BE_PCIE_RX_RX0P2_IMR0_V1 | 4002 B_BE_PCIE_RX_RPQ0_IMR0_V1; 4003 } 4004 4005 void rtw89_pci_config_intr_mask_v3(struct rtw89_dev *rtwdev) 4006 { 4007 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 4008 4009 if (rtwpci->under_recovery) 4010 rtw89_pci_recovery_intr_mask_v3(rtwdev); 4011 else 4012 rtw89_pci_default_intr_mask_v3(rtwdev); 4013 } 4014 EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v3); 4015 4016 static int rtw89_pci_request_irq(struct rtw89_dev *rtwdev, 4017 struct pci_dev *pdev) 4018 { 4019 unsigned long flags = 0; 4020 int ret; 4021 4022 flags |= PCI_IRQ_INTX | PCI_IRQ_MSI; 4023 ret = pci_alloc_irq_vectors(pdev, 1, 1, flags); 4024 if (ret < 0) { 4025 rtw89_err(rtwdev, "failed to alloc irq vectors, ret %d\n", ret); 4026 goto err; 4027 } 4028 4029 ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq, 4030 rtw89_pci_interrupt_handler, 4031 rtw89_pci_interrupt_threadfn, 4032 IRQF_SHARED, KBUILD_MODNAME, rtwdev); 4033 if (ret) { 4034 rtw89_err(rtwdev, "failed to request threaded irq\n"); 4035 goto err_free_vector; 4036 } 4037 4038 rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RESET); 4039 4040 return 0; 4041 4042 err_free_vector: 4043 pci_free_irq_vectors(pdev); 4044 err: 4045 return ret; 4046 } 4047 4048 static void rtw89_pci_free_irq(struct rtw89_dev *rtwdev, 4049 struct pci_dev *pdev) 4050 { 4051 devm_free_irq(rtwdev->dev, pdev->irq, rtwdev); 4052 pci_free_irq_vectors(pdev); 4053 } 4054 4055 static u16 gray_code_to_bin(u16 gray_code) 4056 { 4057 u16 binary = gray_code; 4058 4059 while (gray_code) { 4060 gray_code >>= 1; 4061 binary ^= gray_code; 4062 } 4063 4064 return binary; 4065 } 4066 4067 static int rtw89_pci_filter_out(struct rtw89_dev *rtwdev) 4068 { 4069 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 4070 struct pci_dev *pdev = rtwpci->pdev; 4071 u16 val16, filter_out_val; 4072 u32 val, phy_offset; 4073 int ret; 4074 4075 if (rtwdev->chip->chip_id != RTL8852C) 4076 return 0; 4077 4078 val = rtw89_read32_mask(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK); 4079 if (val == B_AX_ASPM_CTRL_L1) 4080 return 0; 4081 4082 ret = pci_read_config_dword(pdev, RTW89_PCIE_L1_STS_V1, &val); 4083 if (ret) 4084 return ret; 4085 4086 val = FIELD_GET(RTW89_BCFG_LINK_SPEED_MASK, val); 4087 if (val == RTW89_PCIE_GEN1_SPEED) { 4088 phy_offset = R_RAC_DIRECT_OFFSET_G1; 4089 } else if (val == RTW89_PCIE_GEN2_SPEED) { 4090 phy_offset = R_RAC_DIRECT_OFFSET_G2; 4091 val16 = rtw89_read16(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT); 4092 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT, 4093 val16 | B_PCIE_BIT_PINOUT_DIS); 4094 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT, 4095 val16 & ~B_PCIE_BIT_RD_SEL); 4096 4097 val16 = rtw89_read16_mask(rtwdev, 4098 phy_offset + RAC_ANA1F * RAC_MULT, 4099 FILTER_OUT_EQ_MASK); 4100 val16 = gray_code_to_bin(val16); 4101 filter_out_val = rtw89_read16(rtwdev, phy_offset + RAC_ANA24 * 4102 RAC_MULT); 4103 filter_out_val &= ~REG_FILTER_OUT_MASK; 4104 filter_out_val |= FIELD_PREP(REG_FILTER_OUT_MASK, val16); 4105 4106 rtw89_write16(rtwdev, phy_offset + RAC_ANA24 * RAC_MULT, 4107 filter_out_val); 4108 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0A * RAC_MULT, 4109 B_BAC_EQ_SEL); 4110 rtw89_write16_set(rtwdev, 4111 R_RAC_DIRECT_OFFSET_G1 + RAC_ANA0C * RAC_MULT, 4112 B_PCIE_BIT_PSAVE); 4113 } else { 4114 return -EOPNOTSUPP; 4115 } 4116 rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0C * RAC_MULT, 4117 B_PCIE_BIT_PSAVE); 4118 4119 return 0; 4120 } 4121 4122 static void rtw89_pci_clkreq_set(struct rtw89_dev *rtwdev, bool enable) 4123 { 4124 const struct rtw89_pci_info *info = rtwdev->pci_info; 4125 const struct rtw89_pci_gen_def *gen_def = info->gen_def; 4126 4127 if (rtw89_pci_disable_clkreq) 4128 return; 4129 4130 gen_def->clkreq_set(rtwdev, enable); 4131 } 4132 4133 static void rtw89_pci_clkreq_set_ax(struct rtw89_dev *rtwdev, bool enable) 4134 { 4135 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 4136 int ret; 4137 4138 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL, 4139 PCIE_CLKDLY_HW_30US); 4140 if (ret) 4141 rtw89_err(rtwdev, "failed to set CLKREQ Delay\n"); 4142 4143 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 4144 if (enable) 4145 ret = rtw89_pci_config_byte_set(rtwdev, 4146 RTW89_PCIE_L1_CTRL, 4147 RTW89_PCIE_BIT_CLK); 4148 else 4149 ret = rtw89_pci_config_byte_clr(rtwdev, 4150 RTW89_PCIE_L1_CTRL, 4151 RTW89_PCIE_BIT_CLK); 4152 if (ret) 4153 rtw89_err(rtwdev, "failed to %s CLKREQ_L1, ret=%d", 4154 enable ? "set" : "unset", ret); 4155 } else if (chip_id == RTL8852C) { 4156 rtw89_write32_set(rtwdev, R_AX_PCIE_LAT_CTRL, 4157 B_AX_CLK_REQ_SEL_OPT | B_AX_CLK_REQ_SEL); 4158 if (enable) 4159 rtw89_write32_set(rtwdev, R_AX_L1_CLK_CTRL, 4160 B_AX_CLK_REQ_N); 4161 else 4162 rtw89_write32_clr(rtwdev, R_AX_L1_CLK_CTRL, 4163 B_AX_CLK_REQ_N); 4164 } 4165 } 4166 4167 static void rtw89_pci_aspm_set(struct rtw89_dev *rtwdev, bool enable) 4168 { 4169 const struct rtw89_pci_info *info = rtwdev->pci_info; 4170 const struct rtw89_pci_gen_def *gen_def = info->gen_def; 4171 4172 if (rtw89_pci_disable_aspm_l1) 4173 return; 4174 4175 gen_def->aspm_set(rtwdev, enable); 4176 } 4177 4178 static void rtw89_pci_aspm_set_ax(struct rtw89_dev *rtwdev, bool enable) 4179 { 4180 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 4181 u8 value = 0; 4182 int ret; 4183 4184 ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, &value); 4185 if (ret) 4186 rtw89_warn(rtwdev, "failed to read ASPM Delay\n"); 4187 4188 u8p_replace_bits(&value, PCIE_L1DLY_16US, RTW89_L1DLY_MASK); 4189 u8p_replace_bits(&value, PCIE_L0SDLY_4US, RTW89_L0DLY_MASK); 4190 4191 ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, value); 4192 if (ret) 4193 rtw89_warn(rtwdev, "failed to read ASPM Delay\n"); 4194 4195 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 4196 if (enable) 4197 ret = rtw89_pci_config_byte_set(rtwdev, 4198 RTW89_PCIE_L1_CTRL, 4199 RTW89_PCIE_BIT_L1); 4200 else 4201 ret = rtw89_pci_config_byte_clr(rtwdev, 4202 RTW89_PCIE_L1_CTRL, 4203 RTW89_PCIE_BIT_L1); 4204 } else if (chip_id == RTL8852C) { 4205 if (enable) 4206 rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1, 4207 B_AX_ASPM_CTRL_L1); 4208 else 4209 rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1, 4210 B_AX_ASPM_CTRL_L1); 4211 } 4212 if (ret) 4213 rtw89_err(rtwdev, "failed to %s ASPM L1, ret=%d", 4214 enable ? "set" : "unset", ret); 4215 } 4216 4217 static void rtw89_pci_recalc_int_mit(struct rtw89_dev *rtwdev) 4218 { 4219 enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen; 4220 const struct rtw89_pci_info *info = rtwdev->pci_info; 4221 struct rtw89_traffic_stats *stats = &rtwdev->stats; 4222 enum rtw89_tfc_lv tx_tfc_lv = stats->tx_tfc_lv; 4223 enum rtw89_tfc_lv rx_tfc_lv = stats->rx_tfc_lv; 4224 u32 val = 0; 4225 4226 if (rtwdev->scanning || 4227 (tx_tfc_lv < RTW89_TFC_HIGH && rx_tfc_lv < RTW89_TFC_HIGH)) 4228 goto out; 4229 4230 if (chip_gen == RTW89_CHIP_BE) 4231 val = B_BE_PCIE_MIT_RX0P2_EN | B_BE_PCIE_MIT_RX0P1_EN; 4232 else 4233 val = B_AX_RXMIT_RXP2_SEL | B_AX_RXMIT_RXP1_SEL | 4234 FIELD_PREP(B_AX_RXCOUNTER_MATCH_MASK, RTW89_PCI_RXBD_NUM_MAX / 2) | 4235 FIELD_PREP(B_AX_RXTIMER_UNIT_MASK, AX_RXTIMER_UNIT_64US) | 4236 FIELD_PREP(B_AX_RXTIMER_MATCH_MASK, 2048 / 64); 4237 4238 out: 4239 rtw89_write32(rtwdev, info->mit_addr, val); 4240 } 4241 4242 static void rtw89_pci_link_cfg(struct rtw89_dev *rtwdev) 4243 { 4244 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 4245 struct pci_dev *pdev = rtwpci->pdev; 4246 u16 link_ctrl; 4247 int ret; 4248 4249 /* Though there is standard PCIE configuration space to set the 4250 * link control register, but by Realtek's design, driver should 4251 * check if host supports CLKREQ/ASPM to enable the HW module. 4252 * 4253 * These functions are implemented by two HW modules associated, 4254 * one is responsible to access PCIE configuration space to 4255 * follow the host settings, and another is in charge of doing 4256 * CLKREQ/ASPM mechanisms, it is default disabled. Because sometimes 4257 * the host does not support it, and due to some reasons or wrong 4258 * settings (ex. CLKREQ# not Bi-Direction), it could lead to device 4259 * loss if HW misbehaves on the link. 4260 * 4261 * Hence it's designed that driver should first check the PCIE 4262 * configuration space is sync'ed and enabled, then driver can turn 4263 * on the other module that is actually working on the mechanism. 4264 */ 4265 ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl); 4266 if (ret) { 4267 rtw89_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret); 4268 return; 4269 } 4270 4271 if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN) 4272 rtw89_pci_clkreq_set(rtwdev, true); 4273 4274 if (link_ctrl & PCI_EXP_LNKCTL_ASPM_L1) 4275 rtw89_pci_aspm_set(rtwdev, true); 4276 } 4277 4278 static void rtw89_pci_l1ss_set(struct rtw89_dev *rtwdev, bool enable) 4279 { 4280 const struct rtw89_pci_info *info = rtwdev->pci_info; 4281 const struct rtw89_pci_gen_def *gen_def = info->gen_def; 4282 4283 if (rtw89_pci_disable_l1ss) 4284 return; 4285 4286 gen_def->l1ss_set(rtwdev, enable); 4287 } 4288 4289 static void rtw89_pci_l1ss_set_ax(struct rtw89_dev *rtwdev, bool enable) 4290 { 4291 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 4292 int ret; 4293 4294 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 4295 if (enable) 4296 ret = rtw89_pci_config_byte_set(rtwdev, 4297 RTW89_PCIE_TIMER_CTRL, 4298 RTW89_PCIE_BIT_L1SUB); 4299 else 4300 ret = rtw89_pci_config_byte_clr(rtwdev, 4301 RTW89_PCIE_TIMER_CTRL, 4302 RTW89_PCIE_BIT_L1SUB); 4303 if (ret) 4304 rtw89_err(rtwdev, "failed to %s L1SS, ret=%d", 4305 enable ? "set" : "unset", ret); 4306 } else if (chip_id == RTL8852C) { 4307 ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_L1SS_STS_V1, 4308 RTW89_PCIE_BIT_ASPM_L11 | 4309 RTW89_PCIE_BIT_PCI_L11); 4310 if (ret) 4311 rtw89_warn(rtwdev, "failed to unset ASPM L1.1, ret=%d", ret); 4312 if (enable) 4313 rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1, 4314 B_AX_L1SUB_DISABLE); 4315 else 4316 rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1, 4317 B_AX_L1SUB_DISABLE); 4318 } 4319 } 4320 4321 static void rtw89_pci_l1ss_cfg(struct rtw89_dev *rtwdev) 4322 { 4323 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 4324 struct pci_dev *pdev = rtwpci->pdev; 4325 u32 l1ss_cap_ptr, l1ss_ctrl; 4326 4327 if (rtw89_pci_disable_l1ss) 4328 return; 4329 4330 l1ss_cap_ptr = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS); 4331 if (!l1ss_cap_ptr) 4332 return; 4333 4334 pci_read_config_dword(pdev, l1ss_cap_ptr + PCI_L1SS_CTL1, &l1ss_ctrl); 4335 4336 if (l1ss_ctrl & PCI_L1SS_CTL1_L1SS_MASK) 4337 rtw89_pci_l1ss_set(rtwdev, true); 4338 } 4339 4340 static void rtw89_pci_cpl_timeout_cfg(struct rtw89_dev *rtwdev) 4341 { 4342 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 4343 struct pci_dev *pdev = rtwpci->pdev; 4344 4345 pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2, 4346 PCI_EXP_DEVCTL2_COMP_TMOUT_DIS); 4347 } 4348 4349 static int rtw89_pci_poll_io_idle_ax(struct rtw89_dev *rtwdev) 4350 { 4351 int ret = 0; 4352 u32 sts; 4353 u32 busy = B_AX_PCIEIO_BUSY | B_AX_PCIEIO_TX_BUSY | B_AX_PCIEIO_RX_BUSY; 4354 4355 ret = read_poll_timeout_atomic(rtw89_read32, sts, (sts & busy) == 0x0, 4356 10, 1000, false, rtwdev, 4357 R_AX_PCIE_DMA_BUSY1); 4358 if (ret) { 4359 rtw89_err(rtwdev, "pci dmach busy1 0x%X\n", 4360 rtw89_read32(rtwdev, R_AX_PCIE_DMA_BUSY1)); 4361 return -EINVAL; 4362 } 4363 return ret; 4364 } 4365 4366 static int rtw89_pci_lv1rst_stop_dma_ax(struct rtw89_dev *rtwdev) 4367 { 4368 u32 val; 4369 int ret; 4370 4371 if (rtwdev->chip->chip_id == RTL8852C) 4372 return 0; 4373 4374 rtw89_pci_ctrl_dma_all(rtwdev, false); 4375 ret = rtw89_pci_poll_io_idle_ax(rtwdev); 4376 if (ret) { 4377 val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG); 4378 rtw89_debug(rtwdev, RTW89_DBG_HCI, 4379 "[PCIe] poll_io_idle fail, before 0x%08x: 0x%08x\n", 4380 R_AX_DBG_ERR_FLAG, val); 4381 if (val & B_AX_TX_STUCK || val & B_AX_PCIE_TXBD_LEN0) 4382 rtw89_mac_ctrl_hci_dma_tx(rtwdev, false); 4383 if (val & B_AX_RX_STUCK) 4384 rtw89_mac_ctrl_hci_dma_rx(rtwdev, false); 4385 rtw89_mac_ctrl_hci_dma_trx(rtwdev, true); 4386 ret = rtw89_pci_poll_io_idle_ax(rtwdev); 4387 val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG); 4388 rtw89_debug(rtwdev, RTW89_DBG_HCI, 4389 "[PCIe] poll_io_idle fail, after 0x%08x: 0x%08x\n", 4390 R_AX_DBG_ERR_FLAG, val); 4391 } 4392 4393 return ret; 4394 } 4395 4396 static int rtw89_pci_lv1rst_start_dma_ax(struct rtw89_dev *rtwdev) 4397 { 4398 int ret; 4399 4400 if (rtwdev->chip->chip_id == RTL8852C) 4401 return 0; 4402 4403 rtw89_mac_ctrl_hci_dma_trx(rtwdev, false); 4404 rtw89_mac_ctrl_hci_dma_trx(rtwdev, true); 4405 rtw89_pci_clr_idx_all(rtwdev); 4406 4407 ret = rtw89_pci_rst_bdram_ax(rtwdev); 4408 if (ret) 4409 return ret; 4410 4411 rtw89_pci_ctrl_dma_all(rtwdev, true); 4412 return 0; 4413 } 4414 4415 static int rtw89_pci_ops_mac_lv1_recovery(struct rtw89_dev *rtwdev, 4416 enum rtw89_lv1_rcvy_step step) 4417 { 4418 const struct rtw89_pci_info *info = rtwdev->pci_info; 4419 const struct rtw89_pci_gen_def *gen_def = info->gen_def; 4420 int ret; 4421 4422 switch (step) { 4423 case RTW89_LV1_RCVY_STEP_1: 4424 ret = gen_def->lv1rst_stop_dma(rtwdev); 4425 if (ret) 4426 rtw89_err(rtwdev, "lv1 rcvy pci stop dma fail\n"); 4427 4428 break; 4429 4430 case RTW89_LV1_RCVY_STEP_2: 4431 ret = gen_def->lv1rst_start_dma(rtwdev); 4432 if (ret) 4433 rtw89_err(rtwdev, "lv1 rcvy pci start dma fail\n"); 4434 break; 4435 4436 default: 4437 return -EINVAL; 4438 } 4439 4440 return ret; 4441 } 4442 4443 static void rtw89_pci_ops_dump_err_status(struct rtw89_dev *rtwdev) 4444 { 4445 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) 4446 return; 4447 4448 if (rtwdev->chip->chip_id == RTL8852C) { 4449 rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n", 4450 rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG_V1)); 4451 rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n", 4452 rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG_V1)); 4453 } else { 4454 rtw89_info(rtwdev, "R_AX_RPQ_RXBD_IDX =0x%08x\n", 4455 rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX)); 4456 rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n", 4457 rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG)); 4458 rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n", 4459 rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG)); 4460 } 4461 } 4462 4463 static int rtw89_pci_napi_poll(struct napi_struct *napi, int budget) 4464 { 4465 struct rtw89_dev *rtwdev = container_of(napi, struct rtw89_dev, napi); 4466 struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 4467 const struct rtw89_pci_info *info = rtwdev->pci_info; 4468 const struct rtw89_pci_isr_def *isr_def = info->isr_def; 4469 unsigned long flags; 4470 int work_done; 4471 4472 rtwdev->napi_budget_countdown = budget; 4473 4474 rtw89_write32(rtwdev, isr_def->isr_clear_rpq.addr, isr_def->isr_clear_rpq.data); 4475 work_done = rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown); 4476 if (work_done == budget) 4477 return budget; 4478 4479 rtw89_write32(rtwdev, isr_def->isr_clear_rxq.addr, isr_def->isr_clear_rxq.data); 4480 work_done += rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown); 4481 if (work_done < budget && napi_complete_done(napi, work_done)) { 4482 spin_lock_irqsave(&rtwpci->irq_lock, flags); 4483 if (likely(rtwpci->running)) 4484 rtw89_chip_enable_intr(rtwdev, rtwpci); 4485 spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 4486 } 4487 4488 return work_done; 4489 } 4490 4491 static 4492 void rtw89_check_pci_ssid_quirks(struct rtw89_dev *rtwdev, 4493 struct pci_dev *pdev, 4494 const struct rtw89_pci_ssid_quirk *ssid_quirks) 4495 { 4496 int i; 4497 4498 if (!ssid_quirks) 4499 return; 4500 4501 for (i = 0; i < 200; i++, ssid_quirks++) { 4502 if (ssid_quirks->vendor == 0 && ssid_quirks->device == 0) 4503 break; 4504 4505 if (ssid_quirks->vendor != pdev->vendor || 4506 ssid_quirks->device != pdev->device || 4507 ssid_quirks->subsystem_vendor != pdev->subsystem_vendor || 4508 ssid_quirks->subsystem_device != pdev->subsystem_device) 4509 continue; 4510 4511 bitmap_or(rtwdev->quirks, rtwdev->quirks, &ssid_quirks->bitmap, 4512 NUM_OF_RTW89_QUIRKS); 4513 rtwdev->custid = ssid_quirks->custid; 4514 break; 4515 } 4516 4517 rtw89_debug(rtwdev, RTW89_DBG_HCI, "quirks=%*ph custid=%d\n", 4518 (int)sizeof(rtwdev->quirks), rtwdev->quirks, rtwdev->custid); 4519 } 4520 4521 static int __maybe_unused rtw89_pci_suspend(struct device *dev) 4522 { 4523 struct ieee80211_hw *hw = dev_get_drvdata(dev); 4524 struct rtw89_dev *rtwdev = hw->priv; 4525 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 4526 4527 rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); 4528 rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST); 4529 rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); 4530 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 4531 rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, 4532 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 4533 rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, 4534 B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG); 4535 } else { 4536 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, 4537 B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN); 4538 } 4539 4540 return 0; 4541 } 4542 4543 static void rtw89_pci_l2_hci_ldo(struct rtw89_dev *rtwdev) 4544 { 4545 if (rtwdev->chip->chip_id == RTL8852C) 4546 return; 4547 4548 /* Hardware need write the reg twice to ensure the setting work */ 4549 rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE, 4550 RTW89_PCIE_BIT_CFG_RST_MSTATE); 4551 rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE, 4552 RTW89_PCIE_BIT_CFG_RST_MSTATE); 4553 } 4554 4555 void rtw89_pci_basic_cfg(struct rtw89_dev *rtwdev, bool resume) 4556 { 4557 if (resume) 4558 rtw89_pci_cfg_dac(rtwdev, false); 4559 4560 rtw89_pci_disable_eq(rtwdev); 4561 rtw89_pci_filter_out(rtwdev); 4562 rtw89_pci_cpl_timeout_cfg(rtwdev); 4563 rtw89_pci_link_cfg(rtwdev); 4564 rtw89_pci_l1ss_cfg(rtwdev); 4565 } 4566 4567 static int __maybe_unused rtw89_pci_resume(struct device *dev) 4568 { 4569 struct ieee80211_hw *hw = dev_get_drvdata(dev); 4570 struct rtw89_dev *rtwdev = hw->priv; 4571 enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 4572 4573 rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); 4574 rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST); 4575 rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); 4576 if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { 4577 rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, 4578 B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 4579 rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, 4580 B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG); 4581 } else { 4582 rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1, 4583 B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN); 4584 rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, 4585 B_AX_SEL_REQ_ENTR_L1); 4586 } 4587 rtw89_pci_l2_hci_ldo(rtwdev); 4588 4589 rtw89_pci_basic_cfg(rtwdev, true); 4590 4591 return 0; 4592 } 4593 4594 SIMPLE_DEV_PM_OPS(rtw89_pm_ops, rtw89_pci_suspend, rtw89_pci_resume); 4595 EXPORT_SYMBOL(rtw89_pm_ops); 4596 4597 static pci_ers_result_t rtw89_pci_io_error_detected(struct pci_dev *pdev, 4598 pci_channel_state_t state) 4599 { 4600 struct net_device *netdev = pci_get_drvdata(pdev); 4601 4602 netif_device_detach(netdev); 4603 4604 return PCI_ERS_RESULT_NEED_RESET; 4605 } 4606 4607 static pci_ers_result_t rtw89_pci_io_slot_reset(struct pci_dev *pdev) 4608 { 4609 struct ieee80211_hw *hw = pci_get_drvdata(pdev); 4610 struct rtw89_dev *rtwdev = hw->priv; 4611 4612 rtw89_ser_notify(rtwdev, MAC_AX_ERR_ASSERTION); 4613 4614 return PCI_ERS_RESULT_RECOVERED; 4615 } 4616 4617 static void rtw89_pci_io_resume(struct pci_dev *pdev) 4618 { 4619 struct net_device *netdev = pci_get_drvdata(pdev); 4620 4621 /* ack any pending wake events, disable PME */ 4622 pci_enable_wake(pdev, PCI_D0, 0); 4623 4624 netif_device_attach(netdev); 4625 } 4626 4627 const struct pci_error_handlers rtw89_pci_err_handler = { 4628 .error_detected = rtw89_pci_io_error_detected, 4629 .slot_reset = rtw89_pci_io_slot_reset, 4630 .resume = rtw89_pci_io_resume, 4631 }; 4632 EXPORT_SYMBOL(rtw89_pci_err_handler); 4633 4634 const struct rtw89_pci_isr_def rtw89_pci_isr_ax = { 4635 .isr_rdu = B_AX_RDU_INT, 4636 .isr_halt_c2h = B_AX_HALT_C2H_INT_EN, 4637 .isr_wdt_timeout = B_AX_WDT_TIMEOUT_INT_EN, 4638 .isr_clear_rpq = {R_AX_PCIE_HISR00, B_AX_RPQDMA_INT | B_AX_RPQBD_FULL_INT}, 4639 .isr_clear_rxq = {R_AX_PCIE_HISR00, B_AX_RXP1DMA_INT | B_AX_RXDMA_INT | 4640 B_AX_RDU_INT}, 4641 }; 4642 EXPORT_SYMBOL(rtw89_pci_isr_ax); 4643 4644 const struct rtw89_pci_gen_def rtw89_pci_gen_ax = { 4645 .mac_pre_init = rtw89_pci_ops_mac_pre_init_ax, 4646 .mac_pre_deinit = rtw89_pci_ops_mac_pre_deinit_ax, 4647 .mac_post_init = rtw89_pci_ops_mac_post_init_ax, 4648 4649 .clr_idx_all = rtw89_pci_clr_idx_all_ax, 4650 .rst_bdram = rtw89_pci_rst_bdram_ax, 4651 4652 .lv1rst_stop_dma = rtw89_pci_lv1rst_stop_dma_ax, 4653 .lv1rst_start_dma = rtw89_pci_lv1rst_start_dma_ax, 4654 4655 .ctrl_txdma_ch = rtw89_pci_ctrl_txdma_ch_ax, 4656 .ctrl_txdma_fw_ch = rtw89_pci_ctrl_txdma_fw_ch_ax, 4657 .poll_txdma_ch_idle = rtw89_pci_poll_txdma_ch_idle_ax, 4658 4659 .aspm_set = rtw89_pci_aspm_set_ax, 4660 .clkreq_set = rtw89_pci_clkreq_set_ax, 4661 .l1ss_set = rtw89_pci_l1ss_set_ax, 4662 4663 .disable_eq = rtw89_pci_disable_eq_ax, 4664 .power_wake = rtw89_pci_power_wake_ax, 4665 }; 4666 EXPORT_SYMBOL(rtw89_pci_gen_ax); 4667 4668 static const struct rtw89_hci_ops rtw89_pci_ops = { 4669 .tx_write = rtw89_pci_ops_tx_write, 4670 .tx_kick_off = rtw89_pci_ops_tx_kick_off, 4671 .flush_queues = rtw89_pci_ops_flush_queues, 4672 .reset = rtw89_pci_ops_reset, 4673 .start = rtw89_pci_ops_start, 4674 .stop = rtw89_pci_ops_stop, 4675 .pause = rtw89_pci_ops_pause, 4676 .switch_mode = rtw89_pci_ops_switch_mode, 4677 .recalc_int_mit = rtw89_pci_recalc_int_mit, 4678 4679 .read8 = rtw89_pci_ops_read8, 4680 .read16 = rtw89_pci_ops_read16, 4681 .read32 = rtw89_pci_ops_read32, 4682 .write8 = rtw89_pci_ops_write8, 4683 .write16 = rtw89_pci_ops_write16, 4684 .write32 = rtw89_pci_ops_write32, 4685 4686 .mac_pre_init = rtw89_pci_ops_mac_pre_init, 4687 .mac_pre_deinit = rtw89_pci_ops_mac_pre_deinit, 4688 .mac_post_init = rtw89_pci_ops_mac_post_init, 4689 .deinit = rtw89_pci_ops_deinit, 4690 4691 .check_and_reclaim_tx_resource = rtw89_pci_check_and_reclaim_tx_resource, 4692 .mac_lv1_rcvy = rtw89_pci_ops_mac_lv1_recovery, 4693 .dump_err_status = rtw89_pci_ops_dump_err_status, 4694 .napi_poll = rtw89_pci_napi_poll, 4695 4696 .recovery_start = rtw89_pci_ops_recovery_start, 4697 .recovery_complete = rtw89_pci_ops_recovery_complete, 4698 4699 .ctrl_txdma_ch = rtw89_pci_ctrl_txdma_ch, 4700 .ctrl_txdma_fw_ch = rtw89_pci_ctrl_txdma_fw_ch, 4701 .ctrl_trxhci = rtw89_pci_ctrl_dma_trx, 4702 .poll_txdma_ch_idle = rtw89_pci_poll_txdma_ch_idle, 4703 4704 .clr_idx_all = rtw89_pci_clr_idx_all, 4705 .clear = rtw89_pci_clear_resource, 4706 .disable_intr = rtw89_pci_disable_intr_lock, 4707 .enable_intr = rtw89_pci_enable_intr_lock, 4708 .rst_bdram = rtw89_pci_reset_bdram, 4709 }; 4710 4711 int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) 4712 { 4713 struct rtw89_dev *rtwdev; 4714 const struct rtw89_driver_info *info; 4715 const struct rtw89_pci_info *pci_info; 4716 int ret; 4717 4718 info = (const struct rtw89_driver_info *)id->driver_data; 4719 4720 rtwdev = rtw89_alloc_ieee80211_hw(&pdev->dev, 4721 sizeof(struct rtw89_pci), 4722 info->chip, info->variant); 4723 if (!rtwdev) { 4724 dev_err(&pdev->dev, "failed to allocate hw\n"); 4725 return -ENOMEM; 4726 } 4727 4728 pci_info = info->bus.pci; 4729 4730 rtwdev->pci_info = info->bus.pci; 4731 rtwdev->hci.ops = &rtw89_pci_ops; 4732 rtwdev->hci.type = RTW89_HCI_TYPE_PCIE; 4733 rtwdev->hci.dle_type = RTW89_HCI_DLE_TYPE_PCIE; 4734 rtwdev->hci.rpwm_addr = pci_info->rpwm_addr; 4735 rtwdev->hci.cpwm_addr = pci_info->cpwm_addr; 4736 4737 rtw89_check_quirks(rtwdev, info->quirks); 4738 rtw89_check_pci_ssid_quirks(rtwdev, pdev, pci_info->ssid_quirks); 4739 4740 SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev); 4741 4742 ret = rtw89_core_init(rtwdev); 4743 if (ret) { 4744 rtw89_err(rtwdev, "failed to initialise core\n"); 4745 goto err_release_hw; 4746 } 4747 4748 ret = rtw89_pci_claim_device(rtwdev, pdev); 4749 if (ret) { 4750 rtw89_err(rtwdev, "failed to claim pci device\n"); 4751 goto err_core_deinit; 4752 } 4753 4754 ret = rtw89_pci_setup_resource(rtwdev, pdev); 4755 if (ret) { 4756 rtw89_err(rtwdev, "failed to setup pci resource\n"); 4757 goto err_declaim_pci; 4758 } 4759 4760 ret = rtw89_chip_info_setup(rtwdev); 4761 if (ret) { 4762 rtw89_err(rtwdev, "failed to setup chip information\n"); 4763 goto err_clear_resource; 4764 } 4765 4766 rtw89_pci_basic_cfg(rtwdev, false); 4767 4768 ret = rtw89_core_napi_init(rtwdev); 4769 if (ret) { 4770 rtw89_err(rtwdev, "failed to init napi\n"); 4771 goto err_clear_resource; 4772 } 4773 4774 ret = rtw89_pci_request_irq(rtwdev, pdev); 4775 if (ret) { 4776 rtw89_err(rtwdev, "failed to request pci irq\n"); 4777 goto err_deinit_napi; 4778 } 4779 4780 ret = rtw89_core_register(rtwdev); 4781 if (ret) { 4782 rtw89_err(rtwdev, "failed to register core\n"); 4783 goto err_free_irq; 4784 } 4785 4786 set_bit(RTW89_FLAG_PROBE_DONE, rtwdev->flags); 4787 4788 return 0; 4789 4790 err_free_irq: 4791 rtw89_pci_free_irq(rtwdev, pdev); 4792 err_deinit_napi: 4793 rtw89_core_napi_deinit(rtwdev); 4794 err_clear_resource: 4795 rtw89_pci_clear_resource(rtwdev, pdev); 4796 err_declaim_pci: 4797 rtw89_pci_declaim_device(rtwdev, pdev); 4798 err_core_deinit: 4799 rtw89_core_deinit(rtwdev); 4800 err_release_hw: 4801 rtw89_free_ieee80211_hw(rtwdev); 4802 4803 return ret; 4804 } 4805 EXPORT_SYMBOL(rtw89_pci_probe); 4806 4807 void rtw89_pci_remove(struct pci_dev *pdev) 4808 { 4809 struct ieee80211_hw *hw = pci_get_drvdata(pdev); 4810 struct rtw89_dev *rtwdev; 4811 4812 rtwdev = hw->priv; 4813 4814 rtw89_pci_free_irq(rtwdev, pdev); 4815 rtw89_core_napi_deinit(rtwdev); 4816 rtw89_core_unregister(rtwdev); 4817 rtw89_pci_clear_resource(rtwdev, pdev); 4818 rtw89_pci_declaim_device(rtwdev, pdev); 4819 rtw89_core_deinit(rtwdev); 4820 rtw89_free_ieee80211_hw(rtwdev); 4821 } 4822 EXPORT_SYMBOL(rtw89_pci_remove); 4823 4824 MODULE_AUTHOR("Realtek Corporation"); 4825 MODULE_DESCRIPTION("Realtek PCI 802.11ax wireless driver"); 4826 MODULE_LICENSE("Dual BSD/GPL"); 4827