1*8e93258fSBjoern A. Zeeb // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2*8e93258fSBjoern A. Zeeb /* Copyright(c) 2020 Realtek Corporation 3*8e93258fSBjoern A. Zeeb */ 4*8e93258fSBjoern A. Zeeb 5*8e93258fSBjoern A. Zeeb #if defined(__FreeBSD__) 6*8e93258fSBjoern A. Zeeb #define LINUXKPI_PARAM_PREFIX rtw89_pci_ 7*8e93258fSBjoern A. Zeeb #endif 8*8e93258fSBjoern A. Zeeb 9*8e93258fSBjoern A. Zeeb #include <linux/pci.h> 10*8e93258fSBjoern A. Zeeb 11*8e93258fSBjoern A. Zeeb #include "mac.h" 12*8e93258fSBjoern A. Zeeb #include "pci.h" 13*8e93258fSBjoern A. Zeeb #include "reg.h" 14*8e93258fSBjoern A. Zeeb #include "ser.h" 15*8e93258fSBjoern A. Zeeb 16*8e93258fSBjoern A. Zeeb static bool rtw89_pci_disable_clkreq; 17*8e93258fSBjoern A. Zeeb static bool rtw89_pci_disable_aspm_l1; 18*8e93258fSBjoern A. Zeeb static bool rtw89_pci_disable_l1ss; 19*8e93258fSBjoern A. Zeeb module_param_named(disable_clkreq, rtw89_pci_disable_clkreq, bool, 0644); 20*8e93258fSBjoern A. Zeeb module_param_named(disable_aspm_l1, rtw89_pci_disable_aspm_l1, bool, 0644); 21*8e93258fSBjoern A. Zeeb module_param_named(disable_aspm_l1ss, rtw89_pci_disable_l1ss, bool, 0644); 22*8e93258fSBjoern A. Zeeb MODULE_PARM_DESC(disable_clkreq, "Set Y to disable PCI clkreq support"); 23*8e93258fSBjoern A. Zeeb MODULE_PARM_DESC(disable_aspm_l1, "Set Y to disable PCI ASPM L1 support"); 24*8e93258fSBjoern A. Zeeb MODULE_PARM_DESC(disable_aspm_l1ss, "Set Y to disable PCI L1SS support"); 25*8e93258fSBjoern A. Zeeb 26*8e93258fSBjoern A. Zeeb static int rtw89_pci_rst_bdram_pcie(struct rtw89_dev *rtwdev) 27*8e93258fSBjoern A. Zeeb { 28*8e93258fSBjoern A. Zeeb u32 val; 29*8e93258fSBjoern A. Zeeb int ret; 30*8e93258fSBjoern A. Zeeb 31*8e93258fSBjoern A. Zeeb rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, 32*8e93258fSBjoern A. Zeeb rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) | B_AX_RST_BDRAM); 33*8e93258fSBjoern A. Zeeb 34*8e93258fSBjoern A. 
Zeeb ret = read_poll_timeout_atomic(rtw89_read32, val, !(val & B_AX_RST_BDRAM), 35*8e93258fSBjoern A. Zeeb 1, RTW89_PCI_POLL_BDRAM_RST_CNT, false, 36*8e93258fSBjoern A. Zeeb rtwdev, R_AX_PCIE_INIT_CFG1); 37*8e93258fSBjoern A. Zeeb 38*8e93258fSBjoern A. Zeeb if (ret) 39*8e93258fSBjoern A. Zeeb return -EBUSY; 40*8e93258fSBjoern A. Zeeb 41*8e93258fSBjoern A. Zeeb return 0; 42*8e93258fSBjoern A. Zeeb } 43*8e93258fSBjoern A. Zeeb 44*8e93258fSBjoern A. Zeeb static u32 rtw89_pci_dma_recalc(struct rtw89_dev *rtwdev, 45*8e93258fSBjoern A. Zeeb struct rtw89_pci_dma_ring *bd_ring, 46*8e93258fSBjoern A. Zeeb u32 cur_idx, bool tx) 47*8e93258fSBjoern A. Zeeb { 48*8e93258fSBjoern A. Zeeb u32 cnt, cur_rp, wp, rp, len; 49*8e93258fSBjoern A. Zeeb 50*8e93258fSBjoern A. Zeeb rp = bd_ring->rp; 51*8e93258fSBjoern A. Zeeb wp = bd_ring->wp; 52*8e93258fSBjoern A. Zeeb len = bd_ring->len; 53*8e93258fSBjoern A. Zeeb 54*8e93258fSBjoern A. Zeeb cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx); 55*8e93258fSBjoern A. Zeeb if (tx) 56*8e93258fSBjoern A. Zeeb cnt = cur_rp >= rp ? cur_rp - rp : len - (rp - cur_rp); 57*8e93258fSBjoern A. Zeeb else 58*8e93258fSBjoern A. Zeeb cnt = cur_rp >= wp ? cur_rp - wp : len - (wp - cur_rp); 59*8e93258fSBjoern A. Zeeb 60*8e93258fSBjoern A. Zeeb bd_ring->rp = cur_rp; 61*8e93258fSBjoern A. Zeeb 62*8e93258fSBjoern A. Zeeb return cnt; 63*8e93258fSBjoern A. Zeeb } 64*8e93258fSBjoern A. Zeeb 65*8e93258fSBjoern A. Zeeb static u32 rtw89_pci_txbd_recalc(struct rtw89_dev *rtwdev, 66*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_ring *tx_ring) 67*8e93258fSBjoern A. Zeeb { 68*8e93258fSBjoern A. Zeeb struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring; 69*8e93258fSBjoern A. Zeeb u32 addr_idx = bd_ring->addr.idx; 70*8e93258fSBjoern A. Zeeb u32 cnt, idx; 71*8e93258fSBjoern A. Zeeb 72*8e93258fSBjoern A. Zeeb idx = rtw89_read32(rtwdev, addr_idx); 73*8e93258fSBjoern A. Zeeb cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, true); 74*8e93258fSBjoern A. Zeeb 75*8e93258fSBjoern A. 
Zeeb return cnt; 76*8e93258fSBjoern A. Zeeb } 77*8e93258fSBjoern A. Zeeb 78*8e93258fSBjoern A. Zeeb static void rtw89_pci_release_fwcmd(struct rtw89_dev *rtwdev, 79*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci, 80*8e93258fSBjoern A. Zeeb u32 cnt, bool release_all) 81*8e93258fSBjoern A. Zeeb { 82*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_data *tx_data; 83*8e93258fSBjoern A. Zeeb struct sk_buff *skb; 84*8e93258fSBjoern A. Zeeb u32 qlen; 85*8e93258fSBjoern A. Zeeb 86*8e93258fSBjoern A. Zeeb while (cnt--) { 87*8e93258fSBjoern A. Zeeb skb = skb_dequeue(&rtwpci->h2c_queue); 88*8e93258fSBjoern A. Zeeb if (!skb) { 89*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "failed to pre-release fwcmd\n"); 90*8e93258fSBjoern A. Zeeb return; 91*8e93258fSBjoern A. Zeeb } 92*8e93258fSBjoern A. Zeeb skb_queue_tail(&rtwpci->h2c_release_queue, skb); 93*8e93258fSBjoern A. Zeeb } 94*8e93258fSBjoern A. Zeeb 95*8e93258fSBjoern A. Zeeb qlen = skb_queue_len(&rtwpci->h2c_release_queue); 96*8e93258fSBjoern A. Zeeb if (!release_all) 97*8e93258fSBjoern A. Zeeb qlen = qlen > RTW89_PCI_MULTITAG ? qlen - RTW89_PCI_MULTITAG : 0; 98*8e93258fSBjoern A. Zeeb 99*8e93258fSBjoern A. Zeeb while (qlen--) { 100*8e93258fSBjoern A. Zeeb skb = skb_dequeue(&rtwpci->h2c_release_queue); 101*8e93258fSBjoern A. Zeeb if (!skb) { 102*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "failed to release fwcmd\n"); 103*8e93258fSBjoern A. Zeeb return; 104*8e93258fSBjoern A. Zeeb } 105*8e93258fSBjoern A. Zeeb tx_data = RTW89_PCI_TX_SKB_CB(skb); 106*8e93258fSBjoern A. Zeeb dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len, 107*8e93258fSBjoern A. Zeeb DMA_TO_DEVICE); 108*8e93258fSBjoern A. Zeeb dev_kfree_skb_any(skb); 109*8e93258fSBjoern A. Zeeb } 110*8e93258fSBjoern A. Zeeb } 111*8e93258fSBjoern A. Zeeb 112*8e93258fSBjoern A. Zeeb static void rtw89_pci_reclaim_tx_fwcmd(struct rtw89_dev *rtwdev, 113*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci) 114*8e93258fSBjoern A. Zeeb { 115*8e93258fSBjoern A. 
Zeeb struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12]; 116*8e93258fSBjoern A. Zeeb u32 cnt; 117*8e93258fSBjoern A. Zeeb 118*8e93258fSBjoern A. Zeeb cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring); 119*8e93258fSBjoern A. Zeeb if (!cnt) 120*8e93258fSBjoern A. Zeeb return; 121*8e93258fSBjoern A. Zeeb rtw89_pci_release_fwcmd(rtwdev, rtwpci, cnt, false); 122*8e93258fSBjoern A. Zeeb } 123*8e93258fSBjoern A. Zeeb 124*8e93258fSBjoern A. Zeeb static u32 rtw89_pci_rxbd_recalc(struct rtw89_dev *rtwdev, 125*8e93258fSBjoern A. Zeeb struct rtw89_pci_rx_ring *rx_ring) 126*8e93258fSBjoern A. Zeeb { 127*8e93258fSBjoern A. Zeeb struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; 128*8e93258fSBjoern A. Zeeb u32 addr_idx = bd_ring->addr.idx; 129*8e93258fSBjoern A. Zeeb u32 cnt, idx; 130*8e93258fSBjoern A. Zeeb 131*8e93258fSBjoern A. Zeeb idx = rtw89_read32(rtwdev, addr_idx); 132*8e93258fSBjoern A. Zeeb cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, false); 133*8e93258fSBjoern A. Zeeb 134*8e93258fSBjoern A. Zeeb return cnt; 135*8e93258fSBjoern A. Zeeb } 136*8e93258fSBjoern A. Zeeb 137*8e93258fSBjoern A. Zeeb static void rtw89_pci_sync_skb_for_cpu(struct rtw89_dev *rtwdev, 138*8e93258fSBjoern A. Zeeb struct sk_buff *skb) 139*8e93258fSBjoern A. Zeeb { 140*8e93258fSBjoern A. Zeeb struct rtw89_pci_rx_info *rx_info; 141*8e93258fSBjoern A. Zeeb dma_addr_t dma; 142*8e93258fSBjoern A. Zeeb 143*8e93258fSBjoern A. Zeeb rx_info = RTW89_PCI_RX_SKB_CB(skb); 144*8e93258fSBjoern A. Zeeb dma = rx_info->dma; 145*8e93258fSBjoern A. Zeeb dma_sync_single_for_cpu(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE, 146*8e93258fSBjoern A. Zeeb DMA_FROM_DEVICE); 147*8e93258fSBjoern A. Zeeb } 148*8e93258fSBjoern A. Zeeb 149*8e93258fSBjoern A. Zeeb static void rtw89_pci_sync_skb_for_device(struct rtw89_dev *rtwdev, 150*8e93258fSBjoern A. Zeeb struct sk_buff *skb) 151*8e93258fSBjoern A. Zeeb { 152*8e93258fSBjoern A. Zeeb struct rtw89_pci_rx_info *rx_info; 153*8e93258fSBjoern A. 
Zeeb dma_addr_t dma; 154*8e93258fSBjoern A. Zeeb 155*8e93258fSBjoern A. Zeeb rx_info = RTW89_PCI_RX_SKB_CB(skb); 156*8e93258fSBjoern A. Zeeb dma = rx_info->dma; 157*8e93258fSBjoern A. Zeeb dma_sync_single_for_device(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE, 158*8e93258fSBjoern A. Zeeb DMA_FROM_DEVICE); 159*8e93258fSBjoern A. Zeeb } 160*8e93258fSBjoern A. Zeeb 161*8e93258fSBjoern A. Zeeb static int rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev, 162*8e93258fSBjoern A. Zeeb struct sk_buff *skb) 163*8e93258fSBjoern A. Zeeb { 164*8e93258fSBjoern A. Zeeb struct rtw89_pci_rxbd_info *rxbd_info; 165*8e93258fSBjoern A. Zeeb struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb); 166*8e93258fSBjoern A. Zeeb 167*8e93258fSBjoern A. Zeeb rxbd_info = (struct rtw89_pci_rxbd_info *)skb->data; 168*8e93258fSBjoern A. Zeeb rx_info->fs = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_FS); 169*8e93258fSBjoern A. Zeeb rx_info->ls = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_LS); 170*8e93258fSBjoern A. Zeeb rx_info->len = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_WRITE_SIZE); 171*8e93258fSBjoern A. Zeeb rx_info->tag = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_TAG); 172*8e93258fSBjoern A. Zeeb 173*8e93258fSBjoern A. Zeeb return 0; 174*8e93258fSBjoern A. Zeeb } 175*8e93258fSBjoern A. Zeeb 176*8e93258fSBjoern A. Zeeb static bool 177*8e93258fSBjoern A. Zeeb rtw89_skb_put_rx_data(struct rtw89_dev *rtwdev, bool fs, bool ls, 178*8e93258fSBjoern A. Zeeb struct sk_buff *new, 179*8e93258fSBjoern A. Zeeb const struct sk_buff *skb, u32 offset, 180*8e93258fSBjoern A. Zeeb const struct rtw89_pci_rx_info *rx_info, 181*8e93258fSBjoern A. Zeeb const struct rtw89_rx_desc_info *desc_info) 182*8e93258fSBjoern A. Zeeb { 183*8e93258fSBjoern A. Zeeb u32 copy_len = rx_info->len - offset; 184*8e93258fSBjoern A. Zeeb 185*8e93258fSBjoern A. Zeeb if (unlikely(skb_tailroom(new) < copy_len)) { 186*8e93258fSBjoern A. Zeeb rtw89_debug(rtwdev, RTW89_DBG_TXRX, 187*8e93258fSBjoern A. 
Zeeb "invalid rx data length bd_len=%d desc_len=%d offset=%d (fs=%d ls=%d)\n", 188*8e93258fSBjoern A. Zeeb rx_info->len, desc_info->pkt_size, offset, fs, ls); 189*8e93258fSBjoern A. Zeeb rtw89_hex_dump(rtwdev, RTW89_DBG_TXRX, "rx_data: ", 190*8e93258fSBjoern A. Zeeb skb->data, rx_info->len); 191*8e93258fSBjoern A. Zeeb /* length of a single segment skb is desc_info->pkt_size */ 192*8e93258fSBjoern A. Zeeb if (fs && ls) { 193*8e93258fSBjoern A. Zeeb copy_len = desc_info->pkt_size; 194*8e93258fSBjoern A. Zeeb } else { 195*8e93258fSBjoern A. Zeeb rtw89_info(rtwdev, "drop rx data due to invalid length\n"); 196*8e93258fSBjoern A. Zeeb return false; 197*8e93258fSBjoern A. Zeeb } 198*8e93258fSBjoern A. Zeeb } 199*8e93258fSBjoern A. Zeeb 200*8e93258fSBjoern A. Zeeb skb_put_data(new, skb->data + offset, copy_len); 201*8e93258fSBjoern A. Zeeb 202*8e93258fSBjoern A. Zeeb return true; 203*8e93258fSBjoern A. Zeeb } 204*8e93258fSBjoern A. Zeeb 205*8e93258fSBjoern A. Zeeb static u32 rtw89_pci_rxbd_deliver_skbs(struct rtw89_dev *rtwdev, 206*8e93258fSBjoern A. Zeeb struct rtw89_pci_rx_ring *rx_ring) 207*8e93258fSBjoern A. Zeeb { 208*8e93258fSBjoern A. Zeeb struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; 209*8e93258fSBjoern A. Zeeb struct rtw89_pci_rx_info *rx_info; 210*8e93258fSBjoern A. Zeeb struct rtw89_rx_desc_info *desc_info = &rx_ring->diliver_desc; 211*8e93258fSBjoern A. Zeeb struct sk_buff *new = rx_ring->diliver_skb; 212*8e93258fSBjoern A. Zeeb struct sk_buff *skb; 213*8e93258fSBjoern A. Zeeb u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info); 214*8e93258fSBjoern A. Zeeb u32 offset; 215*8e93258fSBjoern A. Zeeb u32 cnt = 1; 216*8e93258fSBjoern A. Zeeb bool fs, ls; 217*8e93258fSBjoern A. Zeeb int ret; 218*8e93258fSBjoern A. Zeeb 219*8e93258fSBjoern A. Zeeb skb = rx_ring->buf[bd_ring->wp]; 220*8e93258fSBjoern A. Zeeb rtw89_pci_sync_skb_for_cpu(rtwdev, skb); 221*8e93258fSBjoern A. Zeeb 222*8e93258fSBjoern A. 
Zeeb ret = rtw89_pci_rxbd_info_update(rtwdev, skb); 223*8e93258fSBjoern A. Zeeb if (ret) { 224*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n", 225*8e93258fSBjoern A. Zeeb bd_ring->wp, ret); 226*8e93258fSBjoern A. Zeeb goto err_sync_device; 227*8e93258fSBjoern A. Zeeb } 228*8e93258fSBjoern A. Zeeb 229*8e93258fSBjoern A. Zeeb rx_info = RTW89_PCI_RX_SKB_CB(skb); 230*8e93258fSBjoern A. Zeeb fs = rx_info->fs; 231*8e93258fSBjoern A. Zeeb ls = rx_info->ls; 232*8e93258fSBjoern A. Zeeb 233*8e93258fSBjoern A. Zeeb if (fs) { 234*8e93258fSBjoern A. Zeeb if (new) { 235*8e93258fSBjoern A. Zeeb rtw89_debug(rtwdev, RTW89_DBG_UNEXP, 236*8e93258fSBjoern A. Zeeb "skb should not be ready before first segment start\n"); 237*8e93258fSBjoern A. Zeeb goto err_sync_device; 238*8e93258fSBjoern A. Zeeb } 239*8e93258fSBjoern A. Zeeb if (desc_info->ready) { 240*8e93258fSBjoern A. Zeeb rtw89_warn(rtwdev, "desc info should not be ready before first segment start\n"); 241*8e93258fSBjoern A. Zeeb goto err_sync_device; 242*8e93258fSBjoern A. Zeeb } 243*8e93258fSBjoern A. Zeeb 244*8e93258fSBjoern A. Zeeb rtw89_core_query_rxdesc(rtwdev, desc_info, skb->data, rxinfo_size); 245*8e93258fSBjoern A. Zeeb 246*8e93258fSBjoern A. Zeeb new = dev_alloc_skb(desc_info->pkt_size); 247*8e93258fSBjoern A. Zeeb if (!new) 248*8e93258fSBjoern A. Zeeb goto err_sync_device; 249*8e93258fSBjoern A. Zeeb 250*8e93258fSBjoern A. Zeeb rx_ring->diliver_skb = new; 251*8e93258fSBjoern A. Zeeb 252*8e93258fSBjoern A. Zeeb /* first segment has RX desc */ 253*8e93258fSBjoern A. Zeeb offset = desc_info->offset; 254*8e93258fSBjoern A. Zeeb offset += desc_info->long_rxdesc ? sizeof(struct rtw89_rxdesc_long) : 255*8e93258fSBjoern A. Zeeb sizeof(struct rtw89_rxdesc_short); 256*8e93258fSBjoern A. Zeeb } else { 257*8e93258fSBjoern A. Zeeb offset = sizeof(struct rtw89_pci_rxbd_info); 258*8e93258fSBjoern A. Zeeb if (!new) { 259*8e93258fSBjoern A. 
Zeeb rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "no last skb\n"); 260*8e93258fSBjoern A. Zeeb goto err_sync_device; 261*8e93258fSBjoern A. Zeeb } 262*8e93258fSBjoern A. Zeeb } 263*8e93258fSBjoern A. Zeeb if (!rtw89_skb_put_rx_data(rtwdev, fs, ls, new, skb, offset, rx_info, desc_info)) 264*8e93258fSBjoern A. Zeeb goto err_sync_device; 265*8e93258fSBjoern A. Zeeb rtw89_pci_sync_skb_for_device(rtwdev, skb); 266*8e93258fSBjoern A. Zeeb rtw89_pci_rxbd_increase(rx_ring, 1); 267*8e93258fSBjoern A. Zeeb 268*8e93258fSBjoern A. Zeeb if (!desc_info->ready) { 269*8e93258fSBjoern A. Zeeb rtw89_warn(rtwdev, "no rx desc information\n"); 270*8e93258fSBjoern A. Zeeb goto err_free_resource; 271*8e93258fSBjoern A. Zeeb } 272*8e93258fSBjoern A. Zeeb if (ls) { 273*8e93258fSBjoern A. Zeeb rtw89_core_rx(rtwdev, desc_info, new); 274*8e93258fSBjoern A. Zeeb rx_ring->diliver_skb = NULL; 275*8e93258fSBjoern A. Zeeb desc_info->ready = false; 276*8e93258fSBjoern A. Zeeb } 277*8e93258fSBjoern A. Zeeb 278*8e93258fSBjoern A. Zeeb return cnt; 279*8e93258fSBjoern A. Zeeb 280*8e93258fSBjoern A. Zeeb err_sync_device: 281*8e93258fSBjoern A. Zeeb rtw89_pci_sync_skb_for_device(rtwdev, skb); 282*8e93258fSBjoern A. Zeeb rtw89_pci_rxbd_increase(rx_ring, 1); 283*8e93258fSBjoern A. Zeeb err_free_resource: 284*8e93258fSBjoern A. Zeeb if (new) 285*8e93258fSBjoern A. Zeeb dev_kfree_skb_any(new); 286*8e93258fSBjoern A. Zeeb rx_ring->diliver_skb = NULL; 287*8e93258fSBjoern A. Zeeb desc_info->ready = false; 288*8e93258fSBjoern A. Zeeb 289*8e93258fSBjoern A. Zeeb return cnt; 290*8e93258fSBjoern A. Zeeb } 291*8e93258fSBjoern A. Zeeb 292*8e93258fSBjoern A. Zeeb static void rtw89_pci_rxbd_deliver(struct rtw89_dev *rtwdev, 293*8e93258fSBjoern A. Zeeb struct rtw89_pci_rx_ring *rx_ring, 294*8e93258fSBjoern A. Zeeb u32 cnt) 295*8e93258fSBjoern A. Zeeb { 296*8e93258fSBjoern A. Zeeb struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; 297*8e93258fSBjoern A. Zeeb u32 rx_cnt; 298*8e93258fSBjoern A. Zeeb 299*8e93258fSBjoern A. 
Zeeb while (cnt && rtwdev->napi_budget_countdown > 0) { 300*8e93258fSBjoern A. Zeeb rx_cnt = rtw89_pci_rxbd_deliver_skbs(rtwdev, rx_ring); 301*8e93258fSBjoern A. Zeeb if (!rx_cnt) { 302*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "failed to deliver RXBD skb\n"); 303*8e93258fSBjoern A. Zeeb 304*8e93258fSBjoern A. Zeeb /* skip the rest RXBD bufs */ 305*8e93258fSBjoern A. Zeeb rtw89_pci_rxbd_increase(rx_ring, cnt); 306*8e93258fSBjoern A. Zeeb break; 307*8e93258fSBjoern A. Zeeb } 308*8e93258fSBjoern A. Zeeb 309*8e93258fSBjoern A. Zeeb cnt -= rx_cnt; 310*8e93258fSBjoern A. Zeeb } 311*8e93258fSBjoern A. Zeeb 312*8e93258fSBjoern A. Zeeb rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp); 313*8e93258fSBjoern A. Zeeb } 314*8e93258fSBjoern A. Zeeb 315*8e93258fSBjoern A. Zeeb static int rtw89_pci_poll_rxq_dma(struct rtw89_dev *rtwdev, 316*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci, int budget) 317*8e93258fSBjoern A. Zeeb { 318*8e93258fSBjoern A. Zeeb struct rtw89_pci_rx_ring *rx_ring; 319*8e93258fSBjoern A. Zeeb int countdown = rtwdev->napi_budget_countdown; 320*8e93258fSBjoern A. Zeeb u32 cnt; 321*8e93258fSBjoern A. Zeeb 322*8e93258fSBjoern A. Zeeb rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RXQ]; 323*8e93258fSBjoern A. Zeeb 324*8e93258fSBjoern A. Zeeb cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring); 325*8e93258fSBjoern A. Zeeb if (!cnt) 326*8e93258fSBjoern A. Zeeb return 0; 327*8e93258fSBjoern A. Zeeb 328*8e93258fSBjoern A. Zeeb cnt = min_t(u32, budget, cnt); 329*8e93258fSBjoern A. Zeeb 330*8e93258fSBjoern A. Zeeb rtw89_pci_rxbd_deliver(rtwdev, rx_ring, cnt); 331*8e93258fSBjoern A. Zeeb 332*8e93258fSBjoern A. Zeeb /* In case of flushing pending SKBs, the countdown may exceed. */ 333*8e93258fSBjoern A. Zeeb if (rtwdev->napi_budget_countdown <= 0) 334*8e93258fSBjoern A. Zeeb return budget; 335*8e93258fSBjoern A. Zeeb 336*8e93258fSBjoern A. Zeeb return budget - countdown; 337*8e93258fSBjoern A. Zeeb } 338*8e93258fSBjoern A. Zeeb 339*8e93258fSBjoern A. 
Zeeb static void rtw89_pci_tx_status(struct rtw89_dev *rtwdev, 340*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_ring *tx_ring, 341*8e93258fSBjoern A. Zeeb struct sk_buff *skb, u8 tx_status) 342*8e93258fSBjoern A. Zeeb { 343*8e93258fSBjoern A. Zeeb struct ieee80211_tx_info *info; 344*8e93258fSBjoern A. Zeeb 345*8e93258fSBjoern A. Zeeb info = IEEE80211_SKB_CB(skb); 346*8e93258fSBjoern A. Zeeb ieee80211_tx_info_clear_status(info); 347*8e93258fSBjoern A. Zeeb 348*8e93258fSBjoern A. Zeeb if (info->flags & IEEE80211_TX_CTL_NO_ACK) 349*8e93258fSBjoern A. Zeeb info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; 350*8e93258fSBjoern A. Zeeb if (tx_status == RTW89_TX_DONE) { 351*8e93258fSBjoern A. Zeeb info->flags |= IEEE80211_TX_STAT_ACK; 352*8e93258fSBjoern A. Zeeb tx_ring->tx_acked++; 353*8e93258fSBjoern A. Zeeb } else { 354*8e93258fSBjoern A. Zeeb if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) 355*8e93258fSBjoern A. Zeeb rtw89_debug(rtwdev, RTW89_DBG_FW, 356*8e93258fSBjoern A. Zeeb "failed to TX of status %x\n", tx_status); 357*8e93258fSBjoern A. Zeeb switch (tx_status) { 358*8e93258fSBjoern A. Zeeb case RTW89_TX_RETRY_LIMIT: 359*8e93258fSBjoern A. Zeeb tx_ring->tx_retry_lmt++; 360*8e93258fSBjoern A. Zeeb break; 361*8e93258fSBjoern A. Zeeb case RTW89_TX_LIFE_TIME: 362*8e93258fSBjoern A. Zeeb tx_ring->tx_life_time++; 363*8e93258fSBjoern A. Zeeb break; 364*8e93258fSBjoern A. Zeeb case RTW89_TX_MACID_DROP: 365*8e93258fSBjoern A. Zeeb tx_ring->tx_mac_id_drop++; 366*8e93258fSBjoern A. Zeeb break; 367*8e93258fSBjoern A. Zeeb default: 368*8e93258fSBjoern A. Zeeb rtw89_warn(rtwdev, "invalid TX status %x\n", tx_status); 369*8e93258fSBjoern A. Zeeb break; 370*8e93258fSBjoern A. Zeeb } 371*8e93258fSBjoern A. Zeeb } 372*8e93258fSBjoern A. Zeeb 373*8e93258fSBjoern A. Zeeb ieee80211_tx_status_ni(rtwdev->hw, skb); 374*8e93258fSBjoern A. Zeeb } 375*8e93258fSBjoern A. Zeeb 376*8e93258fSBjoern A. 
Zeeb static void rtw89_pci_reclaim_txbd(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring) 377*8e93258fSBjoern A. Zeeb { 378*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_wd *txwd; 379*8e93258fSBjoern A. Zeeb u32 cnt; 380*8e93258fSBjoern A. Zeeb 381*8e93258fSBjoern A. Zeeb cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring); 382*8e93258fSBjoern A. Zeeb while (cnt--) { 383*8e93258fSBjoern A. Zeeb txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list); 384*8e93258fSBjoern A. Zeeb if (!txwd) { 385*8e93258fSBjoern A. Zeeb rtw89_warn(rtwdev, "No busy txwd pages available\n"); 386*8e93258fSBjoern A. Zeeb break; 387*8e93258fSBjoern A. Zeeb } 388*8e93258fSBjoern A. Zeeb 389*8e93258fSBjoern A. Zeeb list_del_init(&txwd->list); 390*8e93258fSBjoern A. Zeeb 391*8e93258fSBjoern A. Zeeb /* this skb has been freed by RPP */ 392*8e93258fSBjoern A. Zeeb if (skb_queue_len(&txwd->queue) == 0) 393*8e93258fSBjoern A. Zeeb rtw89_pci_enqueue_txwd(tx_ring, txwd); 394*8e93258fSBjoern A. Zeeb } 395*8e93258fSBjoern A. Zeeb } 396*8e93258fSBjoern A. Zeeb 397*8e93258fSBjoern A. Zeeb static void rtw89_pci_release_busy_txwd(struct rtw89_dev *rtwdev, 398*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_ring *tx_ring) 399*8e93258fSBjoern A. Zeeb { 400*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 401*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_wd *txwd; 402*8e93258fSBjoern A. Zeeb int i; 403*8e93258fSBjoern A. Zeeb 404*8e93258fSBjoern A. Zeeb for (i = 0; i < wd_ring->page_num; i++) { 405*8e93258fSBjoern A. Zeeb txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list); 406*8e93258fSBjoern A. Zeeb if (!txwd) 407*8e93258fSBjoern A. Zeeb break; 408*8e93258fSBjoern A. Zeeb 409*8e93258fSBjoern A. Zeeb list_del_init(&txwd->list); 410*8e93258fSBjoern A. Zeeb } 411*8e93258fSBjoern A. Zeeb } 412*8e93258fSBjoern A. Zeeb 413*8e93258fSBjoern A. 
Zeeb static void rtw89_pci_release_txwd_skb(struct rtw89_dev *rtwdev, 414*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_ring *tx_ring, 415*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_wd *txwd, u16 seq, 416*8e93258fSBjoern A. Zeeb u8 tx_status) 417*8e93258fSBjoern A. Zeeb { 418*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 419*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_data *tx_data; 420*8e93258fSBjoern A. Zeeb struct sk_buff *skb, *tmp; 421*8e93258fSBjoern A. Zeeb u8 txch = tx_ring->txch; 422*8e93258fSBjoern A. Zeeb 423*8e93258fSBjoern A. Zeeb if (!list_empty(&txwd->list)) { 424*8e93258fSBjoern A. Zeeb rtw89_pci_reclaim_txbd(rtwdev, tx_ring); 425*8e93258fSBjoern A. Zeeb /* In low power mode, RPP can receive before updating of TX BD. 426*8e93258fSBjoern A. Zeeb * In normal mode, it should not happen so give it a warning. 427*8e93258fSBjoern A. Zeeb */ 428*8e93258fSBjoern A. Zeeb if (!rtwpci->low_power && !list_empty(&txwd->list)) 429*8e93258fSBjoern A. Zeeb rtw89_warn(rtwdev, "queue %d txwd %d is not idle\n", 430*8e93258fSBjoern A. Zeeb txch, seq); 431*8e93258fSBjoern A. Zeeb } 432*8e93258fSBjoern A. Zeeb 433*8e93258fSBjoern A. Zeeb skb_queue_walk_safe(&txwd->queue, skb, tmp) { 434*8e93258fSBjoern A. Zeeb skb_unlink(skb, &txwd->queue); 435*8e93258fSBjoern A. Zeeb 436*8e93258fSBjoern A. Zeeb tx_data = RTW89_PCI_TX_SKB_CB(skb); 437*8e93258fSBjoern A. Zeeb dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len, 438*8e93258fSBjoern A. Zeeb DMA_TO_DEVICE); 439*8e93258fSBjoern A. Zeeb 440*8e93258fSBjoern A. Zeeb rtw89_pci_tx_status(rtwdev, tx_ring, skb, tx_status); 441*8e93258fSBjoern A. Zeeb } 442*8e93258fSBjoern A. Zeeb 443*8e93258fSBjoern A. Zeeb if (list_empty(&txwd->list)) 444*8e93258fSBjoern A. Zeeb rtw89_pci_enqueue_txwd(tx_ring, txwd); 445*8e93258fSBjoern A. Zeeb } 446*8e93258fSBjoern A. Zeeb 447*8e93258fSBjoern A. Zeeb static void rtw89_pci_release_rpp(struct rtw89_dev *rtwdev, 448*8e93258fSBjoern A. 
Zeeb struct rtw89_pci_rpp_fmt *rpp) 449*8e93258fSBjoern A. Zeeb { 450*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 451*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_ring *tx_ring; 452*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_wd_ring *wd_ring; 453*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_wd *txwd; 454*8e93258fSBjoern A. Zeeb u16 seq; 455*8e93258fSBjoern A. Zeeb u8 qsel, tx_status, txch; 456*8e93258fSBjoern A. Zeeb 457*8e93258fSBjoern A. Zeeb seq = le32_get_bits(rpp->dword, RTW89_PCI_RPP_SEQ); 458*8e93258fSBjoern A. Zeeb qsel = le32_get_bits(rpp->dword, RTW89_PCI_RPP_QSEL); 459*8e93258fSBjoern A. Zeeb tx_status = le32_get_bits(rpp->dword, RTW89_PCI_RPP_TX_STATUS); 460*8e93258fSBjoern A. Zeeb txch = rtw89_core_get_ch_dma(rtwdev, qsel); 461*8e93258fSBjoern A. Zeeb 462*8e93258fSBjoern A. Zeeb if (txch == RTW89_TXCH_CH12) { 463*8e93258fSBjoern A. Zeeb rtw89_warn(rtwdev, "should no fwcmd release report\n"); 464*8e93258fSBjoern A. Zeeb return; 465*8e93258fSBjoern A. Zeeb } 466*8e93258fSBjoern A. Zeeb 467*8e93258fSBjoern A. Zeeb tx_ring = &rtwpci->tx_rings[txch]; 468*8e93258fSBjoern A. Zeeb wd_ring = &tx_ring->wd_ring; 469*8e93258fSBjoern A. Zeeb txwd = &wd_ring->pages[seq]; 470*8e93258fSBjoern A. Zeeb 471*8e93258fSBjoern A. Zeeb rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, seq, tx_status); 472*8e93258fSBjoern A. Zeeb } 473*8e93258fSBjoern A. Zeeb 474*8e93258fSBjoern A. Zeeb static void rtw89_pci_release_pending_txwd_skb(struct rtw89_dev *rtwdev, 475*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_ring *tx_ring) 476*8e93258fSBjoern A. Zeeb { 477*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 478*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_wd *txwd; 479*8e93258fSBjoern A. Zeeb int i; 480*8e93258fSBjoern A. Zeeb 481*8e93258fSBjoern A. Zeeb for (i = 0; i < wd_ring->page_num; i++) { 482*8e93258fSBjoern A. Zeeb txwd = &wd_ring->pages[i]; 483*8e93258fSBjoern A. Zeeb 484*8e93258fSBjoern A. 
Zeeb if (!list_empty(&txwd->list)) 485*8e93258fSBjoern A. Zeeb continue; 486*8e93258fSBjoern A. Zeeb 487*8e93258fSBjoern A. Zeeb rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, i, RTW89_TX_MACID_DROP); 488*8e93258fSBjoern A. Zeeb } 489*8e93258fSBjoern A. Zeeb } 490*8e93258fSBjoern A. Zeeb 491*8e93258fSBjoern A. Zeeb static u32 rtw89_pci_release_tx_skbs(struct rtw89_dev *rtwdev, 492*8e93258fSBjoern A. Zeeb struct rtw89_pci_rx_ring *rx_ring, 493*8e93258fSBjoern A. Zeeb u32 max_cnt) 494*8e93258fSBjoern A. Zeeb { 495*8e93258fSBjoern A. Zeeb struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; 496*8e93258fSBjoern A. Zeeb struct rtw89_pci_rx_info *rx_info; 497*8e93258fSBjoern A. Zeeb struct rtw89_pci_rpp_fmt *rpp; 498*8e93258fSBjoern A. Zeeb struct rtw89_rx_desc_info desc_info = {}; 499*8e93258fSBjoern A. Zeeb struct sk_buff *skb; 500*8e93258fSBjoern A. Zeeb u32 cnt = 0; 501*8e93258fSBjoern A. Zeeb u32 rpp_size = sizeof(struct rtw89_pci_rpp_fmt); 502*8e93258fSBjoern A. Zeeb u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info); 503*8e93258fSBjoern A. Zeeb u32 offset; 504*8e93258fSBjoern A. Zeeb int ret; 505*8e93258fSBjoern A. Zeeb 506*8e93258fSBjoern A. Zeeb skb = rx_ring->buf[bd_ring->wp]; 507*8e93258fSBjoern A. Zeeb rtw89_pci_sync_skb_for_cpu(rtwdev, skb); 508*8e93258fSBjoern A. Zeeb 509*8e93258fSBjoern A. Zeeb ret = rtw89_pci_rxbd_info_update(rtwdev, skb); 510*8e93258fSBjoern A. Zeeb if (ret) { 511*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n", 512*8e93258fSBjoern A. Zeeb bd_ring->wp, ret); 513*8e93258fSBjoern A. Zeeb goto err_sync_device; 514*8e93258fSBjoern A. Zeeb } 515*8e93258fSBjoern A. Zeeb 516*8e93258fSBjoern A. Zeeb rx_info = RTW89_PCI_RX_SKB_CB(skb); 517*8e93258fSBjoern A. Zeeb if (!rx_info->fs || !rx_info->ls) { 518*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "cannot process RP frame not set FS/LS\n"); 519*8e93258fSBjoern A. Zeeb return cnt; 520*8e93258fSBjoern A. Zeeb } 521*8e93258fSBjoern A. Zeeb 522*8e93258fSBjoern A. 
Zeeb rtw89_core_query_rxdesc(rtwdev, &desc_info, skb->data, rxinfo_size); 523*8e93258fSBjoern A. Zeeb 524*8e93258fSBjoern A. Zeeb /* first segment has RX desc */ 525*8e93258fSBjoern A. Zeeb offset = desc_info.offset; 526*8e93258fSBjoern A. Zeeb offset += desc_info.long_rxdesc ? sizeof(struct rtw89_rxdesc_long) : 527*8e93258fSBjoern A. Zeeb sizeof(struct rtw89_rxdesc_short); 528*8e93258fSBjoern A. Zeeb for (; offset + rpp_size <= rx_info->len; offset += rpp_size) { 529*8e93258fSBjoern A. Zeeb rpp = (struct rtw89_pci_rpp_fmt *)(skb->data + offset); 530*8e93258fSBjoern A. Zeeb rtw89_pci_release_rpp(rtwdev, rpp); 531*8e93258fSBjoern A. Zeeb } 532*8e93258fSBjoern A. Zeeb 533*8e93258fSBjoern A. Zeeb rtw89_pci_sync_skb_for_device(rtwdev, skb); 534*8e93258fSBjoern A. Zeeb rtw89_pci_rxbd_increase(rx_ring, 1); 535*8e93258fSBjoern A. Zeeb cnt++; 536*8e93258fSBjoern A. Zeeb 537*8e93258fSBjoern A. Zeeb return cnt; 538*8e93258fSBjoern A. Zeeb 539*8e93258fSBjoern A. Zeeb err_sync_device: 540*8e93258fSBjoern A. Zeeb rtw89_pci_sync_skb_for_device(rtwdev, skb); 541*8e93258fSBjoern A. Zeeb return 0; 542*8e93258fSBjoern A. Zeeb } 543*8e93258fSBjoern A. Zeeb 544*8e93258fSBjoern A. Zeeb static void rtw89_pci_release_tx(struct rtw89_dev *rtwdev, 545*8e93258fSBjoern A. Zeeb struct rtw89_pci_rx_ring *rx_ring, 546*8e93258fSBjoern A. Zeeb u32 cnt) 547*8e93258fSBjoern A. Zeeb { 548*8e93258fSBjoern A. Zeeb struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring; 549*8e93258fSBjoern A. Zeeb u32 release_cnt; 550*8e93258fSBjoern A. Zeeb 551*8e93258fSBjoern A. Zeeb while (cnt) { 552*8e93258fSBjoern A. Zeeb release_cnt = rtw89_pci_release_tx_skbs(rtwdev, rx_ring, cnt); 553*8e93258fSBjoern A. Zeeb if (!release_cnt) { 554*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "failed to release TX skbs\n"); 555*8e93258fSBjoern A. Zeeb 556*8e93258fSBjoern A. Zeeb /* skip the rest RXBD bufs */ 557*8e93258fSBjoern A. Zeeb rtw89_pci_rxbd_increase(rx_ring, cnt); 558*8e93258fSBjoern A. Zeeb break; 559*8e93258fSBjoern A. 
Zeeb } 560*8e93258fSBjoern A. Zeeb 561*8e93258fSBjoern A. Zeeb cnt -= release_cnt; 562*8e93258fSBjoern A. Zeeb } 563*8e93258fSBjoern A. Zeeb 564*8e93258fSBjoern A. Zeeb rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp); 565*8e93258fSBjoern A. Zeeb } 566*8e93258fSBjoern A. Zeeb 567*8e93258fSBjoern A. Zeeb static int rtw89_pci_poll_rpq_dma(struct rtw89_dev *rtwdev, 568*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci, int budget) 569*8e93258fSBjoern A. Zeeb { 570*8e93258fSBjoern A. Zeeb struct rtw89_pci_rx_ring *rx_ring; 571*8e93258fSBjoern A. Zeeb u32 cnt; 572*8e93258fSBjoern A. Zeeb int work_done; 573*8e93258fSBjoern A. Zeeb 574*8e93258fSBjoern A. Zeeb rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ]; 575*8e93258fSBjoern A. Zeeb 576*8e93258fSBjoern A. Zeeb spin_lock_bh(&rtwpci->trx_lock); 577*8e93258fSBjoern A. Zeeb 578*8e93258fSBjoern A. Zeeb cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring); 579*8e93258fSBjoern A. Zeeb if (cnt == 0) 580*8e93258fSBjoern A. Zeeb goto out_unlock; 581*8e93258fSBjoern A. Zeeb 582*8e93258fSBjoern A. Zeeb rtw89_pci_release_tx(rtwdev, rx_ring, cnt); 583*8e93258fSBjoern A. Zeeb 584*8e93258fSBjoern A. Zeeb out_unlock: 585*8e93258fSBjoern A. Zeeb spin_unlock_bh(&rtwpci->trx_lock); 586*8e93258fSBjoern A. Zeeb 587*8e93258fSBjoern A. Zeeb /* always release all RPQ */ 588*8e93258fSBjoern A. Zeeb work_done = min_t(int, cnt, budget); 589*8e93258fSBjoern A. Zeeb rtwdev->napi_budget_countdown -= work_done; 590*8e93258fSBjoern A. Zeeb 591*8e93258fSBjoern A. Zeeb return work_done; 592*8e93258fSBjoern A. Zeeb } 593*8e93258fSBjoern A. Zeeb 594*8e93258fSBjoern A. Zeeb static void rtw89_pci_isr_rxd_unavail(struct rtw89_dev *rtwdev, 595*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci) 596*8e93258fSBjoern A. Zeeb { 597*8e93258fSBjoern A. Zeeb struct rtw89_pci_rx_ring *rx_ring; 598*8e93258fSBjoern A. Zeeb struct rtw89_pci_dma_ring *bd_ring; 599*8e93258fSBjoern A. Zeeb u32 reg_idx; 600*8e93258fSBjoern A. 
Zeeb u16 hw_idx, hw_idx_next, host_idx; 601*8e93258fSBjoern A. Zeeb int i; 602*8e93258fSBjoern A. Zeeb 603*8e93258fSBjoern A. Zeeb for (i = 0; i < RTW89_RXCH_NUM; i++) { 604*8e93258fSBjoern A. Zeeb rx_ring = &rtwpci->rx_rings[i]; 605*8e93258fSBjoern A. Zeeb bd_ring = &rx_ring->bd_ring; 606*8e93258fSBjoern A. Zeeb 607*8e93258fSBjoern A. Zeeb reg_idx = rtw89_read32(rtwdev, bd_ring->addr.idx); 608*8e93258fSBjoern A. Zeeb hw_idx = FIELD_GET(TXBD_HW_IDX_MASK, reg_idx); 609*8e93258fSBjoern A. Zeeb host_idx = FIELD_GET(TXBD_HOST_IDX_MASK, reg_idx); 610*8e93258fSBjoern A. Zeeb hw_idx_next = (hw_idx + 1) % bd_ring->len; 611*8e93258fSBjoern A. Zeeb 612*8e93258fSBjoern A. Zeeb if (hw_idx_next == host_idx) 613*8e93258fSBjoern A. Zeeb rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "%d RXD unavailable\n", i); 614*8e93258fSBjoern A. Zeeb 615*8e93258fSBjoern A. Zeeb rtw89_debug(rtwdev, RTW89_DBG_TXRX, 616*8e93258fSBjoern A. Zeeb "%d RXD unavailable, idx=0x%08x, len=%d\n", 617*8e93258fSBjoern A. Zeeb i, reg_idx, bd_ring->len); 618*8e93258fSBjoern A. Zeeb } 619*8e93258fSBjoern A. Zeeb } 620*8e93258fSBjoern A. Zeeb 621*8e93258fSBjoern A. Zeeb void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev, 622*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci, 623*8e93258fSBjoern A. Zeeb struct rtw89_pci_isrs *isrs) 624*8e93258fSBjoern A. Zeeb { 625*8e93258fSBjoern A. Zeeb isrs->halt_c2h_isrs = rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs; 626*8e93258fSBjoern A. Zeeb isrs->isrs[0] = rtw89_read32(rtwdev, R_AX_PCIE_HISR00) & rtwpci->intrs[0]; 627*8e93258fSBjoern A. Zeeb isrs->isrs[1] = rtw89_read32(rtwdev, R_AX_PCIE_HISR10) & rtwpci->intrs[1]; 628*8e93258fSBjoern A. Zeeb 629*8e93258fSBjoern A. Zeeb rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs); 630*8e93258fSBjoern A. Zeeb rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isrs->isrs[0]); 631*8e93258fSBjoern A. Zeeb rtw89_write32(rtwdev, R_AX_PCIE_HISR10, isrs->isrs[1]); 632*8e93258fSBjoern A. Zeeb } 633*8e93258fSBjoern A. 
Zeeb EXPORT_SYMBOL(rtw89_pci_recognize_intrs); 634*8e93258fSBjoern A. Zeeb 635*8e93258fSBjoern A. Zeeb void rtw89_pci_recognize_intrs_v1(struct rtw89_dev *rtwdev, 636*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci, 637*8e93258fSBjoern A. Zeeb struct rtw89_pci_isrs *isrs) 638*8e93258fSBjoern A. Zeeb { 639*8e93258fSBjoern A. Zeeb isrs->ind_isrs = rtw89_read32(rtwdev, R_AX_PCIE_HISR00_V1) & rtwpci->ind_intrs; 640*8e93258fSBjoern A. Zeeb isrs->halt_c2h_isrs = isrs->ind_isrs & B_AX_HS0ISR_IND_INT_EN ? 641*8e93258fSBjoern A. Zeeb rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs : 0; 642*8e93258fSBjoern A. Zeeb isrs->isrs[0] = isrs->ind_isrs & B_AX_HCI_AXIDMA_INT_EN ? 643*8e93258fSBjoern A. Zeeb rtw89_read32(rtwdev, R_AX_HAXI_HISR00) & rtwpci->intrs[0] : 0; 644*8e93258fSBjoern A. Zeeb isrs->isrs[1] = isrs->ind_isrs & B_AX_HS1ISR_IND_INT_EN ? 645*8e93258fSBjoern A. Zeeb rtw89_read32(rtwdev, R_AX_HISR1) & rtwpci->intrs[1] : 0; 646*8e93258fSBjoern A. Zeeb 647*8e93258fSBjoern A. Zeeb if (isrs->halt_c2h_isrs) 648*8e93258fSBjoern A. Zeeb rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs); 649*8e93258fSBjoern A. Zeeb if (isrs->isrs[0]) 650*8e93258fSBjoern A. Zeeb rtw89_write32(rtwdev, R_AX_HAXI_HISR00, isrs->isrs[0]); 651*8e93258fSBjoern A. Zeeb if (isrs->isrs[1]) 652*8e93258fSBjoern A. Zeeb rtw89_write32(rtwdev, R_AX_HISR1, isrs->isrs[1]); 653*8e93258fSBjoern A. Zeeb } 654*8e93258fSBjoern A. Zeeb EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v1); 655*8e93258fSBjoern A. Zeeb 656*8e93258fSBjoern A. Zeeb static void rtw89_pci_clear_isr0(struct rtw89_dev *rtwdev, u32 isr00) 657*8e93258fSBjoern A. Zeeb { 658*8e93258fSBjoern A. Zeeb /* write 1 clear */ 659*8e93258fSBjoern A. Zeeb rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isr00); 660*8e93258fSBjoern A. Zeeb } 661*8e93258fSBjoern A. Zeeb 662*8e93258fSBjoern A. Zeeb void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) 663*8e93258fSBjoern A. Zeeb { 664*8e93258fSBjoern A. 
Zeeb rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs); 665*8e93258fSBjoern A. Zeeb rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, rtwpci->intrs[0]); 666*8e93258fSBjoern A. Zeeb rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, rtwpci->intrs[1]); 667*8e93258fSBjoern A. Zeeb } 668*8e93258fSBjoern A. Zeeb EXPORT_SYMBOL(rtw89_pci_enable_intr); 669*8e93258fSBjoern A. Zeeb 670*8e93258fSBjoern A. Zeeb void rtw89_pci_disable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) 671*8e93258fSBjoern A. Zeeb { 672*8e93258fSBjoern A. Zeeb rtw89_write32(rtwdev, R_AX_HIMR0, 0); 673*8e93258fSBjoern A. Zeeb rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, 0); 674*8e93258fSBjoern A. Zeeb rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, 0); 675*8e93258fSBjoern A. Zeeb } 676*8e93258fSBjoern A. Zeeb EXPORT_SYMBOL(rtw89_pci_disable_intr); 677*8e93258fSBjoern A. Zeeb 678*8e93258fSBjoern A. Zeeb void rtw89_pci_enable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) 679*8e93258fSBjoern A. Zeeb { 680*8e93258fSBjoern A. Zeeb rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, rtwpci->ind_intrs); 681*8e93258fSBjoern A. Zeeb rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs); 682*8e93258fSBjoern A. Zeeb rtw89_write32(rtwdev, R_AX_HAXI_HIMR00, rtwpci->intrs[0]); 683*8e93258fSBjoern A. Zeeb rtw89_write32(rtwdev, R_AX_HIMR1, rtwpci->intrs[1]); 684*8e93258fSBjoern A. Zeeb } 685*8e93258fSBjoern A. Zeeb EXPORT_SYMBOL(rtw89_pci_enable_intr_v1); 686*8e93258fSBjoern A. Zeeb 687*8e93258fSBjoern A. Zeeb void rtw89_pci_disable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci) 688*8e93258fSBjoern A. Zeeb { 689*8e93258fSBjoern A. Zeeb rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, 0); 690*8e93258fSBjoern A. Zeeb } 691*8e93258fSBjoern A. Zeeb EXPORT_SYMBOL(rtw89_pci_disable_intr_v1); 692*8e93258fSBjoern A. Zeeb 693*8e93258fSBjoern A. Zeeb static void rtw89_pci_ops_recovery_start(struct rtw89_dev *rtwdev) 694*8e93258fSBjoern A. Zeeb { 695*8e93258fSBjoern A. 
Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 696*8e93258fSBjoern A. Zeeb unsigned long flags; 697*8e93258fSBjoern A. Zeeb 698*8e93258fSBjoern A. Zeeb spin_lock_irqsave(&rtwpci->irq_lock, flags); 699*8e93258fSBjoern A. Zeeb rtw89_chip_disable_intr(rtwdev, rtwpci); 700*8e93258fSBjoern A. Zeeb rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_START); 701*8e93258fSBjoern A. Zeeb rtw89_chip_enable_intr(rtwdev, rtwpci); 702*8e93258fSBjoern A. Zeeb spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 703*8e93258fSBjoern A. Zeeb } 704*8e93258fSBjoern A. Zeeb 705*8e93258fSBjoern A. Zeeb static void rtw89_pci_ops_recovery_complete(struct rtw89_dev *rtwdev) 706*8e93258fSBjoern A. Zeeb { 707*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 708*8e93258fSBjoern A. Zeeb unsigned long flags; 709*8e93258fSBjoern A. Zeeb 710*8e93258fSBjoern A. Zeeb spin_lock_irqsave(&rtwpci->irq_lock, flags); 711*8e93258fSBjoern A. Zeeb rtw89_chip_disable_intr(rtwdev, rtwpci); 712*8e93258fSBjoern A. Zeeb rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_COMPLETE); 713*8e93258fSBjoern A. Zeeb rtw89_chip_enable_intr(rtwdev, rtwpci); 714*8e93258fSBjoern A. Zeeb spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 715*8e93258fSBjoern A. Zeeb } 716*8e93258fSBjoern A. Zeeb 717*8e93258fSBjoern A. Zeeb static void rtw89_pci_low_power_interrupt_handler(struct rtw89_dev *rtwdev) 718*8e93258fSBjoern A. Zeeb { 719*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 720*8e93258fSBjoern A. Zeeb int budget = NAPI_POLL_WEIGHT; 721*8e93258fSBjoern A. Zeeb 722*8e93258fSBjoern A. Zeeb /* To prevent RXQ get stuck due to run out of budget. */ 723*8e93258fSBjoern A. Zeeb rtwdev->napi_budget_countdown = budget; 724*8e93258fSBjoern A. Zeeb 725*8e93258fSBjoern A. Zeeb rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, budget); 726*8e93258fSBjoern A. Zeeb rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, budget); 727*8e93258fSBjoern A. 
Zeeb } 728*8e93258fSBjoern A. Zeeb 729*8e93258fSBjoern A. Zeeb static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev) 730*8e93258fSBjoern A. Zeeb { 731*8e93258fSBjoern A. Zeeb struct rtw89_dev *rtwdev = dev; 732*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 733*8e93258fSBjoern A. Zeeb struct rtw89_pci_isrs isrs; 734*8e93258fSBjoern A. Zeeb unsigned long flags; 735*8e93258fSBjoern A. Zeeb 736*8e93258fSBjoern A. Zeeb spin_lock_irqsave(&rtwpci->irq_lock, flags); 737*8e93258fSBjoern A. Zeeb rtw89_chip_recognize_intrs(rtwdev, rtwpci, &isrs); 738*8e93258fSBjoern A. Zeeb spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 739*8e93258fSBjoern A. Zeeb 740*8e93258fSBjoern A. Zeeb if (unlikely(isrs.isrs[0] & B_AX_RDU_INT)) 741*8e93258fSBjoern A. Zeeb rtw89_pci_isr_rxd_unavail(rtwdev, rtwpci); 742*8e93258fSBjoern A. Zeeb 743*8e93258fSBjoern A. Zeeb if (unlikely(isrs.halt_c2h_isrs & B_AX_HALT_C2H_INT_EN)) 744*8e93258fSBjoern A. Zeeb rtw89_ser_notify(rtwdev, rtw89_mac_get_err_status(rtwdev)); 745*8e93258fSBjoern A. Zeeb 746*8e93258fSBjoern A. Zeeb if (unlikely(isrs.halt_c2h_isrs & B_AX_WDT_TIMEOUT_INT_EN)) 747*8e93258fSBjoern A. Zeeb rtw89_ser_notify(rtwdev, MAC_AX_ERR_L2_ERR_WDT_TIMEOUT_INT); 748*8e93258fSBjoern A. Zeeb 749*8e93258fSBjoern A. Zeeb if (unlikely(rtwpci->under_recovery)) 750*8e93258fSBjoern A. Zeeb goto enable_intr; 751*8e93258fSBjoern A. Zeeb 752*8e93258fSBjoern A. Zeeb if (unlikely(rtwpci->low_power)) { 753*8e93258fSBjoern A. Zeeb rtw89_pci_low_power_interrupt_handler(rtwdev); 754*8e93258fSBjoern A. Zeeb goto enable_intr; 755*8e93258fSBjoern A. Zeeb } 756*8e93258fSBjoern A. Zeeb 757*8e93258fSBjoern A. Zeeb if (likely(rtwpci->running)) { 758*8e93258fSBjoern A. Zeeb local_bh_disable(); 759*8e93258fSBjoern A. Zeeb napi_schedule(&rtwdev->napi); 760*8e93258fSBjoern A. Zeeb local_bh_enable(); 761*8e93258fSBjoern A. Zeeb } 762*8e93258fSBjoern A. Zeeb 763*8e93258fSBjoern A. Zeeb return IRQ_HANDLED; 764*8e93258fSBjoern A. 
Zeeb 765*8e93258fSBjoern A. Zeeb enable_intr: 766*8e93258fSBjoern A. Zeeb spin_lock_irqsave(&rtwpci->irq_lock, flags); 767*8e93258fSBjoern A. Zeeb if (likely(rtwpci->running)) 768*8e93258fSBjoern A. Zeeb rtw89_chip_enable_intr(rtwdev, rtwpci); 769*8e93258fSBjoern A. Zeeb spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 770*8e93258fSBjoern A. Zeeb return IRQ_HANDLED; 771*8e93258fSBjoern A. Zeeb } 772*8e93258fSBjoern A. Zeeb 773*8e93258fSBjoern A. Zeeb static irqreturn_t rtw89_pci_interrupt_handler(int irq, void *dev) 774*8e93258fSBjoern A. Zeeb { 775*8e93258fSBjoern A. Zeeb struct rtw89_dev *rtwdev = dev; 776*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 777*8e93258fSBjoern A. Zeeb unsigned long flags; 778*8e93258fSBjoern A. Zeeb irqreturn_t irqret = IRQ_WAKE_THREAD; 779*8e93258fSBjoern A. Zeeb 780*8e93258fSBjoern A. Zeeb spin_lock_irqsave(&rtwpci->irq_lock, flags); 781*8e93258fSBjoern A. Zeeb 782*8e93258fSBjoern A. Zeeb /* If interrupt event is on the road, it is still trigger interrupt 783*8e93258fSBjoern A. Zeeb * even we have done pci_stop() to turn off IMR. 784*8e93258fSBjoern A. Zeeb */ 785*8e93258fSBjoern A. Zeeb if (unlikely(!rtwpci->running)) { 786*8e93258fSBjoern A. Zeeb irqret = IRQ_HANDLED; 787*8e93258fSBjoern A. Zeeb goto exit; 788*8e93258fSBjoern A. Zeeb } 789*8e93258fSBjoern A. Zeeb 790*8e93258fSBjoern A. Zeeb rtw89_chip_disable_intr(rtwdev, rtwpci); 791*8e93258fSBjoern A. Zeeb exit: 792*8e93258fSBjoern A. Zeeb spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 793*8e93258fSBjoern A. Zeeb 794*8e93258fSBjoern A. Zeeb return irqret; 795*8e93258fSBjoern A. Zeeb } 796*8e93258fSBjoern A. Zeeb 797*8e93258fSBjoern A. Zeeb #define DEF_TXCHADDRS_TYPE1(info, txch, v...) \ 798*8e93258fSBjoern A. Zeeb [RTW89_TXCH_##txch] = { \ 799*8e93258fSBjoern A. Zeeb .num = R_AX_##txch##_TXBD_NUM ##v, \ 800*8e93258fSBjoern A. Zeeb .idx = R_AX_##txch##_TXBD_IDX ##v, \ 801*8e93258fSBjoern A. 
Zeeb .bdram = R_AX_##txch##_BDRAM_CTRL ##v, \ 802*8e93258fSBjoern A. Zeeb .desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \ 803*8e93258fSBjoern A. Zeeb .desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \ 804*8e93258fSBjoern A. Zeeb } 805*8e93258fSBjoern A. Zeeb 806*8e93258fSBjoern A. Zeeb #define DEF_TXCHADDRS(info, txch, v...) \ 807*8e93258fSBjoern A. Zeeb [RTW89_TXCH_##txch] = { \ 808*8e93258fSBjoern A. Zeeb .num = R_AX_##txch##_TXBD_NUM, \ 809*8e93258fSBjoern A. Zeeb .idx = R_AX_##txch##_TXBD_IDX, \ 810*8e93258fSBjoern A. Zeeb .bdram = R_AX_##txch##_BDRAM_CTRL ##v, \ 811*8e93258fSBjoern A. Zeeb .desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \ 812*8e93258fSBjoern A. Zeeb .desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \ 813*8e93258fSBjoern A. Zeeb } 814*8e93258fSBjoern A. Zeeb 815*8e93258fSBjoern A. Zeeb #define DEF_RXCHADDRS(info, rxch, v...) \ 816*8e93258fSBjoern A. Zeeb [RTW89_RXCH_##rxch] = { \ 817*8e93258fSBjoern A. Zeeb .num = R_AX_##rxch##_RXBD_NUM ##v, \ 818*8e93258fSBjoern A. Zeeb .idx = R_AX_##rxch##_RXBD_IDX ##v, \ 819*8e93258fSBjoern A. Zeeb .desa_l = R_AX_##rxch##_RXBD_DESA_L ##v, \ 820*8e93258fSBjoern A. Zeeb .desa_h = R_AX_##rxch##_RXBD_DESA_H ##v, \ 821*8e93258fSBjoern A. Zeeb } 822*8e93258fSBjoern A. Zeeb 823*8e93258fSBjoern A. Zeeb const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set = { 824*8e93258fSBjoern A. Zeeb .tx = { 825*8e93258fSBjoern A. Zeeb DEF_TXCHADDRS(info, ACH0), 826*8e93258fSBjoern A. Zeeb DEF_TXCHADDRS(info, ACH1), 827*8e93258fSBjoern A. Zeeb DEF_TXCHADDRS(info, ACH2), 828*8e93258fSBjoern A. Zeeb DEF_TXCHADDRS(info, ACH3), 829*8e93258fSBjoern A. Zeeb DEF_TXCHADDRS(info, ACH4), 830*8e93258fSBjoern A. Zeeb DEF_TXCHADDRS(info, ACH5), 831*8e93258fSBjoern A. Zeeb DEF_TXCHADDRS(info, ACH6), 832*8e93258fSBjoern A. Zeeb DEF_TXCHADDRS(info, ACH7), 833*8e93258fSBjoern A. Zeeb DEF_TXCHADDRS(info, CH8), 834*8e93258fSBjoern A. Zeeb DEF_TXCHADDRS(info, CH9), 835*8e93258fSBjoern A. Zeeb DEF_TXCHADDRS_TYPE1(info, CH10), 836*8e93258fSBjoern A. 
Zeeb DEF_TXCHADDRS_TYPE1(info, CH11), 837*8e93258fSBjoern A. Zeeb DEF_TXCHADDRS(info, CH12), 838*8e93258fSBjoern A. Zeeb }, 839*8e93258fSBjoern A. Zeeb .rx = { 840*8e93258fSBjoern A. Zeeb DEF_RXCHADDRS(info, RXQ), 841*8e93258fSBjoern A. Zeeb DEF_RXCHADDRS(info, RPQ), 842*8e93258fSBjoern A. Zeeb }, 843*8e93258fSBjoern A. Zeeb }; 844*8e93258fSBjoern A. Zeeb EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set); 845*8e93258fSBjoern A. Zeeb 846*8e93258fSBjoern A. Zeeb const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1 = { 847*8e93258fSBjoern A. Zeeb .tx = { 848*8e93258fSBjoern A. Zeeb DEF_TXCHADDRS(info, ACH0, _V1), 849*8e93258fSBjoern A. Zeeb DEF_TXCHADDRS(info, ACH1, _V1), 850*8e93258fSBjoern A. Zeeb DEF_TXCHADDRS(info, ACH2, _V1), 851*8e93258fSBjoern A. Zeeb DEF_TXCHADDRS(info, ACH3, _V1), 852*8e93258fSBjoern A. Zeeb DEF_TXCHADDRS(info, ACH4, _V1), 853*8e93258fSBjoern A. Zeeb DEF_TXCHADDRS(info, ACH5, _V1), 854*8e93258fSBjoern A. Zeeb DEF_TXCHADDRS(info, ACH6, _V1), 855*8e93258fSBjoern A. Zeeb DEF_TXCHADDRS(info, ACH7, _V1), 856*8e93258fSBjoern A. Zeeb DEF_TXCHADDRS(info, CH8, _V1), 857*8e93258fSBjoern A. Zeeb DEF_TXCHADDRS(info, CH9, _V1), 858*8e93258fSBjoern A. Zeeb DEF_TXCHADDRS_TYPE1(info, CH10, _V1), 859*8e93258fSBjoern A. Zeeb DEF_TXCHADDRS_TYPE1(info, CH11, _V1), 860*8e93258fSBjoern A. Zeeb DEF_TXCHADDRS(info, CH12, _V1), 861*8e93258fSBjoern A. Zeeb }, 862*8e93258fSBjoern A. Zeeb .rx = { 863*8e93258fSBjoern A. Zeeb DEF_RXCHADDRS(info, RXQ, _V1), 864*8e93258fSBjoern A. Zeeb DEF_RXCHADDRS(info, RPQ, _V1), 865*8e93258fSBjoern A. Zeeb }, 866*8e93258fSBjoern A. Zeeb }; 867*8e93258fSBjoern A. Zeeb EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_v1); 868*8e93258fSBjoern A. Zeeb 869*8e93258fSBjoern A. Zeeb #undef DEF_TXCHADDRS_TYPE1 870*8e93258fSBjoern A. Zeeb #undef DEF_TXCHADDRS 871*8e93258fSBjoern A. Zeeb #undef DEF_RXCHADDRS 872*8e93258fSBjoern A. Zeeb 873*8e93258fSBjoern A. Zeeb static int rtw89_pci_get_txch_addrs(struct rtw89_dev *rtwdev, 874*8e93258fSBjoern A. 
Zeeb enum rtw89_tx_channel txch, 875*8e93258fSBjoern A. Zeeb const struct rtw89_pci_ch_dma_addr **addr) 876*8e93258fSBjoern A. Zeeb { 877*8e93258fSBjoern A. Zeeb const struct rtw89_pci_info *info = rtwdev->pci_info; 878*8e93258fSBjoern A. Zeeb 879*8e93258fSBjoern A. Zeeb if (txch >= RTW89_TXCH_NUM) 880*8e93258fSBjoern A. Zeeb return -EINVAL; 881*8e93258fSBjoern A. Zeeb 882*8e93258fSBjoern A. Zeeb *addr = &info->dma_addr_set->tx[txch]; 883*8e93258fSBjoern A. Zeeb 884*8e93258fSBjoern A. Zeeb return 0; 885*8e93258fSBjoern A. Zeeb } 886*8e93258fSBjoern A. Zeeb 887*8e93258fSBjoern A. Zeeb static int rtw89_pci_get_rxch_addrs(struct rtw89_dev *rtwdev, 888*8e93258fSBjoern A. Zeeb enum rtw89_rx_channel rxch, 889*8e93258fSBjoern A. Zeeb const struct rtw89_pci_ch_dma_addr **addr) 890*8e93258fSBjoern A. Zeeb { 891*8e93258fSBjoern A. Zeeb const struct rtw89_pci_info *info = rtwdev->pci_info; 892*8e93258fSBjoern A. Zeeb 893*8e93258fSBjoern A. Zeeb if (rxch >= RTW89_RXCH_NUM) 894*8e93258fSBjoern A. Zeeb return -EINVAL; 895*8e93258fSBjoern A. Zeeb 896*8e93258fSBjoern A. Zeeb *addr = &info->dma_addr_set->rx[rxch]; 897*8e93258fSBjoern A. Zeeb 898*8e93258fSBjoern A. Zeeb return 0; 899*8e93258fSBjoern A. Zeeb } 900*8e93258fSBjoern A. Zeeb 901*8e93258fSBjoern A. Zeeb static u32 rtw89_pci_get_avail_txbd_num(struct rtw89_pci_tx_ring *ring) 902*8e93258fSBjoern A. Zeeb { 903*8e93258fSBjoern A. Zeeb struct rtw89_pci_dma_ring *bd_ring = &ring->bd_ring; 904*8e93258fSBjoern A. Zeeb 905*8e93258fSBjoern A. Zeeb /* reserved 1 desc check ring is full or not */ 906*8e93258fSBjoern A. Zeeb if (bd_ring->rp > bd_ring->wp) 907*8e93258fSBjoern A. Zeeb return bd_ring->rp - bd_ring->wp - 1; 908*8e93258fSBjoern A. Zeeb 909*8e93258fSBjoern A. Zeeb return bd_ring->len - (bd_ring->wp - bd_ring->rp) - 1; 910*8e93258fSBjoern A. Zeeb } 911*8e93258fSBjoern A. Zeeb 912*8e93258fSBjoern A. Zeeb static 913*8e93258fSBjoern A. 
Zeeb u32 __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(struct rtw89_dev *rtwdev) 914*8e93258fSBjoern A. Zeeb { 915*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 916*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12]; 917*8e93258fSBjoern A. Zeeb u32 cnt; 918*8e93258fSBjoern A. Zeeb 919*8e93258fSBjoern A. Zeeb spin_lock_bh(&rtwpci->trx_lock); 920*8e93258fSBjoern A. Zeeb rtw89_pci_reclaim_tx_fwcmd(rtwdev, rtwpci); 921*8e93258fSBjoern A. Zeeb cnt = rtw89_pci_get_avail_txbd_num(tx_ring); 922*8e93258fSBjoern A. Zeeb spin_unlock_bh(&rtwpci->trx_lock); 923*8e93258fSBjoern A. Zeeb 924*8e93258fSBjoern A. Zeeb return cnt; 925*8e93258fSBjoern A. Zeeb } 926*8e93258fSBjoern A. Zeeb 927*8e93258fSBjoern A. Zeeb static 928*8e93258fSBjoern A. Zeeb u32 __rtw89_pci_check_and_reclaim_tx_resource_noio(struct rtw89_dev *rtwdev, 929*8e93258fSBjoern A. Zeeb u8 txch) 930*8e93258fSBjoern A. Zeeb { 931*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 932*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch]; 933*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 934*8e93258fSBjoern A. Zeeb u32 cnt; 935*8e93258fSBjoern A. Zeeb 936*8e93258fSBjoern A. Zeeb spin_lock_bh(&rtwpci->trx_lock); 937*8e93258fSBjoern A. Zeeb cnt = rtw89_pci_get_avail_txbd_num(tx_ring); 938*8e93258fSBjoern A. Zeeb cnt = min(cnt, wd_ring->curr_num); 939*8e93258fSBjoern A. Zeeb spin_unlock_bh(&rtwpci->trx_lock); 940*8e93258fSBjoern A. Zeeb 941*8e93258fSBjoern A. Zeeb return cnt; 942*8e93258fSBjoern A. Zeeb } 943*8e93258fSBjoern A. Zeeb 944*8e93258fSBjoern A. Zeeb static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev, 945*8e93258fSBjoern A. Zeeb u8 txch) 946*8e93258fSBjoern A. Zeeb { 947*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 948*8e93258fSBjoern A. 
Zeeb struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch]; 949*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 950*8e93258fSBjoern A. Zeeb u32 bd_cnt, wd_cnt, min_cnt = 0; 951*8e93258fSBjoern A. Zeeb struct rtw89_pci_rx_ring *rx_ring; 952*8e93258fSBjoern A. Zeeb u32 cnt; 953*8e93258fSBjoern A. Zeeb 954*8e93258fSBjoern A. Zeeb rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ]; 955*8e93258fSBjoern A. Zeeb 956*8e93258fSBjoern A. Zeeb spin_lock_bh(&rtwpci->trx_lock); 957*8e93258fSBjoern A. Zeeb bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring); 958*8e93258fSBjoern A. Zeeb wd_cnt = wd_ring->curr_num; 959*8e93258fSBjoern A. Zeeb 960*8e93258fSBjoern A. Zeeb if (wd_cnt == 0 || bd_cnt == 0) { 961*8e93258fSBjoern A. Zeeb cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring); 962*8e93258fSBjoern A. Zeeb if (cnt) 963*8e93258fSBjoern A. Zeeb rtw89_pci_release_tx(rtwdev, rx_ring, cnt); 964*8e93258fSBjoern A. Zeeb else if (wd_cnt == 0) 965*8e93258fSBjoern A. Zeeb goto out_unlock; 966*8e93258fSBjoern A. Zeeb 967*8e93258fSBjoern A. Zeeb bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring); 968*8e93258fSBjoern A. Zeeb if (bd_cnt == 0) 969*8e93258fSBjoern A. Zeeb rtw89_pci_reclaim_txbd(rtwdev, tx_ring); 970*8e93258fSBjoern A. Zeeb } 971*8e93258fSBjoern A. Zeeb 972*8e93258fSBjoern A. Zeeb bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring); 973*8e93258fSBjoern A. Zeeb wd_cnt = wd_ring->curr_num; 974*8e93258fSBjoern A. Zeeb min_cnt = min(bd_cnt, wd_cnt); 975*8e93258fSBjoern A. Zeeb if (min_cnt == 0) 976*8e93258fSBjoern A. Zeeb rtw89_debug(rtwdev, rtwpci->low_power ? RTW89_DBG_TXRX : RTW89_DBG_UNEXP, 977*8e93258fSBjoern A. Zeeb "still no tx resource after reclaim: wd_cnt=%d bd_cnt=%d\n", 978*8e93258fSBjoern A. Zeeb wd_cnt, bd_cnt); 979*8e93258fSBjoern A. Zeeb 980*8e93258fSBjoern A. Zeeb out_unlock: 981*8e93258fSBjoern A. Zeeb spin_unlock_bh(&rtwpci->trx_lock); 982*8e93258fSBjoern A. Zeeb 983*8e93258fSBjoern A. Zeeb return min_cnt; 984*8e93258fSBjoern A. 
Zeeb } 985*8e93258fSBjoern A. Zeeb 986*8e93258fSBjoern A. Zeeb static u32 rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev, 987*8e93258fSBjoern A. Zeeb u8 txch) 988*8e93258fSBjoern A. Zeeb { 989*8e93258fSBjoern A. Zeeb if (rtwdev->hci.paused) 990*8e93258fSBjoern A. Zeeb return __rtw89_pci_check_and_reclaim_tx_resource_noio(rtwdev, txch); 991*8e93258fSBjoern A. Zeeb 992*8e93258fSBjoern A. Zeeb if (txch == RTW89_TXCH_CH12) 993*8e93258fSBjoern A. Zeeb return __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(rtwdev); 994*8e93258fSBjoern A. Zeeb 995*8e93258fSBjoern A. Zeeb return __rtw89_pci_check_and_reclaim_tx_resource(rtwdev, txch); 996*8e93258fSBjoern A. Zeeb } 997*8e93258fSBjoern A. Zeeb 998*8e93258fSBjoern A. Zeeb static void __rtw89_pci_tx_kick_off(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring) 999*8e93258fSBjoern A. Zeeb { 1000*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1001*8e93258fSBjoern A. Zeeb struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring; 1002*8e93258fSBjoern A. Zeeb u32 host_idx, addr; 1003*8e93258fSBjoern A. Zeeb 1004*8e93258fSBjoern A. Zeeb spin_lock_bh(&rtwpci->trx_lock); 1005*8e93258fSBjoern A. Zeeb 1006*8e93258fSBjoern A. Zeeb addr = bd_ring->addr.idx; 1007*8e93258fSBjoern A. Zeeb host_idx = bd_ring->wp; 1008*8e93258fSBjoern A. Zeeb rtw89_write16(rtwdev, addr, host_idx); 1009*8e93258fSBjoern A. Zeeb 1010*8e93258fSBjoern A. Zeeb spin_unlock_bh(&rtwpci->trx_lock); 1011*8e93258fSBjoern A. Zeeb } 1012*8e93258fSBjoern A. Zeeb 1013*8e93258fSBjoern A. Zeeb static void rtw89_pci_tx_bd_ring_update(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring, 1014*8e93258fSBjoern A. Zeeb int n_txbd) 1015*8e93258fSBjoern A. Zeeb { 1016*8e93258fSBjoern A. Zeeb struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring; 1017*8e93258fSBjoern A. Zeeb u32 host_idx, len; 1018*8e93258fSBjoern A. Zeeb 1019*8e93258fSBjoern A. Zeeb len = bd_ring->len; 1020*8e93258fSBjoern A. 
Zeeb host_idx = bd_ring->wp + n_txbd; 1021*8e93258fSBjoern A. Zeeb host_idx = host_idx < len ? host_idx : host_idx - len; 1022*8e93258fSBjoern A. Zeeb 1023*8e93258fSBjoern A. Zeeb bd_ring->wp = host_idx; 1024*8e93258fSBjoern A. Zeeb } 1025*8e93258fSBjoern A. Zeeb 1026*8e93258fSBjoern A. Zeeb static void rtw89_pci_ops_tx_kick_off(struct rtw89_dev *rtwdev, u8 txch) 1027*8e93258fSBjoern A. Zeeb { 1028*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1029*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch]; 1030*8e93258fSBjoern A. Zeeb 1031*8e93258fSBjoern A. Zeeb if (rtwdev->hci.paused) { 1032*8e93258fSBjoern A. Zeeb set_bit(txch, rtwpci->kick_map); 1033*8e93258fSBjoern A. Zeeb return; 1034*8e93258fSBjoern A. Zeeb } 1035*8e93258fSBjoern A. Zeeb 1036*8e93258fSBjoern A. Zeeb __rtw89_pci_tx_kick_off(rtwdev, tx_ring); 1037*8e93258fSBjoern A. Zeeb } 1038*8e93258fSBjoern A. Zeeb 1039*8e93258fSBjoern A. Zeeb static void rtw89_pci_tx_kick_off_pending(struct rtw89_dev *rtwdev) 1040*8e93258fSBjoern A. Zeeb { 1041*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1042*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_ring *tx_ring; 1043*8e93258fSBjoern A. Zeeb int txch; 1044*8e93258fSBjoern A. Zeeb 1045*8e93258fSBjoern A. Zeeb for (txch = 0; txch < RTW89_TXCH_NUM; txch++) { 1046*8e93258fSBjoern A. Zeeb if (!test_and_clear_bit(txch, rtwpci->kick_map)) 1047*8e93258fSBjoern A. Zeeb continue; 1048*8e93258fSBjoern A. Zeeb 1049*8e93258fSBjoern A. Zeeb tx_ring = &rtwpci->tx_rings[txch]; 1050*8e93258fSBjoern A. Zeeb __rtw89_pci_tx_kick_off(rtwdev, tx_ring); 1051*8e93258fSBjoern A. Zeeb } 1052*8e93258fSBjoern A. Zeeb } 1053*8e93258fSBjoern A. Zeeb 1054*8e93258fSBjoern A. Zeeb static void __pci_flush_txch(struct rtw89_dev *rtwdev, u8 txch, bool drop) 1055*8e93258fSBjoern A. Zeeb { 1056*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1057*8e93258fSBjoern A. 
Zeeb struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch]; 1058*8e93258fSBjoern A. Zeeb struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring; 1059*8e93258fSBjoern A. Zeeb u32 cur_idx, cur_rp; 1060*8e93258fSBjoern A. Zeeb u8 i; 1061*8e93258fSBjoern A. Zeeb 1062*8e93258fSBjoern A. Zeeb /* Because the time taked by the I/O is a bit dynamic, it's hard to 1063*8e93258fSBjoern A. Zeeb * define a reasonable fixed total timeout to use read_poll_timeout* 1064*8e93258fSBjoern A. Zeeb * helper. Instead, we can ensure a reasonable polling times, so we 1065*8e93258fSBjoern A. Zeeb * just use for loop with udelay here. 1066*8e93258fSBjoern A. Zeeb */ 1067*8e93258fSBjoern A. Zeeb for (i = 0; i < 60; i++) { 1068*8e93258fSBjoern A. Zeeb cur_idx = rtw89_read32(rtwdev, bd_ring->addr.idx); 1069*8e93258fSBjoern A. Zeeb cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx); 1070*8e93258fSBjoern A. Zeeb if (cur_rp == bd_ring->wp) 1071*8e93258fSBjoern A. Zeeb return; 1072*8e93258fSBjoern A. Zeeb 1073*8e93258fSBjoern A. Zeeb udelay(1); 1074*8e93258fSBjoern A. Zeeb } 1075*8e93258fSBjoern A. Zeeb 1076*8e93258fSBjoern A. Zeeb if (!drop) 1077*8e93258fSBjoern A. Zeeb rtw89_info(rtwdev, "timed out to flush pci txch: %d\n", txch); 1078*8e93258fSBjoern A. Zeeb } 1079*8e93258fSBjoern A. Zeeb 1080*8e93258fSBjoern A. Zeeb static void __rtw89_pci_ops_flush_txchs(struct rtw89_dev *rtwdev, u32 txchs, 1081*8e93258fSBjoern A. Zeeb bool drop) 1082*8e93258fSBjoern A. Zeeb { 1083*8e93258fSBjoern A. Zeeb u8 i; 1084*8e93258fSBjoern A. Zeeb 1085*8e93258fSBjoern A. Zeeb for (i = 0; i < RTW89_TXCH_NUM; i++) { 1086*8e93258fSBjoern A. Zeeb /* It may be unnecessary to flush FWCMD queue. */ 1087*8e93258fSBjoern A. Zeeb if (i == RTW89_TXCH_CH12) 1088*8e93258fSBjoern A. Zeeb continue; 1089*8e93258fSBjoern A. Zeeb 1090*8e93258fSBjoern A. Zeeb if (txchs & BIT(i)) 1091*8e93258fSBjoern A. Zeeb __pci_flush_txch(rtwdev, i, drop); 1092*8e93258fSBjoern A. Zeeb } 1093*8e93258fSBjoern A. Zeeb } 1094*8e93258fSBjoern A. 
Zeeb 1095*8e93258fSBjoern A. Zeeb static void rtw89_pci_ops_flush_queues(struct rtw89_dev *rtwdev, u32 queues, 1096*8e93258fSBjoern A. Zeeb bool drop) 1097*8e93258fSBjoern A. Zeeb { 1098*8e93258fSBjoern A. Zeeb __rtw89_pci_ops_flush_txchs(rtwdev, BIT(RTW89_TXCH_NUM) - 1, drop); 1099*8e93258fSBjoern A. Zeeb } 1100*8e93258fSBjoern A. Zeeb 1101*8e93258fSBjoern A. Zeeb u32 rtw89_pci_fill_txaddr_info(struct rtw89_dev *rtwdev, 1102*8e93258fSBjoern A. Zeeb void *txaddr_info_addr, u32 total_len, 1103*8e93258fSBjoern A. Zeeb dma_addr_t dma, u8 *add_info_nr) 1104*8e93258fSBjoern A. Zeeb { 1105*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_addr_info_32 *txaddr_info = txaddr_info_addr; 1106*8e93258fSBjoern A. Zeeb 1107*8e93258fSBjoern A. Zeeb txaddr_info->length = cpu_to_le16(total_len); 1108*8e93258fSBjoern A. Zeeb txaddr_info->option = cpu_to_le16(RTW89_PCI_ADDR_MSDU_LS | 1109*8e93258fSBjoern A. Zeeb RTW89_PCI_ADDR_NUM(1)); 1110*8e93258fSBjoern A. Zeeb txaddr_info->dma = cpu_to_le32(dma); 1111*8e93258fSBjoern A. Zeeb 1112*8e93258fSBjoern A. Zeeb *add_info_nr = 1; 1113*8e93258fSBjoern A. Zeeb 1114*8e93258fSBjoern A. Zeeb return sizeof(*txaddr_info); 1115*8e93258fSBjoern A. Zeeb } 1116*8e93258fSBjoern A. Zeeb EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info); 1117*8e93258fSBjoern A. Zeeb 1118*8e93258fSBjoern A. Zeeb u32 rtw89_pci_fill_txaddr_info_v1(struct rtw89_dev *rtwdev, 1119*8e93258fSBjoern A. Zeeb void *txaddr_info_addr, u32 total_len, 1120*8e93258fSBjoern A. Zeeb dma_addr_t dma, u8 *add_info_nr) 1121*8e93258fSBjoern A. Zeeb { 1122*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_addr_info_32_v1 *txaddr_info = txaddr_info_addr; 1123*8e93258fSBjoern A. Zeeb u32 remain = total_len; 1124*8e93258fSBjoern A. Zeeb u32 len; 1125*8e93258fSBjoern A. Zeeb u16 length_option; 1126*8e93258fSBjoern A. Zeeb int n; 1127*8e93258fSBjoern A. Zeeb 1128*8e93258fSBjoern A. Zeeb for (n = 0; n < RTW89_TXADDR_INFO_NR_V1 && remain; n++) { 1129*8e93258fSBjoern A. Zeeb len = remain >= TXADDR_INFO_LENTHG_V1_MAX ? 
1130*8e93258fSBjoern A. Zeeb TXADDR_INFO_LENTHG_V1_MAX : remain; 1131*8e93258fSBjoern A. Zeeb remain -= len; 1132*8e93258fSBjoern A. Zeeb 1133*8e93258fSBjoern A. Zeeb length_option = FIELD_PREP(B_PCIADDR_LEN_V1_MASK, len) | 1134*8e93258fSBjoern A. Zeeb FIELD_PREP(B_PCIADDR_HIGH_SEL_V1_MASK, 0) | 1135*8e93258fSBjoern A. Zeeb FIELD_PREP(B_PCIADDR_LS_V1_MASK, remain == 0); 1136*8e93258fSBjoern A. Zeeb txaddr_info->length_opt = cpu_to_le16(length_option); 1137*8e93258fSBjoern A. Zeeb txaddr_info->dma_low_lsb = cpu_to_le16(FIELD_GET(GENMASK(15, 0), dma)); 1138*8e93258fSBjoern A. Zeeb txaddr_info->dma_low_msb = cpu_to_le16(FIELD_GET(GENMASK(31, 16), dma)); 1139*8e93258fSBjoern A. Zeeb 1140*8e93258fSBjoern A. Zeeb dma += len; 1141*8e93258fSBjoern A. Zeeb txaddr_info++; 1142*8e93258fSBjoern A. Zeeb } 1143*8e93258fSBjoern A. Zeeb 1144*8e93258fSBjoern A. Zeeb WARN_ONCE(remain, "length overflow remain=%u total_len=%u", 1145*8e93258fSBjoern A. Zeeb remain, total_len); 1146*8e93258fSBjoern A. Zeeb 1147*8e93258fSBjoern A. Zeeb *add_info_nr = n; 1148*8e93258fSBjoern A. Zeeb 1149*8e93258fSBjoern A. Zeeb return n * sizeof(*txaddr_info); 1150*8e93258fSBjoern A. Zeeb } 1151*8e93258fSBjoern A. Zeeb EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info_v1); 1152*8e93258fSBjoern A. Zeeb 1153*8e93258fSBjoern A. Zeeb static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev, 1154*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_ring *tx_ring, 1155*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_wd *txwd, 1156*8e93258fSBjoern A. Zeeb struct rtw89_core_tx_request *tx_req) 1157*8e93258fSBjoern A. Zeeb { 1158*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1159*8e93258fSBjoern A. Zeeb const struct rtw89_chip_info *chip = rtwdev->chip; 1160*8e93258fSBjoern A. Zeeb struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; 1161*8e93258fSBjoern A. Zeeb struct rtw89_txwd_info *txwd_info; 1162*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_wp_info *txwp_info; 1163*8e93258fSBjoern A. 
Zeeb void *txaddr_info_addr; 1164*8e93258fSBjoern A. Zeeb struct pci_dev *pdev = rtwpci->pdev; 1165*8e93258fSBjoern A. Zeeb struct sk_buff *skb = tx_req->skb; 1166*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb); 1167*8e93258fSBjoern A. Zeeb bool en_wd_info = desc_info->en_wd_info; 1168*8e93258fSBjoern A. Zeeb u32 txwd_len; 1169*8e93258fSBjoern A. Zeeb u32 txwp_len; 1170*8e93258fSBjoern A. Zeeb u32 txaddr_info_len; 1171*8e93258fSBjoern A. Zeeb dma_addr_t dma; 1172*8e93258fSBjoern A. Zeeb int ret; 1173*8e93258fSBjoern A. Zeeb 1174*8e93258fSBjoern A. Zeeb dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE); 1175*8e93258fSBjoern A. Zeeb if (dma_mapping_error(&pdev->dev, dma)) { 1176*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "failed to map skb dma data\n"); 1177*8e93258fSBjoern A. Zeeb ret = -EBUSY; 1178*8e93258fSBjoern A. Zeeb goto err; 1179*8e93258fSBjoern A. Zeeb } 1180*8e93258fSBjoern A. Zeeb 1181*8e93258fSBjoern A. Zeeb tx_data->dma = dma; 1182*8e93258fSBjoern A. Zeeb 1183*8e93258fSBjoern A. Zeeb txwp_len = sizeof(*txwp_info); 1184*8e93258fSBjoern A. Zeeb txwd_len = chip->txwd_body_size; 1185*8e93258fSBjoern A. Zeeb txwd_len += en_wd_info ? sizeof(*txwd_info) : 0; 1186*8e93258fSBjoern A. Zeeb 1187*8e93258fSBjoern A. Zeeb #if defined(__linux__) 1188*8e93258fSBjoern A. Zeeb txwp_info = txwd->vaddr + txwd_len; 1189*8e93258fSBjoern A. Zeeb #elif defined(__FreeBSD__) 1190*8e93258fSBjoern A. Zeeb txwp_info = (struct rtw89_pci_tx_wp_info *)((u8 *)txwd->vaddr + txwd_len); 1191*8e93258fSBjoern A. Zeeb #endif 1192*8e93258fSBjoern A. Zeeb txwp_info->seq0 = cpu_to_le16(txwd->seq | RTW89_PCI_TXWP_VALID); 1193*8e93258fSBjoern A. Zeeb txwp_info->seq1 = 0; 1194*8e93258fSBjoern A. Zeeb txwp_info->seq2 = 0; 1195*8e93258fSBjoern A. Zeeb txwp_info->seq3 = 0; 1196*8e93258fSBjoern A. Zeeb 1197*8e93258fSBjoern A. Zeeb tx_ring->tx_cnt++; 1198*8e93258fSBjoern A. Zeeb #if defined(__linux__) 1199*8e93258fSBjoern A. 
Zeeb txaddr_info_addr = txwd->vaddr + txwd_len + txwp_len; 1200*8e93258fSBjoern A. Zeeb #elif defined(__FreeBSD__) 1201*8e93258fSBjoern A. Zeeb txaddr_info_addr = (u8 *)txwd->vaddr + txwd_len + txwp_len; 1202*8e93258fSBjoern A. Zeeb #endif 1203*8e93258fSBjoern A. Zeeb txaddr_info_len = 1204*8e93258fSBjoern A. Zeeb rtw89_chip_fill_txaddr_info(rtwdev, txaddr_info_addr, skb->len, 1205*8e93258fSBjoern A. Zeeb dma, &desc_info->addr_info_nr); 1206*8e93258fSBjoern A. Zeeb 1207*8e93258fSBjoern A. Zeeb txwd->len = txwd_len + txwp_len + txaddr_info_len; 1208*8e93258fSBjoern A. Zeeb 1209*8e93258fSBjoern A. Zeeb rtw89_chip_fill_txdesc(rtwdev, desc_info, txwd->vaddr); 1210*8e93258fSBjoern A. Zeeb 1211*8e93258fSBjoern A. Zeeb skb_queue_tail(&txwd->queue, skb); 1212*8e93258fSBjoern A. Zeeb 1213*8e93258fSBjoern A. Zeeb return 0; 1214*8e93258fSBjoern A. Zeeb 1215*8e93258fSBjoern A. Zeeb err: 1216*8e93258fSBjoern A. Zeeb return ret; 1217*8e93258fSBjoern A. Zeeb } 1218*8e93258fSBjoern A. Zeeb 1219*8e93258fSBjoern A. Zeeb static int rtw89_pci_fwcmd_submit(struct rtw89_dev *rtwdev, 1220*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_ring *tx_ring, 1221*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_bd_32 *txbd, 1222*8e93258fSBjoern A. Zeeb struct rtw89_core_tx_request *tx_req) 1223*8e93258fSBjoern A. Zeeb { 1224*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1225*8e93258fSBjoern A. Zeeb const struct rtw89_chip_info *chip = rtwdev->chip; 1226*8e93258fSBjoern A. Zeeb struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; 1227*8e93258fSBjoern A. Zeeb void *txdesc; 1228*8e93258fSBjoern A. Zeeb int txdesc_size = chip->h2c_desc_size; 1229*8e93258fSBjoern A. Zeeb struct pci_dev *pdev = rtwpci->pdev; 1230*8e93258fSBjoern A. Zeeb struct sk_buff *skb = tx_req->skb; 1231*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb); 1232*8e93258fSBjoern A. Zeeb dma_addr_t dma; 1233*8e93258fSBjoern A. Zeeb 1234*8e93258fSBjoern A. 
Zeeb txdesc = skb_push(skb, txdesc_size); 1235*8e93258fSBjoern A. Zeeb memset(txdesc, 0, txdesc_size); 1236*8e93258fSBjoern A. Zeeb rtw89_chip_fill_txdesc_fwcmd(rtwdev, desc_info, txdesc); 1237*8e93258fSBjoern A. Zeeb 1238*8e93258fSBjoern A. Zeeb dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE); 1239*8e93258fSBjoern A. Zeeb if (dma_mapping_error(&pdev->dev, dma)) { 1240*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "failed to map fwcmd dma data\n"); 1241*8e93258fSBjoern A. Zeeb return -EBUSY; 1242*8e93258fSBjoern A. Zeeb } 1243*8e93258fSBjoern A. Zeeb 1244*8e93258fSBjoern A. Zeeb tx_data->dma = dma; 1245*8e93258fSBjoern A. Zeeb txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS); 1246*8e93258fSBjoern A. Zeeb txbd->length = cpu_to_le16(skb->len); 1247*8e93258fSBjoern A. Zeeb txbd->dma = cpu_to_le32(tx_data->dma); 1248*8e93258fSBjoern A. Zeeb skb_queue_tail(&rtwpci->h2c_queue, skb); 1249*8e93258fSBjoern A. Zeeb 1250*8e93258fSBjoern A. Zeeb rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1); 1251*8e93258fSBjoern A. Zeeb 1252*8e93258fSBjoern A. Zeeb return 0; 1253*8e93258fSBjoern A. Zeeb } 1254*8e93258fSBjoern A. Zeeb 1255*8e93258fSBjoern A. Zeeb static int rtw89_pci_txbd_submit(struct rtw89_dev *rtwdev, 1256*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_ring *tx_ring, 1257*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_bd_32 *txbd, 1258*8e93258fSBjoern A. Zeeb struct rtw89_core_tx_request *tx_req) 1259*8e93258fSBjoern A. Zeeb { 1260*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_wd *txwd; 1261*8e93258fSBjoern A. Zeeb int ret; 1262*8e93258fSBjoern A. Zeeb 1263*8e93258fSBjoern A. Zeeb /* FWCMD queue doesn't have wd pages. Instead, it submits the CMD 1264*8e93258fSBjoern A. Zeeb * buffer with WD BODY only. So here we don't need to check the free 1265*8e93258fSBjoern A. Zeeb * pages of the wd ring. 1266*8e93258fSBjoern A. Zeeb */ 1267*8e93258fSBjoern A. Zeeb if (tx_ring->txch == RTW89_TXCH_CH12) 1268*8e93258fSBjoern A. 
Zeeb return rtw89_pci_fwcmd_submit(rtwdev, tx_ring, txbd, tx_req); 1269*8e93258fSBjoern A. Zeeb 1270*8e93258fSBjoern A. Zeeb txwd = rtw89_pci_dequeue_txwd(tx_ring); 1271*8e93258fSBjoern A. Zeeb if (!txwd) { 1272*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "no available TXWD\n"); 1273*8e93258fSBjoern A. Zeeb ret = -ENOSPC; 1274*8e93258fSBjoern A. Zeeb goto err; 1275*8e93258fSBjoern A. Zeeb } 1276*8e93258fSBjoern A. Zeeb 1277*8e93258fSBjoern A. Zeeb ret = rtw89_pci_txwd_submit(rtwdev, tx_ring, txwd, tx_req); 1278*8e93258fSBjoern A. Zeeb if (ret) { 1279*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "failed to submit TXWD %d\n", txwd->seq); 1280*8e93258fSBjoern A. Zeeb goto err_enqueue_wd; 1281*8e93258fSBjoern A. Zeeb } 1282*8e93258fSBjoern A. Zeeb 1283*8e93258fSBjoern A. Zeeb list_add_tail(&txwd->list, &tx_ring->busy_pages); 1284*8e93258fSBjoern A. Zeeb 1285*8e93258fSBjoern A. Zeeb txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS); 1286*8e93258fSBjoern A. Zeeb txbd->length = cpu_to_le16(txwd->len); 1287*8e93258fSBjoern A. Zeeb txbd->dma = cpu_to_le32(txwd->paddr); 1288*8e93258fSBjoern A. Zeeb 1289*8e93258fSBjoern A. Zeeb rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1); 1290*8e93258fSBjoern A. Zeeb 1291*8e93258fSBjoern A. Zeeb return 0; 1292*8e93258fSBjoern A. Zeeb 1293*8e93258fSBjoern A. Zeeb err_enqueue_wd: 1294*8e93258fSBjoern A. Zeeb rtw89_pci_enqueue_txwd(tx_ring, txwd); 1295*8e93258fSBjoern A. Zeeb err: 1296*8e93258fSBjoern A. Zeeb return ret; 1297*8e93258fSBjoern A. Zeeb } 1298*8e93258fSBjoern A. Zeeb 1299*8e93258fSBjoern A. Zeeb static int rtw89_pci_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req, 1300*8e93258fSBjoern A. Zeeb u8 txch) 1301*8e93258fSBjoern A. Zeeb { 1302*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1303*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_ring *tx_ring; 1304*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_bd_32 *txbd; 1305*8e93258fSBjoern A. 
Zeeb u32 n_avail_txbd; 1306*8e93258fSBjoern A. Zeeb int ret = 0; 1307*8e93258fSBjoern A. Zeeb 1308*8e93258fSBjoern A. Zeeb /* check the tx type and dma channel for fw cmd queue */ 1309*8e93258fSBjoern A. Zeeb if ((txch == RTW89_TXCH_CH12 || 1310*8e93258fSBjoern A. Zeeb tx_req->tx_type == RTW89_CORE_TX_TYPE_FWCMD) && 1311*8e93258fSBjoern A. Zeeb (txch != RTW89_TXCH_CH12 || 1312*8e93258fSBjoern A. Zeeb tx_req->tx_type != RTW89_CORE_TX_TYPE_FWCMD)) { 1313*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "only fw cmd uses dma channel 12\n"); 1314*8e93258fSBjoern A. Zeeb return -EINVAL; 1315*8e93258fSBjoern A. Zeeb } 1316*8e93258fSBjoern A. Zeeb 1317*8e93258fSBjoern A. Zeeb tx_ring = &rtwpci->tx_rings[txch]; 1318*8e93258fSBjoern A. Zeeb spin_lock_bh(&rtwpci->trx_lock); 1319*8e93258fSBjoern A. Zeeb 1320*8e93258fSBjoern A. Zeeb n_avail_txbd = rtw89_pci_get_avail_txbd_num(tx_ring); 1321*8e93258fSBjoern A. Zeeb if (n_avail_txbd == 0) { 1322*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "no available TXBD\n"); 1323*8e93258fSBjoern A. Zeeb ret = -ENOSPC; 1324*8e93258fSBjoern A. Zeeb goto err_unlock; 1325*8e93258fSBjoern A. Zeeb } 1326*8e93258fSBjoern A. Zeeb 1327*8e93258fSBjoern A. Zeeb txbd = rtw89_pci_get_next_txbd(tx_ring); 1328*8e93258fSBjoern A. Zeeb ret = rtw89_pci_txbd_submit(rtwdev, tx_ring, txbd, tx_req); 1329*8e93258fSBjoern A. Zeeb if (ret) { 1330*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "failed to submit TXBD\n"); 1331*8e93258fSBjoern A. Zeeb goto err_unlock; 1332*8e93258fSBjoern A. Zeeb } 1333*8e93258fSBjoern A. Zeeb 1334*8e93258fSBjoern A. Zeeb spin_unlock_bh(&rtwpci->trx_lock); 1335*8e93258fSBjoern A. Zeeb return 0; 1336*8e93258fSBjoern A. Zeeb 1337*8e93258fSBjoern A. Zeeb err_unlock: 1338*8e93258fSBjoern A. Zeeb spin_unlock_bh(&rtwpci->trx_lock); 1339*8e93258fSBjoern A. Zeeb return ret; 1340*8e93258fSBjoern A. Zeeb } 1341*8e93258fSBjoern A. Zeeb 1342*8e93258fSBjoern A. 
Zeeb static int rtw89_pci_ops_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req) 1343*8e93258fSBjoern A. Zeeb { 1344*8e93258fSBjoern A. Zeeb struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; 1345*8e93258fSBjoern A. Zeeb int ret; 1346*8e93258fSBjoern A. Zeeb 1347*8e93258fSBjoern A. Zeeb ret = rtw89_pci_tx_write(rtwdev, tx_req, desc_info->ch_dma); 1348*8e93258fSBjoern A. Zeeb if (ret) { 1349*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "failed to TX Queue %d\n", desc_info->ch_dma); 1350*8e93258fSBjoern A. Zeeb return ret; 1351*8e93258fSBjoern A. Zeeb } 1352*8e93258fSBjoern A. Zeeb 1353*8e93258fSBjoern A. Zeeb return 0; 1354*8e93258fSBjoern A. Zeeb } 1355*8e93258fSBjoern A. Zeeb 1356*8e93258fSBjoern A. Zeeb static const struct rtw89_pci_bd_ram bd_ram_table[RTW89_TXCH_NUM] = { 1357*8e93258fSBjoern A. Zeeb [RTW89_TXCH_ACH0] = {.start_idx = 0, .max_num = 5, .min_num = 2}, 1358*8e93258fSBjoern A. Zeeb [RTW89_TXCH_ACH1] = {.start_idx = 5, .max_num = 5, .min_num = 2}, 1359*8e93258fSBjoern A. Zeeb [RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2}, 1360*8e93258fSBjoern A. Zeeb [RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2}, 1361*8e93258fSBjoern A. Zeeb [RTW89_TXCH_ACH4] = {.start_idx = 20, .max_num = 5, .min_num = 2}, 1362*8e93258fSBjoern A. Zeeb [RTW89_TXCH_ACH5] = {.start_idx = 25, .max_num = 5, .min_num = 2}, 1363*8e93258fSBjoern A. Zeeb [RTW89_TXCH_ACH6] = {.start_idx = 30, .max_num = 5, .min_num = 2}, 1364*8e93258fSBjoern A. Zeeb [RTW89_TXCH_ACH7] = {.start_idx = 35, .max_num = 5, .min_num = 2}, 1365*8e93258fSBjoern A. Zeeb [RTW89_TXCH_CH8] = {.start_idx = 40, .max_num = 5, .min_num = 1}, 1366*8e93258fSBjoern A. Zeeb [RTW89_TXCH_CH9] = {.start_idx = 45, .max_num = 5, .min_num = 1}, 1367*8e93258fSBjoern A. Zeeb [RTW89_TXCH_CH10] = {.start_idx = 50, .max_num = 5, .min_num = 1}, 1368*8e93258fSBjoern A. Zeeb [RTW89_TXCH_CH11] = {.start_idx = 55, .max_num = 5, .min_num = 1}, 1369*8e93258fSBjoern A. 
Zeeb [RTW89_TXCH_CH12] = {.start_idx = 60, .max_num = 4, .min_num = 1}, 1370*8e93258fSBjoern A. Zeeb }; 1371*8e93258fSBjoern A. Zeeb 1372*8e93258fSBjoern A. Zeeb static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev) 1373*8e93258fSBjoern A. Zeeb { 1374*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1375*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_ring *tx_ring; 1376*8e93258fSBjoern A. Zeeb struct rtw89_pci_rx_ring *rx_ring; 1377*8e93258fSBjoern A. Zeeb struct rtw89_pci_dma_ring *bd_ring; 1378*8e93258fSBjoern A. Zeeb const struct rtw89_pci_bd_ram *bd_ram; 1379*8e93258fSBjoern A. Zeeb u32 addr_num; 1380*8e93258fSBjoern A. Zeeb u32 addr_bdram; 1381*8e93258fSBjoern A. Zeeb u32 addr_desa_l; 1382*8e93258fSBjoern A. Zeeb u32 val32; 1383*8e93258fSBjoern A. Zeeb int i; 1384*8e93258fSBjoern A. Zeeb 1385*8e93258fSBjoern A. Zeeb for (i = 0; i < RTW89_TXCH_NUM; i++) { 1386*8e93258fSBjoern A. Zeeb tx_ring = &rtwpci->tx_rings[i]; 1387*8e93258fSBjoern A. Zeeb bd_ring = &tx_ring->bd_ring; 1388*8e93258fSBjoern A. Zeeb bd_ram = &bd_ram_table[i]; 1389*8e93258fSBjoern A. Zeeb addr_num = bd_ring->addr.num; 1390*8e93258fSBjoern A. Zeeb addr_bdram = bd_ring->addr.bdram; 1391*8e93258fSBjoern A. Zeeb addr_desa_l = bd_ring->addr.desa_l; 1392*8e93258fSBjoern A. Zeeb bd_ring->wp = 0; 1393*8e93258fSBjoern A. Zeeb bd_ring->rp = 0; 1394*8e93258fSBjoern A. Zeeb 1395*8e93258fSBjoern A. Zeeb val32 = FIELD_PREP(BDRAM_SIDX_MASK, bd_ram->start_idx) | 1396*8e93258fSBjoern A. Zeeb FIELD_PREP(BDRAM_MAX_MASK, bd_ram->max_num) | 1397*8e93258fSBjoern A. Zeeb FIELD_PREP(BDRAM_MIN_MASK, bd_ram->min_num); 1398*8e93258fSBjoern A. Zeeb 1399*8e93258fSBjoern A. Zeeb rtw89_write16(rtwdev, addr_num, bd_ring->len); 1400*8e93258fSBjoern A. Zeeb rtw89_write32(rtwdev, addr_bdram, val32); 1401*8e93258fSBjoern A. Zeeb rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma); 1402*8e93258fSBjoern A. Zeeb } 1403*8e93258fSBjoern A. Zeeb 1404*8e93258fSBjoern A. 
Zeeb for (i = 0; i < RTW89_RXCH_NUM; i++) { 1405*8e93258fSBjoern A. Zeeb rx_ring = &rtwpci->rx_rings[i]; 1406*8e93258fSBjoern A. Zeeb bd_ring = &rx_ring->bd_ring; 1407*8e93258fSBjoern A. Zeeb addr_num = bd_ring->addr.num; 1408*8e93258fSBjoern A. Zeeb addr_desa_l = bd_ring->addr.desa_l; 1409*8e93258fSBjoern A. Zeeb bd_ring->wp = 0; 1410*8e93258fSBjoern A. Zeeb bd_ring->rp = 0; 1411*8e93258fSBjoern A. Zeeb rx_ring->diliver_skb = NULL; 1412*8e93258fSBjoern A. Zeeb rx_ring->diliver_desc.ready = false; 1413*8e93258fSBjoern A. Zeeb 1414*8e93258fSBjoern A. Zeeb rtw89_write16(rtwdev, addr_num, bd_ring->len); 1415*8e93258fSBjoern A. Zeeb rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma); 1416*8e93258fSBjoern A. Zeeb } 1417*8e93258fSBjoern A. Zeeb } 1418*8e93258fSBjoern A. Zeeb 1419*8e93258fSBjoern A. Zeeb static void rtw89_pci_release_tx_ring(struct rtw89_dev *rtwdev, 1420*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_ring *tx_ring) 1421*8e93258fSBjoern A. Zeeb { 1422*8e93258fSBjoern A. Zeeb rtw89_pci_release_busy_txwd(rtwdev, tx_ring); 1423*8e93258fSBjoern A. Zeeb rtw89_pci_release_pending_txwd_skb(rtwdev, tx_ring); 1424*8e93258fSBjoern A. Zeeb } 1425*8e93258fSBjoern A. Zeeb 1426*8e93258fSBjoern A. Zeeb static void rtw89_pci_ops_reset(struct rtw89_dev *rtwdev) 1427*8e93258fSBjoern A. Zeeb { 1428*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1429*8e93258fSBjoern A. Zeeb int txch; 1430*8e93258fSBjoern A. Zeeb 1431*8e93258fSBjoern A. Zeeb rtw89_pci_reset_trx_rings(rtwdev); 1432*8e93258fSBjoern A. Zeeb 1433*8e93258fSBjoern A. Zeeb spin_lock_bh(&rtwpci->trx_lock); 1434*8e93258fSBjoern A. Zeeb for (txch = 0; txch < RTW89_TXCH_NUM; txch++) { 1435*8e93258fSBjoern A. Zeeb if (txch == RTW89_TXCH_CH12) { 1436*8e93258fSBjoern A. Zeeb rtw89_pci_release_fwcmd(rtwdev, rtwpci, 1437*8e93258fSBjoern A. Zeeb skb_queue_len(&rtwpci->h2c_queue), true); 1438*8e93258fSBjoern A. Zeeb continue; 1439*8e93258fSBjoern A. Zeeb } 1440*8e93258fSBjoern A. 
Zeeb rtw89_pci_release_tx_ring(rtwdev, &rtwpci->tx_rings[txch]); 1441*8e93258fSBjoern A. Zeeb } 1442*8e93258fSBjoern A. Zeeb spin_unlock_bh(&rtwpci->trx_lock); 1443*8e93258fSBjoern A. Zeeb } 1444*8e93258fSBjoern A. Zeeb 1445*8e93258fSBjoern A. Zeeb static void rtw89_pci_enable_intr_lock(struct rtw89_dev *rtwdev) 1446*8e93258fSBjoern A. Zeeb { 1447*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1448*8e93258fSBjoern A. Zeeb unsigned long flags; 1449*8e93258fSBjoern A. Zeeb 1450*8e93258fSBjoern A. Zeeb spin_lock_irqsave(&rtwpci->irq_lock, flags); 1451*8e93258fSBjoern A. Zeeb rtwpci->running = true; 1452*8e93258fSBjoern A. Zeeb rtw89_chip_enable_intr(rtwdev, rtwpci); 1453*8e93258fSBjoern A. Zeeb spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 1454*8e93258fSBjoern A. Zeeb } 1455*8e93258fSBjoern A. Zeeb 1456*8e93258fSBjoern A. Zeeb static void rtw89_pci_disable_intr_lock(struct rtw89_dev *rtwdev) 1457*8e93258fSBjoern A. Zeeb { 1458*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1459*8e93258fSBjoern A. Zeeb unsigned long flags; 1460*8e93258fSBjoern A. Zeeb 1461*8e93258fSBjoern A. Zeeb spin_lock_irqsave(&rtwpci->irq_lock, flags); 1462*8e93258fSBjoern A. Zeeb rtwpci->running = false; 1463*8e93258fSBjoern A. Zeeb rtw89_chip_disable_intr(rtwdev, rtwpci); 1464*8e93258fSBjoern A. Zeeb spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 1465*8e93258fSBjoern A. Zeeb } 1466*8e93258fSBjoern A. Zeeb 1467*8e93258fSBjoern A. Zeeb static int rtw89_pci_ops_start(struct rtw89_dev *rtwdev) 1468*8e93258fSBjoern A. Zeeb { 1469*8e93258fSBjoern A. Zeeb rtw89_core_napi_start(rtwdev); 1470*8e93258fSBjoern A. Zeeb rtw89_pci_enable_intr_lock(rtwdev); 1471*8e93258fSBjoern A. Zeeb 1472*8e93258fSBjoern A. Zeeb return 0; 1473*8e93258fSBjoern A. Zeeb } 1474*8e93258fSBjoern A. Zeeb 1475*8e93258fSBjoern A. Zeeb static void rtw89_pci_ops_stop(struct rtw89_dev *rtwdev) 1476*8e93258fSBjoern A. Zeeb { 1477*8e93258fSBjoern A. 
Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1478*8e93258fSBjoern A. Zeeb struct pci_dev *pdev = rtwpci->pdev; 1479*8e93258fSBjoern A. Zeeb 1480*8e93258fSBjoern A. Zeeb rtw89_pci_disable_intr_lock(rtwdev); 1481*8e93258fSBjoern A. Zeeb synchronize_irq(pdev->irq); 1482*8e93258fSBjoern A. Zeeb rtw89_core_napi_stop(rtwdev); 1483*8e93258fSBjoern A. Zeeb } 1484*8e93258fSBjoern A. Zeeb 1485*8e93258fSBjoern A. Zeeb static void rtw89_pci_ops_pause(struct rtw89_dev *rtwdev, bool pause) 1486*8e93258fSBjoern A. Zeeb { 1487*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1488*8e93258fSBjoern A. Zeeb struct pci_dev *pdev = rtwpci->pdev; 1489*8e93258fSBjoern A. Zeeb 1490*8e93258fSBjoern A. Zeeb if (pause) { 1491*8e93258fSBjoern A. Zeeb rtw89_pci_disable_intr_lock(rtwdev); 1492*8e93258fSBjoern A. Zeeb synchronize_irq(pdev->irq); 1493*8e93258fSBjoern A. Zeeb if (test_bit(RTW89_FLAG_NAPI_RUNNING, rtwdev->flags)) 1494*8e93258fSBjoern A. Zeeb napi_synchronize(&rtwdev->napi); 1495*8e93258fSBjoern A. Zeeb } else { 1496*8e93258fSBjoern A. Zeeb rtw89_pci_enable_intr_lock(rtwdev); 1497*8e93258fSBjoern A. Zeeb rtw89_pci_tx_kick_off_pending(rtwdev); 1498*8e93258fSBjoern A. Zeeb } 1499*8e93258fSBjoern A. Zeeb } 1500*8e93258fSBjoern A. Zeeb 1501*8e93258fSBjoern A. Zeeb static 1502*8e93258fSBjoern A. Zeeb void rtw89_pci_switch_bd_idx_addr(struct rtw89_dev *rtwdev, bool low_power) 1503*8e93258fSBjoern A. Zeeb { 1504*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1505*8e93258fSBjoern A. Zeeb const struct rtw89_pci_info *info = rtwdev->pci_info; 1506*8e93258fSBjoern A. Zeeb const struct rtw89_pci_bd_idx_addr *bd_idx_addr = info->bd_idx_addr_low_power; 1507*8e93258fSBjoern A. Zeeb const struct rtw89_pci_ch_dma_addr_set *dma_addr_set = info->dma_addr_set; 1508*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_ring *tx_ring; 1509*8e93258fSBjoern A. Zeeb struct rtw89_pci_rx_ring *rx_ring; 1510*8e93258fSBjoern A. 
Zeeb int i; 1511*8e93258fSBjoern A. Zeeb 1512*8e93258fSBjoern A. Zeeb if (WARN(!bd_idx_addr, "only HCI with low power mode needs this\n")) 1513*8e93258fSBjoern A. Zeeb return; 1514*8e93258fSBjoern A. Zeeb 1515*8e93258fSBjoern A. Zeeb for (i = 0; i < RTW89_TXCH_NUM; i++) { 1516*8e93258fSBjoern A. Zeeb tx_ring = &rtwpci->tx_rings[i]; 1517*8e93258fSBjoern A. Zeeb tx_ring->bd_ring.addr.idx = low_power ? 1518*8e93258fSBjoern A. Zeeb bd_idx_addr->tx_bd_addrs[i] : 1519*8e93258fSBjoern A. Zeeb dma_addr_set->tx[i].idx; 1520*8e93258fSBjoern A. Zeeb } 1521*8e93258fSBjoern A. Zeeb 1522*8e93258fSBjoern A. Zeeb for (i = 0; i < RTW89_RXCH_NUM; i++) { 1523*8e93258fSBjoern A. Zeeb rx_ring = &rtwpci->rx_rings[i]; 1524*8e93258fSBjoern A. Zeeb rx_ring->bd_ring.addr.idx = low_power ? 1525*8e93258fSBjoern A. Zeeb bd_idx_addr->rx_bd_addrs[i] : 1526*8e93258fSBjoern A. Zeeb dma_addr_set->rx[i].idx; 1527*8e93258fSBjoern A. Zeeb } 1528*8e93258fSBjoern A. Zeeb } 1529*8e93258fSBjoern A. Zeeb 1530*8e93258fSBjoern A. Zeeb static void rtw89_pci_ops_switch_mode(struct rtw89_dev *rtwdev, bool low_power) 1531*8e93258fSBjoern A. Zeeb { 1532*8e93258fSBjoern A. Zeeb enum rtw89_pci_intr_mask_cfg cfg; 1533*8e93258fSBjoern A. Zeeb 1534*8e93258fSBjoern A. Zeeb WARN(!rtwdev->hci.paused, "HCI isn't paused\n"); 1535*8e93258fSBjoern A. Zeeb 1536*8e93258fSBjoern A. Zeeb cfg = low_power ? RTW89_PCI_INTR_MASK_LOW_POWER : RTW89_PCI_INTR_MASK_NORMAL; 1537*8e93258fSBjoern A. Zeeb rtw89_chip_config_intr_mask(rtwdev, cfg); 1538*8e93258fSBjoern A. Zeeb rtw89_pci_switch_bd_idx_addr(rtwdev, low_power); 1539*8e93258fSBjoern A. Zeeb } 1540*8e93258fSBjoern A. Zeeb 1541*8e93258fSBjoern A. Zeeb static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data); 1542*8e93258fSBjoern A. Zeeb 1543*8e93258fSBjoern A. Zeeb static u32 rtw89_pci_ops_read32_cmac(struct rtw89_dev *rtwdev, u32 addr) 1544*8e93258fSBjoern A. Zeeb { 1545*8e93258fSBjoern A. 
Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1546*8e93258fSBjoern A. Zeeb #if defined(__linux__) 1547*8e93258fSBjoern A. Zeeb u32 val = readl(rtwpci->mmap + addr); 1548*8e93258fSBjoern A. Zeeb #elif defined(__FreeBSD__) 1549*8e93258fSBjoern A. Zeeb u32 val; 1550*8e93258fSBjoern A. Zeeb 1551*8e93258fSBjoern A. Zeeb val = bus_read_4((struct resource *)rtwpci->mmap, addr); 1552*8e93258fSBjoern A. Zeeb rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "R32 (%#010x) -> %#010x\n", addr, val); 1553*8e93258fSBjoern A. Zeeb #endif 1554*8e93258fSBjoern A. Zeeb int count; 1555*8e93258fSBjoern A. Zeeb 1556*8e93258fSBjoern A. Zeeb for (count = 0; ; count++) { 1557*8e93258fSBjoern A. Zeeb if (val != RTW89_R32_DEAD) 1558*8e93258fSBjoern A. Zeeb return val; 1559*8e93258fSBjoern A. Zeeb if (count >= MAC_REG_POOL_COUNT) { 1560*8e93258fSBjoern A. Zeeb rtw89_warn(rtwdev, "addr %#x = %#x\n", addr, val); 1561*8e93258fSBjoern A. Zeeb return RTW89_R32_DEAD; 1562*8e93258fSBjoern A. Zeeb } 1563*8e93258fSBjoern A. Zeeb rtw89_pci_ops_write32(rtwdev, R_AX_CK_EN, B_AX_CMAC_ALLCKEN); 1564*8e93258fSBjoern A. Zeeb #if defined(__linux__) 1565*8e93258fSBjoern A. Zeeb val = readl(rtwpci->mmap + addr); 1566*8e93258fSBjoern A. Zeeb #elif defined(__FreeBSD__) 1567*8e93258fSBjoern A. Zeeb val = bus_read_4((struct resource *)rtwpci->mmap, addr); 1568*8e93258fSBjoern A. Zeeb rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "R32 (%#010x) -> %#010x\n", addr, val); 1569*8e93258fSBjoern A. Zeeb #endif 1570*8e93258fSBjoern A. Zeeb } 1571*8e93258fSBjoern A. Zeeb 1572*8e93258fSBjoern A. Zeeb return val; 1573*8e93258fSBjoern A. Zeeb } 1574*8e93258fSBjoern A. Zeeb 1575*8e93258fSBjoern A. Zeeb static u8 rtw89_pci_ops_read8(struct rtw89_dev *rtwdev, u32 addr) 1576*8e93258fSBjoern A. Zeeb { 1577*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1578*8e93258fSBjoern A. Zeeb u32 addr32, val32, shift; 1579*8e93258fSBjoern A. Zeeb 1580*8e93258fSBjoern A. 
Zeeb if (!ACCESS_CMAC(addr)) 1581*8e93258fSBjoern A. Zeeb #if defined(__linux__) 1582*8e93258fSBjoern A. Zeeb return readb(rtwpci->mmap + addr); 1583*8e93258fSBjoern A. Zeeb #elif defined(__FreeBSD__) 1584*8e93258fSBjoern A. Zeeb { 1585*8e93258fSBjoern A. Zeeb u8 val; 1586*8e93258fSBjoern A. Zeeb 1587*8e93258fSBjoern A. Zeeb val = bus_read_1((struct resource *)rtwpci->mmap, addr); 1588*8e93258fSBjoern A. Zeeb rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "R08 (%#010x) -> %#04x\n", addr, val); 1589*8e93258fSBjoern A. Zeeb return (val); 1590*8e93258fSBjoern A. Zeeb } 1591*8e93258fSBjoern A. Zeeb #endif 1592*8e93258fSBjoern A. Zeeb 1593*8e93258fSBjoern A. Zeeb addr32 = addr & ~0x3; 1594*8e93258fSBjoern A. Zeeb shift = (addr & 0x3) * 8; 1595*8e93258fSBjoern A. Zeeb val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32); 1596*8e93258fSBjoern A. Zeeb return val32 >> shift; 1597*8e93258fSBjoern A. Zeeb } 1598*8e93258fSBjoern A. Zeeb 1599*8e93258fSBjoern A. Zeeb static u16 rtw89_pci_ops_read16(struct rtw89_dev *rtwdev, u32 addr) 1600*8e93258fSBjoern A. Zeeb { 1601*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1602*8e93258fSBjoern A. Zeeb u32 addr32, val32, shift; 1603*8e93258fSBjoern A. Zeeb 1604*8e93258fSBjoern A. Zeeb if (!ACCESS_CMAC(addr)) 1605*8e93258fSBjoern A. Zeeb #if defined(__linux__) 1606*8e93258fSBjoern A. Zeeb return readw(rtwpci->mmap + addr); 1607*8e93258fSBjoern A. Zeeb #elif defined(__FreeBSD__) 1608*8e93258fSBjoern A. Zeeb { 1609*8e93258fSBjoern A. Zeeb u16 val; 1610*8e93258fSBjoern A. Zeeb 1611*8e93258fSBjoern A. Zeeb val = bus_read_2((struct resource *)rtwpci->mmap, addr); 1612*8e93258fSBjoern A. Zeeb rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "R16 (%#010x) -> %#06x\n", addr, val); 1613*8e93258fSBjoern A. Zeeb return (val); 1614*8e93258fSBjoern A. Zeeb } 1615*8e93258fSBjoern A. Zeeb #endif 1616*8e93258fSBjoern A. Zeeb 1617*8e93258fSBjoern A. Zeeb addr32 = addr & ~0x3; 1618*8e93258fSBjoern A. 
Zeeb shift = (addr & 0x3) * 8; 1619*8e93258fSBjoern A. Zeeb val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32); 1620*8e93258fSBjoern A. Zeeb return val32 >> shift; 1621*8e93258fSBjoern A. Zeeb } 1622*8e93258fSBjoern A. Zeeb 1623*8e93258fSBjoern A. Zeeb static u32 rtw89_pci_ops_read32(struct rtw89_dev *rtwdev, u32 addr) 1624*8e93258fSBjoern A. Zeeb { 1625*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1626*8e93258fSBjoern A. Zeeb 1627*8e93258fSBjoern A. Zeeb if (!ACCESS_CMAC(addr)) 1628*8e93258fSBjoern A. Zeeb #if defined(__linux__) 1629*8e93258fSBjoern A. Zeeb return readl(rtwpci->mmap + addr); 1630*8e93258fSBjoern A. Zeeb #elif defined(__FreeBSD__) 1631*8e93258fSBjoern A. Zeeb { 1632*8e93258fSBjoern A. Zeeb u32 val; 1633*8e93258fSBjoern A. Zeeb 1634*8e93258fSBjoern A. Zeeb val = bus_read_4((struct resource *)rtwpci->mmap, addr); 1635*8e93258fSBjoern A. Zeeb rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "R32 (%#010x) -> %#010x\n", addr, val); 1636*8e93258fSBjoern A. Zeeb return (val); 1637*8e93258fSBjoern A. Zeeb } 1638*8e93258fSBjoern A. Zeeb #endif 1639*8e93258fSBjoern A. Zeeb 1640*8e93258fSBjoern A. Zeeb return rtw89_pci_ops_read32_cmac(rtwdev, addr); 1641*8e93258fSBjoern A. Zeeb } 1642*8e93258fSBjoern A. Zeeb 1643*8e93258fSBjoern A. Zeeb static void rtw89_pci_ops_write8(struct rtw89_dev *rtwdev, u32 addr, u8 data) 1644*8e93258fSBjoern A. Zeeb { 1645*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1646*8e93258fSBjoern A. Zeeb 1647*8e93258fSBjoern A. Zeeb #if defined(__linux__) 1648*8e93258fSBjoern A. Zeeb writeb(data, rtwpci->mmap + addr); 1649*8e93258fSBjoern A. Zeeb #elif defined(__FreeBSD__) 1650*8e93258fSBjoern A. Zeeb rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "W08 (%#010x) <- %#04x\n", addr, data); 1651*8e93258fSBjoern A. Zeeb return (bus_write_1((struct resource *)rtwpci->mmap, addr, data)); 1652*8e93258fSBjoern A. Zeeb #endif 1653*8e93258fSBjoern A. Zeeb } 1654*8e93258fSBjoern A. 
Zeeb 1655*8e93258fSBjoern A. Zeeb static void rtw89_pci_ops_write16(struct rtw89_dev *rtwdev, u32 addr, u16 data) 1656*8e93258fSBjoern A. Zeeb { 1657*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1658*8e93258fSBjoern A. Zeeb 1659*8e93258fSBjoern A. Zeeb #if defined(__linux__) 1660*8e93258fSBjoern A. Zeeb writew(data, rtwpci->mmap + addr); 1661*8e93258fSBjoern A. Zeeb #elif defined(__FreeBSD__) 1662*8e93258fSBjoern A. Zeeb rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "W16 (%#010x) <- %#06x\n", addr, data); 1663*8e93258fSBjoern A. Zeeb return (bus_write_2((struct resource *)rtwpci->mmap, addr, data)); 1664*8e93258fSBjoern A. Zeeb #endif 1665*8e93258fSBjoern A. Zeeb } 1666*8e93258fSBjoern A. Zeeb 1667*8e93258fSBjoern A. Zeeb static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data) 1668*8e93258fSBjoern A. Zeeb { 1669*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1670*8e93258fSBjoern A. Zeeb 1671*8e93258fSBjoern A. Zeeb #if defined(__linux__) 1672*8e93258fSBjoern A. Zeeb writel(data, rtwpci->mmap + addr); 1673*8e93258fSBjoern A. Zeeb #elif defined(__FreeBSD__) 1674*8e93258fSBjoern A. Zeeb rtw89_debug(rtwdev, RTW89_DBG_IO_RW, "W32 (%#010x) <- %#010x\n", addr, data); 1675*8e93258fSBjoern A. Zeeb return (bus_write_4((struct resource *)rtwpci->mmap, addr, data)); 1676*8e93258fSBjoern A. Zeeb #endif 1677*8e93258fSBjoern A. Zeeb } 1678*8e93258fSBjoern A. Zeeb 1679*8e93258fSBjoern A. Zeeb static void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable) 1680*8e93258fSBjoern A. Zeeb { 1681*8e93258fSBjoern A. Zeeb enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 1682*8e93258fSBjoern A. Zeeb const struct rtw89_pci_info *info = rtwdev->pci_info; 1683*8e93258fSBjoern A. Zeeb u32 txhci_en = info->txhci_en_bit; 1684*8e93258fSBjoern A. Zeeb u32 rxhci_en = info->rxhci_en_bit; 1685*8e93258fSBjoern A. Zeeb 1686*8e93258fSBjoern A. Zeeb if (enable) { 1687*8e93258fSBjoern A. 
Zeeb if (chip_id != RTL8852C) 1688*8e93258fSBjoern A. Zeeb rtw89_write32_clr(rtwdev, info->dma_stop1_reg, 1689*8e93258fSBjoern A. Zeeb B_AX_STOP_PCIEIO); 1690*8e93258fSBjoern A. Zeeb rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, 1691*8e93258fSBjoern A. Zeeb txhci_en | rxhci_en); 1692*8e93258fSBjoern A. Zeeb if (chip_id == RTL8852C) 1693*8e93258fSBjoern A. Zeeb rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, 1694*8e93258fSBjoern A. Zeeb B_AX_STOP_AXI_MST); 1695*8e93258fSBjoern A. Zeeb } else { 1696*8e93258fSBjoern A. Zeeb if (chip_id != RTL8852C) 1697*8e93258fSBjoern A. Zeeb rtw89_write32_set(rtwdev, info->dma_stop1_reg, 1698*8e93258fSBjoern A. Zeeb B_AX_STOP_PCIEIO); 1699*8e93258fSBjoern A. Zeeb else 1700*8e93258fSBjoern A. Zeeb rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, 1701*8e93258fSBjoern A. Zeeb B_AX_STOP_AXI_MST); 1702*8e93258fSBjoern A. Zeeb rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, 1703*8e93258fSBjoern A. Zeeb txhci_en | rxhci_en); 1704*8e93258fSBjoern A. Zeeb if (chip_id == RTL8852C) 1705*8e93258fSBjoern A. Zeeb rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, 1706*8e93258fSBjoern A. Zeeb B_AX_STOP_AXI_MST); 1707*8e93258fSBjoern A. Zeeb } 1708*8e93258fSBjoern A. Zeeb } 1709*8e93258fSBjoern A. Zeeb 1710*8e93258fSBjoern A. Zeeb static int rtw89_pci_check_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 rw_bit) 1711*8e93258fSBjoern A. Zeeb { 1712*8e93258fSBjoern A. Zeeb u16 val; 1713*8e93258fSBjoern A. Zeeb 1714*8e93258fSBjoern A. Zeeb rtw89_write8(rtwdev, R_AX_MDIO_CFG, addr & 0x1F); 1715*8e93258fSBjoern A. Zeeb 1716*8e93258fSBjoern A. Zeeb val = rtw89_read16(rtwdev, R_AX_MDIO_CFG); 1717*8e93258fSBjoern A. Zeeb switch (speed) { 1718*8e93258fSBjoern A. Zeeb case PCIE_PHY_GEN1: 1719*8e93258fSBjoern A. Zeeb if (addr < 0x20) 1720*8e93258fSBjoern A. Zeeb val = u16_replace_bits(val, MDIO_PG0_G1, B_AX_MDIO_PHY_ADDR_MASK); 1721*8e93258fSBjoern A. Zeeb else 1722*8e93258fSBjoern A. 
Zeeb val = u16_replace_bits(val, MDIO_PG1_G1, B_AX_MDIO_PHY_ADDR_MASK); 1723*8e93258fSBjoern A. Zeeb break; 1724*8e93258fSBjoern A. Zeeb case PCIE_PHY_GEN2: 1725*8e93258fSBjoern A. Zeeb if (addr < 0x20) 1726*8e93258fSBjoern A. Zeeb val = u16_replace_bits(val, MDIO_PG0_G2, B_AX_MDIO_PHY_ADDR_MASK); 1727*8e93258fSBjoern A. Zeeb else 1728*8e93258fSBjoern A. Zeeb val = u16_replace_bits(val, MDIO_PG1_G2, B_AX_MDIO_PHY_ADDR_MASK); 1729*8e93258fSBjoern A. Zeeb break; 1730*8e93258fSBjoern A. Zeeb default: 1731*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "[ERR]Error Speed %d!\n", speed); 1732*8e93258fSBjoern A. Zeeb return -EINVAL; 1733*8e93258fSBjoern A. Zeeb } 1734*8e93258fSBjoern A. Zeeb rtw89_write16(rtwdev, R_AX_MDIO_CFG, val); 1735*8e93258fSBjoern A. Zeeb rtw89_write16_set(rtwdev, R_AX_MDIO_CFG, rw_bit); 1736*8e93258fSBjoern A. Zeeb 1737*8e93258fSBjoern A. Zeeb return read_poll_timeout(rtw89_read16, val, !(val & rw_bit), 10, 2000, 1738*8e93258fSBjoern A. Zeeb false, rtwdev, R_AX_MDIO_CFG); 1739*8e93258fSBjoern A. Zeeb } 1740*8e93258fSBjoern A. Zeeb 1741*8e93258fSBjoern A. Zeeb static int 1742*8e93258fSBjoern A. Zeeb rtw89_read16_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 *val) 1743*8e93258fSBjoern A. Zeeb { 1744*8e93258fSBjoern A. Zeeb int ret; 1745*8e93258fSBjoern A. Zeeb 1746*8e93258fSBjoern A. Zeeb ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_RFLAG); 1747*8e93258fSBjoern A. Zeeb if (ret) { 1748*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "[ERR]MDIO R16 0x%X fail ret=%d!\n", addr, ret); 1749*8e93258fSBjoern A. Zeeb return ret; 1750*8e93258fSBjoern A. Zeeb } 1751*8e93258fSBjoern A. Zeeb *val = rtw89_read16(rtwdev, R_AX_MDIO_RDATA); 1752*8e93258fSBjoern A. Zeeb 1753*8e93258fSBjoern A. Zeeb return 0; 1754*8e93258fSBjoern A. Zeeb } 1755*8e93258fSBjoern A. Zeeb 1756*8e93258fSBjoern A. Zeeb static int 1757*8e93258fSBjoern A. Zeeb rtw89_write16_mdio(struct rtw89_dev *rtwdev, u8 addr, u16 data, u8 speed) 1758*8e93258fSBjoern A. 
Zeeb { 1759*8e93258fSBjoern A. Zeeb int ret; 1760*8e93258fSBjoern A. Zeeb 1761*8e93258fSBjoern A. Zeeb rtw89_write16(rtwdev, R_AX_MDIO_WDATA, data); 1762*8e93258fSBjoern A. Zeeb ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_WFLAG); 1763*8e93258fSBjoern A. Zeeb if (ret) { 1764*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "[ERR]MDIO W16 0x%X = %x fail ret=%d!\n", addr, data, ret); 1765*8e93258fSBjoern A. Zeeb return ret; 1766*8e93258fSBjoern A. Zeeb } 1767*8e93258fSBjoern A. Zeeb 1768*8e93258fSBjoern A. Zeeb return 0; 1769*8e93258fSBjoern A. Zeeb } 1770*8e93258fSBjoern A. Zeeb 1771*8e93258fSBjoern A. Zeeb static int 1772*8e93258fSBjoern A. Zeeb rtw89_write16_mdio_mask(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u16 data, u8 speed) 1773*8e93258fSBjoern A. Zeeb { 1774*8e93258fSBjoern A. Zeeb u32 shift; 1775*8e93258fSBjoern A. Zeeb int ret; 1776*8e93258fSBjoern A. Zeeb u16 val; 1777*8e93258fSBjoern A. Zeeb 1778*8e93258fSBjoern A. Zeeb ret = rtw89_read16_mdio(rtwdev, addr, speed, &val); 1779*8e93258fSBjoern A. Zeeb if (ret) 1780*8e93258fSBjoern A. Zeeb return ret; 1781*8e93258fSBjoern A. Zeeb 1782*8e93258fSBjoern A. Zeeb shift = __ffs(mask); 1783*8e93258fSBjoern A. Zeeb val &= ~mask; 1784*8e93258fSBjoern A. Zeeb val |= ((data << shift) & mask); 1785*8e93258fSBjoern A. Zeeb 1786*8e93258fSBjoern A. Zeeb ret = rtw89_write16_mdio(rtwdev, addr, val, speed); 1787*8e93258fSBjoern A. Zeeb if (ret) 1788*8e93258fSBjoern A. Zeeb return ret; 1789*8e93258fSBjoern A. Zeeb 1790*8e93258fSBjoern A. Zeeb return 0; 1791*8e93258fSBjoern A. Zeeb } 1792*8e93258fSBjoern A. Zeeb 1793*8e93258fSBjoern A. Zeeb static int rtw89_write16_mdio_set(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed) 1794*8e93258fSBjoern A. Zeeb { 1795*8e93258fSBjoern A. Zeeb int ret; 1796*8e93258fSBjoern A. Zeeb u16 val; 1797*8e93258fSBjoern A. Zeeb 1798*8e93258fSBjoern A. Zeeb ret = rtw89_read16_mdio(rtwdev, addr, speed, &val); 1799*8e93258fSBjoern A. Zeeb if (ret) 1800*8e93258fSBjoern A. 
Zeeb return ret; 1801*8e93258fSBjoern A. Zeeb ret = rtw89_write16_mdio(rtwdev, addr, val | mask, speed); 1802*8e93258fSBjoern A. Zeeb if (ret) 1803*8e93258fSBjoern A. Zeeb return ret; 1804*8e93258fSBjoern A. Zeeb 1805*8e93258fSBjoern A. Zeeb return 0; 1806*8e93258fSBjoern A. Zeeb } 1807*8e93258fSBjoern A. Zeeb 1808*8e93258fSBjoern A. Zeeb static int rtw89_write16_mdio_clr(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed) 1809*8e93258fSBjoern A. Zeeb { 1810*8e93258fSBjoern A. Zeeb int ret; 1811*8e93258fSBjoern A. Zeeb u16 val; 1812*8e93258fSBjoern A. Zeeb 1813*8e93258fSBjoern A. Zeeb ret = rtw89_read16_mdio(rtwdev, addr, speed, &val); 1814*8e93258fSBjoern A. Zeeb if (ret) 1815*8e93258fSBjoern A. Zeeb return ret; 1816*8e93258fSBjoern A. Zeeb ret = rtw89_write16_mdio(rtwdev, addr, val & ~mask, speed); 1817*8e93258fSBjoern A. Zeeb if (ret) 1818*8e93258fSBjoern A. Zeeb return ret; 1819*8e93258fSBjoern A. Zeeb 1820*8e93258fSBjoern A. Zeeb return 0; 1821*8e93258fSBjoern A. Zeeb } 1822*8e93258fSBjoern A. Zeeb 1823*8e93258fSBjoern A. Zeeb static int rtw89_pci_write_config_byte(struct rtw89_dev *rtwdev, u16 addr, 1824*8e93258fSBjoern A. Zeeb u8 data) 1825*8e93258fSBjoern A. Zeeb { 1826*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1827*8e93258fSBjoern A. Zeeb struct pci_dev *pdev = rtwpci->pdev; 1828*8e93258fSBjoern A. Zeeb 1829*8e93258fSBjoern A. Zeeb return pci_write_config_byte(pdev, addr, data); 1830*8e93258fSBjoern A. Zeeb } 1831*8e93258fSBjoern A. Zeeb 1832*8e93258fSBjoern A. Zeeb static int rtw89_pci_read_config_byte(struct rtw89_dev *rtwdev, u16 addr, 1833*8e93258fSBjoern A. Zeeb u8 *value) 1834*8e93258fSBjoern A. Zeeb { 1835*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 1836*8e93258fSBjoern A. Zeeb struct pci_dev *pdev = rtwpci->pdev; 1837*8e93258fSBjoern A. Zeeb 1838*8e93258fSBjoern A. Zeeb return pci_read_config_byte(pdev, addr, value); 1839*8e93258fSBjoern A. 
Zeeb } 1840*8e93258fSBjoern A. Zeeb 1841*8e93258fSBjoern A. Zeeb static int rtw89_pci_config_byte_set(struct rtw89_dev *rtwdev, u16 addr, 1842*8e93258fSBjoern A. Zeeb u8 bit) 1843*8e93258fSBjoern A. Zeeb { 1844*8e93258fSBjoern A. Zeeb u8 value; 1845*8e93258fSBjoern A. Zeeb int ret; 1846*8e93258fSBjoern A. Zeeb 1847*8e93258fSBjoern A. Zeeb ret = rtw89_pci_read_config_byte(rtwdev, addr, &value); 1848*8e93258fSBjoern A. Zeeb if (ret) 1849*8e93258fSBjoern A. Zeeb return ret; 1850*8e93258fSBjoern A. Zeeb 1851*8e93258fSBjoern A. Zeeb value |= bit; 1852*8e93258fSBjoern A. Zeeb ret = rtw89_pci_write_config_byte(rtwdev, addr, value); 1853*8e93258fSBjoern A. Zeeb 1854*8e93258fSBjoern A. Zeeb return ret; 1855*8e93258fSBjoern A. Zeeb } 1856*8e93258fSBjoern A. Zeeb 1857*8e93258fSBjoern A. Zeeb static int rtw89_pci_config_byte_clr(struct rtw89_dev *rtwdev, u16 addr, 1858*8e93258fSBjoern A. Zeeb u8 bit) 1859*8e93258fSBjoern A. Zeeb { 1860*8e93258fSBjoern A. Zeeb u8 value; 1861*8e93258fSBjoern A. Zeeb int ret; 1862*8e93258fSBjoern A. Zeeb 1863*8e93258fSBjoern A. Zeeb ret = rtw89_pci_read_config_byte(rtwdev, addr, &value); 1864*8e93258fSBjoern A. Zeeb if (ret) 1865*8e93258fSBjoern A. Zeeb return ret; 1866*8e93258fSBjoern A. Zeeb 1867*8e93258fSBjoern A. Zeeb value &= ~bit; 1868*8e93258fSBjoern A. Zeeb ret = rtw89_pci_write_config_byte(rtwdev, addr, value); 1869*8e93258fSBjoern A. Zeeb 1870*8e93258fSBjoern A. Zeeb return ret; 1871*8e93258fSBjoern A. Zeeb } 1872*8e93258fSBjoern A. Zeeb 1873*8e93258fSBjoern A. Zeeb static int 1874*8e93258fSBjoern A. Zeeb __get_target(struct rtw89_dev *rtwdev, u16 *target, enum rtw89_pcie_phy phy_rate) 1875*8e93258fSBjoern A. Zeeb { 1876*8e93258fSBjoern A. Zeeb u16 val, tar; 1877*8e93258fSBjoern A. Zeeb int ret; 1878*8e93258fSBjoern A. Zeeb 1879*8e93258fSBjoern A. Zeeb /* Enable counter */ 1880*8e93258fSBjoern A. Zeeb ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val); 1881*8e93258fSBjoern A. Zeeb if (ret) 1882*8e93258fSBjoern A. 
Zeeb return ret; 1883*8e93258fSBjoern A. Zeeb ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN, 1884*8e93258fSBjoern A. Zeeb phy_rate); 1885*8e93258fSBjoern A. Zeeb if (ret) 1886*8e93258fSBjoern A. Zeeb return ret; 1887*8e93258fSBjoern A. Zeeb ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val | B_AX_CLK_CALIB_EN, 1888*8e93258fSBjoern A. Zeeb phy_rate); 1889*8e93258fSBjoern A. Zeeb if (ret) 1890*8e93258fSBjoern A. Zeeb return ret; 1891*8e93258fSBjoern A. Zeeb 1892*8e93258fSBjoern A. Zeeb fsleep(300); 1893*8e93258fSBjoern A. Zeeb 1894*8e93258fSBjoern A. Zeeb ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &tar); 1895*8e93258fSBjoern A. Zeeb if (ret) 1896*8e93258fSBjoern A. Zeeb return ret; 1897*8e93258fSBjoern A. Zeeb ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN, 1898*8e93258fSBjoern A. Zeeb phy_rate); 1899*8e93258fSBjoern A. Zeeb if (ret) 1900*8e93258fSBjoern A. Zeeb return ret; 1901*8e93258fSBjoern A. Zeeb 1902*8e93258fSBjoern A. Zeeb tar = tar & 0x0FFF; 1903*8e93258fSBjoern A. Zeeb if (tar == 0 || tar == 0x0FFF) { 1904*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "[ERR]Get target failed.\n"); 1905*8e93258fSBjoern A. Zeeb return -EINVAL; 1906*8e93258fSBjoern A. Zeeb } 1907*8e93258fSBjoern A. Zeeb 1908*8e93258fSBjoern A. Zeeb *target = tar; 1909*8e93258fSBjoern A. Zeeb 1910*8e93258fSBjoern A. Zeeb return 0; 1911*8e93258fSBjoern A. Zeeb } 1912*8e93258fSBjoern A. Zeeb 1913*8e93258fSBjoern A. Zeeb static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en) 1914*8e93258fSBjoern A. Zeeb { 1915*8e93258fSBjoern A. Zeeb enum rtw89_pcie_phy phy_rate; 1916*8e93258fSBjoern A. Zeeb u16 val16, mgn_set, div_set, tar; 1917*8e93258fSBjoern A. Zeeb u8 val8, bdr_ori; 1918*8e93258fSBjoern A. Zeeb bool l1_flag = false; 1919*8e93258fSBjoern A. Zeeb int ret = 0; 1920*8e93258fSBjoern A. Zeeb 1921*8e93258fSBjoern A. Zeeb if (rtwdev->chip->chip_id != RTL8852B) 1922*8e93258fSBjoern A. 
Zeeb return 0; 1923*8e93258fSBjoern A. Zeeb 1924*8e93258fSBjoern A. Zeeb ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_PHY_RATE, &val8); 1925*8e93258fSBjoern A. Zeeb if (ret) { 1926*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "[ERR]pci config read %X\n", 1927*8e93258fSBjoern A. Zeeb RTW89_PCIE_PHY_RATE); 1928*8e93258fSBjoern A. Zeeb return ret; 1929*8e93258fSBjoern A. Zeeb } 1930*8e93258fSBjoern A. Zeeb 1931*8e93258fSBjoern A. Zeeb if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x1) { 1932*8e93258fSBjoern A. Zeeb phy_rate = PCIE_PHY_GEN1; 1933*8e93258fSBjoern A. Zeeb } else if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x2) { 1934*8e93258fSBjoern A. Zeeb phy_rate = PCIE_PHY_GEN2; 1935*8e93258fSBjoern A. Zeeb } else { 1936*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "[ERR]PCIe PHY rate %#x not support\n", val8); 1937*8e93258fSBjoern A. Zeeb return -EOPNOTSUPP; 1938*8e93258fSBjoern A. Zeeb } 1939*8e93258fSBjoern A. Zeeb /* Disable L1BD */ 1940*8e93258fSBjoern A. Zeeb ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, &bdr_ori); 1941*8e93258fSBjoern A. Zeeb if (ret) { 1942*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "[ERR]pci config read %X\n", RTW89_PCIE_L1_CTRL); 1943*8e93258fSBjoern A. Zeeb return ret; 1944*8e93258fSBjoern A. Zeeb } 1945*8e93258fSBjoern A. Zeeb 1946*8e93258fSBjoern A. Zeeb if (bdr_ori & RTW89_PCIE_BIT_L1) { 1947*8e93258fSBjoern A. Zeeb ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, 1948*8e93258fSBjoern A. Zeeb bdr_ori & ~RTW89_PCIE_BIT_L1); 1949*8e93258fSBjoern A. Zeeb if (ret) { 1950*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "[ERR]pci config write %X\n", 1951*8e93258fSBjoern A. Zeeb RTW89_PCIE_L1_CTRL); 1952*8e93258fSBjoern A. Zeeb return ret; 1953*8e93258fSBjoern A. Zeeb } 1954*8e93258fSBjoern A. Zeeb l1_flag = true; 1955*8e93258fSBjoern A. Zeeb } 1956*8e93258fSBjoern A. Zeeb 1957*8e93258fSBjoern A. Zeeb ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16); 1958*8e93258fSBjoern A. 
Zeeb if (ret) { 1959*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1); 1960*8e93258fSBjoern A. Zeeb goto end; 1961*8e93258fSBjoern A. Zeeb } 1962*8e93258fSBjoern A. Zeeb 1963*8e93258fSBjoern A. Zeeb if (val16 & B_AX_CALIB_EN) { 1964*8e93258fSBjoern A. Zeeb ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, 1965*8e93258fSBjoern A. Zeeb val16 & ~B_AX_CALIB_EN, phy_rate); 1966*8e93258fSBjoern A. Zeeb if (ret) { 1967*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 1968*8e93258fSBjoern A. Zeeb goto end; 1969*8e93258fSBjoern A. Zeeb } 1970*8e93258fSBjoern A. Zeeb } 1971*8e93258fSBjoern A. Zeeb 1972*8e93258fSBjoern A. Zeeb if (!autook_en) 1973*8e93258fSBjoern A. Zeeb goto end; 1974*8e93258fSBjoern A. Zeeb /* Set div */ 1975*8e93258fSBjoern A. Zeeb ret = rtw89_write16_mdio_clr(rtwdev, RAC_CTRL_PPR_V1, B_AX_DIV, phy_rate); 1976*8e93258fSBjoern A. Zeeb if (ret) { 1977*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 1978*8e93258fSBjoern A. Zeeb goto end; 1979*8e93258fSBjoern A. Zeeb } 1980*8e93258fSBjoern A. Zeeb 1981*8e93258fSBjoern A. Zeeb /* Obtain div and margin */ 1982*8e93258fSBjoern A. Zeeb ret = __get_target(rtwdev, &tar, phy_rate); 1983*8e93258fSBjoern A. Zeeb if (ret) { 1984*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "[ERR]1st get target fail %d\n", ret); 1985*8e93258fSBjoern A. Zeeb goto end; 1986*8e93258fSBjoern A. Zeeb } 1987*8e93258fSBjoern A. Zeeb 1988*8e93258fSBjoern A. Zeeb mgn_set = tar * INTF_INTGRA_HOSTREF_V1 / INTF_INTGRA_MINREF_V1 - tar; 1989*8e93258fSBjoern A. Zeeb 1990*8e93258fSBjoern A. Zeeb if (mgn_set >= 128) { 1991*8e93258fSBjoern A. Zeeb div_set = 0x0003; 1992*8e93258fSBjoern A. Zeeb mgn_set = 0x000F; 1993*8e93258fSBjoern A. Zeeb } else if (mgn_set >= 64) { 1994*8e93258fSBjoern A. Zeeb div_set = 0x0003; 1995*8e93258fSBjoern A. Zeeb mgn_set >>= 3; 1996*8e93258fSBjoern A. Zeeb } else if (mgn_set >= 32) { 1997*8e93258fSBjoern A. 
Zeeb div_set = 0x0002; 1998*8e93258fSBjoern A. Zeeb mgn_set >>= 2; 1999*8e93258fSBjoern A. Zeeb } else if (mgn_set >= 16) { 2000*8e93258fSBjoern A. Zeeb div_set = 0x0001; 2001*8e93258fSBjoern A. Zeeb mgn_set >>= 1; 2002*8e93258fSBjoern A. Zeeb } else if (mgn_set == 0) { 2003*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "[ERR]cal mgn is 0,tar = %d\n", tar); 2004*8e93258fSBjoern A. Zeeb goto end; 2005*8e93258fSBjoern A. Zeeb } else { 2006*8e93258fSBjoern A. Zeeb div_set = 0x0000; 2007*8e93258fSBjoern A. Zeeb } 2008*8e93258fSBjoern A. Zeeb 2009*8e93258fSBjoern A. Zeeb ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16); 2010*8e93258fSBjoern A. Zeeb if (ret) { 2011*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1); 2012*8e93258fSBjoern A. Zeeb goto end; 2013*8e93258fSBjoern A. Zeeb } 2014*8e93258fSBjoern A. Zeeb 2015*8e93258fSBjoern A. Zeeb val16 |= u16_encode_bits(div_set, B_AX_DIV); 2016*8e93258fSBjoern A. Zeeb 2017*8e93258fSBjoern A. Zeeb ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val16, phy_rate); 2018*8e93258fSBjoern A. Zeeb if (ret) { 2019*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 2020*8e93258fSBjoern A. Zeeb goto end; 2021*8e93258fSBjoern A. Zeeb } 2022*8e93258fSBjoern A. Zeeb 2023*8e93258fSBjoern A. Zeeb ret = __get_target(rtwdev, &tar, phy_rate); 2024*8e93258fSBjoern A. Zeeb if (ret) { 2025*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "[ERR]2nd get target fail %d\n", ret); 2026*8e93258fSBjoern A. Zeeb goto end; 2027*8e93258fSBjoern A. Zeeb } 2028*8e93258fSBjoern A. Zeeb 2029*8e93258fSBjoern A. Zeeb rtw89_debug(rtwdev, RTW89_DBG_HCI, "[TRACE]target = 0x%X, div = 0x%X, margin = 0x%X\n", 2030*8e93258fSBjoern A. Zeeb tar, div_set, mgn_set); 2031*8e93258fSBjoern A. Zeeb ret = rtw89_write16_mdio(rtwdev, RAC_SET_PPR_V1, 2032*8e93258fSBjoern A. Zeeb (tar & 0x0FFF) | (mgn_set << 12), phy_rate); 2033*8e93258fSBjoern A. Zeeb if (ret) { 2034*8e93258fSBjoern A. 
Zeeb rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_SET_PPR_V1); 2035*8e93258fSBjoern A. Zeeb goto end; 2036*8e93258fSBjoern A. Zeeb } 2037*8e93258fSBjoern A. Zeeb 2038*8e93258fSBjoern A. Zeeb /* Enable function */ 2039*8e93258fSBjoern A. Zeeb ret = rtw89_write16_mdio_set(rtwdev, RAC_CTRL_PPR_V1, B_AX_CALIB_EN, phy_rate); 2040*8e93258fSBjoern A. Zeeb if (ret) { 2041*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1); 2042*8e93258fSBjoern A. Zeeb goto end; 2043*8e93258fSBjoern A. Zeeb } 2044*8e93258fSBjoern A. Zeeb 2045*8e93258fSBjoern A. Zeeb /* CLK delay = 0 */ 2046*8e93258fSBjoern A. Zeeb ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL, 2047*8e93258fSBjoern A. Zeeb PCIE_CLKDLY_HW_0); 2048*8e93258fSBjoern A. Zeeb 2049*8e93258fSBjoern A. Zeeb end: 2050*8e93258fSBjoern A. Zeeb /* Set L1BD to ori */ 2051*8e93258fSBjoern A. Zeeb if (l1_flag) { 2052*8e93258fSBjoern A. Zeeb ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, 2053*8e93258fSBjoern A. Zeeb bdr_ori); 2054*8e93258fSBjoern A. Zeeb if (ret) { 2055*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "[ERR]pci config write %X\n", 2056*8e93258fSBjoern A. Zeeb RTW89_PCIE_L1_CTRL); 2057*8e93258fSBjoern A. Zeeb return ret; 2058*8e93258fSBjoern A. Zeeb } 2059*8e93258fSBjoern A. Zeeb } 2060*8e93258fSBjoern A. Zeeb 2061*8e93258fSBjoern A. Zeeb return ret; 2062*8e93258fSBjoern A. Zeeb } 2063*8e93258fSBjoern A. Zeeb 2064*8e93258fSBjoern A. Zeeb static int rtw89_pci_deglitch_setting(struct rtw89_dev *rtwdev) 2065*8e93258fSBjoern A. Zeeb { 2066*8e93258fSBjoern A. Zeeb enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2067*8e93258fSBjoern A. Zeeb int ret; 2068*8e93258fSBjoern A. Zeeb 2069*8e93258fSBjoern A. Zeeb if (chip_id == RTL8852A) { 2070*8e93258fSBjoern A. Zeeb ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH, 2071*8e93258fSBjoern A. Zeeb PCIE_PHY_GEN1); 2072*8e93258fSBjoern A. Zeeb if (ret) 2073*8e93258fSBjoern A. 
Zeeb return ret; 2074*8e93258fSBjoern A. Zeeb ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH, 2075*8e93258fSBjoern A. Zeeb PCIE_PHY_GEN2); 2076*8e93258fSBjoern A. Zeeb if (ret) 2077*8e93258fSBjoern A. Zeeb return ret; 2078*8e93258fSBjoern A. Zeeb } else if (chip_id == RTL8852C) { 2079*8e93258fSBjoern A. Zeeb rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA24 * 2, 2080*8e93258fSBjoern A. Zeeb B_AX_DEGLITCH); 2081*8e93258fSBjoern A. Zeeb rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA24 * 2, 2082*8e93258fSBjoern A. Zeeb B_AX_DEGLITCH); 2083*8e93258fSBjoern A. Zeeb } 2084*8e93258fSBjoern A. Zeeb 2085*8e93258fSBjoern A. Zeeb return 0; 2086*8e93258fSBjoern A. Zeeb } 2087*8e93258fSBjoern A. Zeeb 2088*8e93258fSBjoern A. Zeeb static void rtw89_pci_rxdma_prefth(struct rtw89_dev *rtwdev) 2089*8e93258fSBjoern A. Zeeb { 2090*8e93258fSBjoern A. Zeeb if (rtwdev->chip->chip_id != RTL8852A) 2091*8e93258fSBjoern A. Zeeb return; 2092*8e93258fSBjoern A. Zeeb 2093*8e93258fSBjoern A. Zeeb rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_DIS_RXDMA_PRE); 2094*8e93258fSBjoern A. Zeeb } 2095*8e93258fSBjoern A. Zeeb 2096*8e93258fSBjoern A. Zeeb static void rtw89_pci_l1off_pwroff(struct rtw89_dev *rtwdev) 2097*8e93258fSBjoern A. Zeeb { 2098*8e93258fSBjoern A. Zeeb if (rtwdev->chip->chip_id != RTL8852A && rtwdev->chip->chip_id != RTL8852B) 2099*8e93258fSBjoern A. Zeeb return; 2100*8e93258fSBjoern A. Zeeb 2101*8e93258fSBjoern A. Zeeb rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL, B_AX_L1OFF_PWR_OFF_EN); 2102*8e93258fSBjoern A. Zeeb } 2103*8e93258fSBjoern A. Zeeb 2104*8e93258fSBjoern A. Zeeb static u32 rtw89_pci_l2_rxen_lat(struct rtw89_dev *rtwdev) 2105*8e93258fSBjoern A. Zeeb { 2106*8e93258fSBjoern A. Zeeb int ret; 2107*8e93258fSBjoern A. Zeeb 2108*8e93258fSBjoern A. Zeeb if (rtwdev->chip->chip_id != RTL8852A) 2109*8e93258fSBjoern A. Zeeb return 0; 2110*8e93258fSBjoern A. Zeeb 2111*8e93258fSBjoern A. 
Zeeb ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN, 2112*8e93258fSBjoern A. Zeeb PCIE_PHY_GEN1); 2113*8e93258fSBjoern A. Zeeb if (ret) 2114*8e93258fSBjoern A. Zeeb return ret; 2115*8e93258fSBjoern A. Zeeb 2116*8e93258fSBjoern A. Zeeb ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN, 2117*8e93258fSBjoern A. Zeeb PCIE_PHY_GEN2); 2118*8e93258fSBjoern A. Zeeb if (ret) 2119*8e93258fSBjoern A. Zeeb return ret; 2120*8e93258fSBjoern A. Zeeb 2121*8e93258fSBjoern A. Zeeb return 0; 2122*8e93258fSBjoern A. Zeeb } 2123*8e93258fSBjoern A. Zeeb 2124*8e93258fSBjoern A. Zeeb static void rtw89_pci_aphy_pwrcut(struct rtw89_dev *rtwdev) 2125*8e93258fSBjoern A. Zeeb { 2126*8e93258fSBjoern A. Zeeb if (rtwdev->chip->chip_id != RTL8852A) 2127*8e93258fSBjoern A. Zeeb return; 2128*8e93258fSBjoern A. Zeeb 2129*8e93258fSBjoern A. Zeeb rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_PSUS_OFF_CAPC_EN); 2130*8e93258fSBjoern A. Zeeb } 2131*8e93258fSBjoern A. Zeeb 2132*8e93258fSBjoern A. Zeeb static void rtw89_pci_hci_ldo(struct rtw89_dev *rtwdev) 2133*8e93258fSBjoern A. Zeeb { 2134*8e93258fSBjoern A. Zeeb if (rtwdev->chip->chip_id == RTL8852A || 2135*8e93258fSBjoern A. Zeeb rtwdev->chip->chip_id == RTL8852B) { 2136*8e93258fSBjoern A. Zeeb rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, 2137*8e93258fSBjoern A. Zeeb B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 2138*8e93258fSBjoern A. Zeeb rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, 2139*8e93258fSBjoern A. Zeeb B_AX_PCIE_DIS_WLSUS_AFT_PDN); 2140*8e93258fSBjoern A. Zeeb } else if (rtwdev->chip->chip_id == RTL8852C) { 2141*8e93258fSBjoern A. Zeeb rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, 2142*8e93258fSBjoern A. Zeeb B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 2143*8e93258fSBjoern A. Zeeb } 2144*8e93258fSBjoern A. Zeeb } 2145*8e93258fSBjoern A. Zeeb 2146*8e93258fSBjoern A. Zeeb static int rtw89_pci_dphy_delay(struct rtw89_dev *rtwdev) 2147*8e93258fSBjoern A. Zeeb { 2148*8e93258fSBjoern A. 
Zeeb if (rtwdev->chip->chip_id != RTL8852B) 2149*8e93258fSBjoern A. Zeeb return 0; 2150*8e93258fSBjoern A. Zeeb 2151*8e93258fSBjoern A. Zeeb return rtw89_write16_mdio_mask(rtwdev, RAC_REG_REV2, BAC_CMU_EN_DLY_MASK, 2152*8e93258fSBjoern A. Zeeb PCIE_DPHY_DLY_25US, PCIE_PHY_GEN1); 2153*8e93258fSBjoern A. Zeeb } 2154*8e93258fSBjoern A. Zeeb 2155*8e93258fSBjoern A. Zeeb static void rtw89_pci_power_wake(struct rtw89_dev *rtwdev, bool pwr_up) 2156*8e93258fSBjoern A. Zeeb { 2157*8e93258fSBjoern A. Zeeb if (pwr_up) 2158*8e93258fSBjoern A. Zeeb rtw89_write32_set(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL); 2159*8e93258fSBjoern A. Zeeb else 2160*8e93258fSBjoern A. Zeeb rtw89_write32_clr(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL); 2161*8e93258fSBjoern A. Zeeb } 2162*8e93258fSBjoern A. Zeeb 2163*8e93258fSBjoern A. Zeeb static void rtw89_pci_autoload_hang(struct rtw89_dev *rtwdev) 2164*8e93258fSBjoern A. Zeeb { 2165*8e93258fSBjoern A. Zeeb if (rtwdev->chip->chip_id != RTL8852C) 2166*8e93258fSBjoern A. Zeeb return; 2167*8e93258fSBjoern A. Zeeb 2168*8e93258fSBjoern A. Zeeb rtw89_write32_set(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3); 2169*8e93258fSBjoern A. Zeeb rtw89_write32_clr(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3); 2170*8e93258fSBjoern A. Zeeb } 2171*8e93258fSBjoern A. Zeeb 2172*8e93258fSBjoern A. Zeeb static void rtw89_pci_l12_vmain(struct rtw89_dev *rtwdev) 2173*8e93258fSBjoern A. Zeeb { 2174*8e93258fSBjoern A. Zeeb if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV)) 2175*8e93258fSBjoern A. Zeeb return; 2176*8e93258fSBjoern A. Zeeb 2177*8e93258fSBjoern A. Zeeb rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_FORCE_PWR_NGAT); 2178*8e93258fSBjoern A. Zeeb } 2179*8e93258fSBjoern A. Zeeb 2180*8e93258fSBjoern A. Zeeb static void rtw89_pci_gen2_force_ib(struct rtw89_dev *rtwdev) 2181*8e93258fSBjoern A. Zeeb { 2182*8e93258fSBjoern A. Zeeb if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV)) 2183*8e93258fSBjoern A. 
Zeeb return; 2184*8e93258fSBjoern A. Zeeb 2185*8e93258fSBjoern A. Zeeb rtw89_write32_set(rtwdev, R_AX_PMC_DBG_CTRL2, 2186*8e93258fSBjoern A. Zeeb B_AX_SYSON_DIS_PMCR_AX_WRMSK); 2187*8e93258fSBjoern A. Zeeb rtw89_write32_set(rtwdev, R_AX_HCI_BG_CTRL, B_AX_BG_CLR_ASYNC_M3); 2188*8e93258fSBjoern A. Zeeb rtw89_write32_clr(rtwdev, R_AX_PMC_DBG_CTRL2, 2189*8e93258fSBjoern A. Zeeb B_AX_SYSON_DIS_PMCR_AX_WRMSK); 2190*8e93258fSBjoern A. Zeeb } 2191*8e93258fSBjoern A. Zeeb 2192*8e93258fSBjoern A. Zeeb static void rtw89_pci_l1_ent_lat(struct rtw89_dev *rtwdev) 2193*8e93258fSBjoern A. Zeeb { 2194*8e93258fSBjoern A. Zeeb if (rtwdev->chip->chip_id != RTL8852C) 2195*8e93258fSBjoern A. Zeeb return; 2196*8e93258fSBjoern A. Zeeb 2197*8e93258fSBjoern A. Zeeb rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_SEL_REQ_ENTR_L1); 2198*8e93258fSBjoern A. Zeeb } 2199*8e93258fSBjoern A. Zeeb 2200*8e93258fSBjoern A. Zeeb static void rtw89_pci_wd_exit_l1(struct rtw89_dev *rtwdev) 2201*8e93258fSBjoern A. Zeeb { 2202*8e93258fSBjoern A. Zeeb if (rtwdev->chip->chip_id != RTL8852C) 2203*8e93258fSBjoern A. Zeeb return; 2204*8e93258fSBjoern A. Zeeb 2205*8e93258fSBjoern A. Zeeb rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_DMAC0_EXIT_L1_EN); 2206*8e93258fSBjoern A. Zeeb } 2207*8e93258fSBjoern A. Zeeb 2208*8e93258fSBjoern A. Zeeb static void rtw89_pci_set_sic(struct rtw89_dev *rtwdev) 2209*8e93258fSBjoern A. Zeeb { 2210*8e93258fSBjoern A. Zeeb if (rtwdev->chip->chip_id == RTL8852C) 2211*8e93258fSBjoern A. Zeeb return; 2212*8e93258fSBjoern A. Zeeb 2213*8e93258fSBjoern A. Zeeb rtw89_write32_clr(rtwdev, R_AX_PCIE_EXP_CTRL, 2214*8e93258fSBjoern A. Zeeb B_AX_SIC_EN_FORCE_CLKREQ); 2215*8e93258fSBjoern A. Zeeb } 2216*8e93258fSBjoern A. Zeeb 2217*8e93258fSBjoern A. Zeeb static void rtw89_pci_set_lbc(struct rtw89_dev *rtwdev) 2218*8e93258fSBjoern A. Zeeb { 2219*8e93258fSBjoern A. Zeeb const struct rtw89_pci_info *info = rtwdev->pci_info; 2220*8e93258fSBjoern A. Zeeb u32 lbc; 2221*8e93258fSBjoern A. 
Zeeb 2222*8e93258fSBjoern A. Zeeb if (rtwdev->chip->chip_id == RTL8852C) 2223*8e93258fSBjoern A. Zeeb return; 2224*8e93258fSBjoern A. Zeeb 2225*8e93258fSBjoern A. Zeeb lbc = rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG); 2226*8e93258fSBjoern A. Zeeb if (info->lbc_en == MAC_AX_PCIE_ENABLE) { 2227*8e93258fSBjoern A. Zeeb lbc = u32_replace_bits(lbc, info->lbc_tmr, B_AX_LBC_TIMER); 2228*8e93258fSBjoern A. Zeeb lbc |= B_AX_LBC_FLAG | B_AX_LBC_EN; 2229*8e93258fSBjoern A. Zeeb rtw89_write32(rtwdev, R_AX_LBC_WATCHDOG, lbc); 2230*8e93258fSBjoern A. Zeeb } else { 2231*8e93258fSBjoern A. Zeeb lbc &= ~B_AX_LBC_EN; 2232*8e93258fSBjoern A. Zeeb } 2233*8e93258fSBjoern A. Zeeb rtw89_write32_set(rtwdev, R_AX_LBC_WATCHDOG, lbc); 2234*8e93258fSBjoern A. Zeeb } 2235*8e93258fSBjoern A. Zeeb 2236*8e93258fSBjoern A. Zeeb static void rtw89_pci_set_io_rcy(struct rtw89_dev *rtwdev) 2237*8e93258fSBjoern A. Zeeb { 2238*8e93258fSBjoern A. Zeeb const struct rtw89_pci_info *info = rtwdev->pci_info; 2239*8e93258fSBjoern A. Zeeb u32 val32; 2240*8e93258fSBjoern A. Zeeb 2241*8e93258fSBjoern A. Zeeb if (rtwdev->chip->chip_id != RTL8852C) 2242*8e93258fSBjoern A. Zeeb return; 2243*8e93258fSBjoern A. Zeeb 2244*8e93258fSBjoern A. Zeeb if (info->io_rcy_en == MAC_AX_PCIE_ENABLE) { 2245*8e93258fSBjoern A. Zeeb val32 = FIELD_PREP(B_AX_PCIE_WDT_TIMER_M1_MASK, 2246*8e93258fSBjoern A. Zeeb info->io_rcy_tmr); 2247*8e93258fSBjoern A. Zeeb rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M1, val32); 2248*8e93258fSBjoern A. Zeeb rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M2, val32); 2249*8e93258fSBjoern A. Zeeb rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_E0, val32); 2250*8e93258fSBjoern A. Zeeb 2251*8e93258fSBjoern A. Zeeb rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1); 2252*8e93258fSBjoern A. Zeeb rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2); 2253*8e93258fSBjoern A. 
Zeeb rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0); 2254*8e93258fSBjoern A. Zeeb } else { 2255*8e93258fSBjoern A. Zeeb rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1); 2256*8e93258fSBjoern A. Zeeb rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2); 2257*8e93258fSBjoern A. Zeeb rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0); 2258*8e93258fSBjoern A. Zeeb } 2259*8e93258fSBjoern A. Zeeb 2260*8e93258fSBjoern A. Zeeb rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_S1, B_AX_PCIE_IO_RCY_WDT_MODE_S1); 2261*8e93258fSBjoern A. Zeeb } 2262*8e93258fSBjoern A. Zeeb 2263*8e93258fSBjoern A. Zeeb static void rtw89_pci_set_dbg(struct rtw89_dev *rtwdev) 2264*8e93258fSBjoern A. Zeeb { 2265*8e93258fSBjoern A. Zeeb if (rtwdev->chip->chip_id == RTL8852C) 2266*8e93258fSBjoern A. Zeeb return; 2267*8e93258fSBjoern A. Zeeb 2268*8e93258fSBjoern A. Zeeb rtw89_write32_set(rtwdev, R_AX_PCIE_DBG_CTRL, 2269*8e93258fSBjoern A. Zeeb B_AX_ASFF_FULL_NO_STK | B_AX_EN_STUCK_DBG); 2270*8e93258fSBjoern A. Zeeb 2271*8e93258fSBjoern A. Zeeb if (rtwdev->chip->chip_id == RTL8852A) 2272*8e93258fSBjoern A. Zeeb rtw89_write32_set(rtwdev, R_AX_PCIE_EXP_CTRL, 2273*8e93258fSBjoern A. Zeeb B_AX_EN_CHKDSC_NO_RX_STUCK); 2274*8e93258fSBjoern A. Zeeb } 2275*8e93258fSBjoern A. Zeeb 2276*8e93258fSBjoern A. Zeeb static void rtw89_pci_set_keep_reg(struct rtw89_dev *rtwdev) 2277*8e93258fSBjoern A. Zeeb { 2278*8e93258fSBjoern A. Zeeb if (rtwdev->chip->chip_id == RTL8852C) 2279*8e93258fSBjoern A. Zeeb return; 2280*8e93258fSBjoern A. Zeeb 2281*8e93258fSBjoern A. Zeeb rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, 2282*8e93258fSBjoern A. Zeeb B_AX_PCIE_TXRST_KEEP_REG | B_AX_PCIE_RXRST_KEEP_REG); 2283*8e93258fSBjoern A. Zeeb } 2284*8e93258fSBjoern A. Zeeb 2285*8e93258fSBjoern A. Zeeb static void rtw89_pci_clr_idx_all(struct rtw89_dev *rtwdev) 2286*8e93258fSBjoern A. Zeeb { 2287*8e93258fSBjoern A. 
Zeeb const struct rtw89_pci_info *info = rtwdev->pci_info; 2288*8e93258fSBjoern A. Zeeb enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2289*8e93258fSBjoern A. Zeeb u32 val = B_AX_CLR_ACH0_IDX | B_AX_CLR_ACH1_IDX | B_AX_CLR_ACH2_IDX | 2290*8e93258fSBjoern A. Zeeb B_AX_CLR_ACH3_IDX | B_AX_CLR_CH8_IDX | B_AX_CLR_CH9_IDX | 2291*8e93258fSBjoern A. Zeeb B_AX_CLR_CH12_IDX; 2292*8e93258fSBjoern A. Zeeb u32 rxbd_rwptr_clr = info->rxbd_rwptr_clr_reg; 2293*8e93258fSBjoern A. Zeeb u32 txbd_rwptr_clr2 = info->txbd_rwptr_clr2_reg; 2294*8e93258fSBjoern A. Zeeb 2295*8e93258fSBjoern A. Zeeb if (chip_id == RTL8852A || chip_id == RTL8852C) 2296*8e93258fSBjoern A. Zeeb val |= B_AX_CLR_ACH4_IDX | B_AX_CLR_ACH5_IDX | 2297*8e93258fSBjoern A. Zeeb B_AX_CLR_ACH6_IDX | B_AX_CLR_ACH7_IDX; 2298*8e93258fSBjoern A. Zeeb /* clear DMA indexes */ 2299*8e93258fSBjoern A. Zeeb rtw89_write32_set(rtwdev, R_AX_TXBD_RWPTR_CLR1, val); 2300*8e93258fSBjoern A. Zeeb if (chip_id == RTL8852A || chip_id == RTL8852C) 2301*8e93258fSBjoern A. Zeeb rtw89_write32_set(rtwdev, txbd_rwptr_clr2, 2302*8e93258fSBjoern A. Zeeb B_AX_CLR_CH10_IDX | B_AX_CLR_CH11_IDX); 2303*8e93258fSBjoern A. Zeeb rtw89_write32_set(rtwdev, rxbd_rwptr_clr, 2304*8e93258fSBjoern A. Zeeb B_AX_CLR_RXQ_IDX | B_AX_CLR_RPQ_IDX); 2305*8e93258fSBjoern A. Zeeb } 2306*8e93258fSBjoern A. Zeeb 2307*8e93258fSBjoern A. Zeeb static int rtw89_poll_txdma_ch_idle_pcie(struct rtw89_dev *rtwdev) 2308*8e93258fSBjoern A. Zeeb { 2309*8e93258fSBjoern A. Zeeb const struct rtw89_pci_info *info = rtwdev->pci_info; 2310*8e93258fSBjoern A. Zeeb u32 ret, check, dma_busy; 2311*8e93258fSBjoern A. Zeeb u32 dma_busy1 = info->dma_busy1_reg; 2312*8e93258fSBjoern A. Zeeb u32 dma_busy2 = info->dma_busy2_reg; 2313*8e93258fSBjoern A. Zeeb 2314*8e93258fSBjoern A. Zeeb check = B_AX_ACH0_BUSY | B_AX_ACH1_BUSY | B_AX_ACH2_BUSY | 2315*8e93258fSBjoern A. Zeeb B_AX_ACH3_BUSY | B_AX_ACH4_BUSY | B_AX_ACH5_BUSY | 2316*8e93258fSBjoern A. 
Zeeb B_AX_ACH6_BUSY | B_AX_ACH7_BUSY | B_AX_CH8_BUSY | 2317*8e93258fSBjoern A. Zeeb B_AX_CH9_BUSY | B_AX_CH12_BUSY; 2318*8e93258fSBjoern A. Zeeb 2319*8e93258fSBjoern A. Zeeb ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, 2320*8e93258fSBjoern A. Zeeb 10, 100, false, rtwdev, dma_busy1); 2321*8e93258fSBjoern A. Zeeb if (ret) 2322*8e93258fSBjoern A. Zeeb return ret; 2323*8e93258fSBjoern A. Zeeb 2324*8e93258fSBjoern A. Zeeb check = B_AX_CH10_BUSY | B_AX_CH11_BUSY; 2325*8e93258fSBjoern A. Zeeb 2326*8e93258fSBjoern A. Zeeb ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, 2327*8e93258fSBjoern A. Zeeb 10, 100, false, rtwdev, dma_busy2); 2328*8e93258fSBjoern A. Zeeb if (ret) 2329*8e93258fSBjoern A. Zeeb return ret; 2330*8e93258fSBjoern A. Zeeb 2331*8e93258fSBjoern A. Zeeb return 0; 2332*8e93258fSBjoern A. Zeeb } 2333*8e93258fSBjoern A. Zeeb 2334*8e93258fSBjoern A. Zeeb static int rtw89_poll_rxdma_ch_idle_pcie(struct rtw89_dev *rtwdev) 2335*8e93258fSBjoern A. Zeeb { 2336*8e93258fSBjoern A. Zeeb const struct rtw89_pci_info *info = rtwdev->pci_info; 2337*8e93258fSBjoern A. Zeeb u32 ret, check, dma_busy; 2338*8e93258fSBjoern A. Zeeb u32 dma_busy3 = info->dma_busy3_reg; 2339*8e93258fSBjoern A. Zeeb 2340*8e93258fSBjoern A. Zeeb check = B_AX_RXQ_BUSY | B_AX_RPQ_BUSY; 2341*8e93258fSBjoern A. Zeeb 2342*8e93258fSBjoern A. Zeeb ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0, 2343*8e93258fSBjoern A. Zeeb 10, 100, false, rtwdev, dma_busy3); 2344*8e93258fSBjoern A. Zeeb if (ret) 2345*8e93258fSBjoern A. Zeeb return ret; 2346*8e93258fSBjoern A. Zeeb 2347*8e93258fSBjoern A. Zeeb return 0; 2348*8e93258fSBjoern A. Zeeb } 2349*8e93258fSBjoern A. Zeeb 2350*8e93258fSBjoern A. Zeeb static int rtw89_pci_poll_dma_all_idle(struct rtw89_dev *rtwdev) 2351*8e93258fSBjoern A. Zeeb { 2352*8e93258fSBjoern A. Zeeb u32 ret; 2353*8e93258fSBjoern A. Zeeb 2354*8e93258fSBjoern A. 
Zeeb ret = rtw89_poll_txdma_ch_idle_pcie(rtwdev); 2355*8e93258fSBjoern A. Zeeb if (ret) { 2356*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "txdma ch busy\n"); 2357*8e93258fSBjoern A. Zeeb return ret; 2358*8e93258fSBjoern A. Zeeb } 2359*8e93258fSBjoern A. Zeeb 2360*8e93258fSBjoern A. Zeeb ret = rtw89_poll_rxdma_ch_idle_pcie(rtwdev); 2361*8e93258fSBjoern A. Zeeb if (ret) { 2362*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "rxdma ch busy\n"); 2363*8e93258fSBjoern A. Zeeb return ret; 2364*8e93258fSBjoern A. Zeeb } 2365*8e93258fSBjoern A. Zeeb 2366*8e93258fSBjoern A. Zeeb return 0; 2367*8e93258fSBjoern A. Zeeb } 2368*8e93258fSBjoern A. Zeeb 2369*8e93258fSBjoern A. Zeeb static int rtw89_pci_mode_op(struct rtw89_dev *rtwdev) 2370*8e93258fSBjoern A. Zeeb { 2371*8e93258fSBjoern A. Zeeb const struct rtw89_pci_info *info = rtwdev->pci_info; 2372*8e93258fSBjoern A. Zeeb enum mac_ax_bd_trunc_mode txbd_trunc_mode = info->txbd_trunc_mode; 2373*8e93258fSBjoern A. Zeeb enum mac_ax_bd_trunc_mode rxbd_trunc_mode = info->rxbd_trunc_mode; 2374*8e93258fSBjoern A. Zeeb enum mac_ax_rxbd_mode rxbd_mode = info->rxbd_mode; 2375*8e93258fSBjoern A. Zeeb enum mac_ax_tag_mode tag_mode = info->tag_mode; 2376*8e93258fSBjoern A. Zeeb enum mac_ax_wd_dma_intvl wd_dma_idle_intvl = info->wd_dma_idle_intvl; 2377*8e93258fSBjoern A. Zeeb enum mac_ax_wd_dma_intvl wd_dma_act_intvl = info->wd_dma_act_intvl; 2378*8e93258fSBjoern A. Zeeb enum mac_ax_tx_burst tx_burst = info->tx_burst; 2379*8e93258fSBjoern A. Zeeb enum mac_ax_rx_burst rx_burst = info->rx_burst; 2380*8e93258fSBjoern A. Zeeb enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2381*8e93258fSBjoern A. Zeeb u8 cv = rtwdev->hal.cv; 2382*8e93258fSBjoern A. Zeeb u32 val32; 2383*8e93258fSBjoern A. Zeeb 2384*8e93258fSBjoern A. Zeeb if (txbd_trunc_mode == MAC_AX_BD_TRUNC) { 2385*8e93258fSBjoern A. Zeeb if (chip_id == RTL8852A && cv == CHIP_CBV) 2386*8e93258fSBjoern A. 
Zeeb rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE); 2387*8e93258fSBjoern A. Zeeb } else if (txbd_trunc_mode == MAC_AX_BD_NORM) { 2388*8e93258fSBjoern A. Zeeb if (chip_id == RTL8852A || chip_id == RTL8852B) 2389*8e93258fSBjoern A. Zeeb rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE); 2390*8e93258fSBjoern A. Zeeb } 2391*8e93258fSBjoern A. Zeeb 2392*8e93258fSBjoern A. Zeeb if (rxbd_trunc_mode == MAC_AX_BD_TRUNC) { 2393*8e93258fSBjoern A. Zeeb if (chip_id == RTL8852A && cv == CHIP_CBV) 2394*8e93258fSBjoern A. Zeeb rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE); 2395*8e93258fSBjoern A. Zeeb } else if (rxbd_trunc_mode == MAC_AX_BD_NORM) { 2396*8e93258fSBjoern A. Zeeb if (chip_id == RTL8852A || chip_id == RTL8852B) 2397*8e93258fSBjoern A. Zeeb rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE); 2398*8e93258fSBjoern A. Zeeb } 2399*8e93258fSBjoern A. Zeeb 2400*8e93258fSBjoern A. Zeeb if (rxbd_mode == MAC_AX_RXBD_PKT) { 2401*8e93258fSBjoern A. Zeeb rtw89_write32_clr(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit); 2402*8e93258fSBjoern A. Zeeb } else if (rxbd_mode == MAC_AX_RXBD_SEP) { 2403*8e93258fSBjoern A. Zeeb rtw89_write32_set(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit); 2404*8e93258fSBjoern A. Zeeb 2405*8e93258fSBjoern A. Zeeb if (chip_id == RTL8852A || chip_id == RTL8852B) 2406*8e93258fSBjoern A. Zeeb rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, 2407*8e93258fSBjoern A. Zeeb B_AX_PCIE_RX_APPLEN_MASK, 0); 2408*8e93258fSBjoern A. Zeeb } 2409*8e93258fSBjoern A. Zeeb 2410*8e93258fSBjoern A. Zeeb if (chip_id == RTL8852A || chip_id == RTL8852B) { 2411*8e93258fSBjoern A. Zeeb rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_TXDMA_MASK, tx_burst); 2412*8e93258fSBjoern A. Zeeb rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_RXDMA_MASK, rx_burst); 2413*8e93258fSBjoern A. Zeeb } else if (chip_id == RTL8852C) { 2414*8e93258fSBjoern A. 
Zeeb rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_TXDMA_MASK, tx_burst); 2415*8e93258fSBjoern A. Zeeb rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_RXDMA_MASK, rx_burst); 2416*8e93258fSBjoern A. Zeeb } 2417*8e93258fSBjoern A. Zeeb 2418*8e93258fSBjoern A. Zeeb if (chip_id == RTL8852A || chip_id == RTL8852B) { 2419*8e93258fSBjoern A. Zeeb if (tag_mode == MAC_AX_TAG_SGL) { 2420*8e93258fSBjoern A. Zeeb val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) & 2421*8e93258fSBjoern A. Zeeb ~B_AX_LATENCY_CONTROL; 2422*8e93258fSBjoern A. Zeeb rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 2423*8e93258fSBjoern A. Zeeb } else if (tag_mode == MAC_AX_TAG_MULTI) { 2424*8e93258fSBjoern A. Zeeb val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) | 2425*8e93258fSBjoern A. Zeeb B_AX_LATENCY_CONTROL; 2426*8e93258fSBjoern A. Zeeb rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 2427*8e93258fSBjoern A. Zeeb } 2428*8e93258fSBjoern A. Zeeb } 2429*8e93258fSBjoern A. Zeeb 2430*8e93258fSBjoern A. Zeeb rtw89_write32_mask(rtwdev, info->exp_ctrl_reg, info->max_tag_num_mask, 2431*8e93258fSBjoern A. Zeeb info->multi_tag_num); 2432*8e93258fSBjoern A. Zeeb 2433*8e93258fSBjoern A. Zeeb if (chip_id == RTL8852A || chip_id == RTL8852B) { 2434*8e93258fSBjoern A. Zeeb rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_IDLE, 2435*8e93258fSBjoern A. Zeeb wd_dma_idle_intvl); 2436*8e93258fSBjoern A. Zeeb rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_ACT, 2437*8e93258fSBjoern A. Zeeb wd_dma_act_intvl); 2438*8e93258fSBjoern A. Zeeb } else if (chip_id == RTL8852C) { 2439*8e93258fSBjoern A. Zeeb rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_IDLE_V1_MASK, 2440*8e93258fSBjoern A. Zeeb wd_dma_idle_intvl); 2441*8e93258fSBjoern A. Zeeb rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_ACT_V1_MASK, 2442*8e93258fSBjoern A. Zeeb wd_dma_act_intvl); 2443*8e93258fSBjoern A. Zeeb } 2444*8e93258fSBjoern A. Zeeb 2445*8e93258fSBjoern A. 
Zeeb if (txbd_trunc_mode == MAC_AX_BD_TRUNC) { 2446*8e93258fSBjoern A. Zeeb rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, 2447*8e93258fSBjoern A. Zeeb B_AX_HOST_ADDR_INFO_8B_SEL); 2448*8e93258fSBjoern A. Zeeb rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); 2449*8e93258fSBjoern A. Zeeb } else if (txbd_trunc_mode == MAC_AX_BD_NORM) { 2450*8e93258fSBjoern A. Zeeb rtw89_write32_clr(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, 2451*8e93258fSBjoern A. Zeeb B_AX_HOST_ADDR_INFO_8B_SEL); 2452*8e93258fSBjoern A. Zeeb rtw89_write32_set(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); 2453*8e93258fSBjoern A. Zeeb } 2454*8e93258fSBjoern A. Zeeb 2455*8e93258fSBjoern A. Zeeb return 0; 2456*8e93258fSBjoern A. Zeeb } 2457*8e93258fSBjoern A. Zeeb 2458*8e93258fSBjoern A. Zeeb static int rtw89_pci_ops_deinit(struct rtw89_dev *rtwdev) 2459*8e93258fSBjoern A. Zeeb { 2460*8e93258fSBjoern A. Zeeb const struct rtw89_pci_info *info = rtwdev->pci_info; 2461*8e93258fSBjoern A. Zeeb 2462*8e93258fSBjoern A. Zeeb if (rtwdev->chip->chip_id == RTL8852A) { 2463*8e93258fSBjoern A. Zeeb /* ltr sw trigger */ 2464*8e93258fSBjoern A. Zeeb rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_IDLE); 2465*8e93258fSBjoern A. Zeeb } 2466*8e93258fSBjoern A. Zeeb info->ltr_set(rtwdev, false); 2467*8e93258fSBjoern A. Zeeb rtw89_pci_ctrl_dma_all(rtwdev, false); 2468*8e93258fSBjoern A. Zeeb rtw89_pci_clr_idx_all(rtwdev); 2469*8e93258fSBjoern A. Zeeb 2470*8e93258fSBjoern A. Zeeb return 0; 2471*8e93258fSBjoern A. Zeeb } 2472*8e93258fSBjoern A. Zeeb 2473*8e93258fSBjoern A. Zeeb static int rtw89_pci_ops_mac_pre_init(struct rtw89_dev *rtwdev) 2474*8e93258fSBjoern A. Zeeb { 2475*8e93258fSBjoern A. Zeeb const struct rtw89_pci_info *info = rtwdev->pci_info; 2476*8e93258fSBjoern A. Zeeb int ret; 2477*8e93258fSBjoern A. Zeeb 2478*8e93258fSBjoern A. Zeeb rtw89_pci_rxdma_prefth(rtwdev); 2479*8e93258fSBjoern A. Zeeb rtw89_pci_l1off_pwroff(rtwdev); 2480*8e93258fSBjoern A. 
Zeeb rtw89_pci_deglitch_setting(rtwdev); 2481*8e93258fSBjoern A. Zeeb ret = rtw89_pci_l2_rxen_lat(rtwdev); 2482*8e93258fSBjoern A. Zeeb if (ret) { 2483*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "[ERR] pcie l2 rxen lat %d\n", ret); 2484*8e93258fSBjoern A. Zeeb return ret; 2485*8e93258fSBjoern A. Zeeb } 2486*8e93258fSBjoern A. Zeeb 2487*8e93258fSBjoern A. Zeeb rtw89_pci_aphy_pwrcut(rtwdev); 2488*8e93258fSBjoern A. Zeeb rtw89_pci_hci_ldo(rtwdev); 2489*8e93258fSBjoern A. Zeeb rtw89_pci_dphy_delay(rtwdev); 2490*8e93258fSBjoern A. Zeeb 2491*8e93258fSBjoern A. Zeeb ret = rtw89_pci_auto_refclk_cal(rtwdev, false); 2492*8e93258fSBjoern A. Zeeb if (ret) { 2493*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "[ERR] pcie autok fail %d\n", ret); 2494*8e93258fSBjoern A. Zeeb return ret; 2495*8e93258fSBjoern A. Zeeb } 2496*8e93258fSBjoern A. Zeeb 2497*8e93258fSBjoern A. Zeeb rtw89_pci_power_wake(rtwdev, true); 2498*8e93258fSBjoern A. Zeeb rtw89_pci_autoload_hang(rtwdev); 2499*8e93258fSBjoern A. Zeeb rtw89_pci_l12_vmain(rtwdev); 2500*8e93258fSBjoern A. Zeeb rtw89_pci_gen2_force_ib(rtwdev); 2501*8e93258fSBjoern A. Zeeb rtw89_pci_l1_ent_lat(rtwdev); 2502*8e93258fSBjoern A. Zeeb rtw89_pci_wd_exit_l1(rtwdev); 2503*8e93258fSBjoern A. Zeeb rtw89_pci_set_sic(rtwdev); 2504*8e93258fSBjoern A. Zeeb rtw89_pci_set_lbc(rtwdev); 2505*8e93258fSBjoern A. Zeeb rtw89_pci_set_io_rcy(rtwdev); 2506*8e93258fSBjoern A. Zeeb rtw89_pci_set_dbg(rtwdev); 2507*8e93258fSBjoern A. Zeeb rtw89_pci_set_keep_reg(rtwdev); 2508*8e93258fSBjoern A. Zeeb 2509*8e93258fSBjoern A. Zeeb rtw89_write32_set(rtwdev, info->dma_stop1_reg, B_AX_STOP_WPDMA); 2510*8e93258fSBjoern A. Zeeb 2511*8e93258fSBjoern A. Zeeb /* stop DMA activities */ 2512*8e93258fSBjoern A. Zeeb rtw89_pci_ctrl_dma_all(rtwdev, false); 2513*8e93258fSBjoern A. Zeeb 2514*8e93258fSBjoern A. Zeeb ret = rtw89_pci_poll_dma_all_idle(rtwdev); 2515*8e93258fSBjoern A. Zeeb if (ret) { 2516*8e93258fSBjoern A. 
Zeeb rtw89_err(rtwdev, "[ERR] poll pcie dma all idle\n"); 2517*8e93258fSBjoern A. Zeeb return ret; 2518*8e93258fSBjoern A. Zeeb } 2519*8e93258fSBjoern A. Zeeb 2520*8e93258fSBjoern A. Zeeb rtw89_pci_clr_idx_all(rtwdev); 2521*8e93258fSBjoern A. Zeeb rtw89_pci_mode_op(rtwdev); 2522*8e93258fSBjoern A. Zeeb 2523*8e93258fSBjoern A. Zeeb /* fill TRX BD indexes */ 2524*8e93258fSBjoern A. Zeeb rtw89_pci_ops_reset(rtwdev); 2525*8e93258fSBjoern A. Zeeb 2526*8e93258fSBjoern A. Zeeb ret = rtw89_pci_rst_bdram_pcie(rtwdev); 2527*8e93258fSBjoern A. Zeeb if (ret) { 2528*8e93258fSBjoern A. Zeeb rtw89_warn(rtwdev, "reset bdram busy\n"); 2529*8e93258fSBjoern A. Zeeb return ret; 2530*8e93258fSBjoern A. Zeeb } 2531*8e93258fSBjoern A. Zeeb 2532*8e93258fSBjoern A. Zeeb /* enable FW CMD queue to download firmware */ 2533*8e93258fSBjoern A. Zeeb rtw89_write32_set(rtwdev, info->dma_stop1_reg, B_AX_TX_STOP1_ALL); 2534*8e93258fSBjoern A. Zeeb rtw89_write32_clr(rtwdev, info->dma_stop1_reg, B_AX_STOP_CH12); 2535*8e93258fSBjoern A. Zeeb rtw89_write32_set(rtwdev, info->dma_stop2_reg, B_AX_TX_STOP2_ALL); 2536*8e93258fSBjoern A. Zeeb 2537*8e93258fSBjoern A. Zeeb /* start DMA activities */ 2538*8e93258fSBjoern A. Zeeb rtw89_pci_ctrl_dma_all(rtwdev, true); 2539*8e93258fSBjoern A. Zeeb 2540*8e93258fSBjoern A. Zeeb return 0; 2541*8e93258fSBjoern A. Zeeb } 2542*8e93258fSBjoern A. Zeeb 2543*8e93258fSBjoern A. Zeeb int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev, bool en) 2544*8e93258fSBjoern A. Zeeb { 2545*8e93258fSBjoern A. Zeeb u32 val; 2546*8e93258fSBjoern A. Zeeb 2547*8e93258fSBjoern A. Zeeb if (!en) 2548*8e93258fSBjoern A. Zeeb return 0; 2549*8e93258fSBjoern A. Zeeb 2550*8e93258fSBjoern A. Zeeb val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0); 2551*8e93258fSBjoern A. Zeeb if (rtw89_pci_ltr_is_err_reg_val(val)) 2552*8e93258fSBjoern A. Zeeb return -EINVAL; 2553*8e93258fSBjoern A. Zeeb val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1); 2554*8e93258fSBjoern A. 
Zeeb if (rtw89_pci_ltr_is_err_reg_val(val)) 2555*8e93258fSBjoern A. Zeeb return -EINVAL; 2556*8e93258fSBjoern A. Zeeb val = rtw89_read32(rtwdev, R_AX_LTR_IDLE_LATENCY); 2557*8e93258fSBjoern A. Zeeb if (rtw89_pci_ltr_is_err_reg_val(val)) 2558*8e93258fSBjoern A. Zeeb return -EINVAL; 2559*8e93258fSBjoern A. Zeeb val = rtw89_read32(rtwdev, R_AX_LTR_ACTIVE_LATENCY); 2560*8e93258fSBjoern A. Zeeb if (rtw89_pci_ltr_is_err_reg_val(val)) 2561*8e93258fSBjoern A. Zeeb return -EINVAL; 2562*8e93258fSBjoern A. Zeeb 2563*8e93258fSBjoern A. Zeeb rtw89_write32_clr(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_HW_EN); 2564*8e93258fSBjoern A. Zeeb rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_EN); 2565*8e93258fSBjoern A. Zeeb rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_SPACE_IDX_MASK, 2566*8e93258fSBjoern A. Zeeb PCI_LTR_SPC_500US); 2567*8e93258fSBjoern A. Zeeb rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK, 2568*8e93258fSBjoern A. Zeeb PCI_LTR_IDLE_TIMER_800US); 2569*8e93258fSBjoern A. Zeeb rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28); 2570*8e93258fSBjoern A. Zeeb rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28); 2571*8e93258fSBjoern A. Zeeb rtw89_write32(rtwdev, R_AX_LTR_IDLE_LATENCY, 0x88e088e0); 2572*8e93258fSBjoern A. Zeeb rtw89_write32(rtwdev, R_AX_LTR_ACTIVE_LATENCY, 0x880b880b); 2573*8e93258fSBjoern A. Zeeb 2574*8e93258fSBjoern A. Zeeb return 0; 2575*8e93258fSBjoern A. Zeeb } 2576*8e93258fSBjoern A. Zeeb EXPORT_SYMBOL(rtw89_pci_ltr_set); 2577*8e93258fSBjoern A. Zeeb 2578*8e93258fSBjoern A. Zeeb int rtw89_pci_ltr_set_v1(struct rtw89_dev *rtwdev, bool en) 2579*8e93258fSBjoern A. Zeeb { 2580*8e93258fSBjoern A. Zeeb u32 dec_ctrl; 2581*8e93258fSBjoern A. Zeeb u32 val32; 2582*8e93258fSBjoern A. Zeeb 2583*8e93258fSBjoern A. Zeeb val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0); 2584*8e93258fSBjoern A. Zeeb if (rtw89_pci_ltr_is_err_reg_val(val32)) 2585*8e93258fSBjoern A. 
Zeeb return -EINVAL; 2586*8e93258fSBjoern A. Zeeb val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1); 2587*8e93258fSBjoern A. Zeeb if (rtw89_pci_ltr_is_err_reg_val(val32)) 2588*8e93258fSBjoern A. Zeeb return -EINVAL; 2589*8e93258fSBjoern A. Zeeb dec_ctrl = rtw89_read32(rtwdev, R_AX_LTR_DEC_CTRL); 2590*8e93258fSBjoern A. Zeeb if (rtw89_pci_ltr_is_err_reg_val(dec_ctrl)) 2591*8e93258fSBjoern A. Zeeb return -EINVAL; 2592*8e93258fSBjoern A. Zeeb val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX3); 2593*8e93258fSBjoern A. Zeeb if (rtw89_pci_ltr_is_err_reg_val(val32)) 2594*8e93258fSBjoern A. Zeeb return -EINVAL; 2595*8e93258fSBjoern A. Zeeb val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX0); 2596*8e93258fSBjoern A. Zeeb if (rtw89_pci_ltr_is_err_reg_val(val32)) 2597*8e93258fSBjoern A. Zeeb return -EINVAL; 2598*8e93258fSBjoern A. Zeeb 2599*8e93258fSBjoern A. Zeeb if (!en) { 2600*8e93258fSBjoern A. Zeeb dec_ctrl &= ~(LTR_EN_BITS | B_AX_LTR_IDX_DRV_MASK | B_AX_LTR_HW_DEC_EN); 2601*8e93258fSBjoern A. Zeeb dec_ctrl |= FIELD_PREP(B_AX_LTR_IDX_DRV_MASK, PCIE_LTR_IDX_IDLE) | 2602*8e93258fSBjoern A. Zeeb B_AX_LTR_REQ_DRV; 2603*8e93258fSBjoern A. Zeeb } else { 2604*8e93258fSBjoern A. Zeeb dec_ctrl |= B_AX_LTR_HW_DEC_EN; 2605*8e93258fSBjoern A. Zeeb } 2606*8e93258fSBjoern A. Zeeb 2607*8e93258fSBjoern A. Zeeb dec_ctrl &= ~B_AX_LTR_SPACE_IDX_V1_MASK; 2608*8e93258fSBjoern A. Zeeb dec_ctrl |= FIELD_PREP(B_AX_LTR_SPACE_IDX_V1_MASK, PCI_LTR_SPC_500US); 2609*8e93258fSBjoern A. Zeeb 2610*8e93258fSBjoern A. Zeeb if (en) 2611*8e93258fSBjoern A. Zeeb rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, 2612*8e93258fSBjoern A. Zeeb B_AX_LTR_WD_NOEMP_CHK_V1 | B_AX_LTR_HW_EN); 2613*8e93258fSBjoern A. Zeeb rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK, 2614*8e93258fSBjoern A. Zeeb PCI_LTR_IDLE_TIMER_3_2MS); 2615*8e93258fSBjoern A. Zeeb rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28); 2616*8e93258fSBjoern A. 
Zeeb rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28); 2617*8e93258fSBjoern A. Zeeb rtw89_write32(rtwdev, R_AX_LTR_DEC_CTRL, dec_ctrl); 2618*8e93258fSBjoern A. Zeeb rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX3, 0x90039003); 2619*8e93258fSBjoern A. Zeeb rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX0, 0x880b880b); 2620*8e93258fSBjoern A. Zeeb 2621*8e93258fSBjoern A. Zeeb return 0; 2622*8e93258fSBjoern A. Zeeb } 2623*8e93258fSBjoern A. Zeeb EXPORT_SYMBOL(rtw89_pci_ltr_set_v1); 2624*8e93258fSBjoern A. Zeeb 2625*8e93258fSBjoern A. Zeeb static int rtw89_pci_ops_mac_post_init(struct rtw89_dev *rtwdev) 2626*8e93258fSBjoern A. Zeeb { 2627*8e93258fSBjoern A. Zeeb const struct rtw89_pci_info *info = rtwdev->pci_info; 2628*8e93258fSBjoern A. Zeeb enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 2629*8e93258fSBjoern A. Zeeb int ret; 2630*8e93258fSBjoern A. Zeeb 2631*8e93258fSBjoern A. Zeeb ret = info->ltr_set(rtwdev, true); 2632*8e93258fSBjoern A. Zeeb if (ret) { 2633*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "pci ltr set fail\n"); 2634*8e93258fSBjoern A. Zeeb return ret; 2635*8e93258fSBjoern A. Zeeb } 2636*8e93258fSBjoern A. Zeeb if (chip_id == RTL8852A) { 2637*8e93258fSBjoern A. Zeeb /* ltr sw trigger */ 2638*8e93258fSBjoern A. Zeeb rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_ACT); 2639*8e93258fSBjoern A. Zeeb } 2640*8e93258fSBjoern A. Zeeb if (chip_id == RTL8852A || chip_id == RTL8852B) { 2641*8e93258fSBjoern A. Zeeb /* ADDR info 8-byte mode */ 2642*8e93258fSBjoern A. Zeeb rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, 2643*8e93258fSBjoern A. Zeeb B_AX_HOST_ADDR_INFO_8B_SEL); 2644*8e93258fSBjoern A. Zeeb rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH); 2645*8e93258fSBjoern A. Zeeb } 2646*8e93258fSBjoern A. Zeeb 2647*8e93258fSBjoern A. Zeeb /* enable DMA for all queues */ 2648*8e93258fSBjoern A. Zeeb rtw89_write32_clr(rtwdev, info->dma_stop1_reg, B_AX_TX_STOP1_ALL); 2649*8e93258fSBjoern A. 
Zeeb rtw89_write32_clr(rtwdev, info->dma_stop2_reg, B_AX_TX_STOP2_ALL); 2650*8e93258fSBjoern A. Zeeb 2651*8e93258fSBjoern A. Zeeb /* Release PCI IO */ 2652*8e93258fSBjoern A. Zeeb rtw89_write32_clr(rtwdev, info->dma_stop1_reg, 2653*8e93258fSBjoern A. Zeeb B_AX_STOP_WPDMA | B_AX_STOP_PCIEIO); 2654*8e93258fSBjoern A. Zeeb 2655*8e93258fSBjoern A. Zeeb return 0; 2656*8e93258fSBjoern A. Zeeb } 2657*8e93258fSBjoern A. Zeeb 2658*8e93258fSBjoern A. Zeeb static int rtw89_pci_claim_device(struct rtw89_dev *rtwdev, 2659*8e93258fSBjoern A. Zeeb struct pci_dev *pdev) 2660*8e93258fSBjoern A. Zeeb { 2661*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2662*8e93258fSBjoern A. Zeeb int ret; 2663*8e93258fSBjoern A. Zeeb 2664*8e93258fSBjoern A. Zeeb ret = pci_enable_device(pdev); 2665*8e93258fSBjoern A. Zeeb if (ret) { 2666*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "failed to enable pci device\n"); 2667*8e93258fSBjoern A. Zeeb return ret; 2668*8e93258fSBjoern A. Zeeb } 2669*8e93258fSBjoern A. Zeeb 2670*8e93258fSBjoern A. Zeeb pci_set_master(pdev); 2671*8e93258fSBjoern A. Zeeb pci_set_drvdata(pdev, rtwdev->hw); 2672*8e93258fSBjoern A. Zeeb 2673*8e93258fSBjoern A. Zeeb rtwpci->pdev = pdev; 2674*8e93258fSBjoern A. Zeeb 2675*8e93258fSBjoern A. Zeeb return 0; 2676*8e93258fSBjoern A. Zeeb } 2677*8e93258fSBjoern A. Zeeb 2678*8e93258fSBjoern A. Zeeb static void rtw89_pci_declaim_device(struct rtw89_dev *rtwdev, 2679*8e93258fSBjoern A. Zeeb struct pci_dev *pdev) 2680*8e93258fSBjoern A. Zeeb { 2681*8e93258fSBjoern A. Zeeb pci_clear_master(pdev); 2682*8e93258fSBjoern A. Zeeb pci_disable_device(pdev); 2683*8e93258fSBjoern A. Zeeb } 2684*8e93258fSBjoern A. Zeeb 2685*8e93258fSBjoern A. Zeeb static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev, 2686*8e93258fSBjoern A. Zeeb struct pci_dev *pdev) 2687*8e93258fSBjoern A. Zeeb { 2688*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2689*8e93258fSBjoern A. 
Zeeb unsigned long resource_len; 2690*8e93258fSBjoern A. Zeeb u8 bar_id = 2; 2691*8e93258fSBjoern A. Zeeb int ret; 2692*8e93258fSBjoern A. Zeeb 2693*8e93258fSBjoern A. Zeeb ret = pci_request_regions(pdev, KBUILD_MODNAME); 2694*8e93258fSBjoern A. Zeeb if (ret) { 2695*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "failed to request pci regions\n"); 2696*8e93258fSBjoern A. Zeeb goto err; 2697*8e93258fSBjoern A. Zeeb } 2698*8e93258fSBjoern A. Zeeb 2699*8e93258fSBjoern A. Zeeb ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 2700*8e93258fSBjoern A. Zeeb if (ret) { 2701*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "failed to set dma mask to 32-bit\n"); 2702*8e93258fSBjoern A. Zeeb goto err_release_regions; 2703*8e93258fSBjoern A. Zeeb } 2704*8e93258fSBjoern A. Zeeb 2705*8e93258fSBjoern A. Zeeb ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 2706*8e93258fSBjoern A. Zeeb if (ret) { 2707*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "failed to set consistent dma mask to 32-bit\n"); 2708*8e93258fSBjoern A. Zeeb goto err_release_regions; 2709*8e93258fSBjoern A. Zeeb } 2710*8e93258fSBjoern A. Zeeb 2711*8e93258fSBjoern A. Zeeb #if defined(__FreeBSD__) 2712*8e93258fSBjoern A. Zeeb linuxkpi_pcim_want_to_use_bus_functions(pdev); 2713*8e93258fSBjoern A. Zeeb #endif 2714*8e93258fSBjoern A. Zeeb resource_len = pci_resource_len(pdev, bar_id); 2715*8e93258fSBjoern A. Zeeb rtwpci->mmap = pci_iomap(pdev, bar_id, resource_len); 2716*8e93258fSBjoern A. Zeeb if (!rtwpci->mmap) { 2717*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "failed to map pci io\n"); 2718*8e93258fSBjoern A. Zeeb ret = -EIO; 2719*8e93258fSBjoern A. Zeeb goto err_release_regions; 2720*8e93258fSBjoern A. Zeeb } 2721*8e93258fSBjoern A. Zeeb 2722*8e93258fSBjoern A. Zeeb return 0; 2723*8e93258fSBjoern A. Zeeb 2724*8e93258fSBjoern A. Zeeb err_release_regions: 2725*8e93258fSBjoern A. Zeeb pci_release_regions(pdev); 2726*8e93258fSBjoern A. Zeeb err: 2727*8e93258fSBjoern A. Zeeb return ret; 2728*8e93258fSBjoern A. 
Zeeb } 2729*8e93258fSBjoern A. Zeeb 2730*8e93258fSBjoern A. Zeeb static void rtw89_pci_clear_mapping(struct rtw89_dev *rtwdev, 2731*8e93258fSBjoern A. Zeeb struct pci_dev *pdev) 2732*8e93258fSBjoern A. Zeeb { 2733*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2734*8e93258fSBjoern A. Zeeb 2735*8e93258fSBjoern A. Zeeb if (rtwpci->mmap) { 2736*8e93258fSBjoern A. Zeeb pci_iounmap(pdev, rtwpci->mmap); 2737*8e93258fSBjoern A. Zeeb pci_release_regions(pdev); 2738*8e93258fSBjoern A. Zeeb } 2739*8e93258fSBjoern A. Zeeb } 2740*8e93258fSBjoern A. Zeeb 2741*8e93258fSBjoern A. Zeeb static void rtw89_pci_free_tx_wd_ring(struct rtw89_dev *rtwdev, 2742*8e93258fSBjoern A. Zeeb struct pci_dev *pdev, 2743*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_ring *tx_ring) 2744*8e93258fSBjoern A. Zeeb { 2745*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 2746*8e93258fSBjoern A. Zeeb u8 *head = wd_ring->head; 2747*8e93258fSBjoern A. Zeeb dma_addr_t dma = wd_ring->dma; 2748*8e93258fSBjoern A. Zeeb u32 page_size = wd_ring->page_size; 2749*8e93258fSBjoern A. Zeeb u32 page_num = wd_ring->page_num; 2750*8e93258fSBjoern A. Zeeb u32 ring_sz = page_size * page_num; 2751*8e93258fSBjoern A. Zeeb 2752*8e93258fSBjoern A. Zeeb dma_free_coherent(&pdev->dev, ring_sz, head, dma); 2753*8e93258fSBjoern A. Zeeb wd_ring->head = NULL; 2754*8e93258fSBjoern A. Zeeb } 2755*8e93258fSBjoern A. Zeeb 2756*8e93258fSBjoern A. Zeeb static void rtw89_pci_free_tx_ring(struct rtw89_dev *rtwdev, 2757*8e93258fSBjoern A. Zeeb struct pci_dev *pdev, 2758*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_ring *tx_ring) 2759*8e93258fSBjoern A. Zeeb { 2760*8e93258fSBjoern A. Zeeb int ring_sz; 2761*8e93258fSBjoern A. Zeeb u8 *head; 2762*8e93258fSBjoern A. Zeeb dma_addr_t dma; 2763*8e93258fSBjoern A. Zeeb 2764*8e93258fSBjoern A. Zeeb head = tx_ring->bd_ring.head; 2765*8e93258fSBjoern A. Zeeb dma = tx_ring->bd_ring.dma; 2766*8e93258fSBjoern A. 
Zeeb ring_sz = tx_ring->bd_ring.desc_size * tx_ring->bd_ring.len; 2767*8e93258fSBjoern A. Zeeb dma_free_coherent(&pdev->dev, ring_sz, head, dma); 2768*8e93258fSBjoern A. Zeeb 2769*8e93258fSBjoern A. Zeeb tx_ring->bd_ring.head = NULL; 2770*8e93258fSBjoern A. Zeeb } 2771*8e93258fSBjoern A. Zeeb 2772*8e93258fSBjoern A. Zeeb static void rtw89_pci_free_tx_rings(struct rtw89_dev *rtwdev, 2773*8e93258fSBjoern A. Zeeb struct pci_dev *pdev) 2774*8e93258fSBjoern A. Zeeb { 2775*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2776*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_ring *tx_ring; 2777*8e93258fSBjoern A. Zeeb int i; 2778*8e93258fSBjoern A. Zeeb 2779*8e93258fSBjoern A. Zeeb for (i = 0; i < RTW89_TXCH_NUM; i++) { 2780*8e93258fSBjoern A. Zeeb tx_ring = &rtwpci->tx_rings[i]; 2781*8e93258fSBjoern A. Zeeb rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring); 2782*8e93258fSBjoern A. Zeeb rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring); 2783*8e93258fSBjoern A. Zeeb } 2784*8e93258fSBjoern A. Zeeb } 2785*8e93258fSBjoern A. Zeeb 2786*8e93258fSBjoern A. Zeeb static void rtw89_pci_free_rx_ring(struct rtw89_dev *rtwdev, 2787*8e93258fSBjoern A. Zeeb struct pci_dev *pdev, 2788*8e93258fSBjoern A. Zeeb struct rtw89_pci_rx_ring *rx_ring) 2789*8e93258fSBjoern A. Zeeb { 2790*8e93258fSBjoern A. Zeeb struct rtw89_pci_rx_info *rx_info; 2791*8e93258fSBjoern A. Zeeb struct sk_buff *skb; 2792*8e93258fSBjoern A. Zeeb dma_addr_t dma; 2793*8e93258fSBjoern A. Zeeb u32 buf_sz; 2794*8e93258fSBjoern A. Zeeb u8 *head; 2795*8e93258fSBjoern A. Zeeb int ring_sz = rx_ring->bd_ring.desc_size * rx_ring->bd_ring.len; 2796*8e93258fSBjoern A. Zeeb int i; 2797*8e93258fSBjoern A. Zeeb 2798*8e93258fSBjoern A. Zeeb buf_sz = rx_ring->buf_sz; 2799*8e93258fSBjoern A. Zeeb for (i = 0; i < rx_ring->bd_ring.len; i++) { 2800*8e93258fSBjoern A. Zeeb skb = rx_ring->buf[i]; 2801*8e93258fSBjoern A. Zeeb if (!skb) 2802*8e93258fSBjoern A. Zeeb continue; 2803*8e93258fSBjoern A. 
Zeeb 2804*8e93258fSBjoern A. Zeeb rx_info = RTW89_PCI_RX_SKB_CB(skb); 2805*8e93258fSBjoern A. Zeeb dma = rx_info->dma; 2806*8e93258fSBjoern A. Zeeb dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE); 2807*8e93258fSBjoern A. Zeeb dev_kfree_skb(skb); 2808*8e93258fSBjoern A. Zeeb rx_ring->buf[i] = NULL; 2809*8e93258fSBjoern A. Zeeb } 2810*8e93258fSBjoern A. Zeeb 2811*8e93258fSBjoern A. Zeeb head = rx_ring->bd_ring.head; 2812*8e93258fSBjoern A. Zeeb dma = rx_ring->bd_ring.dma; 2813*8e93258fSBjoern A. Zeeb dma_free_coherent(&pdev->dev, ring_sz, head, dma); 2814*8e93258fSBjoern A. Zeeb 2815*8e93258fSBjoern A. Zeeb rx_ring->bd_ring.head = NULL; 2816*8e93258fSBjoern A. Zeeb } 2817*8e93258fSBjoern A. Zeeb 2818*8e93258fSBjoern A. Zeeb static void rtw89_pci_free_rx_rings(struct rtw89_dev *rtwdev, 2819*8e93258fSBjoern A. Zeeb struct pci_dev *pdev) 2820*8e93258fSBjoern A. Zeeb { 2821*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2822*8e93258fSBjoern A. Zeeb struct rtw89_pci_rx_ring *rx_ring; 2823*8e93258fSBjoern A. Zeeb int i; 2824*8e93258fSBjoern A. Zeeb 2825*8e93258fSBjoern A. Zeeb for (i = 0; i < RTW89_RXCH_NUM; i++) { 2826*8e93258fSBjoern A. Zeeb rx_ring = &rtwpci->rx_rings[i]; 2827*8e93258fSBjoern A. Zeeb rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring); 2828*8e93258fSBjoern A. Zeeb } 2829*8e93258fSBjoern A. Zeeb } 2830*8e93258fSBjoern A. Zeeb 2831*8e93258fSBjoern A. Zeeb static void rtw89_pci_free_trx_rings(struct rtw89_dev *rtwdev, 2832*8e93258fSBjoern A. Zeeb struct pci_dev *pdev) 2833*8e93258fSBjoern A. Zeeb { 2834*8e93258fSBjoern A. Zeeb rtw89_pci_free_rx_rings(rtwdev, pdev); 2835*8e93258fSBjoern A. Zeeb rtw89_pci_free_tx_rings(rtwdev, pdev); 2836*8e93258fSBjoern A. Zeeb } 2837*8e93258fSBjoern A. Zeeb 2838*8e93258fSBjoern A. Zeeb static int rtw89_pci_init_rx_bd(struct rtw89_dev *rtwdev, struct pci_dev *pdev, 2839*8e93258fSBjoern A. Zeeb struct rtw89_pci_rx_ring *rx_ring, 2840*8e93258fSBjoern A. 
Zeeb struct sk_buff *skb, int buf_sz, u32 idx) 2841*8e93258fSBjoern A. Zeeb { 2842*8e93258fSBjoern A. Zeeb struct rtw89_pci_rx_info *rx_info; 2843*8e93258fSBjoern A. Zeeb struct rtw89_pci_rx_bd_32 *rx_bd; 2844*8e93258fSBjoern A. Zeeb dma_addr_t dma; 2845*8e93258fSBjoern A. Zeeb 2846*8e93258fSBjoern A. Zeeb if (!skb) 2847*8e93258fSBjoern A. Zeeb return -EINVAL; 2848*8e93258fSBjoern A. Zeeb 2849*8e93258fSBjoern A. Zeeb dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE); 2850*8e93258fSBjoern A. Zeeb if (dma_mapping_error(&pdev->dev, dma)) 2851*8e93258fSBjoern A. Zeeb return -EBUSY; 2852*8e93258fSBjoern A. Zeeb 2853*8e93258fSBjoern A. Zeeb rx_info = RTW89_PCI_RX_SKB_CB(skb); 2854*8e93258fSBjoern A. Zeeb rx_bd = RTW89_PCI_RX_BD(rx_ring, idx); 2855*8e93258fSBjoern A. Zeeb 2856*8e93258fSBjoern A. Zeeb memset(rx_bd, 0, sizeof(*rx_bd)); 2857*8e93258fSBjoern A. Zeeb rx_bd->buf_size = cpu_to_le16(buf_sz); 2858*8e93258fSBjoern A. Zeeb rx_bd->dma = cpu_to_le32(dma); 2859*8e93258fSBjoern A. Zeeb rx_info->dma = dma; 2860*8e93258fSBjoern A. Zeeb 2861*8e93258fSBjoern A. Zeeb return 0; 2862*8e93258fSBjoern A. Zeeb } 2863*8e93258fSBjoern A. Zeeb 2864*8e93258fSBjoern A. Zeeb static int rtw89_pci_alloc_tx_wd_ring(struct rtw89_dev *rtwdev, 2865*8e93258fSBjoern A. Zeeb struct pci_dev *pdev, 2866*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_ring *tx_ring, 2867*8e93258fSBjoern A. Zeeb enum rtw89_tx_channel txch) 2868*8e93258fSBjoern A. Zeeb { 2869*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring; 2870*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_wd *txwd; 2871*8e93258fSBjoern A. Zeeb dma_addr_t dma; 2872*8e93258fSBjoern A. Zeeb dma_addr_t cur_paddr; 2873*8e93258fSBjoern A. Zeeb u8 *head; 2874*8e93258fSBjoern A. Zeeb u8 *cur_vaddr; 2875*8e93258fSBjoern A. Zeeb u32 page_size = RTW89_PCI_TXWD_PAGE_SIZE; 2876*8e93258fSBjoern A. Zeeb u32 page_num = RTW89_PCI_TXWD_NUM_MAX; 2877*8e93258fSBjoern A. 
Zeeb u32 ring_sz = page_size * page_num; 2878*8e93258fSBjoern A. Zeeb u32 page_offset; 2879*8e93258fSBjoern A. Zeeb int i; 2880*8e93258fSBjoern A. Zeeb 2881*8e93258fSBjoern A. Zeeb /* FWCMD queue doesn't use txwd as pages */ 2882*8e93258fSBjoern A. Zeeb if (txch == RTW89_TXCH_CH12) 2883*8e93258fSBjoern A. Zeeb return 0; 2884*8e93258fSBjoern A. Zeeb 2885*8e93258fSBjoern A. Zeeb head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 2886*8e93258fSBjoern A. Zeeb if (!head) 2887*8e93258fSBjoern A. Zeeb return -ENOMEM; 2888*8e93258fSBjoern A. Zeeb 2889*8e93258fSBjoern A. Zeeb INIT_LIST_HEAD(&wd_ring->free_pages); 2890*8e93258fSBjoern A. Zeeb wd_ring->head = head; 2891*8e93258fSBjoern A. Zeeb wd_ring->dma = dma; 2892*8e93258fSBjoern A. Zeeb wd_ring->page_size = page_size; 2893*8e93258fSBjoern A. Zeeb wd_ring->page_num = page_num; 2894*8e93258fSBjoern A. Zeeb 2895*8e93258fSBjoern A. Zeeb page_offset = 0; 2896*8e93258fSBjoern A. Zeeb for (i = 0; i < page_num; i++) { 2897*8e93258fSBjoern A. Zeeb txwd = &wd_ring->pages[i]; 2898*8e93258fSBjoern A. Zeeb cur_paddr = dma + page_offset; 2899*8e93258fSBjoern A. Zeeb cur_vaddr = head + page_offset; 2900*8e93258fSBjoern A. Zeeb 2901*8e93258fSBjoern A. Zeeb skb_queue_head_init(&txwd->queue); 2902*8e93258fSBjoern A. Zeeb INIT_LIST_HEAD(&txwd->list); 2903*8e93258fSBjoern A. Zeeb txwd->paddr = cur_paddr; 2904*8e93258fSBjoern A. Zeeb txwd->vaddr = cur_vaddr; 2905*8e93258fSBjoern A. Zeeb txwd->len = page_size; 2906*8e93258fSBjoern A. Zeeb txwd->seq = i; 2907*8e93258fSBjoern A. Zeeb rtw89_pci_enqueue_txwd(tx_ring, txwd); 2908*8e93258fSBjoern A. Zeeb 2909*8e93258fSBjoern A. Zeeb page_offset += page_size; 2910*8e93258fSBjoern A. Zeeb } 2911*8e93258fSBjoern A. Zeeb 2912*8e93258fSBjoern A. Zeeb return 0; 2913*8e93258fSBjoern A. Zeeb } 2914*8e93258fSBjoern A. Zeeb 2915*8e93258fSBjoern A. Zeeb static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev, 2916*8e93258fSBjoern A. Zeeb struct pci_dev *pdev, 2917*8e93258fSBjoern A. 
Zeeb struct rtw89_pci_tx_ring *tx_ring, 2918*8e93258fSBjoern A. Zeeb u32 desc_size, u32 len, 2919*8e93258fSBjoern A. Zeeb enum rtw89_tx_channel txch) 2920*8e93258fSBjoern A. Zeeb { 2921*8e93258fSBjoern A. Zeeb const struct rtw89_pci_ch_dma_addr *txch_addr; 2922*8e93258fSBjoern A. Zeeb int ring_sz = desc_size * len; 2923*8e93258fSBjoern A. Zeeb u8 *head; 2924*8e93258fSBjoern A. Zeeb dma_addr_t dma; 2925*8e93258fSBjoern A. Zeeb int ret; 2926*8e93258fSBjoern A. Zeeb 2927*8e93258fSBjoern A. Zeeb ret = rtw89_pci_alloc_tx_wd_ring(rtwdev, pdev, tx_ring, txch); 2928*8e93258fSBjoern A. Zeeb if (ret) { 2929*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "failed to alloc txwd ring of txch %d\n", txch); 2930*8e93258fSBjoern A. Zeeb goto err; 2931*8e93258fSBjoern A. Zeeb } 2932*8e93258fSBjoern A. Zeeb 2933*8e93258fSBjoern A. Zeeb ret = rtw89_pci_get_txch_addrs(rtwdev, txch, &txch_addr); 2934*8e93258fSBjoern A. Zeeb if (ret) { 2935*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "failed to get address of txch %d", txch); 2936*8e93258fSBjoern A. Zeeb goto err_free_wd_ring; 2937*8e93258fSBjoern A. Zeeb } 2938*8e93258fSBjoern A. Zeeb 2939*8e93258fSBjoern A. Zeeb head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); 2940*8e93258fSBjoern A. Zeeb if (!head) { 2941*8e93258fSBjoern A. Zeeb ret = -ENOMEM; 2942*8e93258fSBjoern A. Zeeb goto err_free_wd_ring; 2943*8e93258fSBjoern A. Zeeb } 2944*8e93258fSBjoern A. Zeeb 2945*8e93258fSBjoern A. Zeeb INIT_LIST_HEAD(&tx_ring->busy_pages); 2946*8e93258fSBjoern A. Zeeb tx_ring->bd_ring.head = head; 2947*8e93258fSBjoern A. Zeeb tx_ring->bd_ring.dma = dma; 2948*8e93258fSBjoern A. Zeeb tx_ring->bd_ring.len = len; 2949*8e93258fSBjoern A. Zeeb tx_ring->bd_ring.desc_size = desc_size; 2950*8e93258fSBjoern A. Zeeb tx_ring->bd_ring.addr = *txch_addr; 2951*8e93258fSBjoern A. Zeeb tx_ring->bd_ring.wp = 0; 2952*8e93258fSBjoern A. Zeeb tx_ring->bd_ring.rp = 0; 2953*8e93258fSBjoern A. Zeeb tx_ring->txch = txch; 2954*8e93258fSBjoern A. 
Zeeb 2955*8e93258fSBjoern A. Zeeb return 0; 2956*8e93258fSBjoern A. Zeeb 2957*8e93258fSBjoern A. Zeeb err_free_wd_ring: 2958*8e93258fSBjoern A. Zeeb rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring); 2959*8e93258fSBjoern A. Zeeb err: 2960*8e93258fSBjoern A. Zeeb return ret; 2961*8e93258fSBjoern A. Zeeb } 2962*8e93258fSBjoern A. Zeeb 2963*8e93258fSBjoern A. Zeeb static int rtw89_pci_alloc_tx_rings(struct rtw89_dev *rtwdev, 2964*8e93258fSBjoern A. Zeeb struct pci_dev *pdev) 2965*8e93258fSBjoern A. Zeeb { 2966*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 2967*8e93258fSBjoern A. Zeeb struct rtw89_pci_tx_ring *tx_ring; 2968*8e93258fSBjoern A. Zeeb u32 desc_size; 2969*8e93258fSBjoern A. Zeeb u32 len; 2970*8e93258fSBjoern A. Zeeb u32 i, tx_allocated; 2971*8e93258fSBjoern A. Zeeb int ret; 2972*8e93258fSBjoern A. Zeeb 2973*8e93258fSBjoern A. Zeeb for (i = 0; i < RTW89_TXCH_NUM; i++) { 2974*8e93258fSBjoern A. Zeeb tx_ring = &rtwpci->tx_rings[i]; 2975*8e93258fSBjoern A. Zeeb desc_size = sizeof(struct rtw89_pci_tx_bd_32); 2976*8e93258fSBjoern A. Zeeb len = RTW89_PCI_TXBD_NUM_MAX; 2977*8e93258fSBjoern A. Zeeb ret = rtw89_pci_alloc_tx_ring(rtwdev, pdev, tx_ring, 2978*8e93258fSBjoern A. Zeeb desc_size, len, i); 2979*8e93258fSBjoern A. Zeeb if (ret) { 2980*8e93258fSBjoern A. Zeeb #if defined(__linux__) 2981*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "failed to alloc tx ring %d\n", i); 2982*8e93258fSBjoern A. Zeeb #elif defined(__FreeBSD__) 2983*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "failed to alloc tx ring %d: ret=%d\n", i, ret); 2984*8e93258fSBjoern A. Zeeb #endif 2985*8e93258fSBjoern A. Zeeb goto err_free; 2986*8e93258fSBjoern A. Zeeb } 2987*8e93258fSBjoern A. Zeeb } 2988*8e93258fSBjoern A. Zeeb 2989*8e93258fSBjoern A. Zeeb return 0; 2990*8e93258fSBjoern A. Zeeb 2991*8e93258fSBjoern A. Zeeb err_free: 2992*8e93258fSBjoern A. Zeeb tx_allocated = i; 2993*8e93258fSBjoern A. Zeeb for (i = 0; i < tx_allocated; i++) { 2994*8e93258fSBjoern A. 
/* NOTE(review): the first lines below are the visible tail of the
 * error-unwind loop of rtw89_pci_alloc_tx_rings(); that function begins
 * before this chunk, so only its trailing cleanup is reproduced here.
 */
		tx_ring = &rtwpci->tx_rings[i];
		rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring);
	}

	return ret;
}

/* Allocate one RX ring:
 *  - a coherent DMA buffer holding @len descriptors of @desc_size bytes,
 *  - one skb of RTW89_PCI_RX_BUF_SIZE per descriptor, handed to
 *    rtw89_pci_init_rx_bd() to be DMA-mapped and attached to the ring.
 *
 * On any failure, every skb initialized so far is unmapped and freed and
 * the descriptor memory is released.  Returns 0 or a negative errno.
 */
static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev,
				   struct pci_dev *pdev,
				   struct rtw89_pci_rx_ring *rx_ring,
				   u32 desc_size, u32 len, u32 rxch)
{
	const struct rtw89_pci_ch_dma_addr *rxch_addr;
	struct sk_buff *skb;
	u8 *head;
	dma_addr_t dma;
	int ring_sz = desc_size * len;
	int buf_sz = RTW89_PCI_RX_BUF_SIZE;
	int i, allocated;
	int ret;

	/* Resolve the per-channel register address set for this RX channel. */
	ret = rtw89_pci_get_rxch_addrs(rtwdev, rxch, &rxch_addr);
	if (ret) {
		rtw89_err(rtwdev, "failed to get address of rxch %d", rxch);
		return ret;
	}

	head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
	if (!head) {
		ret = -ENOMEM;
		goto err;
	}

	rx_ring->bd_ring.head = head;
	rx_ring->bd_ring.dma = dma;
	rx_ring->bd_ring.len = len;
	rx_ring->bd_ring.desc_size = desc_size;
	rx_ring->bd_ring.addr = *rxch_addr;
	rx_ring->bd_ring.wp = 0;
	rx_ring->bd_ring.rp = 0;
	rx_ring->buf_sz = buf_sz;
	rx_ring->diliver_skb = NULL;	/* sic: "diliver" typo is in the struct definition */
	rx_ring->diliver_desc.ready = false;

	for (i = 0; i < len; i++) {
		skb = dev_alloc_skb(buf_sz);
		if (!skb) {
			ret = -ENOMEM;
			goto err_free;
		}

		memset(skb->data, 0, buf_sz);
		rx_ring->buf[i] = skb;
		ret = rtw89_pci_init_rx_bd(rtwdev, pdev, rx_ring, skb,
					   buf_sz, i);
		if (ret) {
#if defined(__linux__)
			rtw89_err(rtwdev, "failed to init rx buf %d\n", i);
#elif defined(__FreeBSD__)
			rtw89_err(rtwdev, "failed to init rx buf %d ret=%d\n", i, ret);
#endif
			dev_kfree_skb_any(skb);
			rx_ring->buf[i] = NULL;
			goto err_free;
		}
	}

	return 0;

err_free:
	allocated = i;
	for (i = 0; i < allocated; i++) {
		skb = rx_ring->buf[i];
		if (!skb)
			continue;
		/* DMA address presumably stashed in skb->cb by
		 * rtw89_pci_init_rx_bd() -- TODO confirm against that helper.
		 */
		dma = *((dma_addr_t *)skb->cb);
		dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
		dev_kfree_skb(skb);
		rx_ring->buf[i] = NULL;
	}

	head = rx_ring->bd_ring.head;
	dma = rx_ring->bd_ring.dma;
	dma_free_coherent(&pdev->dev, ring_sz, head, dma);

	rx_ring->bd_ring.head = NULL;
err:
	return ret;
}

/* Allocate all RTW89_RXCH_NUM RX rings with 32-byte descriptors.  On
 * failure, the rings already allocated are freed before returning the
 * errno.
 */
static int rtw89_pci_alloc_rx_rings(struct rtw89_dev *rtwdev,
				    struct pci_dev *pdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_rx_ring *rx_ring;
	u32 desc_size;
	u32 len;
	int i, rx_allocated;
	int ret;

	for (i = 0; i < RTW89_RXCH_NUM; i++) {
		rx_ring = &rtwpci->rx_rings[i];
		desc_size = sizeof(struct rtw89_pci_rx_bd_32);
		len = RTW89_PCI_RXBD_NUM_MAX;
		ret = rtw89_pci_alloc_rx_ring(rtwdev, pdev, rx_ring,
					      desc_size, len, i);
		if (ret) {
			rtw89_err(rtwdev, "failed to alloc rx ring %d\n", i);
			goto err_free;
		}
	}

	return 0;

err_free:
	rx_allocated = i;
	for (i = 0; i < rx_allocated; i++) {
		rx_ring = &rtwpci->rx_rings[i];
		rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring);
	}

	return ret;
}

/* Allocate both TX and RX DMA ring sets; TX rings are unwound if the RX
 * allocation fails.
 */
static int rtw89_pci_alloc_trx_rings(struct rtw89_dev *rtwdev,
				     struct pci_dev *pdev)
{
	int ret;

	ret = rtw89_pci_alloc_tx_rings(rtwdev, pdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to alloc dma tx rings\n");
		goto err;
	}

	ret = rtw89_pci_alloc_rx_rings(rtwdev, pdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to alloc dma rx rings\n");
		goto err_free_tx_rings;
	}

	return 0;

err_free_tx_rings:
	rtw89_pci_free_tx_rings(rtwdev, pdev);
err:
	return ret;
}

/* Initialize the pending and to-be-released H2C (host-to-chip firmware
 * command) skb queues.
 */
static void rtw89_pci_h2c_init(struct rtw89_dev *rtwdev,
			       struct rtw89_pci *rtwpci)
{
	skb_queue_head_init(&rtwpci->h2c_queue);
	skb_queue_head_init(&rtwpci->h2c_release_queue);
}

/* Set up all PCI-side software resources: register mapping, TX/RX DMA
 * rings, H2C queues and the irq/trx spinlocks.  Mirror of
 * rtw89_pci_clear_resource().
 */
static int rtw89_pci_setup_resource(struct rtw89_dev *rtwdev,
				    struct pci_dev *pdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	int ret;

	ret = rtw89_pci_setup_mapping(rtwdev, pdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to setup pci mapping\n");
		goto err;
	}

	ret = rtw89_pci_alloc_trx_rings(rtwdev, pdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to alloc pci trx rings\n");
		goto err_pci_unmap;
	}

	rtw89_pci_h2c_init(rtwdev, rtwpci);

	spin_lock_init(&rtwpci->irq_lock);
	spin_lock_init(&rtwpci->trx_lock);

	return 0;

err_pci_unmap:
	rtw89_pci_clear_mapping(rtwdev, pdev);
err:
	return ret;
}

/* Tear down what rtw89_pci_setup_resource() created and drop any H2C
 * fwcmd skbs still queued.
 */
static void rtw89_pci_clear_resource(struct rtw89_dev *rtwdev,
				     struct pci_dev *pdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;

	rtw89_pci_free_trx_rings(rtwdev, pdev);
	rtw89_pci_clear_mapping(rtwdev, pdev);
	rtw89_pci_release_fwcmd(rtwdev, rtwpci,
				skb_queue_len(&rtwpci->h2c_queue), true);
}
Zeeb void rtw89_pci_config_intr_mask(struct rtw89_dev *rtwdev) 3193*8e93258fSBjoern A. Zeeb { 3194*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3195*8e93258fSBjoern A. Zeeb 3196*8e93258fSBjoern A. Zeeb rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | 0; 3197*8e93258fSBjoern A. Zeeb 3198*8e93258fSBjoern A. Zeeb if (rtwpci->under_recovery) { 3199*8e93258fSBjoern A. Zeeb rtwpci->intrs[0] = B_AX_HS0ISR_IND_INT_EN; 3200*8e93258fSBjoern A. Zeeb rtwpci->intrs[1] = 0; 3201*8e93258fSBjoern A. Zeeb } else { 3202*8e93258fSBjoern A. Zeeb rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN | 3203*8e93258fSBjoern A. Zeeb B_AX_RXDMA_INT_EN | 3204*8e93258fSBjoern A. Zeeb B_AX_RXP1DMA_INT_EN | 3205*8e93258fSBjoern A. Zeeb B_AX_RPQDMA_INT_EN | 3206*8e93258fSBjoern A. Zeeb B_AX_RXDMA_STUCK_INT_EN | 3207*8e93258fSBjoern A. Zeeb B_AX_RDU_INT_EN | 3208*8e93258fSBjoern A. Zeeb B_AX_RPQBD_FULL_INT_EN | 3209*8e93258fSBjoern A. Zeeb B_AX_HS0ISR_IND_INT_EN; 3210*8e93258fSBjoern A. Zeeb 3211*8e93258fSBjoern A. Zeeb rtwpci->intrs[1] = B_AX_HC10ISR_IND_INT_EN; 3212*8e93258fSBjoern A. Zeeb } 3213*8e93258fSBjoern A. Zeeb } 3214*8e93258fSBjoern A. Zeeb EXPORT_SYMBOL(rtw89_pci_config_intr_mask); 3215*8e93258fSBjoern A. Zeeb 3216*8e93258fSBjoern A. Zeeb static void rtw89_pci_recovery_intr_mask_v1(struct rtw89_dev *rtwdev) 3217*8e93258fSBjoern A. Zeeb { 3218*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3219*8e93258fSBjoern A. Zeeb 3220*8e93258fSBjoern A. Zeeb rtwpci->ind_intrs = B_AX_HS0ISR_IND_INT_EN; 3221*8e93258fSBjoern A. Zeeb rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; 3222*8e93258fSBjoern A. Zeeb rtwpci->intrs[0] = 0; 3223*8e93258fSBjoern A. Zeeb rtwpci->intrs[1] = 0; 3224*8e93258fSBjoern A. Zeeb } 3225*8e93258fSBjoern A. Zeeb 3226*8e93258fSBjoern A. Zeeb static void rtw89_pci_default_intr_mask_v1(struct rtw89_dev *rtwdev) 3227*8e93258fSBjoern A. Zeeb { 3228*8e93258fSBjoern A. 
Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3229*8e93258fSBjoern A. Zeeb 3230*8e93258fSBjoern A. Zeeb rtwpci->ind_intrs = B_AX_HCI_AXIDMA_INT_EN | 3231*8e93258fSBjoern A. Zeeb B_AX_HS1ISR_IND_INT_EN | 3232*8e93258fSBjoern A. Zeeb B_AX_HS0ISR_IND_INT_EN; 3233*8e93258fSBjoern A. Zeeb rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; 3234*8e93258fSBjoern A. Zeeb rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN | 3235*8e93258fSBjoern A. Zeeb B_AX_RXDMA_INT_EN | 3236*8e93258fSBjoern A. Zeeb B_AX_RXP1DMA_INT_EN | 3237*8e93258fSBjoern A. Zeeb B_AX_RPQDMA_INT_EN | 3238*8e93258fSBjoern A. Zeeb B_AX_RXDMA_STUCK_INT_EN | 3239*8e93258fSBjoern A. Zeeb B_AX_RDU_INT_EN | 3240*8e93258fSBjoern A. Zeeb B_AX_RPQBD_FULL_INT_EN; 3241*8e93258fSBjoern A. Zeeb rtwpci->intrs[1] = B_AX_GPIO18_INT_EN; 3242*8e93258fSBjoern A. Zeeb } 3243*8e93258fSBjoern A. Zeeb 3244*8e93258fSBjoern A. Zeeb static void rtw89_pci_low_power_intr_mask_v1(struct rtw89_dev *rtwdev) 3245*8e93258fSBjoern A. Zeeb { 3246*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3247*8e93258fSBjoern A. Zeeb 3248*8e93258fSBjoern A. Zeeb rtwpci->ind_intrs = B_AX_HS1ISR_IND_INT_EN | 3249*8e93258fSBjoern A. Zeeb B_AX_HS0ISR_IND_INT_EN; 3250*8e93258fSBjoern A. Zeeb rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN; 3251*8e93258fSBjoern A. Zeeb rtwpci->intrs[0] = 0; 3252*8e93258fSBjoern A. Zeeb rtwpci->intrs[1] = B_AX_GPIO18_INT_EN; 3253*8e93258fSBjoern A. Zeeb } 3254*8e93258fSBjoern A. Zeeb 3255*8e93258fSBjoern A. Zeeb void rtw89_pci_config_intr_mask_v1(struct rtw89_dev *rtwdev) 3256*8e93258fSBjoern A. Zeeb { 3257*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3258*8e93258fSBjoern A. Zeeb 3259*8e93258fSBjoern A. Zeeb if (rtwpci->under_recovery) 3260*8e93258fSBjoern A. Zeeb rtw89_pci_recovery_intr_mask_v1(rtwdev); 3261*8e93258fSBjoern A. Zeeb else if (rtwpci->low_power) 3262*8e93258fSBjoern A. 
Zeeb rtw89_pci_low_power_intr_mask_v1(rtwdev); 3263*8e93258fSBjoern A. Zeeb else 3264*8e93258fSBjoern A. Zeeb rtw89_pci_default_intr_mask_v1(rtwdev); 3265*8e93258fSBjoern A. Zeeb } 3266*8e93258fSBjoern A. Zeeb EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v1); 3267*8e93258fSBjoern A. Zeeb 3268*8e93258fSBjoern A. Zeeb static int rtw89_pci_request_irq(struct rtw89_dev *rtwdev, 3269*8e93258fSBjoern A. Zeeb struct pci_dev *pdev) 3270*8e93258fSBjoern A. Zeeb { 3271*8e93258fSBjoern A. Zeeb unsigned long flags = 0; 3272*8e93258fSBjoern A. Zeeb int ret; 3273*8e93258fSBjoern A. Zeeb 3274*8e93258fSBjoern A. Zeeb flags |= PCI_IRQ_LEGACY | PCI_IRQ_MSI; 3275*8e93258fSBjoern A. Zeeb ret = pci_alloc_irq_vectors(pdev, 1, 1, flags); 3276*8e93258fSBjoern A. Zeeb if (ret < 0) { 3277*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "failed to alloc irq vectors, ret %d\n", ret); 3278*8e93258fSBjoern A. Zeeb goto err; 3279*8e93258fSBjoern A. Zeeb } 3280*8e93258fSBjoern A. Zeeb 3281*8e93258fSBjoern A. Zeeb ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq, 3282*8e93258fSBjoern A. Zeeb rtw89_pci_interrupt_handler, 3283*8e93258fSBjoern A. Zeeb rtw89_pci_interrupt_threadfn, 3284*8e93258fSBjoern A. Zeeb IRQF_SHARED, KBUILD_MODNAME, rtwdev); 3285*8e93258fSBjoern A. Zeeb if (ret) { 3286*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "failed to request threaded irq\n"); 3287*8e93258fSBjoern A. Zeeb goto err_free_vector; 3288*8e93258fSBjoern A. Zeeb } 3289*8e93258fSBjoern A. Zeeb 3290*8e93258fSBjoern A. Zeeb rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RESET); 3291*8e93258fSBjoern A. Zeeb 3292*8e93258fSBjoern A. Zeeb return 0; 3293*8e93258fSBjoern A. Zeeb 3294*8e93258fSBjoern A. Zeeb err_free_vector: 3295*8e93258fSBjoern A. Zeeb pci_free_irq_vectors(pdev); 3296*8e93258fSBjoern A. Zeeb err: 3297*8e93258fSBjoern A. Zeeb return ret; 3298*8e93258fSBjoern A. Zeeb } 3299*8e93258fSBjoern A. Zeeb 3300*8e93258fSBjoern A. Zeeb static void rtw89_pci_free_irq(struct rtw89_dev *rtwdev, 3301*8e93258fSBjoern A. 
Zeeb struct pci_dev *pdev) 3302*8e93258fSBjoern A. Zeeb { 3303*8e93258fSBjoern A. Zeeb devm_free_irq(rtwdev->dev, pdev->irq, rtwdev); 3304*8e93258fSBjoern A. Zeeb pci_free_irq_vectors(pdev); 3305*8e93258fSBjoern A. Zeeb } 3306*8e93258fSBjoern A. Zeeb 3307*8e93258fSBjoern A. Zeeb static u16 gray_code_to_bin(u16 gray_code, u32 bit_num) 3308*8e93258fSBjoern A. Zeeb { 3309*8e93258fSBjoern A. Zeeb u16 bin = 0, gray_bit; 3310*8e93258fSBjoern A. Zeeb u32 bit_idx; 3311*8e93258fSBjoern A. Zeeb 3312*8e93258fSBjoern A. Zeeb for (bit_idx = 0; bit_idx < bit_num; bit_idx++) { 3313*8e93258fSBjoern A. Zeeb gray_bit = (gray_code >> bit_idx) & 0x1; 3314*8e93258fSBjoern A. Zeeb if (bit_num - bit_idx > 1) 3315*8e93258fSBjoern A. Zeeb gray_bit ^= (gray_code >> (bit_idx + 1)) & 0x1; 3316*8e93258fSBjoern A. Zeeb bin |= (gray_bit << bit_idx); 3317*8e93258fSBjoern A. Zeeb } 3318*8e93258fSBjoern A. Zeeb 3319*8e93258fSBjoern A. Zeeb return bin; 3320*8e93258fSBjoern A. Zeeb } 3321*8e93258fSBjoern A. Zeeb 3322*8e93258fSBjoern A. Zeeb static int rtw89_pci_filter_out(struct rtw89_dev *rtwdev) 3323*8e93258fSBjoern A. Zeeb { 3324*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3325*8e93258fSBjoern A. Zeeb struct pci_dev *pdev = rtwpci->pdev; 3326*8e93258fSBjoern A. Zeeb u16 val16, filter_out_val; 3327*8e93258fSBjoern A. Zeeb u32 val, phy_offset; 3328*8e93258fSBjoern A. Zeeb int ret; 3329*8e93258fSBjoern A. Zeeb 3330*8e93258fSBjoern A. Zeeb if (rtwdev->chip->chip_id != RTL8852C) 3331*8e93258fSBjoern A. Zeeb return 0; 3332*8e93258fSBjoern A. Zeeb 3333*8e93258fSBjoern A. Zeeb val = rtw89_read32_mask(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK); 3334*8e93258fSBjoern A. Zeeb if (val == B_AX_ASPM_CTRL_L1) 3335*8e93258fSBjoern A. Zeeb return 0; 3336*8e93258fSBjoern A. Zeeb 3337*8e93258fSBjoern A. Zeeb ret = pci_read_config_dword(pdev, RTW89_PCIE_L1_STS_V1, &val); 3338*8e93258fSBjoern A. Zeeb if (ret) 3339*8e93258fSBjoern A. 
Zeeb return ret; 3340*8e93258fSBjoern A. Zeeb 3341*8e93258fSBjoern A. Zeeb val = FIELD_GET(RTW89_BCFG_LINK_SPEED_MASK, val); 3342*8e93258fSBjoern A. Zeeb if (val == RTW89_PCIE_GEN1_SPEED) { 3343*8e93258fSBjoern A. Zeeb phy_offset = R_RAC_DIRECT_OFFSET_G1; 3344*8e93258fSBjoern A. Zeeb } else if (val == RTW89_PCIE_GEN2_SPEED) { 3345*8e93258fSBjoern A. Zeeb phy_offset = R_RAC_DIRECT_OFFSET_G2; 3346*8e93258fSBjoern A. Zeeb val16 = rtw89_read16(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT); 3347*8e93258fSBjoern A. Zeeb rtw89_write16_set(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT, 3348*8e93258fSBjoern A. Zeeb val16 | B_PCIE_BIT_PINOUT_DIS); 3349*8e93258fSBjoern A. Zeeb rtw89_write16_set(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT, 3350*8e93258fSBjoern A. Zeeb val16 & ~B_PCIE_BIT_RD_SEL); 3351*8e93258fSBjoern A. Zeeb 3352*8e93258fSBjoern A. Zeeb val16 = rtw89_read16_mask(rtwdev, 3353*8e93258fSBjoern A. Zeeb phy_offset + RAC_ANA1F * RAC_MULT, 3354*8e93258fSBjoern A. Zeeb FILTER_OUT_EQ_MASK); 3355*8e93258fSBjoern A. Zeeb val16 = gray_code_to_bin(val16, hweight16(val16)); 3356*8e93258fSBjoern A. Zeeb filter_out_val = rtw89_read16(rtwdev, phy_offset + RAC_ANA24 * 3357*8e93258fSBjoern A. Zeeb RAC_MULT); 3358*8e93258fSBjoern A. Zeeb filter_out_val &= ~REG_FILTER_OUT_MASK; 3359*8e93258fSBjoern A. Zeeb filter_out_val |= FIELD_PREP(REG_FILTER_OUT_MASK, val16); 3360*8e93258fSBjoern A. Zeeb 3361*8e93258fSBjoern A. Zeeb rtw89_write16(rtwdev, phy_offset + RAC_ANA24 * RAC_MULT, 3362*8e93258fSBjoern A. Zeeb filter_out_val); 3363*8e93258fSBjoern A. Zeeb rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0A * RAC_MULT, 3364*8e93258fSBjoern A. Zeeb B_BAC_EQ_SEL); 3365*8e93258fSBjoern A. Zeeb rtw89_write16_set(rtwdev, 3366*8e93258fSBjoern A. Zeeb R_RAC_DIRECT_OFFSET_G1 + RAC_ANA0C * RAC_MULT, 3367*8e93258fSBjoern A. Zeeb B_PCIE_BIT_PSAVE); 3368*8e93258fSBjoern A. Zeeb } else { 3369*8e93258fSBjoern A. Zeeb return -EOPNOTSUPP; 3370*8e93258fSBjoern A. Zeeb } 3371*8e93258fSBjoern A. 
Zeeb rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0C * RAC_MULT, 3372*8e93258fSBjoern A. Zeeb B_PCIE_BIT_PSAVE); 3373*8e93258fSBjoern A. Zeeb 3374*8e93258fSBjoern A. Zeeb return 0; 3375*8e93258fSBjoern A. Zeeb } 3376*8e93258fSBjoern A. Zeeb 3377*8e93258fSBjoern A. Zeeb static void rtw89_pci_clkreq_set(struct rtw89_dev *rtwdev, bool enable) 3378*8e93258fSBjoern A. Zeeb { 3379*8e93258fSBjoern A. Zeeb enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 3380*8e93258fSBjoern A. Zeeb int ret; 3381*8e93258fSBjoern A. Zeeb 3382*8e93258fSBjoern A. Zeeb if (rtw89_pci_disable_clkreq) 3383*8e93258fSBjoern A. Zeeb return; 3384*8e93258fSBjoern A. Zeeb 3385*8e93258fSBjoern A. Zeeb ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL, 3386*8e93258fSBjoern A. Zeeb PCIE_CLKDLY_HW_30US); 3387*8e93258fSBjoern A. Zeeb if (ret) 3388*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "failed to set CLKREQ Delay\n"); 3389*8e93258fSBjoern A. Zeeb 3390*8e93258fSBjoern A. Zeeb if (chip_id == RTL8852A) { 3391*8e93258fSBjoern A. Zeeb if (enable) 3392*8e93258fSBjoern A. Zeeb ret = rtw89_pci_config_byte_set(rtwdev, 3393*8e93258fSBjoern A. Zeeb RTW89_PCIE_L1_CTRL, 3394*8e93258fSBjoern A. Zeeb RTW89_PCIE_BIT_CLK); 3395*8e93258fSBjoern A. Zeeb else 3396*8e93258fSBjoern A. Zeeb ret = rtw89_pci_config_byte_clr(rtwdev, 3397*8e93258fSBjoern A. Zeeb RTW89_PCIE_L1_CTRL, 3398*8e93258fSBjoern A. Zeeb RTW89_PCIE_BIT_CLK); 3399*8e93258fSBjoern A. Zeeb if (ret) 3400*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "failed to %s CLKREQ_L1, ret=%d", 3401*8e93258fSBjoern A. Zeeb enable ? "set" : "unset", ret); 3402*8e93258fSBjoern A. Zeeb } else if (chip_id == RTL8852C) { 3403*8e93258fSBjoern A. Zeeb rtw89_write32_set(rtwdev, R_AX_PCIE_LAT_CTRL, 3404*8e93258fSBjoern A. Zeeb B_AX_CLK_REQ_SEL_OPT | B_AX_CLK_REQ_SEL); 3405*8e93258fSBjoern A. Zeeb if (enable) 3406*8e93258fSBjoern A. Zeeb rtw89_write32_set(rtwdev, R_AX_L1_CLK_CTRL, 3407*8e93258fSBjoern A. Zeeb B_AX_CLK_REQ_N); 3408*8e93258fSBjoern A. 
Zeeb else 3409*8e93258fSBjoern A. Zeeb rtw89_write32_clr(rtwdev, R_AX_L1_CLK_CTRL, 3410*8e93258fSBjoern A. Zeeb B_AX_CLK_REQ_N); 3411*8e93258fSBjoern A. Zeeb } 3412*8e93258fSBjoern A. Zeeb } 3413*8e93258fSBjoern A. Zeeb 3414*8e93258fSBjoern A. Zeeb static void rtw89_pci_aspm_set(struct rtw89_dev *rtwdev, bool enable) 3415*8e93258fSBjoern A. Zeeb { 3416*8e93258fSBjoern A. Zeeb enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 3417*8e93258fSBjoern A. Zeeb u8 value = 0; 3418*8e93258fSBjoern A. Zeeb int ret; 3419*8e93258fSBjoern A. Zeeb 3420*8e93258fSBjoern A. Zeeb if (rtw89_pci_disable_aspm_l1) 3421*8e93258fSBjoern A. Zeeb return; 3422*8e93258fSBjoern A. Zeeb 3423*8e93258fSBjoern A. Zeeb ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, &value); 3424*8e93258fSBjoern A. Zeeb if (ret) 3425*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "failed to read ASPM Delay\n"); 3426*8e93258fSBjoern A. Zeeb 3427*8e93258fSBjoern A. Zeeb value &= ~(RTW89_L1DLY_MASK | RTW89_L0DLY_MASK); 3428*8e93258fSBjoern A. Zeeb value |= FIELD_PREP(RTW89_L1DLY_MASK, PCIE_L1DLY_16US) | 3429*8e93258fSBjoern A. Zeeb FIELD_PREP(RTW89_L0DLY_MASK, PCIE_L0SDLY_4US); 3430*8e93258fSBjoern A. Zeeb 3431*8e93258fSBjoern A. Zeeb ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, value); 3432*8e93258fSBjoern A. Zeeb if (ret) 3433*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "failed to read ASPM Delay\n"); 3434*8e93258fSBjoern A. Zeeb 3435*8e93258fSBjoern A. Zeeb if (chip_id == RTL8852A || chip_id == RTL8852B) { 3436*8e93258fSBjoern A. Zeeb if (enable) 3437*8e93258fSBjoern A. Zeeb ret = rtw89_pci_config_byte_set(rtwdev, 3438*8e93258fSBjoern A. Zeeb RTW89_PCIE_L1_CTRL, 3439*8e93258fSBjoern A. Zeeb RTW89_PCIE_BIT_L1); 3440*8e93258fSBjoern A. Zeeb else 3441*8e93258fSBjoern A. Zeeb ret = rtw89_pci_config_byte_clr(rtwdev, 3442*8e93258fSBjoern A. Zeeb RTW89_PCIE_L1_CTRL, 3443*8e93258fSBjoern A. Zeeb RTW89_PCIE_BIT_L1); 3444*8e93258fSBjoern A. 
Zeeb } else if (chip_id == RTL8852C) { 3445*8e93258fSBjoern A. Zeeb if (enable) 3446*8e93258fSBjoern A. Zeeb rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1, 3447*8e93258fSBjoern A. Zeeb B_AX_ASPM_CTRL_L1); 3448*8e93258fSBjoern A. Zeeb else 3449*8e93258fSBjoern A. Zeeb rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1, 3450*8e93258fSBjoern A. Zeeb B_AX_ASPM_CTRL_L1); 3451*8e93258fSBjoern A. Zeeb } 3452*8e93258fSBjoern A. Zeeb if (ret) 3453*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "failed to %s ASPM L1, ret=%d", 3454*8e93258fSBjoern A. Zeeb enable ? "set" : "unset", ret); 3455*8e93258fSBjoern A. Zeeb } 3456*8e93258fSBjoern A. Zeeb 3457*8e93258fSBjoern A. Zeeb static void rtw89_pci_recalc_int_mit(struct rtw89_dev *rtwdev) 3458*8e93258fSBjoern A. Zeeb { 3459*8e93258fSBjoern A. Zeeb struct rtw89_traffic_stats *stats = &rtwdev->stats; 3460*8e93258fSBjoern A. Zeeb enum rtw89_tfc_lv tx_tfc_lv = stats->tx_tfc_lv; 3461*8e93258fSBjoern A. Zeeb enum rtw89_tfc_lv rx_tfc_lv = stats->rx_tfc_lv; 3462*8e93258fSBjoern A. Zeeb u32 val = 0; 3463*8e93258fSBjoern A. Zeeb 3464*8e93258fSBjoern A. Zeeb if (!rtwdev->scanning && 3465*8e93258fSBjoern A. Zeeb (tx_tfc_lv >= RTW89_TFC_HIGH || rx_tfc_lv >= RTW89_TFC_HIGH)) 3466*8e93258fSBjoern A. Zeeb val = B_AX_RXMIT_RXP2_SEL | B_AX_RXMIT_RXP1_SEL | 3467*8e93258fSBjoern A. Zeeb FIELD_PREP(B_AX_RXCOUNTER_MATCH_MASK, RTW89_PCI_RXBD_NUM_MAX / 2) | 3468*8e93258fSBjoern A. Zeeb FIELD_PREP(B_AX_RXTIMER_UNIT_MASK, AX_RXTIMER_UNIT_64US) | 3469*8e93258fSBjoern A. Zeeb FIELD_PREP(B_AX_RXTIMER_MATCH_MASK, 2048 / 64); 3470*8e93258fSBjoern A. Zeeb 3471*8e93258fSBjoern A. Zeeb rtw89_write32(rtwdev, R_AX_INT_MIT_RX, val); 3472*8e93258fSBjoern A. Zeeb } 3473*8e93258fSBjoern A. Zeeb 3474*8e93258fSBjoern A. Zeeb static void rtw89_pci_link_cfg(struct rtw89_dev *rtwdev) 3475*8e93258fSBjoern A. Zeeb { 3476*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3477*8e93258fSBjoern A. 
Zeeb struct pci_dev *pdev = rtwpci->pdev; 3478*8e93258fSBjoern A. Zeeb u16 link_ctrl; 3479*8e93258fSBjoern A. Zeeb int ret; 3480*8e93258fSBjoern A. Zeeb 3481*8e93258fSBjoern A. Zeeb /* Though there is standard PCIE configuration space to set the 3482*8e93258fSBjoern A. Zeeb * link control register, but by Realtek's design, driver should 3483*8e93258fSBjoern A. Zeeb * check if host supports CLKREQ/ASPM to enable the HW module. 3484*8e93258fSBjoern A. Zeeb * 3485*8e93258fSBjoern A. Zeeb * These functions are implemented by two HW modules associated, 3486*8e93258fSBjoern A. Zeeb * one is responsible to access PCIE configuration space to 3487*8e93258fSBjoern A. Zeeb * follow the host settings, and another is in charge of doing 3488*8e93258fSBjoern A. Zeeb * CLKREQ/ASPM mechanisms, it is default disabled. Because sometimes 3489*8e93258fSBjoern A. Zeeb * the host does not support it, and due to some reasons or wrong 3490*8e93258fSBjoern A. Zeeb * settings (ex. CLKREQ# not Bi-Direction), it could lead to device 3491*8e93258fSBjoern A. Zeeb * loss if HW misbehaves on the link. 3492*8e93258fSBjoern A. Zeeb * 3493*8e93258fSBjoern A. Zeeb * Hence it's designed that driver should first check the PCIE 3494*8e93258fSBjoern A. Zeeb * configuration space is sync'ed and enabled, then driver can turn 3495*8e93258fSBjoern A. Zeeb * on the other module that is actually working on the mechanism. 3496*8e93258fSBjoern A. Zeeb */ 3497*8e93258fSBjoern A. Zeeb ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl); 3498*8e93258fSBjoern A. Zeeb if (ret) { 3499*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret); 3500*8e93258fSBjoern A. Zeeb return; 3501*8e93258fSBjoern A. Zeeb } 3502*8e93258fSBjoern A. Zeeb 3503*8e93258fSBjoern A. Zeeb if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN) 3504*8e93258fSBjoern A. Zeeb rtw89_pci_clkreq_set(rtwdev, true); 3505*8e93258fSBjoern A. Zeeb 3506*8e93258fSBjoern A. 
Zeeb if (link_ctrl & PCI_EXP_LNKCTL_ASPM_L1) 3507*8e93258fSBjoern A. Zeeb rtw89_pci_aspm_set(rtwdev, true); 3508*8e93258fSBjoern A. Zeeb } 3509*8e93258fSBjoern A. Zeeb 3510*8e93258fSBjoern A. Zeeb static void rtw89_pci_l1ss_set(struct rtw89_dev *rtwdev, bool enable) 3511*8e93258fSBjoern A. Zeeb { 3512*8e93258fSBjoern A. Zeeb enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 3513*8e93258fSBjoern A. Zeeb int ret; 3514*8e93258fSBjoern A. Zeeb 3515*8e93258fSBjoern A. Zeeb if (chip_id == RTL8852A || chip_id == RTL8852B) { 3516*8e93258fSBjoern A. Zeeb if (enable) 3517*8e93258fSBjoern A. Zeeb ret = rtw89_pci_config_byte_set(rtwdev, 3518*8e93258fSBjoern A. Zeeb RTW89_PCIE_TIMER_CTRL, 3519*8e93258fSBjoern A. Zeeb RTW89_PCIE_BIT_L1SUB); 3520*8e93258fSBjoern A. Zeeb else 3521*8e93258fSBjoern A. Zeeb ret = rtw89_pci_config_byte_clr(rtwdev, 3522*8e93258fSBjoern A. Zeeb RTW89_PCIE_TIMER_CTRL, 3523*8e93258fSBjoern A. Zeeb RTW89_PCIE_BIT_L1SUB); 3524*8e93258fSBjoern A. Zeeb if (ret) 3525*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "failed to %s L1SS, ret=%d", 3526*8e93258fSBjoern A. Zeeb enable ? "set" : "unset", ret); 3527*8e93258fSBjoern A. Zeeb } else if (chip_id == RTL8852C) { 3528*8e93258fSBjoern A. Zeeb ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_L1SS_STS_V1, 3529*8e93258fSBjoern A. Zeeb RTW89_PCIE_BIT_ASPM_L11 | 3530*8e93258fSBjoern A. Zeeb RTW89_PCIE_BIT_PCI_L11); 3531*8e93258fSBjoern A. Zeeb if (ret) 3532*8e93258fSBjoern A. Zeeb rtw89_warn(rtwdev, "failed to unset ASPM L1.1, ret=%d", ret); 3533*8e93258fSBjoern A. Zeeb if (enable) 3534*8e93258fSBjoern A. Zeeb rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1, 3535*8e93258fSBjoern A. Zeeb B_AX_L1SUB_DISABLE); 3536*8e93258fSBjoern A. Zeeb else 3537*8e93258fSBjoern A. Zeeb rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1, 3538*8e93258fSBjoern A. Zeeb B_AX_L1SUB_DISABLE); 3539*8e93258fSBjoern A. Zeeb } 3540*8e93258fSBjoern A. Zeeb } 3541*8e93258fSBjoern A. Zeeb 3542*8e93258fSBjoern A. 
Zeeb static void rtw89_pci_l1ss_cfg(struct rtw89_dev *rtwdev) 3543*8e93258fSBjoern A. Zeeb { 3544*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3545*8e93258fSBjoern A. Zeeb struct pci_dev *pdev = rtwpci->pdev; 3546*8e93258fSBjoern A. Zeeb u32 l1ss_cap_ptr, l1ss_ctrl; 3547*8e93258fSBjoern A. Zeeb 3548*8e93258fSBjoern A. Zeeb if (rtw89_pci_disable_l1ss) 3549*8e93258fSBjoern A. Zeeb return; 3550*8e93258fSBjoern A. Zeeb 3551*8e93258fSBjoern A. Zeeb l1ss_cap_ptr = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS); 3552*8e93258fSBjoern A. Zeeb if (!l1ss_cap_ptr) 3553*8e93258fSBjoern A. Zeeb return; 3554*8e93258fSBjoern A. Zeeb 3555*8e93258fSBjoern A. Zeeb pci_read_config_dword(pdev, l1ss_cap_ptr + PCI_L1SS_CTL1, &l1ss_ctrl); 3556*8e93258fSBjoern A. Zeeb 3557*8e93258fSBjoern A. Zeeb if (l1ss_ctrl & PCI_L1SS_CTL1_L1SS_MASK) 3558*8e93258fSBjoern A. Zeeb rtw89_pci_l1ss_set(rtwdev, true); 3559*8e93258fSBjoern A. Zeeb } 3560*8e93258fSBjoern A. Zeeb 3561*8e93258fSBjoern A. Zeeb static void rtw89_pci_ctrl_dma_all_pcie(struct rtw89_dev *rtwdev, u8 en) 3562*8e93258fSBjoern A. Zeeb { 3563*8e93258fSBjoern A. Zeeb const struct rtw89_pci_info *info = rtwdev->pci_info; 3564*8e93258fSBjoern A. Zeeb u32 val32; 3565*8e93258fSBjoern A. Zeeb 3566*8e93258fSBjoern A. Zeeb if (en == MAC_AX_FUNC_EN) { 3567*8e93258fSBjoern A. Zeeb val32 = B_AX_STOP_PCIEIO; 3568*8e93258fSBjoern A. Zeeb rtw89_write32_clr(rtwdev, info->dma_stop1_reg, val32); 3569*8e93258fSBjoern A. Zeeb 3570*8e93258fSBjoern A. Zeeb val32 = B_AX_TXHCI_EN | B_AX_RXHCI_EN; 3571*8e93258fSBjoern A. Zeeb rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 3572*8e93258fSBjoern A. Zeeb } else { 3573*8e93258fSBjoern A. Zeeb val32 = B_AX_STOP_PCIEIO; 3574*8e93258fSBjoern A. Zeeb rtw89_write32_set(rtwdev, info->dma_stop1_reg, val32); 3575*8e93258fSBjoern A. Zeeb 3576*8e93258fSBjoern A. Zeeb val32 = B_AX_TXHCI_EN | B_AX_RXHCI_EN; 3577*8e93258fSBjoern A. 
Zeeb rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 3578*8e93258fSBjoern A. Zeeb } 3579*8e93258fSBjoern A. Zeeb } 3580*8e93258fSBjoern A. Zeeb 3581*8e93258fSBjoern A. Zeeb static int rtw89_pci_poll_io_idle(struct rtw89_dev *rtwdev) 3582*8e93258fSBjoern A. Zeeb { 3583*8e93258fSBjoern A. Zeeb int ret = 0; 3584*8e93258fSBjoern A. Zeeb u32 sts; 3585*8e93258fSBjoern A. Zeeb u32 busy = B_AX_PCIEIO_BUSY | B_AX_PCIEIO_TX_BUSY | B_AX_PCIEIO_RX_BUSY; 3586*8e93258fSBjoern A. Zeeb 3587*8e93258fSBjoern A. Zeeb ret = read_poll_timeout_atomic(rtw89_read32, sts, (sts & busy) == 0x0, 3588*8e93258fSBjoern A. Zeeb 10, 1000, false, rtwdev, 3589*8e93258fSBjoern A. Zeeb R_AX_PCIE_DMA_BUSY1); 3590*8e93258fSBjoern A. Zeeb if (ret) { 3591*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "pci dmach busy1 0x%X\n", 3592*8e93258fSBjoern A. Zeeb rtw89_read32(rtwdev, R_AX_PCIE_DMA_BUSY1)); 3593*8e93258fSBjoern A. Zeeb return -EINVAL; 3594*8e93258fSBjoern A. Zeeb } 3595*8e93258fSBjoern A. Zeeb return ret; 3596*8e93258fSBjoern A. Zeeb } 3597*8e93258fSBjoern A. Zeeb 3598*8e93258fSBjoern A. Zeeb static int rtw89_pci_lv1rst_stop_dma(struct rtw89_dev *rtwdev) 3599*8e93258fSBjoern A. Zeeb { 3600*8e93258fSBjoern A. Zeeb u32 val, dma_rst = 0; 3601*8e93258fSBjoern A. Zeeb int ret; 3602*8e93258fSBjoern A. Zeeb 3603*8e93258fSBjoern A. Zeeb rtw89_pci_ctrl_dma_all_pcie(rtwdev, MAC_AX_FUNC_DIS); 3604*8e93258fSBjoern A. Zeeb ret = rtw89_pci_poll_io_idle(rtwdev); 3605*8e93258fSBjoern A. Zeeb if (ret) { 3606*8e93258fSBjoern A. Zeeb val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG); 3607*8e93258fSBjoern A. Zeeb rtw89_debug(rtwdev, RTW89_DBG_HCI, 3608*8e93258fSBjoern A. Zeeb "[PCIe] poll_io_idle fail, before 0x%08x: 0x%08x\n", 3609*8e93258fSBjoern A. Zeeb R_AX_DBG_ERR_FLAG, val); 3610*8e93258fSBjoern A. Zeeb if (val & B_AX_TX_STUCK || val & B_AX_PCIE_TXBD_LEN0) 3611*8e93258fSBjoern A. Zeeb dma_rst |= B_AX_HCI_TXDMA_EN; 3612*8e93258fSBjoern A. Zeeb if (val & B_AX_RX_STUCK) 3613*8e93258fSBjoern A. 
Zeeb dma_rst |= B_AX_HCI_RXDMA_EN; 3614*8e93258fSBjoern A. Zeeb val = rtw89_read32(rtwdev, R_AX_HCI_FUNC_EN); 3615*8e93258fSBjoern A. Zeeb rtw89_write32(rtwdev, R_AX_HCI_FUNC_EN, val & ~dma_rst); 3616*8e93258fSBjoern A. Zeeb rtw89_write32(rtwdev, R_AX_HCI_FUNC_EN, val | dma_rst); 3617*8e93258fSBjoern A. Zeeb ret = rtw89_pci_poll_io_idle(rtwdev); 3618*8e93258fSBjoern A. Zeeb val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG); 3619*8e93258fSBjoern A. Zeeb rtw89_debug(rtwdev, RTW89_DBG_HCI, 3620*8e93258fSBjoern A. Zeeb "[PCIe] poll_io_idle fail, after 0x%08x: 0x%08x\n", 3621*8e93258fSBjoern A. Zeeb R_AX_DBG_ERR_FLAG, val); 3622*8e93258fSBjoern A. Zeeb } 3623*8e93258fSBjoern A. Zeeb 3624*8e93258fSBjoern A. Zeeb return ret; 3625*8e93258fSBjoern A. Zeeb } 3626*8e93258fSBjoern A. Zeeb 3627*8e93258fSBjoern A. Zeeb static void rtw89_pci_ctrl_hci_dma_en(struct rtw89_dev *rtwdev, u8 en) 3628*8e93258fSBjoern A. Zeeb { 3629*8e93258fSBjoern A. Zeeb u32 val32; 3630*8e93258fSBjoern A. Zeeb 3631*8e93258fSBjoern A. Zeeb if (en == MAC_AX_FUNC_EN) { 3632*8e93258fSBjoern A. Zeeb val32 = B_AX_HCI_TXDMA_EN | B_AX_HCI_RXDMA_EN; 3633*8e93258fSBjoern A. Zeeb rtw89_write32_set(rtwdev, R_AX_HCI_FUNC_EN, val32); 3634*8e93258fSBjoern A. Zeeb } else { 3635*8e93258fSBjoern A. Zeeb val32 = B_AX_HCI_TXDMA_EN | B_AX_HCI_RXDMA_EN; 3636*8e93258fSBjoern A. Zeeb rtw89_write32_clr(rtwdev, R_AX_HCI_FUNC_EN, val32); 3637*8e93258fSBjoern A. Zeeb } 3638*8e93258fSBjoern A. Zeeb } 3639*8e93258fSBjoern A. Zeeb 3640*8e93258fSBjoern A. Zeeb static int rtw89_pci_rst_bdram(struct rtw89_dev *rtwdev) 3641*8e93258fSBjoern A. Zeeb { 3642*8e93258fSBjoern A. Zeeb int ret = 0; 3643*8e93258fSBjoern A. Zeeb u32 val32, sts; 3644*8e93258fSBjoern A. Zeeb 3645*8e93258fSBjoern A. Zeeb val32 = B_AX_RST_BDRAM; 3646*8e93258fSBjoern A. Zeeb rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, val32); 3647*8e93258fSBjoern A. Zeeb 3648*8e93258fSBjoern A. Zeeb ret = read_poll_timeout_atomic(rtw89_read32, sts, 3649*8e93258fSBjoern A. 
Zeeb (sts & B_AX_RST_BDRAM) == 0x0, 1, 100, 3650*8e93258fSBjoern A. Zeeb true, rtwdev, R_AX_PCIE_INIT_CFG1); 3651*8e93258fSBjoern A. Zeeb return ret; 3652*8e93258fSBjoern A. Zeeb } 3653*8e93258fSBjoern A. Zeeb 3654*8e93258fSBjoern A. Zeeb static int rtw89_pci_lv1rst_start_dma(struct rtw89_dev *rtwdev) 3655*8e93258fSBjoern A. Zeeb { 3656*8e93258fSBjoern A. Zeeb u32 ret; 3657*8e93258fSBjoern A. Zeeb 3658*8e93258fSBjoern A. Zeeb rtw89_pci_ctrl_hci_dma_en(rtwdev, MAC_AX_FUNC_DIS); 3659*8e93258fSBjoern A. Zeeb rtw89_pci_ctrl_hci_dma_en(rtwdev, MAC_AX_FUNC_EN); 3660*8e93258fSBjoern A. Zeeb rtw89_pci_clr_idx_all(rtwdev); 3661*8e93258fSBjoern A. Zeeb 3662*8e93258fSBjoern A. Zeeb ret = rtw89_pci_rst_bdram(rtwdev); 3663*8e93258fSBjoern A. Zeeb if (ret) 3664*8e93258fSBjoern A. Zeeb return ret; 3665*8e93258fSBjoern A. Zeeb 3666*8e93258fSBjoern A. Zeeb rtw89_pci_ctrl_dma_all_pcie(rtwdev, MAC_AX_FUNC_EN); 3667*8e93258fSBjoern A. Zeeb return ret; 3668*8e93258fSBjoern A. Zeeb } 3669*8e93258fSBjoern A. Zeeb 3670*8e93258fSBjoern A. Zeeb static int rtw89_pci_ops_mac_lv1_recovery(struct rtw89_dev *rtwdev, 3671*8e93258fSBjoern A. Zeeb enum rtw89_lv1_rcvy_step step) 3672*8e93258fSBjoern A. Zeeb { 3673*8e93258fSBjoern A. Zeeb int ret; 3674*8e93258fSBjoern A. Zeeb 3675*8e93258fSBjoern A. Zeeb switch (step) { 3676*8e93258fSBjoern A. Zeeb case RTW89_LV1_RCVY_STEP_1: 3677*8e93258fSBjoern A. Zeeb ret = rtw89_pci_lv1rst_stop_dma(rtwdev); 3678*8e93258fSBjoern A. Zeeb if (ret) 3679*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "lv1 rcvy pci stop dma fail\n"); 3680*8e93258fSBjoern A. Zeeb 3681*8e93258fSBjoern A. Zeeb break; 3682*8e93258fSBjoern A. Zeeb 3683*8e93258fSBjoern A. Zeeb case RTW89_LV1_RCVY_STEP_2: 3684*8e93258fSBjoern A. Zeeb ret = rtw89_pci_lv1rst_start_dma(rtwdev); 3685*8e93258fSBjoern A. Zeeb if (ret) 3686*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "lv1 rcvy pci start dma fail\n"); 3687*8e93258fSBjoern A. Zeeb break; 3688*8e93258fSBjoern A. Zeeb 3689*8e93258fSBjoern A. 
Zeeb default: 3690*8e93258fSBjoern A. Zeeb return -EINVAL; 3691*8e93258fSBjoern A. Zeeb } 3692*8e93258fSBjoern A. Zeeb 3693*8e93258fSBjoern A. Zeeb return ret; 3694*8e93258fSBjoern A. Zeeb } 3695*8e93258fSBjoern A. Zeeb 3696*8e93258fSBjoern A. Zeeb static void rtw89_pci_ops_dump_err_status(struct rtw89_dev *rtwdev) 3697*8e93258fSBjoern A. Zeeb { 3698*8e93258fSBjoern A. Zeeb rtw89_info(rtwdev, "R_AX_RPQ_RXBD_IDX =0x%08x\n", 3699*8e93258fSBjoern A. Zeeb rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX)); 3700*8e93258fSBjoern A. Zeeb rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n", 3701*8e93258fSBjoern A. Zeeb rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG)); 3702*8e93258fSBjoern A. Zeeb rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n", 3703*8e93258fSBjoern A. Zeeb rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG)); 3704*8e93258fSBjoern A. Zeeb } 3705*8e93258fSBjoern A. Zeeb 3706*8e93258fSBjoern A. Zeeb static int rtw89_pci_napi_poll(struct napi_struct *napi, int budget) 3707*8e93258fSBjoern A. Zeeb { 3708*8e93258fSBjoern A. Zeeb struct rtw89_dev *rtwdev = container_of(napi, struct rtw89_dev, napi); 3709*8e93258fSBjoern A. Zeeb struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; 3710*8e93258fSBjoern A. Zeeb unsigned long flags; 3711*8e93258fSBjoern A. Zeeb int work_done; 3712*8e93258fSBjoern A. Zeeb 3713*8e93258fSBjoern A. Zeeb rtwdev->napi_budget_countdown = budget; 3714*8e93258fSBjoern A. Zeeb 3715*8e93258fSBjoern A. Zeeb rtw89_pci_clear_isr0(rtwdev, B_AX_RPQDMA_INT | B_AX_RPQBD_FULL_INT); 3716*8e93258fSBjoern A. Zeeb work_done = rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown); 3717*8e93258fSBjoern A. Zeeb if (work_done == budget) 3718*8e93258fSBjoern A. Zeeb return budget; 3719*8e93258fSBjoern A. Zeeb 3720*8e93258fSBjoern A. Zeeb rtw89_pci_clear_isr0(rtwdev, B_AX_RXP1DMA_INT | B_AX_RXDMA_INT | B_AX_RDU_INT); 3721*8e93258fSBjoern A. Zeeb work_done += rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown); 3722*8e93258fSBjoern A. 
Zeeb if (work_done < budget && napi_complete_done(napi, work_done)) { 3723*8e93258fSBjoern A. Zeeb spin_lock_irqsave(&rtwpci->irq_lock, flags); 3724*8e93258fSBjoern A. Zeeb if (likely(rtwpci->running)) 3725*8e93258fSBjoern A. Zeeb rtw89_chip_enable_intr(rtwdev, rtwpci); 3726*8e93258fSBjoern A. Zeeb spin_unlock_irqrestore(&rtwpci->irq_lock, flags); 3727*8e93258fSBjoern A. Zeeb } 3728*8e93258fSBjoern A. Zeeb 3729*8e93258fSBjoern A. Zeeb return work_done; 3730*8e93258fSBjoern A. Zeeb } 3731*8e93258fSBjoern A. Zeeb 3732*8e93258fSBjoern A. Zeeb static int __maybe_unused rtw89_pci_suspend(struct device *dev) 3733*8e93258fSBjoern A. Zeeb { 3734*8e93258fSBjoern A. Zeeb struct ieee80211_hw *hw = dev_get_drvdata(dev); 3735*8e93258fSBjoern A. Zeeb struct rtw89_dev *rtwdev = hw->priv; 3736*8e93258fSBjoern A. Zeeb enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 3737*8e93258fSBjoern A. Zeeb 3738*8e93258fSBjoern A. Zeeb rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); 3739*8e93258fSBjoern A. Zeeb rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST); 3740*8e93258fSBjoern A. Zeeb rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); 3741*8e93258fSBjoern A. Zeeb if (chip_id == RTL8852A || chip_id == RTL8852B) { 3742*8e93258fSBjoern A. Zeeb rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, 3743*8e93258fSBjoern A. Zeeb B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 3744*8e93258fSBjoern A. Zeeb rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, 3745*8e93258fSBjoern A. Zeeb B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG); 3746*8e93258fSBjoern A. Zeeb } else { 3747*8e93258fSBjoern A. Zeeb rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, 3748*8e93258fSBjoern A. Zeeb B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN); 3749*8e93258fSBjoern A. Zeeb } 3750*8e93258fSBjoern A. Zeeb 3751*8e93258fSBjoern A. Zeeb return 0; 3752*8e93258fSBjoern A. Zeeb } 3753*8e93258fSBjoern A. Zeeb 3754*8e93258fSBjoern A. 
Zeeb static void rtw89_pci_l2_hci_ldo(struct rtw89_dev *rtwdev) 3755*8e93258fSBjoern A. Zeeb { 3756*8e93258fSBjoern A. Zeeb if (rtwdev->chip->chip_id == RTL8852C) 3757*8e93258fSBjoern A. Zeeb return; 3758*8e93258fSBjoern A. Zeeb 3759*8e93258fSBjoern A. Zeeb /* Hardware need write the reg twice to ensure the setting work */ 3760*8e93258fSBjoern A. Zeeb rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE, 3761*8e93258fSBjoern A. Zeeb RTW89_PCIE_BIT_CFG_RST_MSTATE); 3762*8e93258fSBjoern A. Zeeb rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE, 3763*8e93258fSBjoern A. Zeeb RTW89_PCIE_BIT_CFG_RST_MSTATE); 3764*8e93258fSBjoern A. Zeeb } 3765*8e93258fSBjoern A. Zeeb 3766*8e93258fSBjoern A. Zeeb static int __maybe_unused rtw89_pci_resume(struct device *dev) 3767*8e93258fSBjoern A. Zeeb { 3768*8e93258fSBjoern A. Zeeb struct ieee80211_hw *hw = dev_get_drvdata(dev); 3769*8e93258fSBjoern A. Zeeb struct rtw89_dev *rtwdev = hw->priv; 3770*8e93258fSBjoern A. Zeeb enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; 3771*8e93258fSBjoern A. Zeeb 3772*8e93258fSBjoern A. Zeeb rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); 3773*8e93258fSBjoern A. Zeeb rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST); 3774*8e93258fSBjoern A. Zeeb rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); 3775*8e93258fSBjoern A. Zeeb if (chip_id == RTL8852A || chip_id == RTL8852B) { 3776*8e93258fSBjoern A. Zeeb rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, 3777*8e93258fSBjoern A. Zeeb B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); 3778*8e93258fSBjoern A. Zeeb rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, 3779*8e93258fSBjoern A. Zeeb B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG); 3780*8e93258fSBjoern A. Zeeb } else { 3781*8e93258fSBjoern A. Zeeb rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1, 3782*8e93258fSBjoern A. Zeeb B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN); 3783*8e93258fSBjoern A. Zeeb rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, 3784*8e93258fSBjoern A. 
Zeeb B_AX_SEL_REQ_ENTR_L1); 3785*8e93258fSBjoern A. Zeeb } 3786*8e93258fSBjoern A. Zeeb rtw89_pci_l2_hci_ldo(rtwdev); 3787*8e93258fSBjoern A. Zeeb rtw89_pci_filter_out(rtwdev); 3788*8e93258fSBjoern A. Zeeb rtw89_pci_link_cfg(rtwdev); 3789*8e93258fSBjoern A. Zeeb rtw89_pci_l1ss_cfg(rtwdev); 3790*8e93258fSBjoern A. Zeeb 3791*8e93258fSBjoern A. Zeeb return 0; 3792*8e93258fSBjoern A. Zeeb } 3793*8e93258fSBjoern A. Zeeb 3794*8e93258fSBjoern A. Zeeb SIMPLE_DEV_PM_OPS(rtw89_pm_ops, rtw89_pci_suspend, rtw89_pci_resume); 3795*8e93258fSBjoern A. Zeeb EXPORT_SYMBOL(rtw89_pm_ops); 3796*8e93258fSBjoern A. Zeeb 3797*8e93258fSBjoern A. Zeeb static const struct rtw89_hci_ops rtw89_pci_ops = { 3798*8e93258fSBjoern A. Zeeb .tx_write = rtw89_pci_ops_tx_write, 3799*8e93258fSBjoern A. Zeeb .tx_kick_off = rtw89_pci_ops_tx_kick_off, 3800*8e93258fSBjoern A. Zeeb .flush_queues = rtw89_pci_ops_flush_queues, 3801*8e93258fSBjoern A. Zeeb .reset = rtw89_pci_ops_reset, 3802*8e93258fSBjoern A. Zeeb .start = rtw89_pci_ops_start, 3803*8e93258fSBjoern A. Zeeb .stop = rtw89_pci_ops_stop, 3804*8e93258fSBjoern A. Zeeb .pause = rtw89_pci_ops_pause, 3805*8e93258fSBjoern A. Zeeb .switch_mode = rtw89_pci_ops_switch_mode, 3806*8e93258fSBjoern A. Zeeb .recalc_int_mit = rtw89_pci_recalc_int_mit, 3807*8e93258fSBjoern A. Zeeb 3808*8e93258fSBjoern A. Zeeb .read8 = rtw89_pci_ops_read8, 3809*8e93258fSBjoern A. Zeeb .read16 = rtw89_pci_ops_read16, 3810*8e93258fSBjoern A. Zeeb .read32 = rtw89_pci_ops_read32, 3811*8e93258fSBjoern A. Zeeb .write8 = rtw89_pci_ops_write8, 3812*8e93258fSBjoern A. Zeeb .write16 = rtw89_pci_ops_write16, 3813*8e93258fSBjoern A. Zeeb .write32 = rtw89_pci_ops_write32, 3814*8e93258fSBjoern A. Zeeb 3815*8e93258fSBjoern A. Zeeb .mac_pre_init = rtw89_pci_ops_mac_pre_init, 3816*8e93258fSBjoern A. Zeeb .mac_post_init = rtw89_pci_ops_mac_post_init, 3817*8e93258fSBjoern A. Zeeb .deinit = rtw89_pci_ops_deinit, 3818*8e93258fSBjoern A. Zeeb 3819*8e93258fSBjoern A. 
Zeeb .check_and_reclaim_tx_resource = rtw89_pci_check_and_reclaim_tx_resource, 3820*8e93258fSBjoern A. Zeeb .mac_lv1_rcvy = rtw89_pci_ops_mac_lv1_recovery, 3821*8e93258fSBjoern A. Zeeb .dump_err_status = rtw89_pci_ops_dump_err_status, 3822*8e93258fSBjoern A. Zeeb .napi_poll = rtw89_pci_napi_poll, 3823*8e93258fSBjoern A. Zeeb 3824*8e93258fSBjoern A. Zeeb .recovery_start = rtw89_pci_ops_recovery_start, 3825*8e93258fSBjoern A. Zeeb .recovery_complete = rtw89_pci_ops_recovery_complete, 3826*8e93258fSBjoern A. Zeeb }; 3827*8e93258fSBjoern A. Zeeb 3828*8e93258fSBjoern A. Zeeb int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) 3829*8e93258fSBjoern A. Zeeb { 3830*8e93258fSBjoern A. Zeeb struct rtw89_dev *rtwdev; 3831*8e93258fSBjoern A. Zeeb const struct rtw89_driver_info *info; 3832*8e93258fSBjoern A. Zeeb const struct rtw89_pci_info *pci_info; 3833*8e93258fSBjoern A. Zeeb int ret; 3834*8e93258fSBjoern A. Zeeb 3835*8e93258fSBjoern A. Zeeb info = (const struct rtw89_driver_info *)id->driver_data; 3836*8e93258fSBjoern A. Zeeb 3837*8e93258fSBjoern A. Zeeb rtwdev = rtw89_alloc_ieee80211_hw(&pdev->dev, 3838*8e93258fSBjoern A. Zeeb sizeof(struct rtw89_pci), 3839*8e93258fSBjoern A. Zeeb info->chip); 3840*8e93258fSBjoern A. Zeeb if (!rtwdev) { 3841*8e93258fSBjoern A. Zeeb dev_err(&pdev->dev, "failed to allocate hw\n"); 3842*8e93258fSBjoern A. Zeeb return -ENOMEM; 3843*8e93258fSBjoern A. Zeeb } 3844*8e93258fSBjoern A. Zeeb 3845*8e93258fSBjoern A. Zeeb pci_info = info->bus.pci; 3846*8e93258fSBjoern A. Zeeb 3847*8e93258fSBjoern A. Zeeb rtwdev->pci_info = info->bus.pci; 3848*8e93258fSBjoern A. Zeeb rtwdev->hci.ops = &rtw89_pci_ops; 3849*8e93258fSBjoern A. Zeeb rtwdev->hci.type = RTW89_HCI_TYPE_PCIE; 3850*8e93258fSBjoern A. Zeeb rtwdev->hci.rpwm_addr = pci_info->rpwm_addr; 3851*8e93258fSBjoern A. Zeeb rtwdev->hci.cpwm_addr = pci_info->cpwm_addr; 3852*8e93258fSBjoern A. Zeeb 3853*8e93258fSBjoern A. 
Zeeb SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev); 3854*8e93258fSBjoern A. Zeeb 3855*8e93258fSBjoern A. Zeeb ret = rtw89_core_init(rtwdev); 3856*8e93258fSBjoern A. Zeeb if (ret) { 3857*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "failed to initialise core\n"); 3858*8e93258fSBjoern A. Zeeb goto err_release_hw; 3859*8e93258fSBjoern A. Zeeb } 3860*8e93258fSBjoern A. Zeeb 3861*8e93258fSBjoern A. Zeeb ret = rtw89_pci_claim_device(rtwdev, pdev); 3862*8e93258fSBjoern A. Zeeb if (ret) { 3863*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "failed to claim pci device\n"); 3864*8e93258fSBjoern A. Zeeb goto err_core_deinit; 3865*8e93258fSBjoern A. Zeeb } 3866*8e93258fSBjoern A. Zeeb 3867*8e93258fSBjoern A. Zeeb ret = rtw89_pci_setup_resource(rtwdev, pdev); 3868*8e93258fSBjoern A. Zeeb if (ret) { 3869*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "failed to setup pci resource\n"); 3870*8e93258fSBjoern A. Zeeb goto err_declaim_pci; 3871*8e93258fSBjoern A. Zeeb } 3872*8e93258fSBjoern A. Zeeb 3873*8e93258fSBjoern A. Zeeb ret = rtw89_chip_info_setup(rtwdev); 3874*8e93258fSBjoern A. Zeeb if (ret) { 3875*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "failed to setup chip information\n"); 3876*8e93258fSBjoern A. Zeeb goto err_clear_resource; 3877*8e93258fSBjoern A. Zeeb } 3878*8e93258fSBjoern A. Zeeb 3879*8e93258fSBjoern A. Zeeb rtw89_pci_filter_out(rtwdev); 3880*8e93258fSBjoern A. Zeeb rtw89_pci_link_cfg(rtwdev); 3881*8e93258fSBjoern A. Zeeb rtw89_pci_l1ss_cfg(rtwdev); 3882*8e93258fSBjoern A. Zeeb 3883*8e93258fSBjoern A. Zeeb ret = rtw89_core_register(rtwdev); 3884*8e93258fSBjoern A. Zeeb if (ret) { 3885*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "failed to register core\n"); 3886*8e93258fSBjoern A. Zeeb goto err_clear_resource; 3887*8e93258fSBjoern A. Zeeb } 3888*8e93258fSBjoern A. Zeeb 3889*8e93258fSBjoern A. Zeeb rtw89_core_napi_init(rtwdev); 3890*8e93258fSBjoern A. Zeeb 3891*8e93258fSBjoern A. Zeeb ret = rtw89_pci_request_irq(rtwdev, pdev); 3892*8e93258fSBjoern A. 
Zeeb if (ret) { 3893*8e93258fSBjoern A. Zeeb rtw89_err(rtwdev, "failed to request pci irq\n"); 3894*8e93258fSBjoern A. Zeeb goto err_unregister; 3895*8e93258fSBjoern A. Zeeb } 3896*8e93258fSBjoern A. Zeeb 3897*8e93258fSBjoern A. Zeeb return 0; 3898*8e93258fSBjoern A. Zeeb 3899*8e93258fSBjoern A. Zeeb err_unregister: 3900*8e93258fSBjoern A. Zeeb rtw89_core_napi_deinit(rtwdev); 3901*8e93258fSBjoern A. Zeeb rtw89_core_unregister(rtwdev); 3902*8e93258fSBjoern A. Zeeb err_clear_resource: 3903*8e93258fSBjoern A. Zeeb rtw89_pci_clear_resource(rtwdev, pdev); 3904*8e93258fSBjoern A. Zeeb err_declaim_pci: 3905*8e93258fSBjoern A. Zeeb rtw89_pci_declaim_device(rtwdev, pdev); 3906*8e93258fSBjoern A. Zeeb err_core_deinit: 3907*8e93258fSBjoern A. Zeeb rtw89_core_deinit(rtwdev); 3908*8e93258fSBjoern A. Zeeb err_release_hw: 3909*8e93258fSBjoern A. Zeeb rtw89_free_ieee80211_hw(rtwdev); 3910*8e93258fSBjoern A. Zeeb 3911*8e93258fSBjoern A. Zeeb return ret; 3912*8e93258fSBjoern A. Zeeb } 3913*8e93258fSBjoern A. Zeeb EXPORT_SYMBOL(rtw89_pci_probe); 3914*8e93258fSBjoern A. Zeeb 3915*8e93258fSBjoern A. Zeeb void rtw89_pci_remove(struct pci_dev *pdev) 3916*8e93258fSBjoern A. Zeeb { 3917*8e93258fSBjoern A. Zeeb struct ieee80211_hw *hw = pci_get_drvdata(pdev); 3918*8e93258fSBjoern A. Zeeb struct rtw89_dev *rtwdev; 3919*8e93258fSBjoern A. Zeeb 3920*8e93258fSBjoern A. Zeeb rtwdev = hw->priv; 3921*8e93258fSBjoern A. Zeeb 3922*8e93258fSBjoern A. Zeeb rtw89_pci_free_irq(rtwdev, pdev); 3923*8e93258fSBjoern A. Zeeb rtw89_core_napi_deinit(rtwdev); 3924*8e93258fSBjoern A. Zeeb rtw89_core_unregister(rtwdev); 3925*8e93258fSBjoern A. Zeeb rtw89_pci_clear_resource(rtwdev, pdev); 3926*8e93258fSBjoern A. Zeeb rtw89_pci_declaim_device(rtwdev, pdev); 3927*8e93258fSBjoern A. Zeeb rtw89_core_deinit(rtwdev); 3928*8e93258fSBjoern A. Zeeb rtw89_free_ieee80211_hw(rtwdev); 3929*8e93258fSBjoern A. Zeeb } 3930*8e93258fSBjoern A. Zeeb EXPORT_SYMBOL(rtw89_pci_remove); 3931*8e93258fSBjoern A. 

MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek 802.11ax wireless PCI driver");
MODULE_LICENSE("Dual BSD/GPL");
#if defined(__FreeBSD__)
/* FreeBSD-only module metadata: version and linuxkpi dependencies. */
MODULE_VERSION(rtw89_pci, 1);
MODULE_DEPEND(rtw89_pci, linuxkpi, 1, 1, 1);
MODULE_DEPEND(rtw89_pci, linuxkpi_wlan, 1, 1, 1);
#ifdef CONFIG_RTW89_DEBUGFS
MODULE_DEPEND(rtw89_pci, debugfs, 1, 1, 1);
#endif
#endif