Lines Matching +full:dma +full:poll +full:cnt
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright(c) 2018-2019 Realtek Corporation
55 return skb->priority; in rtw_pci_get_tx_qsel()
61 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_read8()
64 return readb(rtwpci->mmap + addr); in rtw_pci_read8()
68 val = bus_read_1((struct resource *)rtwpci->mmap, addr); in rtw_pci_read8()
69 rtw_dbg(rtwdev, RTW_DBG_IO_RW, "R08 (%#010x) -> %#04x\n", addr, val); in rtw_pci_read8()
76 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_read16()
79 return readw(rtwpci->mmap + addr); in rtw_pci_read16()
83 val = bus_read_2((struct resource *)rtwpci->mmap, addr); in rtw_pci_read16()
84 rtw_dbg(rtwdev, RTW_DBG_IO_RW, "R16 (%#010x) -> %#06x\n", addr, val); in rtw_pci_read16()
91 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_read32()
94 return readl(rtwpci->mmap + addr); in rtw_pci_read32()
98 val = bus_read_4((struct resource *)rtwpci->mmap, addr); in rtw_pci_read32()
99 rtw_dbg(rtwdev, RTW_DBG_IO_RW, "R32 (%#010x) -> %#010x\n", addr, val); in rtw_pci_read32()
106 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_write8()
109 writeb(val, rtwpci->mmap + addr); in rtw_pci_write8()
111 rtw_dbg(rtwdev, RTW_DBG_IO_RW, "W08 (%#010x) <- %#04x\n", addr, val); in rtw_pci_write8()
112 return (bus_write_1((struct resource *)rtwpci->mmap, addr, val)); in rtw_pci_write8()
118 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_write16()
121 writew(val, rtwpci->mmap + addr); in rtw_pci_write16()
123 rtw_dbg(rtwdev, RTW_DBG_IO_RW, "W16 (%#010x) <- %#06x\n", addr, val); in rtw_pci_write16()
124 return (bus_write_2((struct resource *)rtwpci->mmap, addr, val)); in rtw_pci_write16()
130 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_write32()
133 writel(val, rtwpci->mmap + addr); in rtw_pci_write32()
135 rtw_dbg(rtwdev, RTW_DBG_IO_RW, "W32 (%#010x) <- %#010x\n", addr, val); in rtw_pci_write32()
136 return (bus_write_4((struct resource *)rtwpci->mmap, addr, val)); in rtw_pci_write32()
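Note: only matching lines are shown, so function bodies appear with gaps. The accessors above pair a Linux path (readb/readw/readl and writeb/writew/writel on an ioremap()ed BAR) with a FreeBSD path (bus_read_*/bus_write_* on a struct resource); the preprocessor guards that separate the two are among the elided lines. A minimal sketch of the Linux-side pattern, names hypothetical:

#include <linux/io.h>
#include <linux/types.h>

/* `mmap` is assumed to be the ioremap()ed BAR (void __iomem *),
 * playing the role of rtwpci->mmap above. */
static u32 example_read32(void __iomem *mmap, u32 addr)
{
	return readl(mmap + addr);	/* 32-bit MMIO read */
}

static void example_write32(void __iomem *mmap, u32 addr, u32 val)
{
	writel(val, mmap + addr);	/* 32-bit MMIO write */
}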
143 struct pci_dev *pdev = to_pci_dev(rtwdev->dev); in rtw_pci_free_tx_ring_skbs()
146 dma_addr_t dma; in rtw_pci_free_tx_ring_skbs() local
149 skb_queue_walk_safe(&tx_ring->queue, skb, tmp) { in rtw_pci_free_tx_ring_skbs()
150 __skb_unlink(skb, &tx_ring->queue); in rtw_pci_free_tx_ring_skbs()
152 dma = tx_data->dma; in rtw_pci_free_tx_ring_skbs()
154 dma_unmap_single(&pdev->dev, dma, skb->len, DMA_TO_DEVICE); in rtw_pci_free_tx_ring_skbs()
162 struct pci_dev *pdev = to_pci_dev(rtwdev->dev); in rtw_pci_free_tx_ring()
163 u8 *head = tx_ring->r.head; in rtw_pci_free_tx_ring()
164 u32 len = tx_ring->r.len; in rtw_pci_free_tx_ring()
165 int ring_sz = len * tx_ring->r.desc_size; in rtw_pci_free_tx_ring()
170 dma_free_coherent(&pdev->dev, ring_sz, head, tx_ring->r.dma); in rtw_pci_free_tx_ring()
171 tx_ring->r.head = NULL; in rtw_pci_free_tx_ring()
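rtw_pci_free_tx_ring() must hand dma_free_coherent() the same size, CPU pointer, and bus address that dma_alloc_coherent() returned at ring init, which is why the ring keeps head, dma, len, and desc_size together. A sketch of that pairing, names hypothetical:

#include <linux/dma-mapping.h>

struct example_ring {
	u8 *head;	/* CPU address from dma_alloc_coherent() */
	dma_addr_t dma;	/* matching bus address */
	u32 len;	/* number of descriptors */
	u32 desc_size;	/* bytes per descriptor */
};

static int example_ring_alloc(struct device *dev, struct example_ring *r)
{
	int ring_sz = r->len * r->desc_size;

	r->head = dma_alloc_coherent(dev, ring_sz, &r->dma, GFP_KERNEL);
	return r->head ? 0 : -ENOMEM;
}

static void example_ring_free(struct device *dev, struct example_ring *r)
{
	dma_free_coherent(dev, r->len * r->desc_size, r->head, r->dma);
	r->head = NULL;
}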
177 struct pci_dev *pdev = to_pci_dev(rtwdev->dev); in rtw_pci_free_rx_ring_skbs()
180 dma_addr_t dma; in rtw_pci_free_rx_ring_skbs() local
183 for (i = 0; i < rx_ring->r.len; i++) { in rtw_pci_free_rx_ring_skbs()
184 skb = rx_ring->buf[i]; in rtw_pci_free_rx_ring_skbs()
188 dma = *((dma_addr_t *)skb->cb); in rtw_pci_free_rx_ring_skbs()
189 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE); in rtw_pci_free_rx_ring_skbs()
191 rx_ring->buf[i] = NULL; in rtw_pci_free_rx_ring_skbs()
198 struct pci_dev *pdev = to_pci_dev(rtwdev->dev); in rtw_pci_free_rx_ring()
199 u8 *head = rx_ring->r.head; in rtw_pci_free_rx_ring()
200 int ring_sz = rx_ring->r.desc_size * rx_ring->r.len; in rtw_pci_free_rx_ring()
204 dma_free_coherent(&pdev->dev, ring_sz, head, rx_ring->r.dma); in rtw_pci_free_rx_ring()
209 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_free_trx_ring()
215 tx_ring = &rtwpci->tx_rings[i]; in rtw_pci_free_trx_ring()
220 rx_ring = &rtwpci->rx_rings[i]; in rtw_pci_free_trx_ring()
229 struct pci_dev *pdev = to_pci_dev(rtwdev->dev); in rtw_pci_init_tx_ring()
231 dma_addr_t dma; in rtw_pci_init_tx_ring() local
236 return -EINVAL; in rtw_pci_init_tx_ring()
239 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); in rtw_pci_init_tx_ring()
242 return -ENOMEM; in rtw_pci_init_tx_ring()
245 skb_queue_head_init(&tx_ring->queue); in rtw_pci_init_tx_ring()
246 tx_ring->r.head = head; in rtw_pci_init_tx_ring()
247 tx_ring->r.dma = dma; in rtw_pci_init_tx_ring()
248 tx_ring->r.len = len; in rtw_pci_init_tx_ring()
249 tx_ring->r.desc_size = desc_size; in rtw_pci_init_tx_ring()
250 tx_ring->r.wp = 0; in rtw_pci_init_tx_ring()
251 tx_ring->r.rp = 0; in rtw_pci_init_tx_ring()
260 struct pci_dev *pdev = to_pci_dev(rtwdev->dev); in rtw_pci_reset_rx_desc()
263 dma_addr_t dma; in rtw_pci_reset_rx_desc() local
266 return -EINVAL; in rtw_pci_reset_rx_desc()
268 dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE); in rtw_pci_reset_rx_desc()
269 if (dma_mapping_error(&pdev->dev, dma)) in rtw_pci_reset_rx_desc()
270 return -EBUSY; in rtw_pci_reset_rx_desc()
272 *((dma_addr_t *)skb->cb) = dma; in rtw_pci_reset_rx_desc()
273 buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head + in rtw_pci_reset_rx_desc()
276 buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE); in rtw_pci_reset_rx_desc()
277 buf_desc->dma = cpu_to_le32(dma); in rtw_pci_reset_rx_desc()
282 static void rtw_pci_sync_rx_desc_device(struct rtw_dev *rtwdev, dma_addr_t dma, in rtw_pci_sync_rx_desc_device() argument
286 struct device *dev = rtwdev->dev; in rtw_pci_sync_rx_desc_device()
290 dma_sync_single_for_device(dev, dma, buf_sz, DMA_FROM_DEVICE); in rtw_pci_sync_rx_desc_device()
292 buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head + in rtw_pci_sync_rx_desc_device()
295 buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE); in rtw_pci_sync_rx_desc_device()
296 buf_desc->dma = cpu_to_le32(dma); in rtw_pci_sync_rx_desc_device()
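rtw_pci_reset_rx_desc() maps each RX skb for device writes, stashes the handle in skb->cb so the free paths above can unmap it later, and publishes the address through a little-endian buffer descriptor; rtw_pci_sync_rx_desc_device() re-arms an already-mapped buffer with dma_sync_single_for_device() instead of remapping. A sketch of the map-and-publish half, descriptor layout hypothetical:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct example_rx_bd {		/* hypothetical descriptor layout */
	__le16 buf_size;
	__le16 rsvd;
	__le32 dma;
};

static int example_rx_map(struct device *dev, struct sk_buff *skb,
			  struct example_rx_bd *bd, u16 buf_sz)
{
	dma_addr_t dma;

	dma = dma_map_single(dev, skb->data, buf_sz, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -EBUSY;

	*((dma_addr_t *)skb->cb) = dma;	/* kept for the later unmap */
	bd->buf_size = cpu_to_le16(buf_sz);
	bd->dma = cpu_to_le32(dma);	/* descriptors hold 32-bit addresses */
	return 0;
}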
303 struct pci_dev *pdev = to_pci_dev(rtwdev->dev); in rtw_pci_init_rx_ring()
305 dma_addr_t dma; in rtw_pci_init_rx_ring() local
312 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); in rtw_pci_init_rx_ring()
315 return -ENOMEM; in rtw_pci_init_rx_ring()
317 rx_ring->r.head = head; in rtw_pci_init_rx_ring()
323 ret = -ENOMEM; in rtw_pci_init_rx_ring()
327 memset(skb->data, 0, buf_sz); in rtw_pci_init_rx_ring()
328 rx_ring->buf[i] = skb; in rtw_pci_init_rx_ring()
337 rx_ring->r.dma = dma; in rtw_pci_init_rx_ring()
338 rx_ring->r.len = len; in rtw_pci_init_rx_ring()
339 rx_ring->r.desc_size = desc_size; in rtw_pci_init_rx_ring()
340 rx_ring->r.wp = 0; in rtw_pci_init_rx_ring()
341 rx_ring->r.rp = 0; in rtw_pci_init_rx_ring()
347 skb = rx_ring->buf[i]; in rtw_pci_init_rx_ring()
350 dma = *((dma_addr_t *)skb->cb); in rtw_pci_init_rx_ring()
351 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE); in rtw_pci_init_rx_ring()
353 rx_ring->buf[i] = NULL; in rtw_pci_init_rx_ring()
355 dma_free_coherent(&pdev->dev, ring_sz, head, dma); in rtw_pci_init_rx_ring()
364 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_init_trx_ring()
367 const struct rtw_chip_info *chip = rtwdev->chip; in rtw_pci_init_trx_ring()
373 tx_desc_size = chip->tx_buf_desc_sz; in rtw_pci_init_trx_ring()
376 tx_ring = &rtwpci->tx_rings[i]; in rtw_pci_init_trx_ring()
383 rx_desc_size = chip->rx_buf_desc_sz; in rtw_pci_init_trx_ring()
386 rx_ring = &rtwpci->rx_rings[j]; in rtw_pci_init_trx_ring()
398 tx_ring = &rtwpci->tx_rings[i]; in rtw_pci_init_trx_ring()
404 rx_ring = &rtwpci->rx_rings[j]; in rtw_pci_init_trx_ring()
418 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_init()
421 rtwpci->irq_mask[0] = IMR_HIGHDOK | in rtw_pci_init()
431 rtwpci->irq_mask[1] = IMR_TXFOVW | in rtw_pci_init()
433 rtwpci->irq_mask[3] = IMR_H2CDOK | in rtw_pci_init()
435 spin_lock_init(&rtwpci->irq_lock); in rtw_pci_init()
436 spin_lock_init(&rtwpci->hwirq_lock); in rtw_pci_init()
444 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_reset_buf_desc()
447 dma_addr_t dma; in rtw_pci_reset_buf_desc() local
452 dma = rtwpci->tx_rings[RTW_TX_QUEUE_BCN].r.dma; in rtw_pci_reset_buf_desc()
453 rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BCNQ, dma); in rtw_pci_reset_buf_desc()
456 len = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.len; in rtw_pci_reset_buf_desc()
457 dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma; in rtw_pci_reset_buf_desc()
458 rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0; in rtw_pci_reset_buf_desc()
459 rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.wp = 0; in rtw_pci_reset_buf_desc()
461 rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_H2CQ, dma); in rtw_pci_reset_buf_desc()
464 len = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.len; in rtw_pci_reset_buf_desc()
465 dma = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.dma; in rtw_pci_reset_buf_desc()
466 rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.rp = 0; in rtw_pci_reset_buf_desc()
467 rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.wp = 0; in rtw_pci_reset_buf_desc()
469 rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BKQ, dma); in rtw_pci_reset_buf_desc()
471 len = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.len; in rtw_pci_reset_buf_desc()
472 dma = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.dma; in rtw_pci_reset_buf_desc()
473 rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.rp = 0; in rtw_pci_reset_buf_desc()
474 rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.wp = 0; in rtw_pci_reset_buf_desc()
476 rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BEQ, dma); in rtw_pci_reset_buf_desc()
478 len = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.len; in rtw_pci_reset_buf_desc()
479 dma = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.dma; in rtw_pci_reset_buf_desc()
480 rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.rp = 0; in rtw_pci_reset_buf_desc()
481 rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.wp = 0; in rtw_pci_reset_buf_desc()
483 rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VOQ, dma); in rtw_pci_reset_buf_desc()
485 len = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.len; in rtw_pci_reset_buf_desc()
486 dma = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.dma; in rtw_pci_reset_buf_desc()
487 rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.rp = 0; in rtw_pci_reset_buf_desc()
488 rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.wp = 0; in rtw_pci_reset_buf_desc()
490 rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VIQ, dma); in rtw_pci_reset_buf_desc()
492 len = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.len; in rtw_pci_reset_buf_desc()
493 dma = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.dma; in rtw_pci_reset_buf_desc()
494 rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.rp = 0; in rtw_pci_reset_buf_desc()
495 rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.wp = 0; in rtw_pci_reset_buf_desc()
497 rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_MGMTQ, dma); in rtw_pci_reset_buf_desc()
499 len = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.len; in rtw_pci_reset_buf_desc()
500 dma = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.dma; in rtw_pci_reset_buf_desc()
501 rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.rp = 0; in rtw_pci_reset_buf_desc()
502 rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.wp = 0; in rtw_pci_reset_buf_desc()
504 rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_HI0Q, dma); in rtw_pci_reset_buf_desc()
506 len = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.len; in rtw_pci_reset_buf_desc()
507 dma = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.dma; in rtw_pci_reset_buf_desc()
508 rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.rp = 0; in rtw_pci_reset_buf_desc()
509 rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.wp = 0; in rtw_pci_reset_buf_desc()
511 rtw_write32(rtwdev, RTK_PCI_RXBD_DESA_MPDUQ, dma); in rtw_pci_reset_buf_desc()
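Every TX queue in rtw_pci_reset_buf_desc() gets the same three steps: zero the software read/write pointers, program the ring length (those rtw_write16() lines did not match and are elided), and program the descriptor base address. Only the register offsets differ, so a table-driven equivalent is possible; a sketch with a hypothetical register map:

struct example_txq_reg {
	int q;		/* index into rtwpci->tx_rings[] */
	u32 reg_desa;	/* descriptor base-address register */
};

static void example_reset_txbd(struct rtw_dev *rtwdev,
			       const struct example_txq_reg *map, int n)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	int i;

	for (i = 0; i < n; i++) {
		struct rtw_pci_tx_ring *ring = &rtwpci->tx_rings[map[i].q];

		ring->r.rp = 0;
		ring->r.wp = 0;
		/* ring-length register write elided, as above */
		rtw_write32(rtwdev, map[i].reg_desa, ring->r.dma);
	}
}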
533 spin_lock_irqsave(&rtwpci->hwirq_lock, flags); in rtw_pci_enable_interrupt()
535 rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0] & ~imr0_unmask); in rtw_pci_enable_interrupt()
536 rtw_write32(rtwdev, RTK_PCI_HIMR1, rtwpci->irq_mask[1]); in rtw_pci_enable_interrupt()
538 rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]); in rtw_pci_enable_interrupt()
540 rtwpci->irq_enabled = true; in rtw_pci_enable_interrupt()
542 spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags); in rtw_pci_enable_interrupt()
550 spin_lock_irqsave(&rtwpci->hwirq_lock, flags); in rtw_pci_disable_interrupt()
552 if (!rtwpci->irq_enabled) in rtw_pci_disable_interrupt()
560 rtwpci->irq_enabled = false; in rtw_pci_disable_interrupt()
563 spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags); in rtw_pci_disable_interrupt()
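Both helpers serialize the HIMR mask writes and the irq_enabled flag under hwirq_lock, taken with spin_lock_irqsave() because the hard-IRQ path takes the same lock. The shape, sketched:

#include <linux/spinlock.h>

static void example_irq_set(spinlock_t *hwirq_lock, bool *irq_enabled,
			    bool enable)
{
	unsigned long flags;

	spin_lock_irqsave(hwirq_lock, flags);
	/* ... write the HIMR0/HIMR1/HIMR3 mask registers here ... */
	*irq_enabled = enable;
	spin_unlock_irqrestore(hwirq_lock, flags);
}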
568 /* reset dma and rx tag */ in rtw_pci_dma_reset()
571 rtwpci->rx_tag = 0; in rtw_pci_dma_reset()
576 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_setup()
591 tx_ring = &rtwpci->tx_rings[queue]; in rtw_pci_dma_release()
598 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_napi_start()
600 if (test_and_set_bit(RTW_PCI_FLAG_NAPI_RUNNING, rtwpci->flags)) in rtw_pci_napi_start()
603 napi_enable(&rtwpci->napi); in rtw_pci_napi_start()
608 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_napi_stop()
610 if (!test_and_clear_bit(RTW_PCI_FLAG_NAPI_RUNNING, rtwpci->flags)) in rtw_pci_napi_stop()
613 napi_synchronize(&rtwpci->napi); in rtw_pci_napi_stop()
614 napi_disable(&rtwpci->napi); in rtw_pci_napi_stop()
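The RTW_PCI_FLAG_NAPI_RUNNING bit makes start/stop idempotent, and the stop path calls napi_synchronize() before napi_disable() so an in-flight poll finishes first. A sketch of that guard, with a generic flag word standing in for rtwpci->flags:

#include <linux/bitops.h>
#include <linux/netdevice.h>

static void example_napi_start(struct napi_struct *napi, unsigned long *flags)
{
	if (test_and_set_bit(0, flags))		/* already running */
		return;
	napi_enable(napi);
}

static void example_napi_stop(struct napi_struct *napi, unsigned long *flags)
{
	if (!test_and_clear_bit(0, flags))	/* already stopped */
		return;
	napi_synchronize(napi);			/* wait out an in-flight poll */
	napi_disable(napi);
}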
619 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_start()
623 spin_lock_bh(&rtwpci->irq_lock); in rtw_pci_start()
624 rtwpci->running = true; in rtw_pci_start()
626 spin_unlock_bh(&rtwpci->irq_lock); in rtw_pci_start()
633 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_stop()
634 struct pci_dev *pdev = rtwpci->pdev; in rtw_pci_stop()
636 spin_lock_bh(&rtwpci->irq_lock); in rtw_pci_stop()
637 rtwpci->running = false; in rtw_pci_stop()
639 spin_unlock_bh(&rtwpci->irq_lock); in rtw_pci_stop()
641 synchronize_irq(pdev->irq); in rtw_pci_stop()
644 spin_lock_bh(&rtwpci->irq_lock); in rtw_pci_stop()
646 spin_unlock_bh(&rtwpci->irq_lock); in rtw_pci_stop()
651 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_deep_ps_enter()
656 if (rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE)) in rtw_pci_deep_ps_enter()
659 lockdep_assert_held(&rtwpci->irq_lock); in rtw_pci_deep_ps_enter()
661 /* Deep PS state is not allowed to TX-DMA */ in rtw_pci_deep_ps_enter()
663 /* BCN queue is rsvd page, does not have DMA interrupt in rtw_pci_deep_ps_enter()
670 tx_ring = &rtwpci->tx_rings[queue]; in rtw_pci_deep_ps_enter()
673 if (skb_queue_len(&tx_ring->queue)) { in rtw_pci_deep_ps_enter()
685 set_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags); in rtw_pci_deep_ps_enter()
692 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_deep_ps_leave()
694 lockdep_assert_held(&rtwpci->irq_lock); in rtw_pci_deep_ps_leave()
696 lockdep_assert_held(&((struct rtw_pci *)rtwdev->priv)->irq_lock); in rtw_pci_deep_ps_leave()
699 if (test_and_clear_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags)) in rtw_pci_deep_ps_leave()
705 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_deep_ps()
707 spin_lock_bh(&rtwpci->irq_lock); in rtw_pci_deep_ps()
709 if (enter && !test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags)) in rtw_pci_deep_ps()
712 if (!enter && test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags)) in rtw_pci_deep_ps()
715 spin_unlock_bh(&rtwpci->irq_lock); in rtw_pci_deep_ps()
721 struct sk_buff *prev = skb_dequeue(&ring->queue); in rtw_pci_release_rsvd_page()
723 dma_addr_t dma; in rtw_pci_release_rsvd_page() local
729 dma = tx_data->dma; in rtw_pci_release_rsvd_page()
730 dma_unmap_single(&rtwpci->pdev->dev, dma, prev->len, DMA_TO_DEVICE); in rtw_pci_release_rsvd_page()
738 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_dma_check()
739 const struct rtw_chip_info *chip = rtwdev->chip; in rtw_pci_dma_check()
741 u32 desc_sz = chip->rx_buf_desc_sz; in rtw_pci_dma_check()
744 buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head + in rtw_pci_dma_check()
746 total_pkt_size = le16_to_cpu(buf_desc->total_pkt_size); in rtw_pci_dma_check()
749 if (total_pkt_size != rtwpci->rx_tag) in rtw_pci_dma_check()
750 rtw_warn(rtwdev, "pci bus timeout, check dma status\n"); in rtw_pci_dma_check()
752 rtwpci->rx_tag = (rtwpci->rx_tag + 1) % RX_TAG_MAX; in rtw_pci_dma_check()
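rtw_pci_dma_check() is a sequence check: the hardware stamps each completed RX buffer descriptor with an incrementing tag (carried in the total_pkt_size field here), and a mismatch against the driver's rx_tag counter is taken as a stalled bus or DMA engine. Reduced to its core:

#define EXAMPLE_RX_TAG_MAX 8192	/* wrap point, standing in for RX_TAG_MAX */

static bool example_rx_tag_ok(u16 hw_tag, u16 *expected)
{
	bool ok = (hw_tag == *expected);

	*expected = (*expected + 1) % EXAMPLE_RX_TAG_MAX;
	return ok;
}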
765 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in __pci_flush_queue()
766 struct rtw_pci_tx_ring *ring = &rtwpci->tx_rings[pci_q]; in __pci_flush_queue()
777 if (cur_rp == ring->r.wp) in __pci_flush_queue()
812 if (queues == BIT(rtwdev->hw->queues) - 1) { in rtw_pci_flush_queues()
813 pci_queues = BIT(RTK_MAX_TX_QUEUE_NUM) - 1; in rtw_pci_flush_queues()
815 for (i = 0; i < rtwdev->hw->queues; i++) in rtw_pci_flush_queues()
826 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_tx_kick_off_queue()
830 ring = &rtwpci->tx_rings[queue]; in rtw_pci_tx_kick_off_queue()
833 spin_lock_bh(&rtwpci->irq_lock); in rtw_pci_tx_kick_off_queue()
834 if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE)) in rtw_pci_tx_kick_off_queue()
836 rtw_write16(rtwdev, bd_idx, ring->r.wp & TRX_BD_IDX_MASK); in rtw_pci_tx_kick_off_queue()
837 spin_unlock_bh(&rtwpci->irq_lock); in rtw_pci_tx_kick_off_queue()
842 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_tx_kick_off()
846 if (test_and_clear_bit(queue, rtwpci->tx_queued)) in rtw_pci_tx_kick_off()
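Kick-off is deferred: rtw_pci_tx_write_data() below only marks the queue in the tx_queued bitmap, and rtw_pci_tx_kick_off() later rings the doorbell once per marked queue by writing the write pointer (masked to the descriptor index field) into that queue's BD index register. The batch loop, sketched:

#include <linux/bitops.h>

static void example_kick_marked(struct rtw_dev *rtwdev,
				unsigned long *tx_queued, int nr_queues)
{
	int q;

	for (q = 0; q < nr_queues; q++)
		if (test_and_clear_bit(q, tx_queued))
			rtw_pci_tx_kick_off_queue(rtwdev, q);
}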
855 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_tx_write_data()
856 const struct rtw_chip_info *chip = rtwdev->chip; in rtw_pci_tx_write_data()
859 dma_addr_t dma; in rtw_pci_tx_write_data() local
860 u32 tx_pkt_desc_sz = chip->tx_pkt_desc_sz; in rtw_pci_tx_write_data()
861 u32 tx_buf_desc_sz = chip->tx_buf_desc_sz; in rtw_pci_tx_write_data()
867 ring = &rtwpci->tx_rings[queue]; in rtw_pci_tx_write_data()
869 size = skb->len; in rtw_pci_tx_write_data()
873 else if (!avail_desc(ring->r.wp, ring->r.rp, ring->r.len)) in rtw_pci_tx_write_data()
874 return -ENOSPC; in rtw_pci_tx_write_data()
876 pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz); in rtw_pci_tx_write_data()
878 pkt_info->qsel = rtw_pci_get_tx_qsel(skb, queue); in rtw_pci_tx_write_data()
880 dma = dma_map_single(&rtwpci->pdev->dev, skb->data, skb->len, in rtw_pci_tx_write_data()
882 if (dma_mapping_error(&rtwpci->pdev->dev, dma)) in rtw_pci_tx_write_data()
883 return -EBUSY; in rtw_pci_tx_write_data()
885 /* after this we got dma mapped, there is no way back */ in rtw_pci_tx_write_data()
888 psb_len = (skb->len - 1) / 128 + 1; in rtw_pci_tx_write_data()
894 buf_desc[0].dma = cpu_to_le32(dma); in rtw_pci_tx_write_data()
896 buf_desc[1].dma = cpu_to_le32(dma + tx_pkt_desc_sz); in rtw_pci_tx_write_data()
899 tx_data->dma = dma; in rtw_pci_tx_write_data()
900 tx_data->sn = pkt_info->sn; in rtw_pci_tx_write_data()
902 spin_lock_bh(&rtwpci->irq_lock); in rtw_pci_tx_write_data()
904 skb_queue_tail(&ring->queue, skb); in rtw_pci_tx_write_data()
909 /* update write-index, and kick it off later */ in rtw_pci_tx_write_data()
910 set_bit(queue, rtwpci->tx_queued); in rtw_pci_tx_write_data()
911 if (++ring->r.wp >= ring->r.len) in rtw_pci_tx_write_data()
912 ring->r.wp = 0; in rtw_pci_tx_write_data()
915 spin_unlock_bh(&rtwpci->irq_lock); in rtw_pci_tx_write_data()
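Two details in rtw_pci_tx_write_data() are worth unpacking: the skb is mapped once but described by two buffer descriptors (the pushed tx_pkt_desc_sz-byte packet descriptor at dma, the frame body at dma + tx_pkt_desc_sz), and psb_len is the total length rounded up to 128-byte blocks. The latter, spelled with the usual helper:

#include <linux/kernel.h>

/* Equivalent of `psb_len = (skb->len - 1) / 128 + 1` above. */
static u16 example_psb_len(u32 skb_len)
{
	return DIV_ROUND_UP(skb_len, 128);
}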
930 return -ENOMEM; in rtw_pci_write_data_rsvd_page()
957 return -ENOMEM; in rtw_pci_write_data_h2c()
978 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_tx_write()
986 ring = &rtwpci->tx_rings[queue]; in rtw_pci_tx_write()
987 spin_lock_bh(&rtwpci->irq_lock); in rtw_pci_tx_write()
988 if (avail_desc(ring->r.wp, ring->r.rp, ring->r.len) < 2) { in rtw_pci_tx_write()
989 ieee80211_stop_queue(rtwdev->hw, skb_get_queue_mapping(skb)); in rtw_pci_tx_write()
990 ring->queue_stopped = true; in rtw_pci_tx_write()
992 spin_unlock_bh(&rtwpci->irq_lock); in rtw_pci_tx_write()
1000 struct ieee80211_hw *hw = rtwdev->hw; in rtw_pci_tx_isr()
1010 ring = &rtwpci->tx_rings[hw_queue]; in rtw_pci_tx_isr()
1016 rp_idx = ring->r.rp; in rtw_pci_tx_isr()
1017 if (cur_rp >= ring->r.rp) in rtw_pci_tx_isr()
1018 count = cur_rp - ring->r.rp; in rtw_pci_tx_isr()
1020 count = ring->r.len - (ring->r.rp - cur_rp); in rtw_pci_tx_isr()
1022 while (count--) { in rtw_pci_tx_isr()
1023 skb = skb_dequeue(&ring->queue); in rtw_pci_tx_isr()
1025 rtw_err(rtwdev, "failed to dequeue %d skb TX queue %d, BD=0x%08x, rp %d -> %d\n", in rtw_pci_tx_isr()
1026 count, hw_queue, bd_idx, ring->r.rp, cur_rp); in rtw_pci_tx_isr()
1030 dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len, in rtw_pci_tx_isr()
1039 if (ring->queue_stopped && in rtw_pci_tx_isr()
1040 avail_desc(ring->r.wp, rp_idx, ring->r.len) > 4) { in rtw_pci_tx_isr()
1043 ring->queue_stopped = false; in rtw_pci_tx_isr()
1046 if (++rp_idx >= ring->r.len) in rtw_pci_tx_isr()
1049 skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz); in rtw_pci_tx_isr()
1054 if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) { in rtw_pci_tx_isr()
1055 rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn); in rtw_pci_tx_isr()
1060 if (info->flags & IEEE80211_TX_CTL_NO_ACK) in rtw_pci_tx_isr()
1061 info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; in rtw_pci_tx_isr()
1063 info->flags |= IEEE80211_TX_STAT_ACK; in rtw_pci_tx_isr()
1069 ring->r.rp = cur_rp; in rtw_pci_tx_isr()
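The reclaim count in rtw_pci_tx_isr() is plain modular ring arithmetic between the hardware read pointer and the last one seen; rtw_pci_get_hw_rx_ring_nr() below uses the same formula with the write pointer. Isolated:

/* Descriptors consumed since the last pass, on a ring of `len` slots. */
static u32 example_ring_delta(u32 cur, u32 last, u32 len)
{
	return (cur >= last) ? cur - last : len - (last - cur);
}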
1074 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_rx_isr()
1075 struct napi_struct *napi = &rtwpci->napi; in rtw_pci_rx_isr()
1087 ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU]; in rtw_pci_get_hw_rx_ring_nr()
1090 if (cur_wp >= ring->r.wp) in rtw_pci_get_hw_rx_ring_nr()
1091 count = cur_wp - ring->r.wp; in rtw_pci_get_hw_rx_ring_nr()
1093 count = ring->r.len - (ring->r.wp - cur_wp); in rtw_pci_get_hw_rx_ring_nr()
1101 const struct rtw_chip_info *chip = rtwdev->chip; in rtw_pci_rx_napi()
1102 struct napi_struct *napi = &rtwpci->napi; in rtw_pci_rx_napi()
1103 struct rtw_pci_rx_ring *ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU]; in rtw_pci_rx_napi()
1107 u32 cur_rp = ring->r.rp; in rtw_pci_rx_napi()
1110 u32 pkt_desc_sz = chip->rx_pkt_desc_sz; in rtw_pci_rx_napi()
1111 u32 buf_desc_sz = chip->rx_buf_desc_sz; in rtw_pci_rx_napi()
1114 dma_addr_t dma; in rtw_pci_rx_napi() local
1119 while (count--) { in rtw_pci_rx_napi()
1121 skb = ring->buf[cur_rp]; in rtw_pci_rx_napi()
1122 dma = *((dma_addr_t *)skb->cb); in rtw_pci_rx_napi()
1123 dma_sync_single_for_cpu(rtwdev->dev, dma, RTK_PCI_RX_BUF_SIZE, in rtw_pci_rx_napi()
1125 rx_desc = skb->data; in rtw_pci_rx_napi()
1140 /* put the DMA data including rx_desc from phy to new skb */ in rtw_pci_rx_napi()
1141 skb_put_data(new, skb->data, new_len); in rtw_pci_rx_napi()
1151 memcpy(new->cb, &rx_status, sizeof(rx_status)); in rtw_pci_rx_napi()
1152 ieee80211_rx_napi(rtwdev->hw, NULL, new, napi); in rtw_pci_rx_napi()
1157 /* new skb delivered to mac80211, re-enable original skb DMA */ in rtw_pci_rx_napi()
1158 rtw_pci_sync_rx_desc_device(rtwdev, dma, ring, cur_rp, in rtw_pci_rx_napi()
1162 if (++cur_rp >= ring->r.len) in rtw_pci_rx_napi()
1166 ring->r.rp = cur_rp; in rtw_pci_rx_napi()
1170 ring->r.wp = cur_rp; in rtw_pci_rx_napi()
1171 rtw_write16(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ, ring->r.rp); in rtw_pci_rx_napi()
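The RX loop copies each frame into a fresh skb for mac80211 and then returns the original, still-mapped buffer to the device with dma_sync_single_for_device(): buffers are mapped once at ring init and only synced afterwards, never remapped per packet. The ownership handoff, sketched:

#include <linux/dma-mapping.h>

static void example_rx_handoff(struct device *dev, dma_addr_t dma,
			       u32 buf_sz)
{
	/* CPU may now read the buffer the device DMA'd into */
	dma_sync_single_for_cpu(dev, dma, buf_sz, DMA_FROM_DEVICE);
	/* ... copy the frame out and deliver the copy ... */
	/* give the same buffer back to the device, no remap needed */
	dma_sync_single_for_device(dev, dma, buf_sz, DMA_FROM_DEVICE);
}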
1181 spin_lock_irqsave(&rtwpci->hwirq_lock, flags); in rtw_pci_irq_recognized()
1189 irq_status[0] &= rtwpci->irq_mask[0]; in rtw_pci_irq_recognized()
1190 irq_status[1] &= rtwpci->irq_mask[1]; in rtw_pci_irq_recognized()
1191 irq_status[3] &= rtwpci->irq_mask[3]; in rtw_pci_irq_recognized()
1197 spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags); in rtw_pci_irq_recognized()
1203 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_interrupt_handler()
1209 * the HISRs have been Write-1-cleared for MSI. If not all of the HISRs in rtw_pci_interrupt_handler()
1210 * are cleared, the edge-triggered interrupt will not be generated when in rtw_pci_interrupt_handler()
1221 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_interrupt_threadfn()
1225 spin_lock_bh(&rtwpci->irq_lock); in rtw_pci_interrupt_threadfn()
1250 if (rtwpci->running) in rtw_pci_interrupt_threadfn()
1252 spin_unlock_bh(&rtwpci->irq_lock); in rtw_pci_interrupt_threadfn()
1260 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_io_mapping()
1272 ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); in rtw_pci_io_mapping()
1274 rtw_err(rtwdev, "failed to set dma mask to 32-bit\n"); in rtw_pci_io_mapping()
1278 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); in rtw_pci_io_mapping()
1280 rtw_err(rtwdev, "failed to set consistent dma mask to 32-bit\n"); in rtw_pci_io_mapping()
1289 rtwpci->mmap = pci_iomap(pdev, bar_id, len); in rtw_pci_io_mapping()
1290 if (!rtwpci->mmap) { in rtw_pci_io_mapping()
1293 return -ENOMEM; in rtw_pci_io_mapping()
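Both the streaming and coherent masks are set to 32 bits because the buffer descriptors above carry cpu_to_le32() addresses. The two calls can also be combined:

#include <linux/dma-mapping.h>

static int example_set_dma_masks(struct device *dev)
{
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}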
1307 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_io_unmapping()
1309 if (rtwpci->mmap) { in rtw_pci_io_unmapping()
1310 pci_iounmap(pdev, rtwpci->mmap); in rtw_pci_io_unmapping()
1320 u8 cnt; in rtw_dbi_write8() local
1328 for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) { in rtw_dbi_write8()
1343 u8 cnt; in rtw_dbi_read8() local
1348 for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) { in rtw_dbi_read8()
1360 return -EIO; in rtw_dbi_read8()
1367 u8 cnt; in rtw_mdio_write() local
1377 for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) { in rtw_mdio_write()
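rtw_dbi_write8(), rtw_dbi_read8(), and rtw_mdio_write() share one shape: kick an indirect access, then poll a completion flag up to RTW_PCI_WR_RETRY_CNT times before giving up with -EIO. A sketch of that bounded poll (register, flag, and delay are assumptions):

#include <linux/delay.h>

static int example_poll_done(struct rtw_dev *rtwdev, u32 reg, u8 busy_bit)
{
	u8 cnt;

	for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
		if (!(rtw_read8(rtwdev, reg) & busy_bit))
			return 0;	/* hardware cleared the busy flag */
		udelay(10);		/* assumed back-off */
	}
	return -EIO;
}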
1454 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_link_ps()
1461 * experienced some inter-operability issues that the link tends in rtw_pci_link_ps()
1466 if (!(rtwpci->link_ctrl & PCI_EXP_LNKCTL_ASPM_L1)) in rtw_pci_link_ps()
1469 if ((enter && atomic_dec_if_positive(&rtwpci->link_usage) == 0) || in rtw_pci_link_ps()
1470 (!enter && atomic_inc_return(&rtwpci->link_usage) == 1)) in rtw_pci_link_ps()
1476 const struct rtw_chip_info *chip = rtwdev->chip; in rtw_pci_link_cfg()
1477 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_link_cfg()
1478 struct pci_dev *pdev = rtwpci->pdev; in rtw_pci_link_cfg()
1485 if (chip->id == RTW_CHIP_TYPE_8822C) in rtw_pci_link_cfg()
1497 * settings (ex. CLKREQ# not Bi-Direction), it could lead to device in rtw_pci_link_cfg()
1513 rtwpci->link_ctrl = link_ctrl; in rtw_pci_link_cfg()
1518 const struct rtw_chip_info *chip = rtwdev->chip; in rtw_pci_interface_cfg()
1520 switch (chip->id) { in rtw_pci_interface_cfg()
1522 if (rtwdev->hal.cut_version >= RTW_CHIP_VER_CUT_D) in rtw_pci_interface_cfg()
1533 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_phy_cfg()
1534 const struct rtw_chip_info *chip = rtwdev->chip; in rtw_pci_phy_cfg()
1535 struct rtw_efuse *efuse = &rtwdev->efuse; in rtw_pci_phy_cfg()
1536 struct pci_dev *pdev = rtwpci->pdev; in rtw_pci_phy_cfg()
1544 cut = BIT(0) << rtwdev->hal.cut_version; in rtw_pci_phy_cfg()
1546 for (i = 0; i < chip->intf_table->n_gen1_para; i++) { in rtw_pci_phy_cfg()
1547 para = &chip->intf_table->gen1_para[i]; in rtw_pci_phy_cfg()
1548 if (!(para->cut_mask & cut)) in rtw_pci_phy_cfg()
1550 if (para->offset == 0xffff) in rtw_pci_phy_cfg()
1552 offset = para->offset; in rtw_pci_phy_cfg()
1553 value = para->value; in rtw_pci_phy_cfg()
1554 if (para->ip_sel == RTW_IP_SEL_PHY) in rtw_pci_phy_cfg()
1560 for (i = 0; i < chip->intf_table->n_gen2_para; i++) { in rtw_pci_phy_cfg()
1561 para = &chip->intf_table->gen2_para[i]; in rtw_pci_phy_cfg()
1562 if (!(para->cut_mask & cut)) in rtw_pci_phy_cfg()
1564 if (para->offset == 0xffff) in rtw_pci_phy_cfg()
1566 offset = para->offset; in rtw_pci_phy_cfg()
1567 value = para->value; in rtw_pci_phy_cfg()
1568 if (para->ip_sel == RTW_IP_SEL_PHY) in rtw_pci_phy_cfg()
1577 if (chip->id == RTW_CHIP_TYPE_8821C) { in rtw_pci_phy_cfg()
1585 if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 5) in rtw_pci_phy_cfg()
1592 struct rtw_dev *rtwdev = hw->priv; in rtw_pci_suspend()
1593 const struct rtw_chip_info *chip = rtwdev->chip; in rtw_pci_suspend()
1594 struct rtw_efuse *efuse = &rtwdev->efuse; in rtw_pci_suspend()
1596 if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 6) in rtw_pci_suspend()
1604 struct rtw_dev *rtwdev = hw->priv; in rtw_pci_resume()
1605 const struct rtw_chip_info *chip = rtwdev->chip; in rtw_pci_resume()
1606 struct rtw_efuse *efuse = &rtwdev->efuse; in rtw_pci_resume()
1608 if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 6) in rtw_pci_resume()
1627 pci_set_drvdata(pdev, rtwdev->hw); in rtw_pci_claim()
1628 SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev); in rtw_pci_claim()
1643 rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_setup_resource()
1644 rtwpci->pdev = pdev; in rtw_pci_setup_resource()
1711 ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq, in rtw_pci_request_irq()
1725 devm_free_irq(rtwdev->dev, pdev->irq, rtwdev); in rtw_pci_free_irq()
1736 if (rtwpci->rx_no_aspm) in rtw_pci_napi_poll()
1743 budget - work_done); in rtw_pci_napi_poll()
1750 spin_lock_bh(&rtwpci->irq_lock); in rtw_pci_napi_poll()
1751 if (rtwpci->running) in rtw_pci_napi_poll()
1753 spin_unlock_bh(&rtwpci->irq_lock); in rtw_pci_napi_poll()
1756 * not be processed immediately. Check whether dma ring is in rtw_pci_napi_poll()
1762 if (rtwpci->rx_no_aspm) in rtw_pci_napi_poll()
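rtw_pci_napi_poll() follows the standard NAPI budget contract: complete NAPI only when less than the full budget was consumed, then re-check the ring (under irq_lock, with interrupts re-enabled only if still running) so packets that raced with completion are not stranded. The contract, sketched:

#include <linux/netdevice.h>

static int example_napi_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	/* work_done += rx_napi(..., budget - work_done); */
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* re-enable the device's RX interrupt here */
	}
	return work_done;
}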
1770 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_napi_init()
1772 rtwpci->netdev = alloc_netdev_dummy(0); in rtw_pci_napi_init()
1773 if (!rtwpci->netdev) in rtw_pci_napi_init()
1774 return -ENOMEM; in rtw_pci_napi_init()
1776 netif_napi_add(rtwpci->netdev, &rtwpci->napi, rtw_pci_napi_poll); in rtw_pci_napi_init()
1782 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_napi_deinit()
1785 netif_napi_del(&rtwpci->napi); in rtw_pci_napi_deinit()
1786 free_netdev(rtwpci->netdev); in rtw_pci_napi_deinit()
1802 struct rtw_dev *rtwdev = hw->priv; in rtw_pci_io_slot_reset()
1839 dev_err(&pdev->dev, "failed to allocate hw\n"); in rtw_pci_probe()
1840 return -ENOMEM; in rtw_pci_probe()
1843 rtwdev = hw->priv; in rtw_pci_probe()
1844 rtwdev->hw = hw; in rtw_pci_probe()
1845 rtwdev->dev = &pdev->dev; in rtw_pci_probe()
1846 rtwdev->chip = (struct rtw_chip_info *)id->driver_data; in rtw_pci_probe()
1847 rtwdev->hci.ops = &rtw_pci_ops; in rtw_pci_probe()
1848 rtwdev->hci.type = RTW_HCI_TYPE_PCIE; in rtw_pci_probe()
1850 rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_probe()
1851 atomic_set(&rtwpci->link_usage, 1); in rtw_pci_probe()
1859 pdev->vendor, pdev->device, pdev->revision); in rtw_pci_probe()
1885 /* Disable PCIe ASPM L1 while doing NAPI poll for 8821CE */ in rtw_pci_probe()
1886 if (rtwdev->chip->id == RTW_CHIP_TYPE_8821C && bridge->vendor == PCI_VENDOR_ID_INTEL) in rtw_pci_probe()
1887 rtwpci->rx_no_aspm = true; in rtw_pci_probe()
1931 rtwdev = hw->priv; in rtw_pci_remove()
1932 rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_remove()
1954 rtwdev = hw->priv; in rtw_pci_shutdown()
1955 chip = rtwdev->chip; in rtw_pci_shutdown()
1957 if (chip->ops->shutdown) in rtw_pci_shutdown()
1958 chip->ops->shutdown(rtwdev); in rtw_pci_shutdown()