Excerpts from the Realtek rtw88 PCI transport (drivers/net/wireless/realtek/rtw88/pci.c), grouped by the function they belong to; elided context within a function is marked with "...".

// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019 Realtek Corporation */
In rtw_pci_get_tx_qsel():
        return skb->priority;

In rtw_pci_read8():
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        return readb(rtwpci->mmap + addr);

In rtw_pci_read16():
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        return readw(rtwpci->mmap + addr);

In rtw_pci_read32():
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        return readl(rtwpci->mmap + addr);

In rtw_pci_write8():
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        writeb(val, rtwpci->mmap + addr);

In rtw_pci_write16():
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        writew(val, rtwpci->mmap + addr);

In rtw_pci_write32():
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        writel(val, rtwpci->mmap + addr);
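The six accessors above are the whole MMIO surface of the transport: every register read or write from the core driver is routed through an ops table that the probe path installs (rtwdev->hci.ops = &rtw_pci_ops, see rtw_pci_probe() below). A minimal sketch of that indirection; the struct and field names here are illustrative assumptions, not the rtw88 definitions:

        /* Hedged sketch, assuming hypothetical names. */
        #include <linux/io.h>
        #include <linux/types.h>

        struct sketch_dev;

        struct sketch_hci_ops {
                u8   (*read8)(struct sketch_dev *dev, u32 addr);
                void (*write8)(struct sketch_dev *dev, u32 addr, u8 val);
                /* read16/32 and write16/32 follow the same shape */
        };

        struct sketch_dev {
                void __iomem *mmap;     /* BAR mapping, as rtwpci->mmap above */
                const struct sketch_hci_ops *ops;
        };

        static u8 sketch_pci_read8(struct sketch_dev *dev, u32 addr)
        {
                return readb(dev->mmap + addr);
        }

        static void sketch_pci_write8(struct sketch_dev *dev, u32 addr, u8 val)
        {
                writeb(val, dev->mmap + addr);
        }

        static const struct sketch_hci_ops sketch_pci_ops = {
                .read8  = sketch_pci_read8,
                .write8 = sketch_pci_write8,
        };

A USB or SDIO backend would supply its own table with the same shape, which is what keeps the core bus-agnostic.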
In rtw_pci_free_tx_ring_skbs():
        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
        ...
        skb_queue_walk_safe(&tx_ring->queue, skb, tmp) {
                __skb_unlink(skb, &tx_ring->queue);
                dma = tx_data->dma;
                dma_unmap_single(&pdev->dev, dma, skb->len, DMA_TO_DEVICE);
                ...
        }

In rtw_pci_free_tx_ring():
        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
        u8 *head = tx_ring->r.head;
        u32 len = tx_ring->r.len;
        int ring_sz = len * tx_ring->r.desc_size;
        ...
        dma_free_coherent(&pdev->dev, ring_sz, head, tx_ring->r.dma);
        tx_ring->r.head = NULL;

In rtw_pci_free_rx_ring_skbs():
        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
        ...
        for (i = 0; i < rx_ring->r.len; i++) {
                skb = rx_ring->buf[i];
                ...
                dma = *((dma_addr_t *)skb->cb);
                dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
                ...
                rx_ring->buf[i] = NULL;
        }

In rtw_pci_free_rx_ring():
        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
        u8 *head = rx_ring->r.head;
        int ring_sz = rx_ring->r.desc_size * rx_ring->r.len;
        ...
        dma_free_coherent(&pdev->dev, ring_sz, head, rx_ring->r.dma);

In rtw_pci_free_trx_ring():
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        ...
        tx_ring = &rtwpci->tx_rings[i];
        ...
        rx_ring = &rtwpci->rx_rings[i];
In rtw_pci_init_tx_ring():
        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
        ...
                return -EINVAL;         /* ring length out of range */
        ...
        head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
        if (!head)
                return -ENOMEM;
        ...
        skb_queue_head_init(&tx_ring->queue);
        tx_ring->r.head = head;
        tx_ring->r.dma = dma;
        tx_ring->r.len = len;
        tx_ring->r.desc_size = desc_size;
        tx_ring->r.wp = 0;
        tx_ring->r.rp = 0;

In rtw_pci_reset_rx_desc():
        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
        ...
        if (!skb)
                return -EINVAL;
        ...
        dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE);
        if (dma_mapping_error(&pdev->dev, dma))
                return -EBUSY;

        *((dma_addr_t *)skb->cb) = dma;
        buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
                                                     idx * desc_size);
        buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
        buf_desc->dma = cpu_to_le32(dma);

In rtw_pci_sync_rx_desc_device():
        struct device *dev = rtwdev->dev;
        ...
        buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
                                                     idx * desc_size);
        buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
        buf_desc->dma = cpu_to_le32(dma);

In rtw_pci_init_rx_ring():
        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
        ...
        head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
        if (!head)
                return -ENOMEM;
        rx_ring->r.head = head;
        ...
                ret = -ENOMEM;          /* skb allocation failed; unwind below */
        ...
        memset(skb->data, 0, buf_sz);
        rx_ring->buf[i] = skb;
        ...
        rx_ring->r.dma = dma;
        rx_ring->r.len = len;
        rx_ring->r.desc_size = desc_size;
        rx_ring->r.wp = 0;
        rx_ring->r.rp = 0;
        ...
        /* error path: unmap and free every buffer installed so far */
        skb = rx_ring->buf[i];
        ...
        dma = *((dma_addr_t *)skb->cb);
        dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
        ...
        rx_ring->buf[i] = NULL;
        ...
        dma_free_coherent(&pdev->dev, ring_sz, head, dma);
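rtw_pci_init_rx_ring() combines two DMA disciplines: the descriptor ring lives in a single coherent allocation, while each packet buffer gets a streaming mapping whose handle is stashed in skb->cb and echoed into the descriptor. A compressed sketch of one RX slot being armed; the descriptor layout and names are assumptions for illustration, not the rtw_pci_rx_buffer_desc definition:

        #include <linux/dma-mapping.h>
        #include <linux/pci.h>
        #include <linux/skbuff.h>

        struct sketch_rx_desc {
                __le16 buf_size;
                __le16 total_pkt_size;
                __le32 dma;
        };

        static int sketch_arm_rx_slot(struct pci_dev *pdev, u8 *ring_head,
                                      u32 idx, u32 desc_size,
                                      struct sk_buff *skb, u32 buf_sz)
        {
                struct sketch_rx_desc *desc;
                dma_addr_t dma;

                /* streaming mapping for the packet buffer (device writes it) */
                dma = dma_map_single(&pdev->dev, skb->data, buf_sz,
                                     DMA_FROM_DEVICE);
                if (dma_mapping_error(&pdev->dev, dma))
                        return -EBUSY;

                *((dma_addr_t *)skb->cb) = dma; /* remembered for unmap later */

                /* the descriptor itself lives in the coherent ring allocation */
                desc = (struct sketch_rx_desc *)(ring_head + idx * desc_size);
                desc->buf_size = cpu_to_le16(buf_sz);
                desc->dma = cpu_to_le32(lower_32_bits(dma));
                return 0;
        }

On teardown the same two halves are released in reverse order, exactly as in the free helpers above: dma_unmap_single() per buffer, then one dma_free_coherent() for the ring.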
In rtw_pci_init_trx_ring():
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        const struct rtw_chip_info *chip = rtwdev->chip;
        ...
        tx_desc_size = chip->tx_buf_desc_sz;
        ...
        tx_ring = &rtwpci->tx_rings[i];
        ...
        rx_desc_size = chip->rx_buf_desc_sz;
        ...
        rx_ring = &rtwpci->rx_rings[j];
        ...
        /* error path: free the rings initialized before the failure */
        tx_ring = &rtwpci->tx_rings[i];
        ...
        rx_ring = &rtwpci->rx_rings[j];

In rtw_pci_init():
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        rtwpci->irq_mask[0] = IMR_HIGHDOK |
                              ...
        rtwpci->irq_mask[1] = IMR_TXFOVW |
                              ...
        rtwpci->irq_mask[3] = IMR_H2CDOK |
                              ...
        spin_lock_init(&rtwpci->irq_lock);
        spin_lock_init(&rtwpci->hwirq_lock);
In rtw_pci_reset_buf_desc():
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        ...
        dma = rtwpci->tx_rings[RTW_TX_QUEUE_BCN].r.dma;
        ...
        len = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.len;
        dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma;
        rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0;
        rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.wp = 0;
        ...
        len = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.len;
        dma = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.dma;
        rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.rp = 0;
        rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.wp = 0;
        ...
        len = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.len;
        dma = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.dma;
        rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.rp = 0;
        rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.wp = 0;
        ...
        len = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.len;
        dma = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.dma;
        rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.rp = 0;
        rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.wp = 0;
        ...
        len = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.len;
        dma = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.dma;
        rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.rp = 0;
        rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.wp = 0;
        ...
        len = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.len;
        dma = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.dma;
        rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.rp = 0;
        rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.wp = 0;
        ...
        len = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.len;
        dma = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.dma;
        rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.rp = 0;
        rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.wp = 0;
        ...
        len = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.len;
        dma = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.dma;
        rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.rp = 0;
        rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.wp = 0;
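Each block in rtw_pci_reset_buf_desc() repeats the same steps per queue: read the ring length, program the ring base address register, and zero the software rp/wp. The repetition comes from every queue owning its own register pair; a hedged sketch of how a table-driven reset could look (register and struct names are hypothetical, not how rtw88 lays this out):

        #include <linux/kernel.h>
        #include <linux/types.h>

        #define SKETCH_BD_IDX_MASK 0xfff        /* illustrative width */

        struct sketch_ring {
                dma_addr_t dma;
                u32 len, rp, wp;
        };

        struct sketch_ring_regs {
                u32 num_reg;    /* ring length register */
                u32 base_reg;   /* ring base (DMA address) register */
        };

        static void sketch_write16(u32 reg, u16 val);
        static void sketch_write32(u32 reg, u32 val);

        static void sketch_reset_ring(struct sketch_ring *ring,
                                      const struct sketch_ring_regs *regs)
        {
                sketch_write16(regs->num_reg, ring->len & SKETCH_BD_IDX_MASK);
                sketch_write32(regs->base_reg, lower_32_bits(ring->dma));
                ring->rp = 0;
                ring->wp = 0;
        }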
In rtw_pci_enable_interrupt():
        spin_lock_irqsave(&rtwpci->hwirq_lock, flags);

        rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0] & ~imr0_unmask);
        rtw_write32(rtwdev, RTK_PCI_HIMR1, rtwpci->irq_mask[1]);
        ...
        rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]);
        rtwpci->irq_enabled = true;

        spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);

In rtw_pci_disable_interrupt():
        spin_lock_irqsave(&rtwpci->hwirq_lock, flags);

        if (!rtwpci->irq_enabled)
                goto out;       /* already masked */
        ...
        rtwpci->irq_enabled = false;

out:
        spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);

In rtw_pci_dma_reset():
        rtwpci->rx_tag = 0;
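Two locks recur throughout this file: hwirq_lock is taken with spin_lock_irqsave() because it is also contended from the hard-irq handler, while irq_lock is a BH-level lock protecting ring state shared with NAPI and the threaded handler. A minimal sketch of why the hardware-side lock must disable local interrupts (names hypothetical):

        #include <linux/interrupt.h>
        #include <linux/spinlock.h>
        #include <linux/types.h>

        struct sketch_irq_state {
                spinlock_t hwirq_lock;  /* shared with the hard-irq path */
                bool irq_enabled;
        };

        static irqreturn_t sketch_hw_irq(int irq, void *cookie)
        {
                struct sketch_irq_state *st = cookie;

                spin_lock(&st->hwirq_lock);     /* irqs are already off here */
                /* read/clear the hardware status registers */
                spin_unlock(&st->hwirq_lock);
                return IRQ_WAKE_THREAD;
        }

        static void sketch_disable_irq(struct sketch_irq_state *st)
        {
                unsigned long flags;

                /* from process or BH context the same lock must disable local
                 * irqs, or the handler above could deadlock on this CPU
                 */
                spin_lock_irqsave(&st->hwirq_lock, flags);
                st->irq_enabled = false;
                spin_unlock_irqrestore(&st->hwirq_lock, flags);
        }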
In rtw_pci_setup():
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

In rtw_pci_dma_release():
        tx_ring = &rtwpci->tx_rings[queue];

In rtw_pci_napi_start():
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        if (test_and_set_bit(RTW_PCI_FLAG_NAPI_RUNNING, rtwpci->flags))
                return;

        napi_enable(&rtwpci->napi);

In rtw_pci_napi_stop():
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        if (!test_and_clear_bit(RTW_PCI_FLAG_NAPI_RUNNING, rtwpci->flags))
                return;

        napi_synchronize(&rtwpci->napi);
        napi_disable(&rtwpci->napi);

In rtw_pci_start():
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        ...
        spin_lock_bh(&rtwpci->irq_lock);
        rtwpci->running = true;
        ...
        spin_unlock_bh(&rtwpci->irq_lock);

In rtw_pci_stop():
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        struct pci_dev *pdev = rtwpci->pdev;

        spin_lock_bh(&rtwpci->irq_lock);
        rtwpci->running = false;
        ...
        spin_unlock_bh(&rtwpci->irq_lock);

        synchronize_irq(pdev->irq);
        ...
        spin_lock_bh(&rtwpci->irq_lock);
        ...
        spin_unlock_bh(&rtwpci->irq_lock);
In rtw_pci_deep_ps_enter():
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        ...
        if (rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE))
                ...

        lockdep_assert_held(&rtwpci->irq_lock);

        /* TX DMA is not allowed while in deep PS, so make sure every TX
         * ring has drained before entering
         */
        ...
        tx_ring = &rtwpci->tx_rings[queue];
        ...
        if (skb_queue_len(&tx_ring->queue)) {
                ...
        }
        ...
        set_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags);

In rtw_pci_deep_ps_leave():
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        lockdep_assert_held(&rtwpci->irq_lock);

        if (test_and_clear_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
                ...

In rtw_pci_deep_ps():
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        spin_lock_bh(&rtwpci->irq_lock);

        if (enter && !test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
                rtw_pci_deep_ps_enter(rtwdev);

        if (!enter && test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
                rtw_pci_deep_ps_leave(rtwdev);

        spin_unlock_bh(&rtwpci->irq_lock);
In rtw_pci_release_rsvd_page():
        struct sk_buff *prev = skb_dequeue(&ring->queue);
        ...
        dma = tx_data->dma;
        dma_unmap_single(&rtwpci->pdev->dev, dma, prev->len, DMA_TO_DEVICE);

In rtw_pci_dma_check():
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        const struct rtw_chip_info *chip = rtwdev->chip;
        u32 desc_sz = chip->rx_buf_desc_sz;
        ...
        buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
                                                     idx * desc_sz);
        total_pkt_size = le16_to_cpu(buf_desc->total_pkt_size);

        /* a stale tag means the hardware has not finished this entry yet */
        if (total_pkt_size != rtwpci->rx_tag)
                ...

        rtwpci->rx_tag = (rtwpci->rx_tag + 1) % RX_TAG_MAX;
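The rx_tag is a rolling sequence number: the device stamps each completed RX descriptor (on this queue the tag rides in the total_pkt_size field compared above), and the driver only accepts an entry whose tag matches the next expected value, then advances the counter modulo RX_TAG_MAX. A standalone illustration of the check; the constant here is illustrative, not the rtw88 value:

        #include <stdbool.h>
        #include <stdint.h>

        #define SKETCH_RX_TAG_MAX 8192

        static bool sketch_rx_tag_ok(uint16_t hw_tag, uint16_t *expected)
        {
                if (hw_tag != *expected)
                        return false;   /* descriptor not (yet) written back */

                *expected = (*expected + 1) % SKETCH_RX_TAG_MAX;
                return true;
        }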
In __pci_flush_queue():
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        struct rtw_pci_tx_ring *ring = &rtwpci->tx_rings[pci_q];
        ...
        /* drained once the hardware read pointer catches the write pointer */
        if (cur_rp == ring->r.wp)
                ...

In rtw_pci_flush_queues():
        if (queues == BIT(rtwdev->hw->queues) - 1) {
                /* flushing all mac80211 queues: flush every PCI queue */
                pci_queues = BIT(RTK_MAX_TX_QUEUE_NUM) - 1;
        } else {
                for (i = 0; i < rtwdev->hw->queues; i++)
                        ...
        }

In rtw_pci_tx_kick_off_queue():
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        ...
        ring = &rtwpci->tx_rings[queue];
        ...
        spin_lock_bh(&rtwpci->irq_lock);
        if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE))
                rtw_pci_deep_ps_leave(rtwdev);
        rtw_write16(rtwdev, bd_idx, ring->r.wp & TRX_BD_IDX_MASK);
        spin_unlock_bh(&rtwpci->irq_lock);

In rtw_pci_tx_kick_off():
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        ...
        if (test_and_clear_bit(queue, rtwpci->tx_queued))
                rtw_pci_tx_kick_off_queue(rtwdev, queue);
In rtw_pci_tx_write_data():
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        const struct rtw_chip_info *chip = rtwdev->chip;
        ...
        u32 tx_pkt_desc_sz = chip->tx_pkt_desc_sz;
        u32 tx_buf_desc_sz = chip->tx_buf_desc_sz;
        ...
        ring = &rtwpci->tx_rings[queue];

        size = skb->len;
        ...
        else if (!avail_desc(ring->r.wp, ring->r.rp, ring->r.len))
                return -ENOSPC;

        pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
        ...
        pkt_info->qsel = rtw_pci_get_tx_qsel(skb, queue);
        ...
        dma = dma_map_single(&rtwpci->pdev->dev, skb->data, skb->len,
                             DMA_TO_DEVICE);
        if (dma_mapping_error(&rtwpci->pdev->dev, dma))
                return -EBUSY;
        ...
        /* packet size in 128-byte units, rounded up */
        psb_len = (skb->len - 1) / 128 + 1;
        ...
        tx_data->dma = dma;
        tx_data->sn = pkt_info->sn;

        spin_lock_bh(&rtwpci->irq_lock);
        ...
        skb_queue_tail(&ring->queue, skb);
        ...
        /* update write-index, and kick it off later */
        set_bit(queue, rtwpci->tx_queued);
        if (++ring->r.wp >= ring->r.len)
                ring->r.wp = 0;
        ...
        spin_unlock_bh(&rtwpci->irq_lock);

In rtw_pci_write_data_rsvd_page():
        ...
                return -ENOMEM;         /* skb allocation failed */
        ...

In rtw_pci_write_data_h2c():
        ...
                return -ENOMEM;         /* skb allocation failed */
        ...

In rtw_pci_tx_write():
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        ...
        ring = &rtwpci->tx_rings[queue];
        spin_lock_bh(&rtwpci->irq_lock);
        if (avail_desc(ring->r.wp, ring->r.rp, ring->r.len) < 2) {
                ieee80211_stop_queue(rtwdev->hw, skb_get_queue_mapping(skb));
                ring->queue_stopped = true;
        }
        spin_unlock_bh(&rtwpci->irq_lock);
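The TX path lives and dies by the avail_desc() arithmetic: with a software write pointer wp and hardware read pointer rp in a ring of len slots, one slot is conventionally kept unusable so that wp == rp can unambiguously mean "empty". The rtw88 implementation of avail_desc() is not part of this excerpt; the sketch below is a standalone illustration of what such a helper has to compute:

        #include <assert.h>
        #include <stdint.h>

        static uint32_t sketch_avail_desc(uint32_t wp, uint32_t rp, uint32_t len)
        {
                if (rp > wp)
                        return rp - wp - 1;
                return len - wp + rp - 1;
        }

        int main(void)
        {
                assert(sketch_avail_desc(0, 0, 8) == 7);  /* empty ring */
                assert(sketch_avail_desc(3, 3, 8) == 7);
                assert(sketch_avail_desc(7, 0, 8) == 0);  /* full: wp one behind rp */
                assert(sketch_avail_desc(2, 5, 8) == 2);
                return 0;
        }

rtw_pci_tx_write() stops the mac80211 queue when fewer than 2 slots remain: one for the frame being queued, plus the slot that keeps wp from ever overtaking rp.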
In rtw_pci_tx_isr():
        struct ieee80211_hw *hw = rtwdev->hw;
        ...
        ring = &rtwpci->tx_rings[hw_queue];
        ...
        /* number of descriptors the hardware consumed, with wraparound */
        rp_idx = ring->r.rp;
        if (cur_rp >= ring->r.rp)
                count = cur_rp - ring->r.rp;
        else
                count = ring->r.len - (ring->r.rp - cur_rp);

        while (count--) {
                skb = skb_dequeue(&ring->queue);
                if (!skb) {
                        rtw_err(rtwdev, "failed to dequeue %d skb TX queue %d, BD=0x%08x, rp %d -> %d\n",
                                count, hw_queue, bd_idx, ring->r.rp, cur_rp);
                        break;
                }
                ...
                dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
                                 DMA_TO_DEVICE);
                ...
                /* wake the queue once enough descriptors are free again */
                if (ring->queue_stopped &&
                    avail_desc(ring->r.wp, rp_idx, ring->r.len) > 4) {
                        ...
                        ring->queue_stopped = false;
                }

                if (++rp_idx >= ring->r.len)
                        rp_idx = 0;

                skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);
                ...
                if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
                        rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn);
                        continue;
                }
                ...
                if (info->flags & IEEE80211_TX_CTL_NO_ACK)
                        info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
                else
                        info->flags |= IEEE80211_TX_STAT_ACK;
                ...
        }

        ring->r.rp = cur_rp;
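Note the asymmetry with rtw_pci_tx_write() above: the TX path stops the mac80211 queue when fewer than 2 descriptors remain, but the completion path only wakes it once more than 4 are free. That gap forms a small hysteresis band so the queue does not flap on and off while the ring hovers near full. A standalone illustration of the two thresholds (values taken from the code above, structure hypothetical):

        #include <stdbool.h>
        #include <stdint.h>

        struct sketch_queue {
                bool stopped;
        };

        /* transmit path: stop before the ring can overflow */
        static void sketch_tx_check(struct sketch_queue *q, uint32_t avail)
        {
                if (avail < 2)
                        q->stopped = true;      /* ieee80211_stop_queue() in the driver */
        }

        /* completion path: wake only with comfortable headroom */
        static void sketch_completion_check(struct sketch_queue *q, uint32_t avail)
        {
                if (q->stopped && avail > 4)
                        q->stopped = false;     /* ieee80211_wake_queue() in the driver */
        }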
In rtw_pci_rx_isr():
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        struct napi_struct *napi = &rtwpci->napi;

In rtw_pci_get_hw_rx_ring_nr():
        ...
        ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];
        ...
        if (cur_wp >= ring->r.wp)
                count = cur_wp - ring->r.wp;
        else
                count = ring->r.len - (ring->r.wp - cur_wp);

In rtw_pci_rx_napi():
        const struct rtw_chip_info *chip = rtwdev->chip;
        struct napi_struct *napi = &rtwpci->napi;
        struct rtw_pci_rx_ring *ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];
        ...
        u32 cur_rp = ring->r.rp;
        ...
        u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
        u32 buf_desc_sz = chip->rx_buf_desc_sz;
        ...
        while (count--) {
                ...
                skb = ring->buf[cur_rp];
                dma = *((dma_addr_t *)skb->cb);
                dma_sync_single_for_cpu(rtwdev->dev, dma, RTK_PCI_RX_BUF_SIZE,
                                        DMA_FROM_DEVICE);
                rx_desc = skb->data;
                ...
                /* copy the frame into a fresh skb and hand it to mac80211 */
                skb_put_data(new, skb->data, new_len);
                ...
                memcpy(new->cb, &rx_status, sizeof(rx_status));
                ieee80211_rx_napi(rtwdev->hw, NULL, new, napi);
                ...
                /* new skb delivered to mac80211, re-enable original skb DMA */
                ...
                if (++cur_rp >= ring->r.len)
                        cur_rp = 0;
        }

        ring->r.rp = cur_rp;
        ...
        ring->r.wp = cur_rp;
        rtw_write16(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ, ring->r.rp);
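rtw_pci_rx_napi() is driven by the NAPI poll callback further down (rtw_pci_napi_poll): a poll function may consume at most `budget` frames and must call napi_complete_done() only when it finished under budget, which is also the point where the RX interrupt gets re-armed. A hedged sketch of that contract with hypothetical names:

        #include <linux/netdevice.h>

        struct sketch_pci {
                struct napi_struct napi;
        };

        /* consume up to `limit` received frames; returns how many were handled */
        static int sketch_rx(struct sketch_pci *pci, int limit);

        static int sketch_napi_poll(struct napi_struct *napi, int budget)
        {
                struct sketch_pci *pci = container_of(napi, struct sketch_pci, napi);
                int work_done = sketch_rx(pci, budget);

                if (work_done < budget) {
                        napi_complete_done(napi, work_done);
                        /* re-enable the RX interrupt mask here */
                }
                return work_done;
        }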
In rtw_pci_irq_recognized():
        spin_lock_irqsave(&rtwpci->hwirq_lock, flags);
        ...
        /* only report bits the driver actually unmasked */
        irq_status[0] &= rtwpci->irq_mask[0];
        irq_status[1] &= rtwpci->irq_mask[1];
        irq_status[3] &= rtwpci->irq_mask[3];
        ...
        spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);

In rtw_pci_interrupt_handler():
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        /* ...
         * the HISRs have been Write-1-cleared for MSI. If not all of the HISRs
         * are cleared, the edge-triggered interrupt will not be generated when
         * the next HISR flag is set.
         */

In rtw_pci_interrupt_threadfn():
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        ...
        spin_lock_bh(&rtwpci->irq_lock);
        ...
        if (rtwpci->running)
                ...     /* re-enable the interrupt mask */
        spin_unlock_bh(&rtwpci->irq_lock);
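The two handlers above form the classic threaded-IRQ split that rtw_pci_request_irq() registers below via devm_request_threaded_irq(): the hard handler masks HIMR and write-1-clears HISR (so the edge-triggered MSI can fire again later), returns IRQ_WAKE_THREAD, and the threaded half does the TX/RX work before re-enabling the mask. A minimal sketch, with the driver-state details elided:

        #include <linux/interrupt.h>

        static irqreturn_t sketch_hard_isr(int irq, void *cookie)
        {
                /* mask device interrupts and write-1-clear the status bits */
                return IRQ_WAKE_THREAD;
        }

        static irqreturn_t sketch_thread_isr(int irq, void *cookie)
        {
                /* TX completions, RX/NAPI scheduling, then unmask */
                return IRQ_HANDLED;
        }

        /* registration (flags/name here are illustrative):
         *      devm_request_threaded_irq(dev, irq, sketch_hard_isr,
         *                                sketch_thread_isr, IRQF_SHARED,
         *                                KBUILD_MODNAME, cookie);
         */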
In rtw_pci_io_mapping():
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        ...
        rtwpci->mmap = pci_iomap(pdev, bar_id, len);
        if (!rtwpci->mmap) {
                ...
                return -ENOMEM;
        }

In rtw_pci_io_unmapping():
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        if (rtwpci->mmap) {
                pci_iounmap(pdev, rtwpci->mmap);
                ...
        }

In rtw_dbi_read8():
        ...
                return -EIO;    /* polling for completion timed out */
        ...
In rtw_pci_link_ps():
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        ...
        /* ...
         * experienced some inter-operability issues that the link tends
         * ... */
        ...
        if (!(rtwpci->link_ctrl & PCI_EXP_LNKCTL_ASPM_L1))
                return;
        ...
        /* toggle ASPM only on the 1 <-> 0 usage-count transitions */
        if ((enter && atomic_dec_if_positive(&rtwpci->link_usage) == 0) ||
            (!enter && atomic_inc_return(&rtwpci->link_usage) == 1))
                ...

In rtw_pci_link_cfg():
        const struct rtw_chip_info *chip = rtwdev->chip;
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        struct pci_dev *pdev = rtwpci->pdev;
        ...
        if (chip->id == RTW_CHIP_TYPE_8822C)
                ...
        /* ...
         * follow the host settings, and another is in charge of doing
         * ...
         * settings (ex. CLKREQ# not Bi-Direction), it could lead to device
         * ... */
        ...
        rtwpci->link_ctrl = link_ctrl;
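link_ctrl caches the PCIe Link Control register so rtw_pci_link_ps() can test PCI_EXP_LNKCTL_ASPM_L1 without touching config space on every power-save transition. Reading it uses the standard PCIe capability helpers; a minimal sketch with error handling elided:

        #include <linux/pci.h>

        static u16 sketch_read_link_ctrl(struct pci_dev *pdev)
        {
                u16 link_ctrl = 0;

                /* standard accessor for the PCIe capability block */
                pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl);
                return link_ctrl;
        }

        /* usage: cache it once at setup time,
         *      rtwpci->link_ctrl = sketch_read_link_ctrl(pdev);
         * then test rtwpci->link_ctrl & PCI_EXP_LNKCTL_ASPM_L1 as above.
         */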
In rtw_pci_interface_cfg():
        const struct rtw_chip_info *chip = rtwdev->chip;

        switch (chip->id) {
        ...
                if (rtwdev->hal.cut_version >= RTW_CHIP_VER_CUT_D)
                        ...
        ...
        }

In rtw_pci_phy_cfg():
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        const struct rtw_chip_info *chip = rtwdev->chip;
        struct rtw_efuse *efuse = &rtwdev->efuse;
        struct pci_dev *pdev = rtwpci->pdev;
        ...
        /* one bit per chip cut, matched against each entry's cut_mask */
        cut = BIT(0) << rtwdev->hal.cut_version;

        for (i = 0; i < chip->intf_table->n_gen1_para; i++) {
                para = &chip->intf_table->gen1_para[i];
                if (!(para->cut_mask & cut))
                        continue;
                if (para->offset == 0xffff)
                        break;          /* end-of-table sentinel */
                offset = para->offset;
                value = para->value;
                if (para->ip_sel == RTW_IP_SEL_PHY)
                        ...
        }

        for (i = 0; i < chip->intf_table->n_gen2_para; i++) {
                para = &chip->intf_table->gen2_para[i];
                if (!(para->cut_mask & cut))
                        continue;
                if (para->offset == 0xffff)
                        break;
                offset = para->offset;
                value = para->value;
                if (para->ip_sel == RTW_IP_SEL_PHY)
                        ...
        }
        ...
        if (chip->id == RTW_CHIP_TYPE_8821C) {
                ...
        }
        ...
        if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 5)
                ...
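The PHY parameter tables are gated by chip revision ("cut"): each entry carries a bitmask of cuts it applies to, the running chip's cut is turned into a single bit (BIT(0) << cut_version), and an offset of 0xffff terminates the table. A standalone illustration of that gating with illustrative values (not rtw88's tables):

        #include <assert.h>
        #include <stdint.h>

        #define CUT_A (1u << 0)
        #define CUT_B (1u << 1)
        #define CUT_C (1u << 2)

        struct sketch_para {
                uint16_t cut_mask;      /* which cuts this entry applies to */
                uint16_t offset;        /* 0xffff terminates the table */
                uint16_t value;
        };

        int main(void)
        {
                static const struct sketch_para tbl[] = {
                        { CUT_A | CUT_B, 0x0010, 0x1234 },
                        { CUT_C,         0x0020, 0x5678 },
                        { 0x0000,        0xffff, 0x0000 },      /* sentinel */
                };
                uint16_t cut = 1u << 1; /* running on cut B */
                int applied = 0;

                for (int i = 0; tbl[i].offset != 0xffff; i++) {
                        if (!(tbl[i].cut_mask & cut))
                                continue;
                        applied++;      /* here the driver writes offset/value */
                }
                assert(applied == 1);
                return 0;
        }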
In rtw_pci_suspend():
        struct rtw_dev *rtwdev = hw->priv;
        const struct rtw_chip_info *chip = rtwdev->chip;
        struct rtw_efuse *efuse = &rtwdev->efuse;

        if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 6)
                ...

In rtw_pci_resume():
        struct rtw_dev *rtwdev = hw->priv;
        const struct rtw_chip_info *chip = rtwdev->chip;
        struct rtw_efuse *efuse = &rtwdev->efuse;

        if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 6)
                ...

In rtw_pci_claim():
        ...
        pci_set_drvdata(pdev, rtwdev->hw);
        SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);

In rtw_pci_setup_resource():
        ...
        rtwpci = (struct rtw_pci *)rtwdev->priv;
        rtwpci->pdev = pdev;
In rtw_pci_request_irq():
        ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq,
                                        rtw_pci_interrupt_handler,
                                        rtw_pci_interrupt_threadfn,
                                        ...);

In rtw_pci_free_irq():
        devm_free_irq(rtwdev->dev, pdev->irq, rtwdev);

In rtw_pci_napi_poll():
        if (rtwpci->rx_no_aspm)
                ...     /* hold the link out of ASPM while polling, see probe */
        ...
                        budget - work_done);
        ...
        spin_lock_bh(&rtwpci->irq_lock);
        if (rtwpci->running)
                ...
        spin_unlock_bh(&rtwpci->irq_lock);
        ...
        if (rtwpci->rx_no_aspm)
                ...

In rtw_pci_napi_init():
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        rtwpci->netdev = alloc_netdev_dummy(0);
        if (!rtwpci->netdev)
                return -ENOMEM;

        netif_napi_add(rtwpci->netdev, &rtwpci->napi, rtw_pci_napi_poll);

In rtw_pci_napi_deinit():
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        ...
        netif_napi_del(&rtwpci->napi);
        free_netdev(rtwpci->netdev);
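NAPI instances must hang off a struct net_device, but a mac80211 driver like this one has no netdev of its own, hence the dummy device. A compressed sketch of the init/teardown pair above (names hypothetical; alloc_netdev_dummy() is the same helper the driver uses):

        #include <linux/netdevice.h>

        struct sketch_napi_host {
                struct net_device *netdev;
                struct napi_struct napi;
        };

        static int sketch_poll(struct napi_struct *napi, int budget);

        static int sketch_napi_init(struct sketch_napi_host *host)
        {
                host->netdev = alloc_netdev_dummy(0);
                if (!host->netdev)
                        return -ENOMEM;

                netif_napi_add(host->netdev, &host->napi, sketch_poll);
                return 0;
        }

        static void sketch_napi_deinit(struct sketch_napi_host *host)
        {
                netif_napi_del(&host->napi);
                free_netdev(host->netdev);
        }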
In rtw_pci_probe():
        ...
        if (!hw) {
                dev_err(&pdev->dev, "failed to allocate hw\n");
                return -ENOMEM;
        }

        rtwdev = hw->priv;
        rtwdev->hw = hw;
        rtwdev->dev = &pdev->dev;
        rtwdev->chip = (struct rtw_chip_info *)id->driver_data;
        rtwdev->hci.ops = &rtw_pci_ops;
        rtwdev->hci.type = RTW_HCI_TYPE_PCIE;

        rtwpci = (struct rtw_pci *)rtwdev->priv;
        atomic_set(&rtwpci->link_usage, 1);
        ...
                pdev->vendor, pdev->device, pdev->revision);
        ...
        /* 8821C behind Intel bridges keeps ASPM off during RX
         * (see rtw_pci_napi_poll above)
         */
        if (rtwdev->chip->id == RTW_CHIP_TYPE_8821C && bridge->vendor == PCI_VENDOR_ID_INTEL)
                rtwpci->rx_no_aspm = true;

In rtw_pci_remove():
        ...
        rtwdev = hw->priv;
        rtwpci = (struct rtw_pci *)rtwdev->priv;
        ...

In rtw_pci_shutdown():
        ...
        rtwdev = hw->priv;
        chip = rtwdev->chip;

        if (chip->ops->shutdown)
                chip->ops->shutdown(rtwdev);