
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright(c) 2018-2019 Realtek Corporation
54 return skb->priority;
60 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
63 return readb(rtwpci->mmap + addr);
67 val = bus_read_1((struct resource *)rtwpci->mmap, addr);
68 rtw_dbg(rtwdev, RTW_DBG_IO_RW, "R08 (%#010x) -> %#04x\n", addr, val);
75 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
78 return readw(rtwpci->mmap + addr);
82 val = bus_read_2((struct resource *)rtwpci->mmap, addr);
83 rtw_dbg(rtwdev, RTW_DBG_IO_RW, "R16 (%#010x) -> %#06x\n", addr, val);
90 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
93 return readl(rtwpci->mmap + addr);
97 val = bus_read_4((struct resource *)rtwpci->mmap, addr);
98 rtw_dbg(rtwdev, RTW_DBG_IO_RW, "R32 (%#010x) -> %#010x\n", addr, val);
105 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
108 writeb(val, rtwpci->mmap + addr);
110 rtw_dbg(rtwdev, RTW_DBG_IO_RW, "W08 (%#010x) <- %#04x\n", addr, val);
111 return (bus_write_1((struct resource *)rtwpci->mmap, addr, val));
117 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
120 writew(val, rtwpci->mmap + addr);
122 rtw_dbg(rtwdev, RTW_DBG_IO_RW, "W16 (%#010x) <- %#06x\n", addr, val);
123 return (bus_write_2((struct resource *)rtwpci->mmap, addr, val));
129 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
132 writel(val, rtwpci->mmap + addr);
134 rtw_dbg(rtwdev, RTW_DBG_IO_RW, "W32 (%#010x) <- %#010x\n", addr, val);
135 return (bus_write_4((struct resource *)rtwpci->mmap, addr, val));
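/*
 * The register accessors above exist in two flavors, and in this copy of the
 * driver both are present (selected by OS #ifdefs that this listing does not
 * show): the Linux MMIO helpers readb/readw/readl and writeb/writew/writel
 * operate on the ioremapped BAR at rtwpci->mmap, while the FreeBSD bus-space
 * helpers bus_read_*/bus_write_* treat the same field as a struct resource
 * and additionally trace every access through rtw_dbg(RTW_DBG_IO_RW).
 */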
142 struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
148 skb_queue_walk_safe(&tx_ring->queue, skb, tmp) {
149 __skb_unlink(skb, &tx_ring->queue);
151 dma = tx_data->dma;
153 dma_unmap_single(&pdev->dev, dma, skb->len, DMA_TO_DEVICE);
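/*
 * Each skb queued on a TX ring carries its streaming DMA address in skb->cb
 * (struct rtw_pci_tx_data), so draining a ring means walking the queue,
 * unlinking every skb and dma_unmap_single()ing it before the buffer is
 * released.
 */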
161 struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
162 u8 *head = tx_ring->r.head;
163 u32 len = tx_ring->r.len;
164 int ring_sz = len * tx_ring->r.desc_size;
169 dma_free_coherent(&pdev->dev, ring_sz, head, tx_ring->r.dma);
170 tx_ring->r.head = NULL;
176 struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
182 for (i = 0; i < rx_ring->r.len; i++) {
183 skb = rx_ring->buf[i];
187 dma = *((dma_addr_t *)skb->cb);
188 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
190 rx_ring->buf[i] = NULL;
197 struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
198 u8 *head = rx_ring->r.head;
199 int ring_sz = rx_ring->r.desc_size * rx_ring->r.len;
203 dma_free_coherent(&pdev->dev, ring_sz, head, rx_ring->r.dma);
208 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
214 tx_ring = &rtwpci->tx_rings[i];
219 rx_ring = &rtwpci->rx_rings[i];
228 struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
235 return -EINVAL;
238 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
241 return -ENOMEM;
244 skb_queue_head_init(&tx_ring->queue);
245 tx_ring->r.head = head;
246 tx_ring->r.dma = dma;
247 tx_ring->r.len = len;
248 tx_ring->r.desc_size = desc_size;
249 tx_ring->r.wp = 0;
250 tx_ring->r.rp = 0;
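/*
 * A TX ring is one coherent DMA block of len * desc_size bytes obtained with
 * dma_alloc_coherent(); the CPU-visible head pointer, the bus address, the
 * descriptor geometry and the software write/read indices (wp/rp, both
 * starting at 0) are recorded in tx_ring->r so the ring can later be
 * programmed into the hardware buffer-descriptor registers.
 */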
259 struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
265 return -EINVAL;
267 dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE);
268 if (dma_mapping_error(&pdev->dev, dma))
269 return -EBUSY;
271 *((dma_addr_t *)skb->cb) = dma;
272 buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
275 buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
276 buf_desc->dma = cpu_to_le32(dma);
285 struct device *dev = rtwdev->dev;
291 buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
294 buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
295 buf_desc->dma = cpu_to_le32(dma);
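/*
 * Each RX buffer descriptor hands one pre-mapped skb to the device: the
 * skb's streaming DMA address is remembered in skb->cb, and the descriptor
 * is filled with the fixed buffer size (RTK_PCI_RX_BUF_SIZE) and the 32-bit
 * DMA address so the hardware can DMA a received frame straight into the
 * buffer.
 */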
302 struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
311 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
314 return -ENOMEM;
316 rx_ring->r.head = head;
322 ret = -ENOMEM;
326 memset(skb->data, 0, buf_sz);
327 rx_ring->buf[i] = skb;
336 rx_ring->r.dma = dma;
337 rx_ring->r.len = len;
338 rx_ring->r.desc_size = desc_size;
339 rx_ring->r.wp = 0;
340 rx_ring->r.rp = 0;
346 skb = rx_ring->buf[i];
349 dma = *((dma_addr_t *)skb->cb);
350 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
352 rx_ring->buf[i] = NULL;
354 dma_free_coherent(&pdev->dev, ring_sz, head, dma);
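/*
 * If allocating or mapping any of the RX skbs fails, the ring is torn down
 * in reverse: every buffer mapped so far is unmapped and its slot cleared,
 * then the coherent descriptor block itself is returned with
 * dma_free_coherent().
 */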
363 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
366 const struct rtw_chip_info *chip = rtwdev->chip;
372 tx_desc_size = chip->tx_buf_desc_sz;
375 tx_ring = &rtwpci->tx_rings[i];
382 rx_desc_size = chip->rx_buf_desc_sz;
385 rx_ring = &rtwpci->rx_rings[j];
397 tx_ring = &rtwpci->tx_rings[i];
403 rx_ring = &rtwpci->rx_rings[j];
417 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
420 rtwpci->irq_mask[0] = IMR_HIGHDOK |
430 rtwpci->irq_mask[1] = IMR_TXFOVW |
432 rtwpci->irq_mask[3] = IMR_H2CDOK |
434 spin_lock_init(&rtwpci->irq_lock);
435 spin_lock_init(&rtwpci->hwirq_lock);
443 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
451 dma = rtwpci->tx_rings[RTW_TX_QUEUE_BCN].r.dma;
455 len = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.len;
456 dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma;
457 rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0;
458 rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.wp = 0;
463 len = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.len;
464 dma = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.dma;
465 rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.rp = 0;
466 rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.wp = 0;
470 len = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.len;
471 dma = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.dma;
472 rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.rp = 0;
473 rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.wp = 0;
477 len = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.len;
478 dma = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.dma;
479 rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.rp = 0;
480 rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.wp = 0;
484 len = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.len;
485 dma = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.dma;
486 rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.rp = 0;
487 rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.wp = 0;
491 len = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.len;
492 dma = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.dma;
493 rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.rp = 0;
494 rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.wp = 0;
498 len = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.len;
499 dma = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.dma;
500 rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.rp = 0;
501 rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.wp = 0;
505 len = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.len;
506 dma = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.dma;
507 rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.rp = 0;
508 rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.wp = 0;
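/*
 * Resetting the buffer descriptors programs each ring's length and base bus
 * address into the per-queue registers (beacon, H2C, the four ACs BK/BE/VO/VI,
 * management, HI0 and the MPDU RX queue) and zeroes the software read/write
 * indices, so driver and hardware start out agreeing on empty rings.
 */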
532 spin_lock_irqsave(&rtwpci->hwirq_lock, flags);
534 rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0] & ~imr0_unmask);
535 rtw_write32(rtwdev, RTK_PCI_HIMR1, rtwpci->irq_mask[1]);
537 rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]);
539 rtwpci->irq_enabled = true;
541 spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
549 spin_lock_irqsave(&rtwpci->hwirq_lock, flags);
551 if (!rtwpci->irq_enabled)
559 rtwpci->irq_enabled = false;
562 spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
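/*
 * Interrupt masking is serialized by hwirq_lock: the enable path writes the
 * cached irq_mask[] words into the HIMR0/1/3 registers (less any bits the
 * caller asked to keep masked in HIMR0) and sets irq_enabled; the disable
 * path is a no-op if irq_enabled is already clear and otherwise masks the
 * hardware and drops the flag.
 */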
570 rtwpci->rx_tag = 0;
575 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
590 tx_ring = &rtwpci->tx_rings[queue];
597 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
599 if (test_and_set_bit(RTW_PCI_FLAG_NAPI_RUNNING, rtwpci->flags))
602 napi_enable(&rtwpci->napi);
607 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
609 if (!test_and_clear_bit(RTW_PCI_FLAG_NAPI_RUNNING, rtwpci->flags))
612 napi_synchronize(&rtwpci->napi);
613 napi_disable(&rtwpci->napi);
618 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
622 spin_lock_bh(&rtwpci->irq_lock);
623 rtwpci->running = true;
625 spin_unlock_bh(&rtwpci->irq_lock);
632 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
633 struct pci_dev *pdev = rtwpci->pdev;
635 spin_lock_bh(&rtwpci->irq_lock);
636 rtwpci->running = false;
638 spin_unlock_bh(&rtwpci->irq_lock);
640 synchronize_irq(pdev->irq);
643 spin_lock_bh(&rtwpci->irq_lock);
645 spin_unlock_bh(&rtwpci->irq_lock);
650 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
655 if (rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE))
658 lockdep_assert_held(&rtwpci->irq_lock);
660 /* Deep PS state is not allowed to TX-DMA */
669 tx_ring = &rtwpci->tx_rings[queue];
672 if (skb_queue_len(&tx_ring->queue)) {
684 set_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags);
691 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
693 lockdep_assert_held(&rtwpci->irq_lock);
695 lockdep_assert_held(&((struct rtw_pci *)rtwdev->priv)->irq_lock);
698 if (test_and_clear_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
704 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
706 spin_lock_bh(&rtwpci->irq_lock);
708 if (enter && !test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
711 if (!enter && test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
714 spin_unlock_bh(&rtwpci->irq_lock);
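/*
 * Deep power save is only entered when no TX ring still has queued frames
 * (TX DMA must be idle first), after which RTW_FLAG_LEISURE_PS_DEEP is set;
 * leaving deep PS clears the flag again. Both transitions are serialized by
 * rtwpci->irq_lock, which rtw_pci_deep_ps() takes around the enter/leave
 * calls.
 */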
720 struct sk_buff *prev = skb_dequeue(&ring->queue);
728 dma = tx_data->dma;
729 dma_unmap_single(&rtwpci->pdev->dev, dma, prev->len, DMA_TO_DEVICE);
737 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
738 const struct rtw_chip_info *chip = rtwdev->chip;
740 u32 desc_sz = chip->rx_buf_desc_sz;
743 buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
745 total_pkt_size = le16_to_cpu(buf_desc->total_pkt_size);
748 if (total_pkt_size != rtwpci->rx_tag)
751 rtwpci->rx_tag = (rtwpci->rx_tag + 1) % RX_TAG_MAX;
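/*
 * The hardware places an incrementing tag in total_pkt_size of each completed
 * RX buffer descriptor; rtw_pci_dma_check() compares it against the driver's
 * running rx_tag counter (advanced modulo RX_TAG_MAX), and a mismatch means
 * the DMA engine has fallen out of step with the driver.
 */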
764 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
765 struct rtw_pci_tx_ring *ring = &rtwpci->tx_rings[pci_q];
776 if (cur_rp == ring->r.wp)
811 if (queues == BIT(rtwdev->hw->queues) - 1) {
812 pci_queues = BIT(RTK_MAX_TX_QUEUE_NUM) - 1;
814 for (i = 0; i < rtwdev->hw->queues; i++)
825 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
829 ring = &rtwpci->tx_rings[queue];
832 spin_lock_bh(&rtwpci->irq_lock);
833 if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE))
835 rtw_write16(rtwdev, bd_idx, ring->r.wp & TRX_BD_IDX_MASK);
836 spin_unlock_bh(&rtwpci->irq_lock);
841 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
845 if (test_and_clear_bit(queue, rtwpci->tx_queued))
854 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
855 const struct rtw_chip_info *chip = rtwdev->chip;
859 u32 tx_pkt_desc_sz = chip->tx_pkt_desc_sz;
860 u32 tx_buf_desc_sz = chip->tx_buf_desc_sz;
866 ring = &rtwpci->tx_rings[queue];
868 size = skb->len;
872 else if (!avail_desc(ring->r.wp, ring->r.rp, ring->r.len))
873 return -ENOSPC;
875 pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
877 pkt_info->qsel = rtw_pci_get_tx_qsel(skb, queue);
879 dma = dma_map_single(&rtwpci->pdev->dev, skb->data, skb->len,
881 if (dma_mapping_error(&rtwpci->pdev->dev, dma))
882 return -EBUSY;
887 psb_len = (skb->len - 1) / 128 + 1;
898 tx_data->dma = dma;
899 tx_data->sn = pkt_info->sn;
901 spin_lock_bh(&rtwpci->irq_lock);
903 skb_queue_tail(&ring->queue, skb);
908 /* update write-index, and kick it off later */
909 set_bit(queue, rtwpci->tx_queued);
910 if (++ring->r.wp >= ring->r.len)
911 ring->r.wp = 0;
914 spin_unlock_bh(&rtwpci->irq_lock);
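/*
 * Queueing a frame reserves one buffer descriptor: the TX packet descriptor
 * is pushed in front of the payload, the whole skb is streaming-mapped, the
 * descriptor is filled (including psb_len, the payload size in 128-byte
 * blocks), and the software write pointer is advanced modulo the ring length
 * under irq_lock. avail_desc() itself is not part of this listing; a minimal
 * sketch of the circular-ring arithmetic it is used for, assuming one slot is
 * always left empty so that wp == rp means "ring empty":
 */
static u32 avail_desc_sketch(u32 wp, u32 rp, u32 len)
{
	if (rp > wp)
		return rp - wp - 1;	/* free slots between wp and rp */
	return len - wp + rp - 1;	/* free slots wrapping past the end */
}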
929 return -ENOMEM;
956 return -ENOMEM;
977 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
985 ring = &rtwpci->tx_rings[queue];
986 spin_lock_bh(&rtwpci->irq_lock);
987 if (avail_desc(ring->r.wp, ring->r.rp, ring->r.len) < 2) {
988 ieee80211_stop_queue(rtwdev->hw, skb_get_queue_mapping(skb));
989 ring->queue_stopped = true;
991 spin_unlock_bh(&rtwpci->irq_lock);
999 struct ieee80211_hw *hw = rtwdev->hw;
1009 ring = &rtwpci->tx_rings[hw_queue];
1015 rp_idx = ring->r.rp;
1016 if (cur_rp >= ring->r.rp)
1017 count = cur_rp - ring->r.rp;
1019 count = ring->r.len - (ring->r.rp - cur_rp);
1021 while (count--) {
1022 skb = skb_dequeue(&ring->queue);
1024 rtw_err(rtwdev, "failed to dequeue %d skb TX queue %d, BD=0x%08x, rp %d -> %d\n",
1025 count, hw_queue, bd_idx, ring->r.rp, cur_rp);
1029 dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
1038 if (ring->queue_stopped &&
1039 avail_desc(ring->r.wp, rp_idx, ring->r.len) > 4) {
1042 ring->queue_stopped = false;
1045 if (++rp_idx >= ring->r.len)
1048 skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);
1053 if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
1054 rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn);
1059 if (info->flags & IEEE80211_TX_CTL_NO_ACK)
1060 info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
1062 info->flags |= IEEE80211_TX_STAT_ACK;
1068 ring->r.rp = cur_rp;
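/*
 * TX completion walks the ring from the software read pointer up to the
 * hardware read pointer taken from the queue's BD index register: each
 * completed skb is dequeued, unmapped and stripped of its packet descriptor,
 * its ACK / no-ACK status is reported to mac80211, and a queue that was
 * stopped for lack of descriptors is woken again once more than four slots
 * are free. The software rp then catches up to the hardware position.
 */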
1073 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1074 struct napi_struct *napi = &rtwpci->napi;
1086 ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];
1089 if (cur_wp >= ring->r.wp)
1090 count = cur_wp - ring->r.wp;
1092 count = ring->r.len - (ring->r.wp - cur_wp);
1100 const struct rtw_chip_info *chip = rtwdev->chip;
1101 struct napi_struct *napi = &rtwpci->napi;
1102 struct rtw_pci_rx_ring *ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];
1106 u32 cur_rp = ring->r.rp;
1109 u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
1110 u32 buf_desc_sz = chip->rx_buf_desc_sz;
1118 while (count--) {
1120 skb = ring->buf[cur_rp];
1121 dma = *((dma_addr_t *)skb->cb);
1122 dma_sync_single_for_cpu(rtwdev->dev, dma, RTK_PCI_RX_BUF_SIZE,
1124 rx_desc = skb->data;
1125 chip->ops->query_rx_desc(rtwdev, rx_desc, &pkt_stat, &rx_status);
1140 skb_put_data(new, skb->data, new_len);
1149 memcpy(new->cb, &rx_status, sizeof(rx_status));
1150 ieee80211_rx_napi(rtwdev->hw, NULL, new, napi);
1155 /* new skb delivered to mac80211, re-enable original skb DMA */
1160 if (++cur_rp >= ring->r.len)
1164 ring->r.rp = cur_rp;
1168 ring->r.wp = cur_rp;
1169 rtw_write16(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ, ring->r.rp);
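/*
 * RX completion mirrors the TX path: the number of filled descriptors is the
 * distance between the hardware write pointer and the software one, and for
 * each of them the buffer is synced back to the CPU, the RX descriptor is
 * parsed via chip->ops->query_rx_desc(), the frame is copied into a freshly
 * allocated skb handed to ieee80211_rx_napi(), and the original DMA buffer is
 * re-armed for the device. The read/write indices then follow the consumed
 * count and the new rp is written back to RTK_PCI_RXBD_IDX_MPDUQ.
 */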
1179 spin_lock_irqsave(&rtwpci->hwirq_lock, flags);
1187 irq_status[0] &= rtwpci->irq_mask[0];
1188 irq_status[1] &= rtwpci->irq_mask[1];
1189 irq_status[3] &= rtwpci->irq_mask[3];
1195 spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
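/*
 * rtw_pci_irq_recognized() snapshots the interrupt status registers under
 * hwirq_lock and ANDs them with the software irq_mask[] words, so the handler
 * only ever acts on interrupt sources the driver actually enabled.
 */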
1201 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1207 * the HISRs have been Write-1-cleared for MSI. If not all of the HISRs
1208 * are cleared, the edge-triggered interrupt will not be generated when
1219 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1223 spin_lock_bh(&rtwpci->irq_lock);
1248 if (rtwpci->running)
1250 spin_unlock_bh(&rtwpci->irq_lock);
1258 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1270 ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
1272 rtw_err(rtwdev, "failed to set dma mask to 32-bit\n");
1276 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
1278 rtw_err(rtwdev, "failed to set consistent dma mask to 32-bit\n");
1287 rtwpci->mmap = pci_iomap(pdev, bar_id, len);
1288 if (!rtwpci->mmap) {
1291 return -ENOMEM;
1305 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1307 if (rtwpci->mmap) {
1308 pci_iounmap(pdev, rtwpci->mmap);
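/*
 * I/O setup restricts both the streaming and the coherent DMA mask to 32 bits
 * (the buffer descriptors only carry a 32-bit DMA address, see the
 * cpu_to_le32() stores above) and pci_iomap()s the register BAR into
 * rtwpci->mmap, the pointer used by the accessors at the top of the file;
 * teardown unmaps it again with pci_iounmap().
 */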
1358 return -EIO;
1452 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1459 * experienced some inter-operability issues that the link tends
1464 if (!(rtwpci->link_ctrl & PCI_EXP_LNKCTL_ASPM_L1))
1467 if ((enter && atomic_dec_if_positive(&rtwpci->link_usage) == 0) ||
1468 (!enter && atomic_inc_return(&rtwpci->link_usage) == 1))
1474 const struct rtw_chip_info *chip = rtwdev->chip;
1475 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1476 struct pci_dev *pdev = rtwpci->pdev;
1483 if (chip->id == RTW_CHIP_TYPE_8822C)
1492 * follow the host settings, and another is in charge of doing
1495 * settings (ex. CLKREQ# not Bi-Direction), it could lead to device
1511 rtwpci->link_ctrl = link_ctrl;
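/*
 * The Link Control value saved here (rtwpci->link_ctrl) gates runtime ASPM
 * toggling: L1 entry/exit is only requested when ASPM L1 was enabled by the
 * host at probe time, and the link_usage refcount lets the RX path keep the
 * link out of power save while it is busy on hosts where L1 causes
 * interoperability problems.
 */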
1516 const struct rtw_chip_info *chip = rtwdev->chip;
1518 switch (chip->id) {
1520 if (rtwdev->hal.cut_version >= RTW_CHIP_VER_CUT_D)
1531 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1532 const struct rtw_chip_info *chip = rtwdev->chip;
1533 struct rtw_efuse *efuse = &rtwdev->efuse;
1534 struct pci_dev *pdev = rtwpci->pdev;
1542 cut = BIT(0) << rtwdev->hal.cut_version;
1544 for (i = 0; i < chip->intf_table->n_gen1_para; i++) {
1545 para = &chip->intf_table->gen1_para[i];
1546 if (!(para->cut_mask & cut))
1548 if (para->offset == 0xffff)
1550 offset = para->offset;
1551 value = para->value;
1552 if (para->ip_sel == RTW_IP_SEL_PHY)
1558 for (i = 0; i < chip->intf_table->n_gen2_para; i++) {
1559 para = &chip->intf_table->gen2_para[i];
1560 if (!(para->cut_mask & cut))
1562 if (para->offset == 0xffff)
1564 offset = para->offset;
1565 value = para->value;
1566 if (para->ip_sel == RTW_IP_SEL_PHY)
1575 if (chip->id == RTW_CHIP_TYPE_8821C) {
1583 if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 5)
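/*
 * PCIe PHY parameter programming walks the chip's gen1 and gen2 parameter
 * tables, skips entries whose cut_mask does not include the current chip cut
 * (cut = BIT(0) << cut_version), stops at the 0xffff end-of-table marker, and
 * writes RTW_IP_SEL_PHY entries over MDIO and the remaining ones over the DBI
 * interface; a few chip- and RFE-option-specific quirks are applied
 * afterwards.
 */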
1590 struct rtw_dev *rtwdev = hw->priv;
1591 const struct rtw_chip_info *chip = rtwdev->chip;
1592 struct rtw_efuse *efuse = &rtwdev->efuse;
1594 if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 6)
1602 struct rtw_dev *rtwdev = hw->priv;
1603 const struct rtw_chip_info *chip = rtwdev->chip;
1604 struct rtw_efuse *efuse = &rtwdev->efuse;
1606 if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 6)
1625 pci_set_drvdata(pdev, rtwdev->hw);
1626 SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);
1641 rtwpci = (struct rtw_pci *)rtwdev->priv;
1642 rtwpci->pdev = pdev;
1707 ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq,
1721 devm_free_irq(rtwdev->dev, pdev->irq, rtwdev);
1732 if (rtwpci->rx_no_aspm)
1739 budget - work_done);
1746 spin_lock_bh(&rtwpci->irq_lock);
1747 if (rtwpci->running)
1749 spin_unlock_bh(&rtwpci->irq_lock);
1758 if (rtwpci->rx_no_aspm)
1766 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1768 rtwpci->netdev = alloc_netdev_dummy(0);
1769 if (!rtwpci->netdev)
1770 return -ENOMEM;
1772 netif_napi_add(rtwpci->netdev, &rtwpci->napi, rtw_pci_napi_poll);
1778 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1781 netif_napi_del(&rtwpci->napi);
1782 free_netdev(rtwpci->netdev);
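/*
 * NAPI polling drains the MPDU RX ring up to the remaining budget and, once
 * the poll completes, re-enables the PCI interrupt under irq_lock only while
 * the device is running; the NAPI context itself hangs off a dummy netdev
 * allocated with alloc_netdev_dummy() and freed again on deinit.
 */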
1798 dev_err(&pdev->dev, "failed to allocate hw\n");
1799 return -ENOMEM;
1802 rtwdev = hw->priv;
1803 rtwdev->hw = hw;
1804 rtwdev->dev = &pdev->dev;
1805 rtwdev->chip = (struct rtw_chip_info *)id->driver_data;
1806 rtwdev->hci.ops = &rtw_pci_ops;
1807 rtwdev->hci.type = RTW_HCI_TYPE_PCIE;
1809 rtwpci = (struct rtw_pci *)rtwdev->priv;
1810 atomic_set(&rtwpci->link_usage, 1);
1818 pdev->vendor, pdev->device, pdev->revision);
1845 if (rtwdev->chip->id == RTW_CHIP_TYPE_8821C && bridge->vendor == PCI_VENDOR_ID_INTEL)
1846 rtwpci->rx_no_aspm = true;
1890 rtwdev = hw->priv;
1891 rtwpci = (struct rtw_pci *)rtwdev->priv;
1913 rtwdev = hw->priv;
1914 chip = rtwdev->chip;
1916 if (chip->ops->shutdown)
1917 chip->ops->shutdown(rtwdev);