// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019  Realtek Corporation
 */

#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "main.h"
#include "pci.h"
#include "reg.h"
#include "tx.h"
#include "rx.h"
#include "fw.h"
#include "ps.h"
#include "debug.h"
#include "mac.h"

static bool rtw_disable_msi;
static bool rtw_pci_disable_aspm;
module_param_named(disable_msi, rtw_disable_msi, bool, 0644);
module_param_named(disable_aspm, rtw_pci_disable_aspm, bool, 0644);
MODULE_PARM_DESC(disable_msi, "Set Y to disable MSI interrupt support");
MODULE_PARM_DESC(disable_aspm, "Set Y to disable PCI ASPM support");
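
/*
 * Illustrative usage (not part of the driver): both parameters can be set
 * at load time, e.g. "modprobe rtw88_pci disable_msi=Y", or on the kernel
 * command line as "rtw88_pci.disable_aspm=Y" (module name assumed from the
 * build configuration).
 */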

static const u32 rtw_pci_tx_queue_idx_addr[] = {
	[RTW_TX_QUEUE_BK]	= RTK_PCI_TXBD_IDX_BKQ,
	[RTW_TX_QUEUE_BE]	= RTK_PCI_TXBD_IDX_BEQ,
	[RTW_TX_QUEUE_VI]	= RTK_PCI_TXBD_IDX_VIQ,
	[RTW_TX_QUEUE_VO]	= RTK_PCI_TXBD_IDX_VOQ,
	[RTW_TX_QUEUE_MGMT]	= RTK_PCI_TXBD_IDX_MGMTQ,
	[RTW_TX_QUEUE_HI0]	= RTK_PCI_TXBD_IDX_HI0Q,
	[RTW_TX_QUEUE_H2C]	= RTK_PCI_TXBD_IDX_H2CQ,
};

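/*
 * Map a TX queue to the hardware queue-selection (qsel) value carried in
 * the TX descriptor. Dedicated queues (beacon, H2C, management, high) use
 * fixed qsel codes; ordinary data queues reuse skb->priority, which for
 * QoS data is the TID (e.g. a voice frame with priority 6 keeps qsel 6).
 */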
static u8 rtw_pci_get_tx_qsel(struct sk_buff *skb,
			      enum rtw_tx_queue_type queue)
{
	switch (queue) {
	case RTW_TX_QUEUE_BCN:
		return TX_DESC_QSEL_BEACON;
	case RTW_TX_QUEUE_H2C:
		return TX_DESC_QSEL_H2C;
	case RTW_TX_QUEUE_MGMT:
		return TX_DESC_QSEL_MGMT;
	case RTW_TX_QUEUE_HI0:
		return TX_DESC_QSEL_HIGH;
	default:
		return skb->priority;
	}
}

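/* Thin MMIO accessors over the BAR mapped in rtw_pci_io_mapping() below */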
static u8 rtw_pci_read8(struct rtw_dev *rtwdev, u32 addr)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	return readb(rtwpci->mmap + addr);
}

static u16 rtw_pci_read16(struct rtw_dev *rtwdev, u32 addr)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	return readw(rtwpci->mmap + addr);
}

static u32 rtw_pci_read32(struct rtw_dev *rtwdev, u32 addr)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	return readl(rtwpci->mmap + addr);
}

static void rtw_pci_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	writeb(val, rtwpci->mmap + addr);
}

static void rtw_pci_write16(struct rtw_dev *rtwdev, u32 addr, u16 val)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	writew(val, rtwpci->mmap + addr);
}

static void rtw_pci_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	writel(val, rtwpci->mmap + addr);
}

static void rtw_pci_free_tx_ring_skbs(struct rtw_dev *rtwdev,
				      struct rtw_pci_tx_ring *tx_ring)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct rtw_pci_tx_data *tx_data;
	struct sk_buff *skb, *tmp;
	dma_addr_t dma;
	/* free every skb remaining in the tx list */
	skb_queue_walk_safe(&tx_ring->queue, skb, tmp) {
		__skb_unlink(skb, &tx_ring->queue);
		tx_data = rtw_pci_get_tx_data(skb);
		dma = tx_data->dma;

		dma_unmap_single(&pdev->dev, dma, skb->len, DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

static void rtw_pci_free_tx_ring(struct rtw_dev *rtwdev,
				 struct rtw_pci_tx_ring *tx_ring)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	u8 *head = tx_ring->r.head;
	u32 len = tx_ring->r.len;
	int ring_sz = len * tx_ring->r.desc_size;

	rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);

	/* free the ring itself */
	dma_free_coherent(&pdev->dev, ring_sz, head, tx_ring->r.dma);
	tx_ring->r.head = NULL;
}

static void rtw_pci_free_rx_ring_skbs(struct rtw_dev *rtwdev,
				      struct rtw_pci_rx_ring *rx_ring)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct sk_buff *skb;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;
	dma_addr_t dma;
	int i;

	for (i = 0; i < rx_ring->r.len; i++) {
		skb = rx_ring->buf[i];
		if (!skb)
			continue;

		dma = *((dma_addr_t *)skb->cb);
		dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
		dev_kfree_skb(skb);
		rx_ring->buf[i] = NULL;
	}
}

static void rtw_pci_free_rx_ring(struct rtw_dev *rtwdev,
				 struct rtw_pci_rx_ring *rx_ring)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	u8 *head = rx_ring->r.head;
	int ring_sz = rx_ring->r.desc_size * rx_ring->r.len;

	rtw_pci_free_rx_ring_skbs(rtwdev, rx_ring);

	dma_free_coherent(&pdev->dev, ring_sz, head, rx_ring->r.dma);
}

static void rtw_pci_free_trx_ring(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *tx_ring;
	struct rtw_pci_rx_ring *rx_ring;
	int i;

	for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		rtw_pci_free_tx_ring(rtwdev, tx_ring);
	}

	for (i = 0; i < RTK_MAX_RX_QUEUE_NUM; i++) {
		rx_ring = &rtwpci->rx_rings[i];
		rtw_pci_free_rx_ring(rtwdev, rx_ring);
	}
}

static int rtw_pci_init_tx_ring(struct rtw_dev *rtwdev,
				struct rtw_pci_tx_ring *tx_ring,
				u8 desc_size, u32 len)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	int ring_sz = desc_size * len;
	dma_addr_t dma;
	u8 *head;

	if (len > TRX_BD_IDX_MASK) {
		rtw_err(rtwdev, "len %d exceeds maximum TX entries\n", len);
		return -EINVAL;
	}

	head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
	if (!head) {
		rtw_err(rtwdev, "failed to allocate tx ring\n");
		return -ENOMEM;
	}

	skb_queue_head_init(&tx_ring->queue);
	tx_ring->r.head = head;
	tx_ring->r.dma = dma;
	tx_ring->r.len = len;
	tx_ring->r.desc_size = desc_size;
	tx_ring->r.wp = 0;
	tx_ring->r.rp = 0;

	return 0;
}

static int rtw_pci_reset_rx_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
				 struct rtw_pci_rx_ring *rx_ring,
				 u32 idx, u32 desc_sz)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct rtw_pci_rx_buffer_desc *buf_desc;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;
	dma_addr_t dma;

	if (!skb)
		return -EINVAL;

	dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma))
		return -EBUSY;

	*((dma_addr_t *)skb->cb) = dma;
	buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
						     idx * desc_sz);
	memset(buf_desc, 0, sizeof(*buf_desc));
	buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
	buf_desc->dma = cpu_to_le32(dma);

	return 0;
}

static void rtw_pci_sync_rx_desc_device(struct rtw_dev *rtwdev, dma_addr_t dma,
					struct rtw_pci_rx_ring *rx_ring,
					u32 idx, u32 desc_sz)
{
	struct device *dev = rtwdev->dev;
	struct rtw_pci_rx_buffer_desc *buf_desc;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;

	dma_sync_single_for_device(dev, dma, buf_sz, DMA_FROM_DEVICE);

	buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
						     idx * desc_sz);
	memset(buf_desc, 0, sizeof(*buf_desc));
	buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
	buf_desc->dma = cpu_to_le32(dma);
}

static int rtw_pci_init_rx_ring(struct rtw_dev *rtwdev,
				struct rtw_pci_rx_ring *rx_ring,
				u8 desc_size, u32 len)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct sk_buff *skb = NULL;
	dma_addr_t dma;
	u8 *head;
	int ring_sz = desc_size * len;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;
	int i, allocated;
	int ret = 0;

	head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
	if (!head) {
		rtw_err(rtwdev, "failed to allocate rx ring\n");
		return -ENOMEM;
	}
	rx_ring->r.head = head;

	for (i = 0; i < len; i++) {
		skb = dev_alloc_skb(buf_sz);
		if (!skb) {
			allocated = i;
			ret = -ENOMEM;
			goto err_out;
		}

		memset(skb->data, 0, buf_sz);
		rx_ring->buf[i] = skb;
		ret = rtw_pci_reset_rx_desc(rtwdev, skb, rx_ring, i, desc_size);
		if (ret) {
			allocated = i;
			dev_kfree_skb_any(skb);
			goto err_out;
		}
	}

	rx_ring->r.dma = dma;
	rx_ring->r.len = len;
	rx_ring->r.desc_size = desc_size;
	rx_ring->r.wp = 0;
	rx_ring->r.rp = 0;

	return 0;

err_out:
	for (i = 0; i < allocated; i++) {
		skb = rx_ring->buf[i];
		if (!skb)
			continue;
		dma = *((dma_addr_t *)skb->cb);
		dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		rx_ring->buf[i] = NULL;
	}
	dma_free_coherent(&pdev->dev, ring_sz, head, dma);

	rtw_err(rtwdev, "failed to init rx buffer\n");

	return ret;
}

static int rtw_pci_init_trx_ring(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *tx_ring;
	struct rtw_pci_rx_ring *rx_ring;
	const struct rtw_chip_info *chip = rtwdev->chip;
	int i = 0, j = 0, tx_alloced = 0, rx_alloced = 0;
	int tx_desc_size, rx_desc_size;
	u32 len;
	int ret;

	tx_desc_size = chip->tx_buf_desc_sz;

	for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		len = max_num_of_tx_queue(i);
		ret = rtw_pci_init_tx_ring(rtwdev, tx_ring, tx_desc_size, len);
		if (ret)
			goto out;
	}

	rx_desc_size = chip->rx_buf_desc_sz;

	for (j = 0; j < RTK_MAX_RX_QUEUE_NUM; j++) {
		rx_ring = &rtwpci->rx_rings[j];
		ret = rtw_pci_init_rx_ring(rtwdev, rx_ring, rx_desc_size,
					   RTK_MAX_RX_DESC_NUM);
		if (ret)
			goto out;
	}

	return 0;

out:
	tx_alloced = i;
	for (i = 0; i < tx_alloced; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		rtw_pci_free_tx_ring(rtwdev, tx_ring);
	}

	rx_alloced = j;
	for (j = 0; j < rx_alloced; j++) {
		rx_ring = &rtwpci->rx_rings[j];
		rtw_pci_free_rx_ring(rtwdev, rx_ring);
	}

	return ret;
}

static void rtw_pci_deinit(struct rtw_dev *rtwdev)
{
	rtw_pci_free_trx_ring(rtwdev);
}

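/*
 * The interrupt masks below are grouped by HISR/HIMR register: irq_mask[0]
 * covers TX/RX/beacon-DMA/C2H completions, irq_mask[1] the TX FIFO
 * overflow, and irq_mask[3] the H2C completion, which the enable/disable
 * helpers only program on chips with the 3081 WLAN CPU.
 */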
static int rtw_pci_init(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	int ret = 0;

	rtwpci->irq_mask[0] = IMR_HIGHDOK |
			      IMR_MGNTDOK |
			      IMR_BKDOK |
			      IMR_BEDOK |
			      IMR_VIDOK |
			      IMR_VODOK |
			      IMR_ROK |
			      IMR_BCNDMAINT_E |
			      IMR_C2HCMD |
			      0;
	rtwpci->irq_mask[1] = IMR_TXFOVW |
			      0;
	rtwpci->irq_mask[3] = IMR_H2CDOK |
			      0;
	spin_lock_init(&rtwpci->irq_lock);
	spin_lock_init(&rtwpci->hwirq_lock);
	ret = rtw_pci_init_trx_ring(rtwdev);

	return ret;
}

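/*
 * Program the descriptor-ring base addresses and sizes into the hardware
 * and reset the software read/write pointers, one TX/RX queue at a time.
 */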
static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	u32 len;
	u8 tmp;
	dma_addr_t dma;

	tmp = rtw_read8(rtwdev, RTK_PCI_CTRL + 3);
	rtw_write8(rtwdev, RTK_PCI_CTRL + 3, tmp | 0xf7);

	dma = rtwpci->tx_rings[RTW_TX_QUEUE_BCN].r.dma;
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BCNQ, dma);

	if (!rtw_chip_wcpu_8051(rtwdev)) {
		len = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.len;
		dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma;
		rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0;
		rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.wp = 0;
		rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_H2CQ, len & TRX_BD_IDX_MASK);
		rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_H2CQ, dma);
	}

	len = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BKQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BKQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BEQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BEQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VOQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VOQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VIQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VIQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_MGMTQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_MGMTQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_HI0Q, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_HI0Q, dma);

	len = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.len;
	dma = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.dma;
	rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.rp = 0;
	rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_RXBD_NUM_MPDUQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_RXBD_DESA_MPDUQ, dma);

	/* reset read/write pointers */
	rtw_write32(rtwdev, RTK_PCI_TXBD_RWPTR_CLR, 0xffffffff);

	/* reset the H2C queue indexes in a single write */
	if (rtw_chip_wcpu_3081(rtwdev))
		rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR,
				BIT_CLR_H2CQ_HOST_IDX | BIT_CLR_H2CQ_HW_IDX);
}

static void rtw_pci_reset_trx_ring(struct rtw_dev *rtwdev)
{
	rtw_pci_reset_buf_desc(rtwdev);
}

static void rtw_pci_enable_interrupt(struct rtw_dev *rtwdev,
				     struct rtw_pci *rtwpci, bool exclude_rx)
{
	unsigned long flags;
	u32 imr0_unmask = exclude_rx ? IMR_ROK : 0;

	spin_lock_irqsave(&rtwpci->hwirq_lock, flags);

	rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0] & ~imr0_unmask);
	rtw_write32(rtwdev, RTK_PCI_HIMR1, rtwpci->irq_mask[1]);
	if (rtw_chip_wcpu_3081(rtwdev))
		rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]);

	rtwpci->irq_enabled = true;

	spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
}

static void rtw_pci_disable_interrupt(struct rtw_dev *rtwdev,
				      struct rtw_pci *rtwpci)
{
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->hwirq_lock, flags);

	if (!rtwpci->irq_enabled)
		goto out;

	rtw_write32(rtwdev, RTK_PCI_HIMR0, 0);
	rtw_write32(rtwdev, RTK_PCI_HIMR1, 0);
	if (rtw_chip_wcpu_3081(rtwdev))
		rtw_write32(rtwdev, RTK_PCI_HIMR3, 0);

	rtwpci->irq_enabled = false;

out:
	spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
}

static void rtw_pci_dma_reset(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
{
	/* reset dma and rx tag */
	rtw_write32_set(rtwdev, RTK_PCI_CTRL,
			BIT_RST_TRXDMA_INTF | BIT_RX_TAG_EN);
	rtwpci->rx_tag = 0;
}

static int rtw_pci_setup(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	rtw_pci_reset_trx_ring(rtwdev);
	rtw_pci_dma_reset(rtwdev, rtwpci);

	return 0;
}

static void rtw_pci_dma_release(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
{
	struct rtw_pci_tx_ring *tx_ring;
	enum rtw_tx_queue_type queue;

	rtw_pci_reset_trx_ring(rtwdev);
	for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
		tx_ring = &rtwpci->tx_rings[queue];
		rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);
	}
}

static void rtw_pci_napi_start(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	if (test_and_set_bit(RTW_PCI_FLAG_NAPI_RUNNING, rtwpci->flags))
		return;

	napi_enable(&rtwpci->napi);
}

static void rtw_pci_napi_stop(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	if (!test_and_clear_bit(RTW_PCI_FLAG_NAPI_RUNNING, rtwpci->flags))
		return;

	napi_synchronize(&rtwpci->napi);
	napi_disable(&rtwpci->napi);
}

static int rtw_pci_start(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	rtw_pci_napi_start(rtwdev);

	spin_lock_bh(&rtwpci->irq_lock);
	rtwpci->running = true;
	rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
	spin_unlock_bh(&rtwpci->irq_lock);

	return 0;
}

static void rtw_pci_stop(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;

	spin_lock_bh(&rtwpci->irq_lock);
	rtwpci->running = false;
	rtw_pci_disable_interrupt(rtwdev, rtwpci);
	spin_unlock_bh(&rtwpci->irq_lock);

	synchronize_irq(pdev->irq);
	rtw_pci_napi_stop(rtwdev);

	spin_lock_bh(&rtwpci->irq_lock);
	rtw_pci_dma_release(rtwdev, rtwpci);
	spin_unlock_bh(&rtwpci->irq_lock);
}

static void rtw_pci_deep_ps_enter(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *tx_ring;
	enum rtw_tx_queue_type queue;
	bool tx_empty = true;

	if (rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE))
		goto enter_deep_ps;

	lockdep_assert_held(&rtwpci->irq_lock);

	/* TX DMA is not allowed while in deep PS state */
	for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
		/* the BCN queue holds the reserved page and has no DMA
		 * interrupt; the H2C queue is managed by the firmware
		 */
		if (queue == RTW_TX_QUEUE_BCN ||
		    queue == RTW_TX_QUEUE_H2C)
			continue;

		tx_ring = &rtwpci->tx_rings[queue];

		/* check if there is any skb DMAing */
		if (skb_queue_len(&tx_ring->queue)) {
			tx_empty = false;
			break;
		}
	}

	if (!tx_empty) {
		rtw_dbg(rtwdev, RTW_DBG_PS,
			"TX path not empty, cannot enter deep power save state\n");
		return;
	}
enter_deep_ps:
	set_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags);
	rtw_power_mode_change(rtwdev, true);
}

static void rtw_pci_deep_ps_leave(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	lockdep_assert_held(&rtwpci->irq_lock);

	if (test_and_clear_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
		rtw_power_mode_change(rtwdev, false);
}

static void rtw_pci_deep_ps(struct rtw_dev *rtwdev, bool enter)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	spin_lock_bh(&rtwpci->irq_lock);

	if (enter && !test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
		rtw_pci_deep_ps_enter(rtwdev);

	if (!enter && test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
		rtw_pci_deep_ps_leave(rtwdev);

	spin_unlock_bh(&rtwpci->irq_lock);
}

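/*
 * The BCN ring carries at most one reserved page at a time; the previous
 * one is released here before rtw_pci_tx_write_data() queues a new one.
 */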
static void rtw_pci_release_rsvd_page(struct rtw_pci *rtwpci,
				      struct rtw_pci_tx_ring *ring)
{
	struct sk_buff *prev = skb_dequeue(&ring->queue);
	struct rtw_pci_tx_data *tx_data;
	dma_addr_t dma;

	if (!prev)
		return;

	tx_data = rtw_pci_get_tx_data(prev);
	dma = tx_data->dma;
	dma_unmap_single(&rtwpci->pdev->dev, dma, prev->len, DMA_TO_DEVICE);
	dev_kfree_skb_any(prev);
}

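/*
 * With BIT_RX_TAG_EN set in rtw_pci_dma_reset(), the DMA engine is
 * expected to write an incrementing tag into each RX buffer descriptor;
 * a mismatch with the tag tracked here suggests the descriptor was never
 * updated, i.e. a stalled or timed-out DMA.
 */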
static void rtw_pci_dma_check(struct rtw_dev *rtwdev,
			      struct rtw_pci_rx_ring *rx_ring,
			      u32 idx)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pci_rx_buffer_desc *buf_desc;
	u32 desc_sz = chip->rx_buf_desc_sz;
	u16 total_pkt_size;

	buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
						     idx * desc_sz);
	total_pkt_size = le16_to_cpu(buf_desc->total_pkt_size);

	/* rx tag mismatch, throw a warning */
	if (total_pkt_size != rtwpci->rx_tag)
		rtw_warn(rtwdev, "pci bus timeout, check dma status\n");

	rtwpci->rx_tag = (rtwpci->rx_tag + 1) % RX_TAG_MAX;
}

static u32 __pci_get_hw_tx_ring_rp(struct rtw_dev *rtwdev, u8 pci_q)
{
	u32 bd_idx_addr = rtw_pci_tx_queue_idx_addr[pci_q];
	u32 bd_idx = rtw_read16(rtwdev, bd_idx_addr + 2);

	return FIELD_GET(TRX_BD_IDX_MASK, bd_idx);
}

static void __pci_flush_queue(struct rtw_dev *rtwdev, u8 pci_q, bool drop)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *ring = &rtwpci->tx_rings[pci_q];
	u32 cur_rp;
	u8 i;

	/* Because the time taken by the I/O in __pci_get_hw_tx_ring_rp is
	 * somewhat variable, it's hard to define a reasonable fixed total
	 * timeout for the read_poll_timeout* helpers. Instead, bound the
	 * number of polls and use a plain for loop with udelay.
	 */
	for (i = 0; i < 30; i++) {
		cur_rp = __pci_get_hw_tx_ring_rp(rtwdev, pci_q);
		if (cur_rp == ring->r.wp)
			return;

		udelay(1);
	}

	if (!drop)
		rtw_dbg(rtwdev, RTW_DBG_UNEXP,
			"timed out flushing pci tx ring[%d]\n", pci_q);
}

static void __rtw_pci_flush_queues(struct rtw_dev *rtwdev, u32 pci_queues,
				   bool drop)
{
	u8 q;

	for (q = 0; q < RTK_MAX_TX_QUEUE_NUM; q++) {
		/* no need to flush the BCN, H2C and HI tx queues */
		if (q == RTW_TX_QUEUE_BCN || q == RTW_TX_QUEUE_H2C ||
		    q == RTW_TX_QUEUE_HI0)
			continue;

		if (pci_queues & BIT(q))
			__pci_flush_queue(rtwdev, q, drop);
	}
}

static void rtw_pci_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop)
{
	u32 pci_queues = 0;
	u8 i;

	/* If asked to flush all of the hardware queues, flush all of the
	 * pci queues as well; e.g. with four AC queues, a 'queues' mask of
	 * 0xf selects every PCI data queue.
	 */
	if (queues == BIT(rtwdev->hw->queues) - 1) {
		pci_queues = BIT(RTK_MAX_TX_QUEUE_NUM) - 1;
	} else {
		for (i = 0; i < rtwdev->hw->queues; i++)
			if (queues & BIT(i))
				pci_queues |= BIT(rtw_tx_ac_to_hwq(i));
	}

	__rtw_pci_flush_queues(rtwdev, pci_queues, drop);
}

static void rtw_pci_tx_kick_off_queue(struct rtw_dev *rtwdev,
				      enum rtw_tx_queue_type queue)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *ring;
	u32 bd_idx;

	ring = &rtwpci->tx_rings[queue];
	bd_idx = rtw_pci_tx_queue_idx_addr[queue];

	spin_lock_bh(&rtwpci->irq_lock);
	if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE))
		rtw_pci_deep_ps_leave(rtwdev);
	rtw_write16(rtwdev, bd_idx, ring->r.wp & TRX_BD_IDX_MASK);
	spin_unlock_bh(&rtwpci->irq_lock);
}

static void rtw_pci_tx_kick_off(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	enum rtw_tx_queue_type queue;

	for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++)
		if (test_and_clear_bit(queue, rtwpci->tx_queued))
			rtw_pci_tx_kick_off_queue(rtwdev, queue);
}

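/*
 * The TX bookkeeping below relies on the usual ring arithmetic. A minimal
 * sketch of the space check, assuming avail_desc() in pci.h is the
 * conventional "free slots between wp and rp" helper:
 *
 *	if (rp > wp)
 *		avail = rp - wp - 1;
 *	else
 *		avail = len - wp + rp - 1;
 *
 * e.g. len = 128, wp = 120, rp = 5 leaves 12 free descriptors.
 */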
static int rtw_pci_tx_write_data(struct rtw_dev *rtwdev,
				 struct rtw_tx_pkt_info *pkt_info,
				 struct sk_buff *skb,
				 enum rtw_tx_queue_type queue)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pci_tx_ring *ring;
	struct rtw_pci_tx_data *tx_data;
	dma_addr_t dma;
	u32 tx_pkt_desc_sz = chip->tx_pkt_desc_sz;
	u32 tx_buf_desc_sz = chip->tx_buf_desc_sz;
	u32 size;
	u32 psb_len;
	u8 *pkt_desc;
	struct rtw_pci_tx_buffer_desc *buf_desc;

	ring = &rtwpci->tx_rings[queue];

	size = skb->len;

	if (queue == RTW_TX_QUEUE_BCN)
		rtw_pci_release_rsvd_page(rtwpci, ring);
	else if (!avail_desc(ring->r.wp, ring->r.rp, ring->r.len))
		return -ENOSPC;

	pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
	memset(pkt_desc, 0, tx_pkt_desc_sz);
	pkt_info->qsel = rtw_pci_get_tx_qsel(skb, queue);
	rtw_tx_fill_tx_desc(rtwdev, pkt_info, skb);
	dma = dma_map_single(&rtwpci->pdev->dev, skb->data, skb->len,
			     DMA_TO_DEVICE);
	if (dma_mapping_error(&rtwpci->pdev->dev, dma))
		return -EBUSY;

	/* after this the skb is DMA mapped, there is no way back */
	buf_desc = get_tx_buffer_desc(ring, tx_buf_desc_sz);
	memset(buf_desc, 0, tx_buf_desc_sz);
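	/* 128-byte PSB units: equivalent to DIV_ROUND_UP(skb->len, 128) */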
	psb_len = (skb->len - 1) / 128 + 1;
	if (queue == RTW_TX_QUEUE_BCN)
		psb_len |= 1 << RTK_PCI_TXBD_OWN_OFFSET;

	buf_desc[0].psb_len = cpu_to_le16(psb_len);
	buf_desc[0].buf_size = cpu_to_le16(tx_pkt_desc_sz);
	buf_desc[0].dma = cpu_to_le32(dma);
	buf_desc[1].buf_size = cpu_to_le16(size);
	buf_desc[1].dma = cpu_to_le32(dma + tx_pkt_desc_sz);

	tx_data = rtw_pci_get_tx_data(skb);
	tx_data->dma = dma;
	tx_data->sn = pkt_info->sn;

	spin_lock_bh(&rtwpci->irq_lock);

	skb_queue_tail(&ring->queue, skb);

	if (queue == RTW_TX_QUEUE_BCN)
		goto out_unlock;

	/* update write-index, and kick it off later */
	set_bit(queue, rtwpci->tx_queued);
	if (++ring->r.wp >= ring->r.len)
		ring->r.wp = 0;

out_unlock:
	spin_unlock_bh(&rtwpci->irq_lock);

	return 0;
}

static int rtw_pci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf,
					u32 size)
{
	struct sk_buff *skb;
	struct rtw_tx_pkt_info pkt_info = {0};
	u8 reg_bcn_work;
	int ret;

	skb = rtw_tx_write_data_rsvd_page_get(rtwdev, &pkt_info, buf, size);
	if (!skb)
		return -ENOMEM;

	ret = rtw_pci_tx_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_BCN);
	if (ret) {
		rtw_err(rtwdev, "failed to write rsvd page data\n");
		return ret;
	}

	/* reserved pages go through beacon queue */
	reg_bcn_work = rtw_read8(rtwdev, RTK_PCI_TXBD_BCN_WORK);
	reg_bcn_work |= BIT_PCI_BCNQ_FLAG;
	rtw_write8(rtwdev, RTK_PCI_TXBD_BCN_WORK, reg_bcn_work);

	return 0;
}

static int rtw_pci_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
	struct sk_buff *skb;
	struct rtw_tx_pkt_info pkt_info = {0};
	int ret;

	skb = rtw_tx_write_data_h2c_get(rtwdev, &pkt_info, buf, size);
	if (!skb)
		return -ENOMEM;

	ret = rtw_pci_tx_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_H2C);
	if (ret) {
		rtw_err(rtwdev, "failed to write h2c data\n");
		return ret;
	}

	rtw_pci_tx_kick_off_queue(rtwdev, RTW_TX_QUEUE_H2C);

	return 0;
}

static int rtw_pci_tx_write(struct rtw_dev *rtwdev,
			    struct rtw_tx_pkt_info *pkt_info,
			    struct sk_buff *skb)
{
	enum rtw_tx_queue_type queue = rtw_tx_queue_mapping(skb);
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *ring;
	int ret;

	ret = rtw_pci_tx_write_data(rtwdev, pkt_info, skb, queue);
	if (ret)
		return ret;

	ring = &rtwpci->tx_rings[queue];
	spin_lock_bh(&rtwpci->irq_lock);
	if (avail_desc(ring->r.wp, ring->r.rp, ring->r.len) < 2) {
		ieee80211_stop_queue(rtwdev->hw, skb_get_queue_mapping(skb));
		ring->queue_stopped = true;
	}
	spin_unlock_bh(&rtwpci->irq_lock);

	return 0;
}

static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
			   u8 hw_queue)
{
	struct ieee80211_hw *hw = rtwdev->hw;
	struct ieee80211_tx_info *info;
	struct rtw_pci_tx_ring *ring;
	struct rtw_pci_tx_data *tx_data;
	struct sk_buff *skb;
	u32 count;
	u32 bd_idx_addr;
	u32 bd_idx, cur_rp, rp_idx;
	u16 q_map;

	ring = &rtwpci->tx_rings[hw_queue];

	bd_idx_addr = rtw_pci_tx_queue_idx_addr[hw_queue];
	bd_idx = rtw_read32(rtwdev, bd_idx_addr);
	cur_rp = bd_idx >> 16;
	cur_rp &= TRX_BD_IDX_MASK;
	rp_idx = ring->r.rp;
	if (cur_rp >= ring->r.rp)
		count = cur_rp - ring->r.rp;
	else
		count = ring->r.len - (ring->r.rp - cur_rp);
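	/* e.g. len = 128, rp = 126, cur_rp = 3: count = 128 - (126 - 3) = 5 */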

	while (count--) {
		skb = skb_dequeue(&ring->queue);
		if (!skb) {
			rtw_err(rtwdev, "failed to dequeue %d skb TX queue %d, BD=0x%08x, rp %d -> %d\n",
				count, hw_queue, bd_idx, ring->r.rp, cur_rp);
			break;
		}
		tx_data = rtw_pci_get_tx_data(skb);
		dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
				 DMA_TO_DEVICE);

		/* just free command packets from host to card */
		if (hw_queue == RTW_TX_QUEUE_H2C) {
			dev_kfree_skb_irq(skb);
			continue;
		}

		if (ring->queue_stopped &&
		    avail_desc(ring->r.wp, rp_idx, ring->r.len) > 4) {
			q_map = skb_get_queue_mapping(skb);
			ieee80211_wake_queue(hw, q_map);
			ring->queue_stopped = false;
		}

		if (++rp_idx >= ring->r.len)
			rp_idx = 0;

		skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);

		info = IEEE80211_SKB_CB(skb);

		/* enqueue to wait for tx report */
		if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
			rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn);
			continue;
		}

		/* report ACK for the others so they won't be marked as dropped */
		if (info->flags & IEEE80211_TX_CTL_NO_ACK)
			info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
		else
			info->flags |= IEEE80211_TX_STAT_ACK;

		ieee80211_tx_info_clear_status(info);
		ieee80211_tx_status_irqsafe(hw, skb);
	}

	ring->r.rp = cur_rp;
}

static void rtw_pci_rx_isr(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct napi_struct *napi = &rtwpci->napi;

	napi_schedule(napi);
}

static int rtw_pci_get_hw_rx_ring_nr(struct rtw_dev *rtwdev,
				     struct rtw_pci *rtwpci)
{
	struct rtw_pci_rx_ring *ring;
	int count = 0;
	u32 tmp, cur_wp;

	ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];
	tmp = rtw_read32(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ);
	cur_wp = u32_get_bits(tmp, TRX_BD_HW_IDX_MASK);
	if (cur_wp >= ring->r.wp)
		count = cur_wp - ring->r.wp;
	else
		count = ring->r.len - (ring->r.wp - cur_wp);

	return count;
}

static u32 rtw_pci_rx_napi(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
			   u8 hw_queue, u32 limit)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct napi_struct *napi = &rtwpci->napi;
	struct rtw_pci_rx_ring *ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];
	struct rtw_rx_pkt_stat pkt_stat;
	struct ieee80211_rx_status rx_status;
	struct sk_buff *skb, *new;
	u32 cur_rp = ring->r.rp;
	u32 count, rx_done = 0;
	u32 pkt_offset;
	u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
	u32 buf_desc_sz = chip->rx_buf_desc_sz;
	u32 new_len;
	u8 *rx_desc;
	dma_addr_t dma;

	count = rtw_pci_get_hw_rx_ring_nr(rtwdev, rtwpci);
	count = min(count, limit);

	while (count--) {
		rtw_pci_dma_check(rtwdev, ring, cur_rp);
		skb = ring->buf[cur_rp];
		dma = *((dma_addr_t *)skb->cb);
		dma_sync_single_for_cpu(rtwdev->dev, dma, RTK_PCI_RX_BUF_SIZE,
					DMA_FROM_DEVICE);
		rx_desc = skb->data;
		rtw_rx_query_rx_desc(rtwdev, rx_desc, &pkt_stat, &rx_status);

		/* offset from rx_desc to payload */
		pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz +
			     pkt_stat.shift;

		/* allocate a new skb for this frame,
		 * discard the frame if none available
		 */
		new_len = pkt_stat.pkt_len + pkt_offset;
		new = dev_alloc_skb(new_len);
		if (WARN_ONCE(!new, "rx routine starvation\n"))
			goto next_rp;

		/* copy the DMA'd data, including the rx_desc, into the new skb */
		skb_put_data(new, skb->data, new_len);

		if (pkt_stat.is_c2h) {
			rtw_fw_c2h_cmd_rx_irqsafe(rtwdev, pkt_offset, new);
		} else {
			/* remove rx_desc */
			skb_pull(new, pkt_offset);

			rtw_update_rx_freq_for_invalid(rtwdev, new, &rx_status, &pkt_stat);
			rtw_rx_stats(rtwdev, pkt_stat.vif, new);
			memcpy(new->cb, &rx_status, sizeof(rx_status));
			ieee80211_rx_napi(rtwdev->hw, NULL, new, napi);
			rx_done++;
		}

next_rp:
		/* new skb delivered to mac80211, re-enable original skb DMA */
		rtw_pci_sync_rx_desc_device(rtwdev, dma, ring, cur_rp,
					    buf_desc_sz);

		/* host reads the next element in the ring */
		if (++cur_rp >= ring->r.len)
			cur_rp = 0;
	}

	ring->r.rp = cur_rp;
	/* 'rp', the last position we have read, is seen as the previous
	 * position of 'wp', which is used to calculate 'count' next time.
	 */
	ring->r.wp = cur_rp;
	rtw_write16(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ, ring->r.rp);

	return rx_done;
}

static void rtw_pci_irq_recognized(struct rtw_dev *rtwdev,
				   struct rtw_pci *rtwpci, u32 *irq_status)
{
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->hwirq_lock, flags);

	irq_status[0] = rtw_read32(rtwdev, RTK_PCI_HISR0);
	irq_status[1] = rtw_read32(rtwdev, RTK_PCI_HISR1);
	if (rtw_chip_wcpu_3081(rtwdev))
		irq_status[3] = rtw_read32(rtwdev, RTK_PCI_HISR3);
	else
		irq_status[3] = 0;
	irq_status[0] &= rtwpci->irq_mask[0];
	irq_status[1] &= rtwpci->irq_mask[1];
	irq_status[3] &= rtwpci->irq_mask[3];
	rtw_write32(rtwdev, RTK_PCI_HISR0, irq_status[0]);
	rtw_write32(rtwdev, RTK_PCI_HISR1, irq_status[1]);
	if (rtw_chip_wcpu_3081(rtwdev))
		rtw_write32(rtwdev, RTK_PCI_HISR3, irq_status[3]);

	spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
}

static irqreturn_t rtw_pci_interrupt_handler(int irq, void *dev)
{
	struct rtw_dev *rtwdev = dev;
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	/* Disable RTW PCI interrupts to avoid more interrupts arriving
	 * before the thread function has finished.
	 *
	 * Disabling HIMR here also avoids a new HISR flag being raised before
	 * the HISRs have been write-1-cleared for MSI. If not all of the HISRs
	 * are cleared, the edge-triggered interrupt will not be generated when
	 * a new HISR flag is set.
	 */
	rtw_pci_disable_interrupt(rtwdev, rtwpci);

	return IRQ_WAKE_THREAD;
}

static irqreturn_t rtw_pci_interrupt_threadfn(int irq, void *dev)
{
	struct rtw_dev *rtwdev = dev;
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	u32 irq_status[4];
	bool rx = false;

	spin_lock_bh(&rtwpci->irq_lock);
	rtw_pci_irq_recognized(rtwdev, rtwpci, irq_status);

	if (irq_status[0] & IMR_MGNTDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_MGMT);
	if (irq_status[0] & IMR_HIGHDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_HI0);
	if (irq_status[0] & IMR_BEDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BE);
	if (irq_status[0] & IMR_BKDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BK);
	if (irq_status[0] & IMR_VODOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VO);
	if (irq_status[0] & IMR_VIDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VI);
	if (irq_status[3] & IMR_H2CDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_H2C);
	if (irq_status[0] & IMR_ROK) {
		rtw_pci_rx_isr(rtwdev);
		rx = true;
	}
	if (unlikely(irq_status[0] & IMR_C2HCMD))
		rtw_fw_c2h_cmd_isr(rtwdev);

	/* all of the jobs for this interrupt have been done */
	if (rtwpci->running)
		rtw_pci_enable_interrupt(rtwdev, rtwpci, rx);
	spin_unlock_bh(&rtwpci->irq_lock);

	return IRQ_HANDLED;
}

static int rtw_pci_io_mapping(struct rtw_dev *rtwdev,
			      struct pci_dev *pdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	unsigned long len;
	u8 bar_id = 2;
	int ret;

	ret = pci_request_regions(pdev, KBUILD_MODNAME);
	if (ret) {
		rtw_err(rtwdev, "failed to request pci regions\n");
		return ret;
	}

	len = pci_resource_len(pdev, bar_id);
	rtwpci->mmap = pci_iomap(pdev, bar_id, len);
	if (!rtwpci->mmap) {
		pci_release_regions(pdev);
		rtw_err(rtwdev, "failed to map pci memory\n");
		return -ENOMEM;
	}

	return 0;
}

static void rtw_pci_io_unmapping(struct rtw_dev *rtwdev,
				 struct pci_dev *pdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	if (rtwpci->mmap) {
		pci_iounmap(pdev, rtwpci->mmap);
		pci_release_regions(pdev);
	}
}

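/*
 * The DBI and MDIO accessors below poke the PCIe controller/PHY through
 * Realtek's indirect register interface; they back the CLKREQ/ASPM
 * helpers and rtw_pci_phy_cfg() further down.
 */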
static void rtw_dbi_write8(struct rtw_dev *rtwdev, u16 addr, u8 data)
{
	u16 write_addr;
	u16 remainder = addr & ~(BITS_DBI_WREN | BITS_DBI_ADDR_MASK);
	u8 flag;
	u8 cnt;

	write_addr = addr & BITS_DBI_ADDR_MASK;
	write_addr |= u16_encode_bits(BIT(remainder), BITS_DBI_WREN);
	rtw_write8(rtwdev, REG_DBI_WDATA_V1 + remainder, data);
	rtw_write16(rtwdev, REG_DBI_FLAG_V1, write_addr);
	rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_WFLAG >> 16);

	for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
		flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
		if (flag == 0)
			return;

		udelay(10);
	}

	WARN(flag, "failed to write to DBI register, addr=0x%04x\n", addr);
}

static int rtw_dbi_read8(struct rtw_dev *rtwdev, u16 addr, u8 *value)
{
	u16 read_addr = addr & BITS_DBI_ADDR_MASK;
	u8 flag;
	u8 cnt;

	rtw_write16(rtwdev, REG_DBI_FLAG_V1, read_addr);
	rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_RFLAG >> 16);

	for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
		flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
		if (flag == 0) {
			read_addr = REG_DBI_RDATA_V1 + (addr & 3);
			*value = rtw_read8(rtwdev, read_addr);
			return 0;
		}

		udelay(10);
	}

	WARN(1, "failed to read DBI register, addr=0x%04x\n", addr);
	return -EIO;
}

static void rtw_mdio_write(struct rtw_dev *rtwdev, u8 addr, u16 data, bool g1)
{
	u8 page;
	u8 wflag;
	u8 cnt;

	rtw_write16(rtwdev, REG_MDIO_V1, data);

	page = addr < RTW_PCI_MDIO_PG_SZ ? 0 : 1;
	page += g1 ? RTW_PCI_MDIO_PG_OFFS_G1 : RTW_PCI_MDIO_PG_OFFS_G2;
	rtw_write8(rtwdev, REG_PCIE_MIX_CFG, addr & BITS_MDIO_ADDR_MASK);
	rtw_write8(rtwdev, REG_PCIE_MIX_CFG + 3, page);
	rtw_write32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1, 1);

	for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
		wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG,
					BIT_MDIO_WFLAG_V1);
		if (wflag == 0)
			return;

		udelay(10);
	}

	WARN(wflag, "failed to write to MDIO register, addr=0x%02x\n", addr);
}

static void rtw_pci_clkreq_set(struct rtw_dev *rtwdev, bool enable)
{
	u8 value;
	int ret;

	if (rtw_pci_disable_aspm)
		return;

	ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
	if (ret) {
		rtw_err(rtwdev, "failed to read CLKREQ_L1, ret=%d\n", ret);
		return;
	}

	if (enable)
		value |= BIT_CLKREQ_SW_EN;
	else
		value &= ~BIT_CLKREQ_SW_EN;

	rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
}

static void rtw_pci_clkreq_pad_low(struct rtw_dev *rtwdev, bool enable)
{
	u8 value;
	int ret;

	ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
	if (ret) {
		rtw_err(rtwdev, "failed to read CLKREQ_L1, ret=%d\n", ret);
		return;
	}

	if (enable)
		value &= ~BIT_CLKREQ_N_PAD;
	else
		value |= BIT_CLKREQ_N_PAD;

	rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
}

static void rtw_pci_aspm_set(struct rtw_dev *rtwdev, bool enable)
{
	u8 value;
	int ret;

	if (rtw_pci_disable_aspm)
		return;

	ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
	if (ret) {
		rtw_err(rtwdev, "failed to read ASPM, ret=%d\n", ret);
		return;
	}

	if (enable)
		value |= BIT_L1_SW_EN;
	else
		value &= ~BIT_L1_SW_EN;

	rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
}

static void rtw_pci_link_ps(struct rtw_dev *rtwdev, bool enter)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	/* Like CLKREQ, ASPM is implemented by two HW modules, and can
	 * only be enabled when the host supports it.
	 *
	 * The ASPM mechanism should only be enabled when the driver/firmware
	 * enters power save mode without heavy traffic, because we've seen
	 * interoperability issues where the link enters L1 on the fly even
	 * while the driver is sustaining high throughput. This is probably
	 * because the ASPM behavior varies slightly between SoCs.
	 */
	if (!(rtwpci->link_ctrl & PCI_EXP_LNKCTL_ASPM_L1))
		return;

	if ((enter && atomic_dec_if_positive(&rtwpci->link_usage) == 0) ||
	    (!enter && atomic_inc_return(&rtwpci->link_usage) == 1))
		rtw_pci_aspm_set(rtwdev, enter);
}

static void rtw_pci_link_cfg(struct rtw_dev *rtwdev)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;
	u16 link_ctrl;
	int ret;

	/* RTL8822CE has REFCLK auto calibration enabled, so it does not need
	 * added clock delay to cover the REFCLK timing gap.
	 */
	if (chip->id == RTW_CHIP_TYPE_8822C)
		rtw_dbi_write8(rtwdev, RTK_PCIE_CLKDLY_CTRL, 0);

	/* Though the standard PCIe configuration space can set the link
	 * control register, by Realtek's design the driver should first
	 * check whether the host supports CLKREQ/ASPM before enabling the
	 * HW module.
	 *
	 * These functions are implemented by two associated HW modules:
	 * one is responsible for accessing the PCIe configuration space to
	 * follow the host settings, and the other is in charge of the
	 * CLKREQ/ASPM mechanisms themselves and is disabled by default,
	 * because sometimes the host does not support them. Due to wrong
	 * settings for other reasons (e.g. CLKREQ# not bi-directional),
	 * this could lead to losing the device if the HW misbehaves on
	 * the link.
	 *
	 * Hence the driver is designed to first check that the PCIe
	 * configuration space is synced and enabled, and only then turn on
	 * the other module that actually implements the mechanism.
	 */
	ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl);
	if (ret) {
		rtw_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret);
		return;
	}

	if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN)
		rtw_pci_clkreq_set(rtwdev, true);

	rtwpci->link_ctrl = link_ctrl;
}

static void rtw_pci_interface_cfg(struct rtw_dev *rtwdev)
{
	const struct rtw_chip_info *chip = rtwdev->chip;

	switch (chip->id) {
	case RTW_CHIP_TYPE_8822C:
		if (rtwdev->hal.cut_version >= RTW_CHIP_VER_CUT_D)
			rtw_write32_mask(rtwdev, REG_HCI_MIX_CFG,
					 BIT_PCIE_EMAC_PDN_AUX_TO_FAST_CLK, 1);
		break;
	default:
		break;
	}
}

static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_efuse *efuse = &rtwdev->efuse;
	struct pci_dev *pdev = rtwpci->pdev;
	const struct rtw_intf_phy_para *para;
	u16 cut;
	u16 value;
	u16 offset;
	int i;
	int ret;

	cut = BIT(0) << rtwdev->hal.cut_version;

	for (i = 0; i < chip->intf_table->n_gen1_para; i++) {
		para = &chip->intf_table->gen1_para[i];
		if (!(para->cut_mask & cut))
			continue;
		if (para->offset == 0xffff)
			break;
		offset = para->offset;
		value = para->value;
		if (para->ip_sel == RTW_IP_SEL_PHY)
			rtw_mdio_write(rtwdev, offset, value, true);
		else
			rtw_dbi_write8(rtwdev, offset, value);
	}

	for (i = 0; i < chip->intf_table->n_gen2_para; i++) {
		para = &chip->intf_table->gen2_para[i];
		if (!(para->cut_mask & cut))
			continue;
		if (para->offset == 0xffff)
			break;
		offset = para->offset;
		value = para->value;
		if (para->ip_sel == RTW_IP_SEL_PHY)
			rtw_mdio_write(rtwdev, offset, value, false);
		else
			rtw_dbi_write8(rtwdev, offset, value);
	}

	rtw_pci_link_cfg(rtwdev);

	/* Disable 8821ce completion timeout by default */
	if (chip->id == RTW_CHIP_TYPE_8821C) {
		ret = pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
					       PCI_EXP_DEVCTL2_COMP_TMOUT_DIS);
		if (ret)
			rtw_err(rtwdev, "failed to set PCI cap, ret = %d\n",
				ret);
	}

	if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 5)
		rtw_write32_mask(rtwdev, REG_ANAPARSW_MAC_0, BIT_CF_L_V2, 0x1);
}

static int __maybe_unused rtw_pci_suspend(struct device *dev)
{
	struct ieee80211_hw *hw = dev_get_drvdata(dev);
	struct rtw_dev *rtwdev = hw->priv;
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_efuse *efuse = &rtwdev->efuse;

	if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 6)
		rtw_pci_clkreq_pad_low(rtwdev, true);
	return 0;
}

static int __maybe_unused rtw_pci_resume(struct device *dev)
{
	struct ieee80211_hw *hw = dev_get_drvdata(dev);
	struct rtw_dev *rtwdev = hw->priv;
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_efuse *efuse = &rtwdev->efuse;

	if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 6)
		rtw_pci_clkreq_pad_low(rtwdev, false);
	return 0;
}

SIMPLE_DEV_PM_OPS(rtw_pm_ops, rtw_pci_suspend, rtw_pci_resume);
EXPORT_SYMBOL(rtw_pm_ops);

static int rtw_pci_claim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to enable pci device\n");
		return ret;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, rtwdev->hw);
	SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);

	return 0;
}

static void rtw_pci_declaim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}

static int rtw_pci_setup_resource(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	struct rtw_pci *rtwpci;
	int ret;

	rtwpci = (struct rtw_pci *)rtwdev->priv;
	rtwpci->pdev = pdev;

	/* after this the driver can access the hw registers */
	ret = rtw_pci_io_mapping(rtwdev, pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to request pci io region\n");
		goto err_out;
	}

	ret = rtw_pci_init(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to allocate pci resources\n");
		goto err_io_unmap;
	}

	return 0;

err_io_unmap:
	rtw_pci_io_unmapping(rtwdev, pdev);

err_out:
	return ret;
}

static void rtw_pci_destroy(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	rtw_pci_deinit(rtwdev);
	rtw_pci_io_unmapping(rtwdev, pdev);
}

static const struct rtw_hci_ops rtw_pci_ops = {
	.tx_write = rtw_pci_tx_write,
	.tx_kick_off = rtw_pci_tx_kick_off,
	.flush_queues = rtw_pci_flush_queues,
	.setup = rtw_pci_setup,
	.start = rtw_pci_start,
	.stop = rtw_pci_stop,
	.deep_ps = rtw_pci_deep_ps,
	.link_ps = rtw_pci_link_ps,
	.interface_cfg = rtw_pci_interface_cfg,
	.dynamic_rx_agg = NULL,
	.write_firmware_page = rtw_write_firmware_page,

	.read8 = rtw_pci_read8,
	.read16 = rtw_pci_read16,
	.read32 = rtw_pci_read32,
	.write8 = rtw_pci_write8,
	.write16 = rtw_pci_write16,
	.write32 = rtw_pci_write32,
	.write_data_rsvd_page = rtw_pci_write_data_rsvd_page,
	.write_data_h2c = rtw_pci_write_data_h2c,
};

static int rtw_pci_request_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	unsigned int flags = PCI_IRQ_INTX;
	int ret;

	if (!rtw_disable_msi)
		flags |= PCI_IRQ_MSI;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, flags);
	if (ret < 0) {
		rtw_err(rtwdev, "failed to alloc PCI irq vectors\n");
		return ret;
	}

	ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq,
					rtw_pci_interrupt_handler,
					rtw_pci_interrupt_threadfn,
					IRQF_SHARED, KBUILD_MODNAME, rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to request irq %d\n", ret);
		pci_free_irq_vectors(pdev);
	}

	return ret;
}

static void rtw_pci_free_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	devm_free_irq(rtwdev->dev, pdev->irq, rtwdev);
	pci_free_irq_vectors(pdev);
}

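/*
 * NAPI poll: drain the MPDU RX ring in budget-sized chunks; when less
 * than the budget was consumed, complete NAPI and re-enable interrupts,
 * rescheduling if the ring refilled in the meantime.
 */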
static int rtw_pci_napi_poll(struct napi_struct *napi, int budget)
{
	struct rtw_pci *rtwpci = container_of(napi, struct rtw_pci, napi);
	struct rtw_dev *rtwdev = container_of((void *)rtwpci, struct rtw_dev,
					      priv);
	int work_done = 0;

	if (rtwpci->rx_no_aspm)
		rtw_pci_link_ps(rtwdev, false);

	while (work_done < budget) {
		u32 work_done_once;

		work_done_once = rtw_pci_rx_napi(rtwdev, rtwpci, RTW_RX_QUEUE_MPDU,
						 budget - work_done);
		if (work_done_once == 0)
			break;
		work_done += work_done_once;
	}
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		spin_lock_bh(&rtwpci->irq_lock);
		if (rtwpci->running)
			rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
		spin_unlock_bh(&rtwpci->irq_lock);
		/* If the ISR fires during polling, before napi_complete(),
		 * and no further interrupt arrives, data on the dma ring
		 * will not be processed promptly. Check whether the dma
		 * ring is empty and call napi_schedule() accordingly.
		 */
		if (rtw_pci_get_hw_rx_ring_nr(rtwdev, rtwpci))
			napi_schedule(napi);
	}
	if (rtwpci->rx_no_aspm)
		rtw_pci_link_ps(rtwdev, true);

	return work_done;
}

static int rtw_pci_napi_init(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	rtwpci->netdev = alloc_netdev_dummy(0);
	if (!rtwpci->netdev)
		return -ENOMEM;

	netif_napi_add(rtwpci->netdev, &rtwpci->napi, rtw_pci_napi_poll);
	return 0;
}

static void rtw_pci_napi_deinit(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	rtw_pci_napi_stop(rtwdev);
	netif_napi_del(&rtwpci->napi);
	free_netdev(rtwpci->netdev);
}

static pci_ers_result_t rtw_pci_io_err_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	netif_device_detach(netdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t rtw_pci_io_slot_reset(struct pci_dev *pdev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
	struct rtw_dev *rtwdev = hw->priv;

	rtw_fw_recovery(rtwdev);

	return PCI_ERS_RESULT_RECOVERED;
}

static void rtw_pci_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, PCI_D0, 0);

	netif_device_attach(netdev);
}

const struct pci_error_handlers rtw_pci_err_handler = {
	.error_detected = rtw_pci_io_err_detected,
	.slot_reset = rtw_pci_io_slot_reset,
	.resume = rtw_pci_io_resume,
};
EXPORT_SYMBOL(rtw_pci_err_handler);

static int rtw_pci_disable_caps(const struct dmi_system_id *dmi)
{
	uintptr_t dis_caps = (uintptr_t)dmi->driver_data;

	if (dis_caps & BIT(QUIRK_DIS_CAP_PCI_ASPM))
		rtw_pci_disable_aspm = true;

	if (dis_caps & BIT(QUIRK_DIS_CAP_LPS_DEEP))
		rtw_disable_lps_deep_mode = true;

	return 1;
}

static const struct dmi_system_id rtw_pci_quirks[] = {
	{
		.callback = rtw_pci_disable_caps,
		.ident = "HP Notebook - P3S95EA#ACB",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "HP Notebook"),
			DMI_MATCH(DMI_PRODUCT_SKU, "P3S95EA#ACB"),
		},
		.driver_data = (void *)(BIT(QUIRK_DIS_CAP_PCI_ASPM) |
					BIT(QUIRK_DIS_CAP_LPS_DEEP)),
	},
	{}
};

int rtw_pci_probe(struct pci_dev *pdev,
		  const struct pci_device_id *id)
{
	struct pci_dev *bridge = pci_upstream_bridge(pdev);
	struct ieee80211_hw *hw;
	struct rtw_dev *rtwdev;
	struct rtw_pci *rtwpci;
	int drv_data_size;
	int ret;

	drv_data_size = sizeof(struct rtw_dev) + sizeof(struct rtw_pci);
	hw = ieee80211_alloc_hw(drv_data_size, &rtw_ops);
	if (!hw) {
		dev_err(&pdev->dev, "failed to allocate hw\n");
		return -ENOMEM;
	}

	rtwdev = hw->priv;
	rtwdev->hw = hw;
	rtwdev->dev = &pdev->dev;
	rtwdev->chip = (struct rtw_chip_info *)id->driver_data;
	rtwdev->hci.ops = &rtw_pci_ops;
	rtwdev->hci.type = RTW_HCI_TYPE_PCIE;

	rtwpci = (struct rtw_pci *)rtwdev->priv;
	atomic_set(&rtwpci->link_usage, 1);

	dmi_check_system(rtw_pci_quirks);

	ret = rtw_core_init(rtwdev);
	if (ret)
		goto err_release_hw;

	rtw_dbg(rtwdev, RTW_DBG_PCI,
		"rtw88 pci probe: vendor=0x%4.04X device=0x%4.04X rev=%d\n",
		pdev->vendor, pdev->device, pdev->revision);

	ret = rtw_pci_claim(rtwdev, pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to claim pci device\n");
		goto err_deinit_core;
	}

	ret = rtw_pci_setup_resource(rtwdev, pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to setup pci resources\n");
		goto err_pci_declaim;
	}

	ret = rtw_pci_napi_init(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to setup NAPI\n");
		goto err_pci_declaim;
	}

	ret = rtw_chip_info_setup(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to setup chip information\n");
		goto err_destroy_pci;
	}

	/* Disable PCIe ASPM L1 while doing NAPI poll for 8821CE */
	if (rtwdev->chip->id == RTW_CHIP_TYPE_8821C &&
	    bridge && bridge->vendor == PCI_VENDOR_ID_INTEL)
		rtwpci->rx_no_aspm = true;

	rtw_pci_phy_cfg(rtwdev);

	ret = rtw_register_hw(rtwdev, hw);
	if (ret) {
		rtw_err(rtwdev, "failed to register hw\n");
		goto err_destroy_pci;
	}

	ret = rtw_pci_request_irq(rtwdev, pdev);
	if (ret) {
		ieee80211_unregister_hw(hw);
		goto err_destroy_pci;
	}

	return 0;

err_destroy_pci:
	rtw_pci_napi_deinit(rtwdev);
	rtw_pci_destroy(rtwdev, pdev);

err_pci_declaim:
	rtw_pci_declaim(rtwdev, pdev);

err_deinit_core:
	rtw_core_deinit(rtwdev);

err_release_hw:
	ieee80211_free_hw(hw);

	return ret;
}
EXPORT_SYMBOL(rtw_pci_probe);

void rtw_pci_remove(struct pci_dev *pdev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
	struct rtw_dev *rtwdev;
	struct rtw_pci *rtwpci;

	if (!hw)
		return;

	rtwdev = hw->priv;
	rtwpci = (struct rtw_pci *)rtwdev->priv;

	rtw_unregister_hw(rtwdev, hw);
	rtw_pci_disable_interrupt(rtwdev, rtwpci);
	rtw_pci_napi_deinit(rtwdev);
	rtw_pci_destroy(rtwdev, pdev);
	rtw_pci_declaim(rtwdev, pdev);
	rtw_pci_free_irq(rtwdev, pdev);
	rtw_core_deinit(rtwdev);
	ieee80211_free_hw(hw);
}
EXPORT_SYMBOL(rtw_pci_remove);

void rtw_pci_shutdown(struct pci_dev *pdev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
	struct rtw_dev *rtwdev;
	const struct rtw_chip_info *chip;

	if (!hw)
		return;

	rtwdev = hw->priv;
	chip = rtwdev->chip;

	if (chip->ops->shutdown)
		chip->ops->shutdown(rtwdev);

	pci_set_power_state(pdev, PCI_D3hot);
}
EXPORT_SYMBOL(rtw_pci_shutdown);

MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek PCI 802.11ac wireless driver");
MODULE_LICENSE("Dual BSD/GPL");