// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019  Realtek Corporation
 */

#include <linux/module.h>
#include <linux/pci.h>
#include "main.h"
#include "pci.h"
#include "reg.h"
#include "tx.h"
#include "rx.h"
#include "fw.h"
#include "ps.h"
#include "debug.h"

static bool rtw_disable_msi;
module_param_named(disable_msi, rtw_disable_msi, bool, 0644);
MODULE_PARM_DESC(disable_msi, "Set Y to disable MSI interrupt support");

static u32 rtw_pci_tx_queue_idx_addr[] = {
	[RTW_TX_QUEUE_BK]	= RTK_PCI_TXBD_IDX_BKQ,
	[RTW_TX_QUEUE_BE]	= RTK_PCI_TXBD_IDX_BEQ,
	[RTW_TX_QUEUE_VI]	= RTK_PCI_TXBD_IDX_VIQ,
	[RTW_TX_QUEUE_VO]	= RTK_PCI_TXBD_IDX_VOQ,
	[RTW_TX_QUEUE_MGMT]	= RTK_PCI_TXBD_IDX_MGMTQ,
	[RTW_TX_QUEUE_HI0]	= RTK_PCI_TXBD_IDX_HI0Q,
	[RTW_TX_QUEUE_H2C]	= RTK_PCI_TXBD_IDX_H2CQ,
};

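/* Map a driver TX queue to the QSEL value carried in the TX descriptor,
 * which tells the hardware which queue a packet belongs to. Regular data
 * queues simply reuse the skb priority as their queue selection.
 */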
static u8 rtw_pci_get_tx_qsel(struct sk_buff *skb, u8 queue)
{
	switch (queue) {
	case RTW_TX_QUEUE_BCN:
		return TX_DESC_QSEL_BEACON;
	case RTW_TX_QUEUE_H2C:
		return TX_DESC_QSEL_H2C;
	case RTW_TX_QUEUE_MGMT:
		return TX_DESC_QSEL_MGMT;
	case RTW_TX_QUEUE_HI0:
		return TX_DESC_QSEL_HIGH;
	default:
		return skb->priority;
	}
}

static u8 rtw_pci_read8(struct rtw_dev *rtwdev, u32 addr)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	return readb(rtwpci->mmap + addr);
}

static u16 rtw_pci_read16(struct rtw_dev *rtwdev, u32 addr)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	return readw(rtwpci->mmap + addr);
}

static u32 rtw_pci_read32(struct rtw_dev *rtwdev, u32 addr)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	return readl(rtwpci->mmap + addr);
}

static void rtw_pci_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	writeb(val, rtwpci->mmap + addr);
}

static void rtw_pci_write16(struct rtw_dev *rtwdev, u32 addr, u16 val)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	writew(val, rtwpci->mmap + addr);
}

static void rtw_pci_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	writel(val, rtwpci->mmap + addr);
}

static inline void *rtw_pci_get_tx_desc(struct rtw_pci_tx_ring *tx_ring, u8 idx)
{
	int offset = tx_ring->r.desc_size * idx;

	return tx_ring->r.head + offset;
}

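/* Unmap and free every skb still queued on a TX ring; used both when a
 * ring is torn down and when DMA is released on stop.
 */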
static void rtw_pci_free_tx_ring_skbs(struct rtw_dev *rtwdev,
				      struct rtw_pci_tx_ring *tx_ring)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct rtw_pci_tx_data *tx_data;
	struct sk_buff *skb, *tmp;
	dma_addr_t dma;

	/* free every skb remaining in the tx list */
	skb_queue_walk_safe(&tx_ring->queue, skb, tmp) {
		__skb_unlink(skb, &tx_ring->queue);
		tx_data = rtw_pci_get_tx_data(skb);
		dma = tx_data->dma;

		pci_unmap_single(pdev, dma, skb->len, PCI_DMA_TODEVICE);
		dev_kfree_skb_any(skb);
	}
}

static void rtw_pci_free_tx_ring(struct rtw_dev *rtwdev,
				 struct rtw_pci_tx_ring *tx_ring)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	u8 *head = tx_ring->r.head;
	u32 len = tx_ring->r.len;
	int ring_sz = len * tx_ring->r.desc_size;

	rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);

	/* free the ring itself */
	pci_free_consistent(pdev, ring_sz, head, tx_ring->r.dma);
	tx_ring->r.head = NULL;
}

static void rtw_pci_free_rx_ring_skbs(struct rtw_dev *rtwdev,
				      struct rtw_pci_rx_ring *rx_ring)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct sk_buff *skb;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;
	dma_addr_t dma;
	int i;

	for (i = 0; i < rx_ring->r.len; i++) {
		skb = rx_ring->buf[i];
		if (!skb)
			continue;

		dma = *((dma_addr_t *)skb->cb);
		pci_unmap_single(pdev, dma, buf_sz, PCI_DMA_FROMDEVICE);
		dev_kfree_skb(skb);
		rx_ring->buf[i] = NULL;
	}
}

static void rtw_pci_free_rx_ring(struct rtw_dev *rtwdev,
				 struct rtw_pci_rx_ring *rx_ring)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	u8 *head = rx_ring->r.head;
	int ring_sz = rx_ring->r.desc_size * rx_ring->r.len;

	rtw_pci_free_rx_ring_skbs(rtwdev, rx_ring);

	pci_free_consistent(pdev, ring_sz, head, rx_ring->r.dma);
}

static void rtw_pci_free_trx_ring(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *tx_ring;
	struct rtw_pci_rx_ring *rx_ring;
	int i;

	for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		rtw_pci_free_tx_ring(rtwdev, tx_ring);
	}

	for (i = 0; i < RTK_MAX_RX_QUEUE_NUM; i++) {
		rx_ring = &rtwpci->rx_rings[i];
		rtw_pci_free_rx_ring(rtwdev, rx_ring);
	}
}

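/* Allocate one TX ring: a coherent DMA buffer holding 'len' buffer
 * descriptors of 'desc_size' bytes each, plus an skb queue shadowing
 * the packets currently owned by the hardware.
 */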
static int rtw_pci_init_tx_ring(struct rtw_dev *rtwdev,
				struct rtw_pci_tx_ring *tx_ring,
				u8 desc_size, u32 len)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	int ring_sz = desc_size * len;
	dma_addr_t dma;
	u8 *head;

	if (len > TRX_BD_IDX_MASK) {
		rtw_err(rtwdev, "len %d exceeds maximum TX entries\n", len);
		return -EINVAL;
	}

	head = pci_zalloc_consistent(pdev, ring_sz, &dma);
	if (!head) {
		rtw_err(rtwdev, "failed to allocate tx ring\n");
		return -ENOMEM;
	}

	skb_queue_head_init(&tx_ring->queue);
	tx_ring->r.head = head;
	tx_ring->r.dma = dma;
	tx_ring->r.len = len;
	tx_ring->r.desc_size = desc_size;
	tx_ring->r.wp = 0;
	tx_ring->r.rp = 0;

	return 0;
}

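/* Map an RX skb for DMA and program the corresponding RX buffer
 * descriptor with its size and address. The DMA address is stashed in
 * skb->cb so it can be unmapped later.
 */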
static int rtw_pci_reset_rx_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
				 struct rtw_pci_rx_ring *rx_ring,
				 u32 idx, u32 desc_sz)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct rtw_pci_rx_buffer_desc *buf_desc;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;
	dma_addr_t dma;

	if (!skb)
		return -EINVAL;

	dma = pci_map_single(pdev, skb->data, buf_sz, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, dma))
		return -EBUSY;

	*((dma_addr_t *)skb->cb) = dma;
	buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
						     idx * desc_sz);
	memset(buf_desc, 0, sizeof(*buf_desc));
	buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
	buf_desc->dma = cpu_to_le32(dma);

	return 0;
}

static void rtw_pci_sync_rx_desc_device(struct rtw_dev *rtwdev, dma_addr_t dma,
					struct rtw_pci_rx_ring *rx_ring,
					u32 idx, u32 desc_sz)
{
	struct device *dev = rtwdev->dev;
	struct rtw_pci_rx_buffer_desc *buf_desc;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;

	dma_sync_single_for_device(dev, dma, buf_sz, DMA_FROM_DEVICE);

	buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
						     idx * desc_sz);
	memset(buf_desc, 0, sizeof(*buf_desc));
	buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
	buf_desc->dma = cpu_to_le32(dma);
}

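/* Allocate one RX ring and pre-fill it: every descriptor gets a mapped
 * skb up front so the hardware can DMA frames as soon as RX starts. On
 * failure, unwind all skbs mapped so far and free the ring.
 */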
static int rtw_pci_init_rx_ring(struct rtw_dev *rtwdev,
				struct rtw_pci_rx_ring *rx_ring,
				u8 desc_size, u32 len)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct sk_buff *skb = NULL;
	dma_addr_t dma;
	u8 *head;
	int ring_sz = desc_size * len;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;
	int i, allocated;
	int ret = 0;

	if (len > TRX_BD_IDX_MASK) {
		rtw_err(rtwdev, "len %d exceeds maximum RX entries\n", len);
		return -EINVAL;
	}

	head = pci_zalloc_consistent(pdev, ring_sz, &dma);
	if (!head) {
		rtw_err(rtwdev, "failed to allocate rx ring\n");
		return -ENOMEM;
	}
	rx_ring->r.head = head;

	for (i = 0; i < len; i++) {
		skb = dev_alloc_skb(buf_sz);
		if (!skb) {
			allocated = i;
			ret = -ENOMEM;
			goto err_out;
		}

		memset(skb->data, 0, buf_sz);
		rx_ring->buf[i] = skb;
		ret = rtw_pci_reset_rx_desc(rtwdev, skb, rx_ring, i, desc_size);
		if (ret) {
			allocated = i;
			dev_kfree_skb_any(skb);
			goto err_out;
		}
	}

	rx_ring->r.dma = dma;
	rx_ring->r.len = len;
	rx_ring->r.desc_size = desc_size;
	rx_ring->r.wp = 0;
	rx_ring->r.rp = 0;

	return 0;

err_out:
	for (i = 0; i < allocated; i++) {
		skb = rx_ring->buf[i];
		if (!skb)
			continue;
		dma = *((dma_addr_t *)skb->cb);
		pci_unmap_single(pdev, dma, buf_sz, PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(skb);
		rx_ring->buf[i] = NULL;
	}
	pci_free_consistent(pdev, ring_sz, head, dma);

	rtw_err(rtwdev, "failed to init rx buffer\n");

	return ret;
}

static int rtw_pci_init_trx_ring(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *tx_ring;
	struct rtw_pci_rx_ring *rx_ring;
	struct rtw_chip_info *chip = rtwdev->chip;
	int i = 0, j = 0, tx_alloced = 0, rx_alloced = 0;
	int tx_desc_size, rx_desc_size;
	u32 len;
	int ret;

	tx_desc_size = chip->tx_buf_desc_sz;

	for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		len = max_num_of_tx_queue(i);
		ret = rtw_pci_init_tx_ring(rtwdev, tx_ring, tx_desc_size, len);
		if (ret)
			goto out;
	}

	rx_desc_size = chip->rx_buf_desc_sz;

	for (j = 0; j < RTK_MAX_RX_QUEUE_NUM; j++) {
		rx_ring = &rtwpci->rx_rings[j];
		ret = rtw_pci_init_rx_ring(rtwdev, rx_ring, rx_desc_size,
					   RTK_MAX_RX_DESC_NUM);
		if (ret)
			goto out;
	}

	return 0;

out:
	tx_alloced = i;
	for (i = 0; i < tx_alloced; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		rtw_pci_free_tx_ring(rtwdev, tx_ring);
	}

	rx_alloced = j;
	for (j = 0; j < rx_alloced; j++) {
		rx_ring = &rtwpci->rx_rings[j];
		rtw_pci_free_rx_ring(rtwdev, rx_ring);
	}

	return ret;
}

static void rtw_pci_deinit(struct rtw_dev *rtwdev)
{
	rtw_pci_free_trx_ring(rtwdev);
}

static int rtw_pci_init(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	int ret = 0;

	rtwpci->irq_mask[0] = IMR_HIGHDOK |
			      IMR_MGNTDOK |
			      IMR_BKDOK |
			      IMR_BEDOK |
			      IMR_VIDOK |
			      IMR_VODOK |
			      IMR_ROK |
			      IMR_BCNDMAINT_E |
			      0;
	rtwpci->irq_mask[1] = IMR_TXFOVW |
			      0;
	rtwpci->irq_mask[3] = IMR_H2CDOK |
			      0;
	spin_lock_init(&rtwpci->irq_lock);
	spin_lock_init(&rtwpci->hwirq_lock);
	ret = rtw_pci_init_trx_ring(rtwdev);

	return ret;
}

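/* Program the hardware with the base DMA address and length of every
 * TX/RX buffer-descriptor ring, and reset all read/write pointers so
 * host and hardware agree on an empty ring state.
 */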
static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	u32 len;
	u8 tmp;
	dma_addr_t dma;

	tmp = rtw_read8(rtwdev, RTK_PCI_CTRL + 3);
	rtw_write8(rtwdev, RTK_PCI_CTRL + 3, tmp | 0xf7);

	dma = rtwpci->tx_rings[RTW_TX_QUEUE_BCN].r.dma;
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BCNQ, dma);

	if (!rtw_chip_wcpu_11n(rtwdev)) {
		len = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.len;
		dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma;
		rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0;
		rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.wp = 0;
		rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_H2CQ, len & TRX_BD_IDX_MASK);
		rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_H2CQ, dma);
	}

	len = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BKQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BKQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BEQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BEQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VOQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VOQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VIQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VIQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_MGMTQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_MGMTQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_HI0Q, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_HI0Q, dma);

	len = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.len;
	dma = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.dma;
	rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.rp = 0;
	rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_RXBD_NUM_MPDUQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_RXBD_DESA_MPDUQ, dma);

	/* reset read/write point */
	rtw_write32(rtwdev, RTK_PCI_TXBD_RWPTR_CLR, 0xffffffff);

	/* reset H2C Queue index in a single write */
	if (rtw_chip_wcpu_11ac(rtwdev))
		rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR,
				BIT_CLR_H2CQ_HOST_IDX | BIT_CLR_H2CQ_HW_IDX);
}

static void rtw_pci_reset_trx_ring(struct rtw_dev *rtwdev)
{
	rtw_pci_reset_buf_desc(rtwdev);
}

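/* Interrupts are enabled/disabled by writing the interrupt mask
 * registers (HIMR); hwirq_lock serializes these writes against the
 * interrupt handler itself.
 */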
static void rtw_pci_enable_interrupt(struct rtw_dev *rtwdev,
				     struct rtw_pci *rtwpci)
{
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->hwirq_lock, flags);

	rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0]);
	rtw_write32(rtwdev, RTK_PCI_HIMR1, rtwpci->irq_mask[1]);
	if (rtw_chip_wcpu_11ac(rtwdev))
		rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]);

	rtwpci->irq_enabled = true;

	spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
}

static void rtw_pci_disable_interrupt(struct rtw_dev *rtwdev,
				      struct rtw_pci *rtwpci)
{
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->hwirq_lock, flags);

	if (!rtwpci->irq_enabled)
		goto out;

	rtw_write32(rtwdev, RTK_PCI_HIMR0, 0);
	rtw_write32(rtwdev, RTK_PCI_HIMR1, 0);
	if (rtw_chip_wcpu_11ac(rtwdev))
		rtw_write32(rtwdev, RTK_PCI_HIMR3, 0);

	rtwpci->irq_enabled = false;

out:
	spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
}

static void rtw_pci_dma_reset(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
{
	/* reset dma and rx tag */
	rtw_write32_set(rtwdev, RTK_PCI_CTRL,
			BIT_RST_TRXDMA_INTF | BIT_RX_TAG_EN);
	rtwpci->rx_tag = 0;
}

static int rtw_pci_setup(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	rtw_pci_reset_trx_ring(rtwdev);
	rtw_pci_dma_reset(rtwdev, rtwpci);

	return 0;
}

static void rtw_pci_dma_release(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
{
	struct rtw_pci_tx_ring *tx_ring;
	u8 queue;

	rtw_pci_reset_trx_ring(rtwdev);
	for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
		tx_ring = &rtwpci->tx_rings[queue];
		rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);
	}
}

static int rtw_pci_start(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	spin_lock_bh(&rtwpci->irq_lock);
	rtw_pci_enable_interrupt(rtwdev, rtwpci);
	spin_unlock_bh(&rtwpci->irq_lock);

	return 0;
}

static void rtw_pci_stop(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	spin_lock_bh(&rtwpci->irq_lock);
	rtw_pci_disable_interrupt(rtwdev, rtwpci);
	rtw_pci_dma_release(rtwdev, rtwpci);
	spin_unlock_bh(&rtwpci->irq_lock);
}

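/* Enter deep power save only when every data TX ring is empty, since
 * TX DMA must not run while the chip is in deep PS state.
 */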
static void rtw_pci_deep_ps_enter(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *tx_ring;
	bool tx_empty = true;
	u8 queue;

	lockdep_assert_held(&rtwpci->irq_lock);

	/* TX DMA is not allowed while in deep PS state */
	for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
		/* The BCN queue holds reserved pages and has no DMA
		 * interrupt; the H2C queue is managed by the firmware.
		 */
		if (queue == RTW_TX_QUEUE_BCN ||
		    queue == RTW_TX_QUEUE_H2C)
			continue;

		tx_ring = &rtwpci->tx_rings[queue];

		/* check if there is any skb DMAing */
		if (skb_queue_len(&tx_ring->queue)) {
			tx_empty = false;
			break;
		}
	}

	if (!tx_empty) {
		rtw_dbg(rtwdev, RTW_DBG_PS,
			"TX path not empty, cannot enter deep power save state\n");
		return;
	}

	set_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags);
	rtw_power_mode_change(rtwdev, true);
}

static void rtw_pci_deep_ps_leave(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	lockdep_assert_held(&rtwpci->irq_lock);

	if (test_and_clear_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
		rtw_power_mode_change(rtwdev, false);
}

static void rtw_pci_deep_ps(struct rtw_dev *rtwdev, bool enter)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	spin_lock_bh(&rtwpci->irq_lock);

	if (enter && !test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
		rtw_pci_deep_ps_enter(rtwdev);

	if (!enter && test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
		rtw_pci_deep_ps_leave(rtwdev);

	spin_unlock_bh(&rtwpci->irq_lock);
}

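/* mac80211 access categories mapped onto the chip's hardware TX queues */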
static u8 ac_to_hwq[] = {
	[IEEE80211_AC_VO] = RTW_TX_QUEUE_VO,
	[IEEE80211_AC_VI] = RTW_TX_QUEUE_VI,
	[IEEE80211_AC_BE] = RTW_TX_QUEUE_BE,
	[IEEE80211_AC_BK] = RTW_TX_QUEUE_BK,
};

static u8 rtw_hw_queue_mapping(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	__le16 fc = hdr->frame_control;
	u8 q_mapping = skb_get_queue_mapping(skb);
	u8 queue;

	if (unlikely(ieee80211_is_beacon(fc)))
		queue = RTW_TX_QUEUE_BCN;
	else if (unlikely(ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)))
		queue = RTW_TX_QUEUE_MGMT;
	else if (WARN_ON_ONCE(q_mapping >= ARRAY_SIZE(ac_to_hwq)))
		queue = ac_to_hwq[IEEE80211_AC_BE];
	else
		queue = ac_to_hwq[q_mapping];

	return queue;
}

static void rtw_pci_release_rsvd_page(struct rtw_pci *rtwpci,
				      struct rtw_pci_tx_ring *ring)
{
	struct sk_buff *prev = skb_dequeue(&ring->queue);
	struct rtw_pci_tx_data *tx_data;
	dma_addr_t dma;

	if (!prev)
		return;

	tx_data = rtw_pci_get_tx_data(prev);
	dma = tx_data->dma;
	pci_unmap_single(rtwpci->pdev, dma, prev->len,
			 PCI_DMA_TODEVICE);
	dev_kfree_skb_any(prev);
}

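/* The hardware writes an incrementing tag into each completed RX buffer
 * descriptor; the driver keeps a shadow counter, and a mismatch means
 * the bus or DMA engine has fallen out of sync with the host.
 */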
static void rtw_pci_dma_check(struct rtw_dev *rtwdev,
			      struct rtw_pci_rx_ring *rx_ring,
			      u32 idx)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pci_rx_buffer_desc *buf_desc;
	u32 desc_sz = chip->rx_buf_desc_sz;
	u16 total_pkt_size;

	buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
						     idx * desc_sz);
	total_pkt_size = le16_to_cpu(buf_desc->total_pkt_size);

	/* rx tag mismatch, throw a warning */
	if (total_pkt_size != rtwpci->rx_tag)
		rtw_warn(rtwdev, "pci bus timeout, check dma status\n");

	rtwpci->rx_tag = (rtwpci->rx_tag + 1) % RX_TAG_MAX;
}

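/* Kick TX DMA for one queue by publishing the ring's write pointer to
 * the hardware index register; leave deep PS first, since TX DMA must
 * not run in that state.
 */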
static void rtw_pci_tx_kick_off_queue(struct rtw_dev *rtwdev, u8 queue)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *ring;
	u32 bd_idx;

	ring = &rtwpci->tx_rings[queue];
	bd_idx = rtw_pci_tx_queue_idx_addr[queue];

	spin_lock_bh(&rtwpci->irq_lock);
	rtw_pci_deep_ps_leave(rtwdev);
	rtw_write16(rtwdev, bd_idx, ring->r.wp & TRX_BD_IDX_MASK);
	spin_unlock_bh(&rtwpci->irq_lock);
}

static void rtw_pci_tx_kick_off(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	u8 queue;

	for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++)
		if (test_and_clear_bit(queue, rtwpci->tx_queued))
			rtw_pci_tx_kick_off_queue(rtwdev, queue);
}

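/* Queue one skb for TX: prepend the packet descriptor, map the whole
 * buffer for DMA, and fill a two-segment buffer descriptor (segment 0
 * covers the packet descriptor, segment 1 the frame payload). The write
 * pointer is only advanced here; the actual kick-off happens later via
 * rtw_pci_tx_kick_off().
 */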
static int rtw_pci_tx_write_data(struct rtw_dev *rtwdev,
				 struct rtw_tx_pkt_info *pkt_info,
				 struct sk_buff *skb, u8 queue)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pci_tx_ring *ring;
	struct rtw_pci_tx_data *tx_data;
	dma_addr_t dma;
	u32 tx_pkt_desc_sz = chip->tx_pkt_desc_sz;
	u32 tx_buf_desc_sz = chip->tx_buf_desc_sz;
	u32 size;
	u32 psb_len;
	u8 *pkt_desc;
	struct rtw_pci_tx_buffer_desc *buf_desc;

	ring = &rtwpci->tx_rings[queue];

	size = skb->len;

	if (queue == RTW_TX_QUEUE_BCN)
		rtw_pci_release_rsvd_page(rtwpci, ring);
	else if (!avail_desc(ring->r.wp, ring->r.rp, ring->r.len))
		return -ENOSPC;

	pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
	memset(pkt_desc, 0, tx_pkt_desc_sz);
	pkt_info->qsel = rtw_pci_get_tx_qsel(skb, queue);
	rtw_tx_fill_tx_desc(pkt_info, skb);
	dma = pci_map_single(rtwpci->pdev, skb->data, skb->len,
			     PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(rtwpci->pdev, dma))
		return -EBUSY;

	/* after this the DMA mapping is committed; there is no way back */
	buf_desc = get_tx_buffer_desc(ring, tx_buf_desc_sz);
	memset(buf_desc, 0, tx_buf_desc_sz);
	psb_len = (skb->len - 1) / 128 + 1;
	if (queue == RTW_TX_QUEUE_BCN)
		psb_len |= 1 << RTK_PCI_TXBD_OWN_OFFSET;

	buf_desc[0].psb_len = cpu_to_le16(psb_len);
	buf_desc[0].buf_size = cpu_to_le16(tx_pkt_desc_sz);
	buf_desc[0].dma = cpu_to_le32(dma);
	buf_desc[1].buf_size = cpu_to_le16(size);
	buf_desc[1].dma = cpu_to_le32(dma + tx_pkt_desc_sz);

	tx_data = rtw_pci_get_tx_data(skb);
	tx_data->dma = dma;
	tx_data->sn = pkt_info->sn;

	spin_lock_bh(&rtwpci->irq_lock);

	skb_queue_tail(&ring->queue, skb);

	if (queue == RTW_TX_QUEUE_BCN)
		goto out_unlock;

	/* update write-index, and kick it off later */
	set_bit(queue, rtwpci->tx_queued);
	if (++ring->r.wp >= ring->r.len)
		ring->r.wp = 0;

out_unlock:
	spin_unlock_bh(&rtwpci->irq_lock);

	return 0;
}

static int rtw_pci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf,
					u32 size)
{
	struct sk_buff *skb;
	struct rtw_tx_pkt_info pkt_info = {0};
	u8 reg_bcn_work;
	int ret;

	skb = rtw_tx_write_data_rsvd_page_get(rtwdev, &pkt_info, buf, size);
	if (!skb)
		return -ENOMEM;

	ret = rtw_pci_tx_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_BCN);
	if (ret) {
		rtw_err(rtwdev, "failed to write rsvd page data\n");
		return ret;
	}

	/* reserved pages go through beacon queue */
	reg_bcn_work = rtw_read8(rtwdev, RTK_PCI_TXBD_BCN_WORK);
	reg_bcn_work |= BIT_PCI_BCNQ_FLAG;
	rtw_write8(rtwdev, RTK_PCI_TXBD_BCN_WORK, reg_bcn_work);

	return 0;
}

static int rtw_pci_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
	struct sk_buff *skb;
	struct rtw_tx_pkt_info pkt_info = {0};
	int ret;

	skb = rtw_tx_write_data_h2c_get(rtwdev, &pkt_info, buf, size);
	if (!skb)
		return -ENOMEM;

	ret = rtw_pci_tx_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_H2C);
	if (ret) {
		rtw_err(rtwdev, "failed to write h2c data\n");
		return ret;
	}

	rtw_pci_tx_kick_off_queue(rtwdev, RTW_TX_QUEUE_H2C);

	return 0;
}

static int rtw_pci_tx_write(struct rtw_dev *rtwdev,
			    struct rtw_tx_pkt_info *pkt_info,
			    struct sk_buff *skb)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *ring;
	u8 queue = rtw_hw_queue_mapping(skb);
	int ret;

	ret = rtw_pci_tx_write_data(rtwdev, pkt_info, skb, queue);
	if (ret)
		return ret;

	ring = &rtwpci->tx_rings[queue];
	if (avail_desc(ring->r.wp, ring->r.rp, ring->r.len) < 2) {
		ieee80211_stop_queue(rtwdev->hw, skb_get_queue_mapping(skb));
		ring->queue_stopped = true;
	}

	return 0;
}

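/* TX completion: read the hardware's current read pointer, reclaim every
 * completed skb between the driver's read pointer and it, unmap the DMA
 * buffer, and hand the TX status back to mac80211 (H2C command packets
 * are simply freed).
 */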
static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
			   u8 hw_queue)
{
	struct ieee80211_hw *hw = rtwdev->hw;
	struct ieee80211_tx_info *info;
	struct rtw_pci_tx_ring *ring;
	struct rtw_pci_tx_data *tx_data;
	struct sk_buff *skb;
	u32 count;
	u32 bd_idx_addr;
	u32 bd_idx, cur_rp;
	u16 q_map;

	ring = &rtwpci->tx_rings[hw_queue];

	bd_idx_addr = rtw_pci_tx_queue_idx_addr[hw_queue];
	bd_idx = rtw_read32(rtwdev, bd_idx_addr);
	cur_rp = bd_idx >> 16;
	cur_rp &= TRX_BD_IDX_MASK;
	if (cur_rp >= ring->r.rp)
		count = cur_rp - ring->r.rp;
	else
		count = ring->r.len - (ring->r.rp - cur_rp);

	while (count--) {
		skb = skb_dequeue(&ring->queue);
		if (!skb) {
			rtw_err(rtwdev, "failed to dequeue %d skb TX queue %d, BD=0x%08x, rp %d -> %d\n",
				count, hw_queue, bd_idx, ring->r.rp, cur_rp);
			break;
		}
		tx_data = rtw_pci_get_tx_data(skb);
		pci_unmap_single(rtwpci->pdev, tx_data->dma, skb->len,
				 PCI_DMA_TODEVICE);

		/* just free command packets from host to card */
		if (hw_queue == RTW_TX_QUEUE_H2C) {
			dev_kfree_skb_irq(skb);
			continue;
		}

		if (ring->queue_stopped &&
		    avail_desc(ring->r.wp, ring->r.rp, ring->r.len) > 4) {
			q_map = skb_get_queue_mapping(skb);
			ieee80211_wake_queue(hw, q_map);
			ring->queue_stopped = false;
		}

		skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);

		info = IEEE80211_SKB_CB(skb);

		/* enqueue to wait for tx report */
		if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
			rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn);
			continue;
		}

		/* always ACK for others, so they won't be marked as dropped */
		if (info->flags & IEEE80211_TX_CTL_NO_ACK)
			info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
		else
			info->flags |= IEEE80211_TX_STAT_ACK;

		ieee80211_tx_info_clear_status(info);
		ieee80211_tx_status_irqsafe(hw, skb);
	}

	ring->r.rp = cur_rp;
}

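/* RX completion: for each descriptor the hardware has filled, copy the
 * frame into a freshly allocated skb and pass it up (to the C2H handler
 * or to mac80211), then re-arm the original DMA buffer in place so the
 * ring never needs remapping.
 */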
static void rtw_pci_rx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
			   u8 hw_queue)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pci_rx_ring *ring;
	struct rtw_rx_pkt_stat pkt_stat;
	struct ieee80211_rx_status rx_status;
	struct sk_buff *skb, *new;
	u32 cur_wp, cur_rp, tmp;
	u32 count;
	u32 pkt_offset;
	u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
	u32 buf_desc_sz = chip->rx_buf_desc_sz;
	u32 new_len;
	u8 *rx_desc;
	dma_addr_t dma;

	ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];

	tmp = rtw_read32(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ);
	cur_wp = tmp >> 16;
	cur_wp &= TRX_BD_IDX_MASK;
	if (cur_wp >= ring->r.wp)
		count = cur_wp - ring->r.wp;
	else
		count = ring->r.len - (ring->r.wp - cur_wp);

	cur_rp = ring->r.rp;
	while (count--) {
		rtw_pci_dma_check(rtwdev, ring, cur_rp);
		skb = ring->buf[cur_rp];
		dma = *((dma_addr_t *)skb->cb);
		dma_sync_single_for_cpu(rtwdev->dev, dma, RTK_PCI_RX_BUF_SIZE,
					DMA_FROM_DEVICE);
		rx_desc = skb->data;
		chip->ops->query_rx_desc(rtwdev, rx_desc, &pkt_stat, &rx_status);

		/* offset from rx_desc to payload */
		pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz +
			     pkt_stat.shift;

		/* allocate a new skb for this frame,
		 * discard the frame if none available
		 */
		new_len = pkt_stat.pkt_len + pkt_offset;
		new = dev_alloc_skb(new_len);
		if (WARN_ONCE(!new, "rx routine starvation\n"))
			goto next_rp;

		/* put the DMA data including rx_desc from phy to new skb */
		skb_put_data(new, skb->data, new_len);

		if (pkt_stat.is_c2h) {
			rtw_fw_c2h_cmd_rx_irqsafe(rtwdev, pkt_offset, new);
		} else {
			/* remove rx_desc */
			skb_pull(new, pkt_offset);

			rtw_rx_stats(rtwdev, pkt_stat.vif, new);
			memcpy(new->cb, &rx_status, sizeof(rx_status));
			ieee80211_rx_irqsafe(rtwdev->hw, new);
		}

next_rp:
		/* new skb delivered to mac80211, re-enable original skb DMA */
		rtw_pci_sync_rx_desc_device(rtwdev, dma, ring, cur_rp,
					    buf_desc_sz);

		/* host read next element in ring */
		if (++cur_rp >= ring->r.len)
			cur_rp = 0;
	}

	ring->r.rp = cur_rp;
	ring->r.wp = cur_wp;
	rtw_write16(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ, ring->r.rp);
}

static void rtw_pci_irq_recognized(struct rtw_dev *rtwdev,
				   struct rtw_pci *rtwpci, u32 *irq_status)
{
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->hwirq_lock, flags);

	irq_status[0] = rtw_read32(rtwdev, RTK_PCI_HISR0);
	irq_status[1] = rtw_read32(rtwdev, RTK_PCI_HISR1);
	if (rtw_chip_wcpu_11ac(rtwdev))
		irq_status[3] = rtw_read32(rtwdev, RTK_PCI_HISR3);
	else
		irq_status[3] = 0;
	irq_status[0] &= rtwpci->irq_mask[0];
	irq_status[1] &= rtwpci->irq_mask[1];
	irq_status[3] &= rtwpci->irq_mask[3];
	rtw_write32(rtwdev, RTK_PCI_HISR0, irq_status[0]);
	rtw_write32(rtwdev, RTK_PCI_HISR1, irq_status[1]);
	if (rtw_chip_wcpu_11ac(rtwdev))
		rtw_write32(rtwdev, RTK_PCI_HISR3, irq_status[3]);

	spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
}

static irqreturn_t rtw_pci_interrupt_handler(int irq, void *dev)
{
	struct rtw_dev *rtwdev = dev;
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	/* disable RTW PCI interrupt to avoid more interrupts before the end of
	 * thread function
	 *
	 * disable HIMR here to also avoid new HISR flag being raised before
	 * the HISRs have been Write-1-cleared for MSI. If not all of the HISRs
	 * are cleared, the edge-triggered interrupt will not be generated when
	 * a new HISR flag is set.
	 */
	rtw_pci_disable_interrupt(rtwdev, rtwpci);

	return IRQ_WAKE_THREAD;
}

static irqreturn_t rtw_pci_interrupt_threadfn(int irq, void *dev)
{
	struct rtw_dev *rtwdev = dev;
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	u32 irq_status[4];

	spin_lock_bh(&rtwpci->irq_lock);
	rtw_pci_irq_recognized(rtwdev, rtwpci, irq_status);

	if (irq_status[0] & IMR_MGNTDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_MGMT);
	if (irq_status[0] & IMR_HIGHDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_HI0);
	if (irq_status[0] & IMR_BEDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BE);
	if (irq_status[0] & IMR_BKDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BK);
	if (irq_status[0] & IMR_VODOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VO);
	if (irq_status[0] & IMR_VIDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VI);
	if (irq_status[3] & IMR_H2CDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_H2C);
	if (irq_status[0] & IMR_ROK)
		rtw_pci_rx_isr(rtwdev, rtwpci, RTW_RX_QUEUE_MPDU);

	/* all of the jobs for this interrupt have been done */
	rtw_pci_enable_interrupt(rtwdev, rtwpci);
	spin_unlock_bh(&rtwpci->irq_lock);

	return IRQ_HANDLED;
}

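/* The chip exposes its register window through PCI BAR 2; map it so the
 * MMIO read/write ops above can reach the hardware registers.
 */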
static int rtw_pci_io_mapping(struct rtw_dev *rtwdev,
			      struct pci_dev *pdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	unsigned long len;
	u8 bar_id = 2;
	int ret;

	ret = pci_request_regions(pdev, KBUILD_MODNAME);
	if (ret) {
		rtw_err(rtwdev, "failed to request pci regions\n");
		return ret;
	}

	len = pci_resource_len(pdev, bar_id);
	rtwpci->mmap = pci_iomap(pdev, bar_id, len);
	if (!rtwpci->mmap) {
		pci_release_regions(pdev);
		rtw_err(rtwdev, "failed to map pci memory\n");
		return -ENOMEM;
	}

	return 0;
}

static void rtw_pci_io_unmapping(struct rtw_dev *rtwdev,
				 struct pci_dev *pdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	if (rtwpci->mmap) {
		pci_iounmap(pdev, rtwpci->mmap);
		pci_release_regions(pdev);
	}
}

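/* The DBI and MDIO helpers below provide indirect register access that
 * is not part of the normal MMIO window: DBI is used for PCIe link
 * configuration registers (e.g. RTK_PCIE_LINK_CFG), MDIO for the PCIe
 * PHY parameter tables. Both poll a flag bit until the hardware
 * completes the indirect access.
 */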
static void rtw_dbi_write8(struct rtw_dev *rtwdev, u16 addr, u8 data)
{
	u16 write_addr;
	u16 remainder = addr & ~(BITS_DBI_WREN | BITS_DBI_ADDR_MASK);
	u8 flag;
	u8 cnt;

	write_addr = addr & BITS_DBI_ADDR_MASK;
	write_addr |= u16_encode_bits(BIT(remainder), BITS_DBI_WREN);
	rtw_write8(rtwdev, REG_DBI_WDATA_V1 + remainder, data);
	rtw_write16(rtwdev, REG_DBI_FLAG_V1, write_addr);
	rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_WFLAG >> 16);

	for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
		flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
		if (flag == 0)
			return;

		udelay(10);
	}

	WARN(flag, "failed to write to DBI register, addr=0x%04x\n", addr);
}

static int rtw_dbi_read8(struct rtw_dev *rtwdev, u16 addr, u8 *value)
{
	u16 read_addr = addr & BITS_DBI_ADDR_MASK;
	u8 flag;
	u8 cnt;

	rtw_write16(rtwdev, REG_DBI_FLAG_V1, read_addr);
	rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_RFLAG >> 16);

	for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
		flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
		if (flag == 0) {
			read_addr = REG_DBI_RDATA_V1 + (addr & 3);
			*value = rtw_read8(rtwdev, read_addr);
			return 0;
		}

		udelay(10);
	}

	WARN(1, "failed to read DBI register, addr=0x%04x\n", addr);
	return -EIO;
}

static void rtw_mdio_write(struct rtw_dev *rtwdev, u8 addr, u16 data, bool g1)
{
	u8 page;
	u8 wflag;
	u8 cnt;

	rtw_write16(rtwdev, REG_MDIO_V1, data);

	page = addr < RTW_PCI_MDIO_PG_SZ ? 0 : 1;
	page += g1 ? RTW_PCI_MDIO_PG_OFFS_G1 : RTW_PCI_MDIO_PG_OFFS_G2;
	rtw_write8(rtwdev, REG_PCIE_MIX_CFG, addr & BITS_MDIO_ADDR_MASK);
	rtw_write8(rtwdev, REG_PCIE_MIX_CFG + 3, page);
	rtw_write32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1, 1);

	for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
		wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG,
					BIT_MDIO_WFLAG_V1);
		if (wflag == 0)
			return;

		udelay(10);
	}

	WARN(wflag, "failed to write to MDIO register, addr=0x%02x\n", addr);
}

static void rtw_pci_clkreq_set(struct rtw_dev *rtwdev, bool enable)
{
	u8 value;
	int ret;

	ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
	if (ret) {
		rtw_err(rtwdev, "failed to read CLKREQ_L1, ret=%d", ret);
		return;
	}

	if (enable)
		value |= BIT_CLKREQ_SW_EN;
	else
		value &= ~BIT_CLKREQ_SW_EN;

	rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
}

static void rtw_pci_aspm_set(struct rtw_dev *rtwdev, bool enable)
{
	u8 value;
	int ret;

	ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
	if (ret) {
		rtw_err(rtwdev, "failed to read ASPM, ret=%d", ret);
		return;
	}

	if (enable)
		value |= BIT_L1_SW_EN;
	else
		value &= ~BIT_L1_SW_EN;

	rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
}

static void rtw_pci_link_ps(struct rtw_dev *rtwdev, bool enter)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	/* Like CLKREQ, ASPM is also implemented by two HW modules, and can
	 * only be enabled when the host supports it.
	 *
	 * The ASPM mechanism should only be enabled when the driver/firmware
	 * enters power save mode and there is no heavy traffic, because we
	 * have seen interoperability issues where the link enters L1 on the
	 * fly even while the driver sustains high throughput. This is
	 * probably because the ASPM behavior varies slightly between SoCs.
	 */
	if (rtwpci->link_ctrl & PCI_EXP_LNKCTL_ASPM_L1)
		rtw_pci_aspm_set(rtwdev, enter);
}

static void rtw_pci_link_cfg(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;
	u16 link_ctrl;
	int ret;

	/* RTL8822CE has REFCLK auto calibration enabled, so it does not need
	 * extra clock delay to cover the REFCLK timing gap.
	 */
	if (chip->id == RTW_CHIP_TYPE_8822C)
		rtw_dbi_write8(rtwdev, RTK_PCIE_CLKDLY_CTRL, 0);

	/* Though the standard PCIe configuration space provides a link
	 * control register, by Realtek's design the driver must first check
	 * whether the host supports CLKREQ/ASPM before enabling the HW
	 * module.
	 *
	 * These functions are implemented by two associated HW modules: one
	 * accesses the PCIe configuration space to follow the host settings,
	 * and the other actually performs the CLKREQ/ASPM mechanisms. The
	 * latter is disabled by default, because the host may not support
	 * it, and wrong settings (e.g. CLKREQ# not bi-directional) could
	 * lead to device loss if the HW misbehaves on the link.
	 *
	 * Hence the driver first checks that the PCIe configuration space is
	 * synced and enabled, and only then turns on the module that is
	 * actually working on the mechanism.
	 */
	ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl);
	if (ret) {
		rtw_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret);
		return;
	}

	if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN)
		rtw_pci_clkreq_set(rtwdev, true);

	rtwpci->link_ctrl = link_ctrl;
}

static void rtw_pci_interface_cfg(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;

	switch (chip->id) {
	case RTW_CHIP_TYPE_8822C:
		if (rtwdev->hal.cut_version >= RTW_CHIP_VER_CUT_D)
			rtw_write32_mask(rtwdev, REG_HCI_MIX_CFG,
					 BIT_PCIE_EMAC_PDN_AUX_TO_FAST_CLK, 1);
		break;
	default:
		break;
	}
}

static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	const struct rtw_intf_phy_para *para;
	u16 cut;
	u16 value;
	u16 offset;
	int i;

	cut = BIT(0) << rtwdev->hal.cut_version;

	for (i = 0; i < chip->intf_table->n_gen1_para; i++) {
		para = &chip->intf_table->gen1_para[i];
		if (!(para->cut_mask & cut))
			continue;
		if (para->offset == 0xffff)
			break;
		offset = para->offset;
		value = para->value;
		if (para->ip_sel == RTW_IP_SEL_PHY)
			rtw_mdio_write(rtwdev, offset, value, true);
		else
			rtw_dbi_write8(rtwdev, offset, value);
	}

	for (i = 0; i < chip->intf_table->n_gen2_para; i++) {
		para = &chip->intf_table->gen2_para[i];
		if (!(para->cut_mask & cut))
			continue;
		if (para->offset == 0xffff)
			break;
		offset = para->offset;
		value = para->value;
		if (para->ip_sel == RTW_IP_SEL_PHY)
			rtw_mdio_write(rtwdev, offset, value, false);
		else
			rtw_dbi_write8(rtwdev, offset, value);
	}

	rtw_pci_link_cfg(rtwdev);
}

static int __maybe_unused rtw_pci_suspend(struct device *dev)
{
	return 0;
}

static int __maybe_unused rtw_pci_resume(struct device *dev)
{
	return 0;
}

SIMPLE_DEV_PM_OPS(rtw_pm_ops, rtw_pci_suspend, rtw_pci_resume);
EXPORT_SYMBOL(rtw_pm_ops);

static int rtw_pci_claim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to enable pci device\n");
		return ret;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, rtwdev->hw);
	SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);

	return 0;
}

static void rtw_pci_declaim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	pci_clear_master(pdev);
	pci_disable_device(pdev);
}

static int rtw_pci_setup_resource(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	struct rtw_pci *rtwpci;
	int ret;

	rtwpci = (struct rtw_pci *)rtwdev->priv;
	rtwpci->pdev = pdev;

	/* after this, the driver can access hw registers */
	ret = rtw_pci_io_mapping(rtwdev, pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to request pci io region\n");
		goto err_out;
	}

	ret = rtw_pci_init(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to allocate pci resources\n");
		goto err_io_unmap;
	}

	return 0;

err_io_unmap:
	rtw_pci_io_unmapping(rtwdev, pdev);

err_out:
	return ret;
}

static void rtw_pci_destroy(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	rtw_pci_deinit(rtwdev);
	rtw_pci_io_unmapping(rtwdev, pdev);
}

static struct rtw_hci_ops rtw_pci_ops = {
	.tx_write = rtw_pci_tx_write,
	.tx_kick_off = rtw_pci_tx_kick_off,
	.setup = rtw_pci_setup,
	.start = rtw_pci_start,
	.stop = rtw_pci_stop,
	.deep_ps = rtw_pci_deep_ps,
	.link_ps = rtw_pci_link_ps,
	.interface_cfg = rtw_pci_interface_cfg,

	.read8 = rtw_pci_read8,
	.read16 = rtw_pci_read16,
	.read32 = rtw_pci_read32,
	.write8 = rtw_pci_write8,
	.write16 = rtw_pci_write16,
	.write32 = rtw_pci_write32,
	.write_data_rsvd_page = rtw_pci_write_data_rsvd_page,
	.write_data_h2c = rtw_pci_write_data_h2c,
};

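/* Request the PCI interrupt as a threaded IRQ: the hard handler only
 * masks further chip interrupts, and the thread function does the real
 * work. MSI is preferred, with fallback to a legacy INTx vector (MSI
 * can also be disabled entirely via the disable_msi module parameter).
 */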
static int rtw_pci_request_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	unsigned int flags = PCI_IRQ_LEGACY;
	int ret;

	if (!rtw_disable_msi)
		flags |= PCI_IRQ_MSI;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, flags);
	if (ret < 0) {
		rtw_err(rtwdev, "failed to alloc PCI irq vectors\n");
		return ret;
	}

	ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq,
					rtw_pci_interrupt_handler,
					rtw_pci_interrupt_threadfn,
					IRQF_SHARED, KBUILD_MODNAME, rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to request irq %d\n", ret);
		pci_free_irq_vectors(pdev);
	}

	return ret;
}

static void rtw_pci_free_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	devm_free_irq(rtwdev->dev, pdev->irq, rtwdev);
	pci_free_irq_vectors(pdev);
}

int rtw_pci_probe(struct pci_dev *pdev,
		  const struct pci_device_id *id)
{
	struct ieee80211_hw *hw;
	struct rtw_dev *rtwdev;
	int drv_data_size;
	int ret;

	drv_data_size = sizeof(struct rtw_dev) + sizeof(struct rtw_pci);
	hw = ieee80211_alloc_hw(drv_data_size, &rtw_ops);
	if (!hw) {
		dev_err(&pdev->dev, "failed to allocate hw\n");
		return -ENOMEM;
	}

	rtwdev = hw->priv;
	rtwdev->hw = hw;
	rtwdev->dev = &pdev->dev;
	rtwdev->chip = (struct rtw_chip_info *)id->driver_data;
	rtwdev->hci.ops = &rtw_pci_ops;
	rtwdev->hci.type = RTW_HCI_TYPE_PCIE;

	ret = rtw_core_init(rtwdev);
	if (ret)
		goto err_release_hw;

	rtw_dbg(rtwdev, RTW_DBG_PCI,
		"rtw88 pci probe: vendor=0x%4.04X device=0x%4.04X rev=%d\n",
		pdev->vendor, pdev->device, pdev->revision);

	ret = rtw_pci_claim(rtwdev, pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to claim pci device\n");
		goto err_deinit_core;
	}

	ret = rtw_pci_setup_resource(rtwdev, pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to setup pci resources\n");
		goto err_pci_declaim;
	}

	ret = rtw_chip_info_setup(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to setup chip information\n");
		goto err_destroy_pci;
	}

	rtw_pci_phy_cfg(rtwdev);

	ret = rtw_register_hw(rtwdev, hw);
	if (ret) {
		rtw_err(rtwdev, "failed to register hw\n");
		goto err_destroy_pci;
	}

	ret = rtw_pci_request_irq(rtwdev, pdev);
	if (ret) {
		ieee80211_unregister_hw(hw);
		goto err_destroy_pci;
	}

	return 0;

err_destroy_pci:
	rtw_pci_destroy(rtwdev, pdev);

err_pci_declaim:
	rtw_pci_declaim(rtwdev, pdev);

err_deinit_core:
	rtw_core_deinit(rtwdev);

err_release_hw:
	ieee80211_free_hw(hw);

	return ret;
}
EXPORT_SYMBOL(rtw_pci_probe);

void rtw_pci_remove(struct pci_dev *pdev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
	struct rtw_dev *rtwdev;
	struct rtw_pci *rtwpci;

	if (!hw)
		return;

	rtwdev = hw->priv;
	rtwpci = (struct rtw_pci *)rtwdev->priv;

	rtw_unregister_hw(rtwdev, hw);
	rtw_pci_disable_interrupt(rtwdev, rtwpci);
	rtw_pci_destroy(rtwdev, pdev);
	rtw_pci_declaim(rtwdev, pdev);
	rtw_pci_free_irq(rtwdev, pdev);
	rtw_core_deinit(rtwdev);
	ieee80211_free_hw(hw);
}
EXPORT_SYMBOL(rtw_pci_remove);

void rtw_pci_shutdown(struct pci_dev *pdev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
	struct rtw_dev *rtwdev;
	struct rtw_chip_info *chip;

	if (!hw)
		return;

	rtwdev = hw->priv;
	chip = rtwdev->chip;

	if (chip->ops->shutdown)
		chip->ops->shutdown(rtwdev);
}
EXPORT_SYMBOL(rtw_pci_shutdown);

static const struct pci_device_id rtw_pci_id_table[] = {
	{},
};
MODULE_DEVICE_TABLE(pci, rtw_pci_id_table);

static struct pci_driver rtw_pci_driver = {
	.name = "rtw_pci",
	.id_table = rtw_pci_id_table,
	.probe = rtw_pci_probe,
	.remove = rtw_pci_remove,
	.driver.pm = &rtw_pm_ops,
	.shutdown = rtw_pci_shutdown,
};
module_pci_driver(rtw_pci_driver);

MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek 802.11ac wireless PCI driver");
MODULE_LICENSE("Dual BSD/GPL");