xref: /linux/drivers/net/wireless/realtek/rtw88/pci.c (revision 3fd6c59042dbba50391e30862beac979491145fe)

// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019  Realtek Corporation
 */

#include <linux/module.h>
#include <linux/pci.h>
#include "main.h"
#include "pci.h"
#include "reg.h"
#include "tx.h"
#include "rx.h"
#include "fw.h"
#include "ps.h"
#include "debug.h"

static bool rtw_disable_msi;
static bool rtw_pci_disable_aspm;
module_param_named(disable_msi, rtw_disable_msi, bool, 0644);
module_param_named(disable_aspm, rtw_pci_disable_aspm, bool, 0644);
MODULE_PARM_DESC(disable_msi, "Set Y to disable MSI interrupt support");
MODULE_PARM_DESC(disable_aspm, "Set Y to disable PCI ASPM support");

static u32 rtw_pci_tx_queue_idx_addr[] = {
	[RTW_TX_QUEUE_BK]	= RTK_PCI_TXBD_IDX_BKQ,
	[RTW_TX_QUEUE_BE]	= RTK_PCI_TXBD_IDX_BEQ,
	[RTW_TX_QUEUE_VI]	= RTK_PCI_TXBD_IDX_VIQ,
	[RTW_TX_QUEUE_VO]	= RTK_PCI_TXBD_IDX_VOQ,
	[RTW_TX_QUEUE_MGMT]	= RTK_PCI_TXBD_IDX_MGMTQ,
	[RTW_TX_QUEUE_HI0]	= RTK_PCI_TXBD_IDX_HI0Q,
	[RTW_TX_QUEUE_H2C]	= RTK_PCI_TXBD_IDX_H2CQ,
};

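/* Map a driver TX queue onto the QSEL value carried in the TX packet
 * descriptor; ordinary data queues simply reuse skb->priority.
 */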
static u8 rtw_pci_get_tx_qsel(struct sk_buff *skb,
			      enum rtw_tx_queue_type queue)
{
	switch (queue) {
	case RTW_TX_QUEUE_BCN:
		return TX_DESC_QSEL_BEACON;
	case RTW_TX_QUEUE_H2C:
		return TX_DESC_QSEL_H2C;
	case RTW_TX_QUEUE_MGMT:
		return TX_DESC_QSEL_MGMT;
	case RTW_TX_QUEUE_HI0:
		return TX_DESC_QSEL_HIGH;
	default:
		return skb->priority;
	}
}

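/* Register accessors for the rtw_hci_ops interface: plain MMIO against
 * the BAR mapped in rtw_pci_io_mapping().
 */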
static u8 rtw_pci_read8(struct rtw_dev *rtwdev, u32 addr)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	return readb(rtwpci->mmap + addr);
}

static u16 rtw_pci_read16(struct rtw_dev *rtwdev, u32 addr)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	return readw(rtwpci->mmap + addr);
}

static u32 rtw_pci_read32(struct rtw_dev *rtwdev, u32 addr)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	return readl(rtwpci->mmap + addr);
}

static void rtw_pci_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	writeb(val, rtwpci->mmap + addr);
}

static void rtw_pci_write16(struct rtw_dev *rtwdev, u32 addr, u16 val)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	writew(val, rtwpci->mmap + addr);
}

static void rtw_pci_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	writel(val, rtwpci->mmap + addr);
}

static void rtw_pci_free_tx_ring_skbs(struct rtw_dev *rtwdev,
				      struct rtw_pci_tx_ring *tx_ring)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct rtw_pci_tx_data *tx_data;
	struct sk_buff *skb, *tmp;
	dma_addr_t dma;

	/* free every skb remaining in the tx list */
	skb_queue_walk_safe(&tx_ring->queue, skb, tmp) {
		__skb_unlink(skb, &tx_ring->queue);
		tx_data = rtw_pci_get_tx_data(skb);
		dma = tx_data->dma;

		dma_unmap_single(&pdev->dev, dma, skb->len, DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

static void rtw_pci_free_tx_ring(struct rtw_dev *rtwdev,
				 struct rtw_pci_tx_ring *tx_ring)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	u8 *head = tx_ring->r.head;
	u32 len = tx_ring->r.len;
	int ring_sz = len * tx_ring->r.desc_size;

	rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);

	/* free the ring itself */
	dma_free_coherent(&pdev->dev, ring_sz, head, tx_ring->r.dma);
	tx_ring->r.head = NULL;
}

static void rtw_pci_free_rx_ring_skbs(struct rtw_dev *rtwdev,
				      struct rtw_pci_rx_ring *rx_ring)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct sk_buff *skb;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;
	dma_addr_t dma;
	int i;

	for (i = 0; i < rx_ring->r.len; i++) {
		skb = rx_ring->buf[i];
		if (!skb)
			continue;

		dma = *((dma_addr_t *)skb->cb);
		dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
		dev_kfree_skb(skb);
		rx_ring->buf[i] = NULL;
	}
}

static void rtw_pci_free_rx_ring(struct rtw_dev *rtwdev,
				 struct rtw_pci_rx_ring *rx_ring)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	u8 *head = rx_ring->r.head;
	int ring_sz = rx_ring->r.desc_size * rx_ring->r.len;

	rtw_pci_free_rx_ring_skbs(rtwdev, rx_ring);

	dma_free_coherent(&pdev->dev, ring_sz, head, rx_ring->r.dma);
}

static void rtw_pci_free_trx_ring(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *tx_ring;
	struct rtw_pci_rx_ring *rx_ring;
	int i;

	for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		rtw_pci_free_tx_ring(rtwdev, tx_ring);
	}

	for (i = 0; i < RTK_MAX_RX_QUEUE_NUM; i++) {
		rx_ring = &rtwpci->rx_rings[i];
		rtw_pci_free_rx_ring(rtwdev, rx_ring);
	}
}

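/* Descriptor rings live in coherent DMA memory shared with the device.
 * A ring length is bounded by TRX_BD_IDX_MASK, since the hardware
 * read/write indexes are masked with it.
 */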
static int rtw_pci_init_tx_ring(struct rtw_dev *rtwdev,
				struct rtw_pci_tx_ring *tx_ring,
				u8 desc_size, u32 len)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	int ring_sz = desc_size * len;
	dma_addr_t dma;
	u8 *head;

	if (len > TRX_BD_IDX_MASK) {
		rtw_err(rtwdev, "len %d exceeds maximum TX entries\n", len);
		return -EINVAL;
	}

	head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
	if (!head) {
		rtw_err(rtwdev, "failed to allocate tx ring\n");
		return -ENOMEM;
	}

	skb_queue_head_init(&tx_ring->queue);
	tx_ring->r.head = head;
	tx_ring->r.dma = dma;
	tx_ring->r.len = len;
	tx_ring->r.desc_size = desc_size;
	tx_ring->r.wp = 0;
	tx_ring->r.rp = 0;

	return 0;
}

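/* Map an RX skb for device DMA and publish it in the buffer descriptor at
 * @idx; the DMA handle is stashed in skb->cb so it can be unmapped later.
 */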
static int rtw_pci_reset_rx_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
				 struct rtw_pci_rx_ring *rx_ring,
				 u32 idx, u32 desc_sz)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct rtw_pci_rx_buffer_desc *buf_desc;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;
	dma_addr_t dma;

	if (!skb)
		return -EINVAL;

	dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma))
		return -EBUSY;

	*((dma_addr_t *)skb->cb) = dma;
	buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
						     idx * desc_sz);
	memset(buf_desc, 0, sizeof(*buf_desc));
	buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
	buf_desc->dma = cpu_to_le32(dma);

	return 0;
}

static void rtw_pci_sync_rx_desc_device(struct rtw_dev *rtwdev, dma_addr_t dma,
					struct rtw_pci_rx_ring *rx_ring,
					u32 idx, u32 desc_sz)
{
	struct device *dev = rtwdev->dev;
	struct rtw_pci_rx_buffer_desc *buf_desc;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;

	dma_sync_single_for_device(dev, dma, buf_sz, DMA_FROM_DEVICE);

	buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
						     idx * desc_sz);
	memset(buf_desc, 0, sizeof(*buf_desc));
	buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
	buf_desc->dma = cpu_to_le32(dma);
}

static int rtw_pci_init_rx_ring(struct rtw_dev *rtwdev,
				struct rtw_pci_rx_ring *rx_ring,
				u8 desc_size, u32 len)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct sk_buff *skb = NULL;
	dma_addr_t dma;
	u8 *head;
	int ring_sz = desc_size * len;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;
	int i, allocated;
	int ret = 0;

	head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
	if (!head) {
		rtw_err(rtwdev, "failed to allocate rx ring\n");
		return -ENOMEM;
	}
	rx_ring->r.head = head;

	for (i = 0; i < len; i++) {
		skb = dev_alloc_skb(buf_sz);
		if (!skb) {
			allocated = i;
			ret = -ENOMEM;
			goto err_out;
		}

		memset(skb->data, 0, buf_sz);
		rx_ring->buf[i] = skb;
		ret = rtw_pci_reset_rx_desc(rtwdev, skb, rx_ring, i, desc_size);
		if (ret) {
			allocated = i;
			dev_kfree_skb_any(skb);
			goto err_out;
		}
	}

	rx_ring->r.dma = dma;
	rx_ring->r.len = len;
	rx_ring->r.desc_size = desc_size;
	rx_ring->r.wp = 0;
	rx_ring->r.rp = 0;

	return 0;

err_out:
	for (i = 0; i < allocated; i++) {
		skb = rx_ring->buf[i];
		if (!skb)
			continue;
		dma = *((dma_addr_t *)skb->cb);
		dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		rx_ring->buf[i] = NULL;
	}
	dma_free_coherent(&pdev->dev, ring_sz, head, dma);

	rtw_err(rtwdev, "failed to init rx buffer\n");

	return ret;
}

static int rtw_pci_init_trx_ring(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *tx_ring;
	struct rtw_pci_rx_ring *rx_ring;
	const struct rtw_chip_info *chip = rtwdev->chip;
	int i = 0, j = 0, tx_alloced = 0, rx_alloced = 0;
	int tx_desc_size, rx_desc_size;
	u32 len;
	int ret;

	tx_desc_size = chip->tx_buf_desc_sz;

	for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		len = max_num_of_tx_queue(i);
		ret = rtw_pci_init_tx_ring(rtwdev, tx_ring, tx_desc_size, len);
		if (ret)
			goto out;
	}

	rx_desc_size = chip->rx_buf_desc_sz;

	for (j = 0; j < RTK_MAX_RX_QUEUE_NUM; j++) {
		rx_ring = &rtwpci->rx_rings[j];
		ret = rtw_pci_init_rx_ring(rtwdev, rx_ring, rx_desc_size,
					   RTK_MAX_RX_DESC_NUM);
		if (ret)
			goto out;
	}

	return 0;

out:
	tx_alloced = i;
	for (i = 0; i < tx_alloced; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		rtw_pci_free_tx_ring(rtwdev, tx_ring);
	}

	rx_alloced = j;
	for (j = 0; j < rx_alloced; j++) {
		rx_ring = &rtwpci->rx_rings[j];
		rtw_pci_free_rx_ring(rtwdev, rx_ring);
	}

	return ret;
}

static void rtw_pci_deinit(struct rtw_dev *rtwdev)
{
	rtw_pci_free_trx_ring(rtwdev);
}

static int rtw_pci_init(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	int ret = 0;

	rtwpci->irq_mask[0] = IMR_HIGHDOK |
			      IMR_MGNTDOK |
			      IMR_BKDOK |
			      IMR_BEDOK |
			      IMR_VIDOK |
			      IMR_VODOK |
			      IMR_ROK |
			      IMR_BCNDMAINT_E |
			      IMR_C2HCMD |
			      0;
	rtwpci->irq_mask[1] = IMR_TXFOVW |
			      0;
	rtwpci->irq_mask[3] = IMR_H2CDOK |
			      0;
	spin_lock_init(&rtwpci->irq_lock);
	spin_lock_init(&rtwpci->hwirq_lock);
	ret = rtw_pci_init_trx_ring(rtwdev);

	return ret;
}

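/* Program the base address and length of every TX/RX descriptor ring into
 * the hardware, and zero all software read/write pointers.
 */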
static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	u32 len;
	u8 tmp;
	dma_addr_t dma;

	tmp = rtw_read8(rtwdev, RTK_PCI_CTRL + 3);
	rtw_write8(rtwdev, RTK_PCI_CTRL + 3, tmp | 0xf7);

	dma = rtwpci->tx_rings[RTW_TX_QUEUE_BCN].r.dma;
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BCNQ, dma);

	if (!rtw_chip_wcpu_11n(rtwdev)) {
		len = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.len;
		dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma;
		rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0;
		rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.wp = 0;
		rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_H2CQ, len & TRX_BD_IDX_MASK);
		rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_H2CQ, dma);
	}

	len = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BKQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BKQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BEQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BEQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VOQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VOQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VIQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VIQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_MGMTQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_MGMTQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_HI0Q, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_HI0Q, dma);

	len = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.len;
	dma = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.dma;
	rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.rp = 0;
	rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_RXBD_NUM_MPDUQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_RXBD_DESA_MPDUQ, dma);

	/* reset read/write point */
	rtw_write32(rtwdev, RTK_PCI_TXBD_RWPTR_CLR, 0xffffffff);

	/* reset H2C Queue index in a single write */
	if (rtw_chip_wcpu_11ac(rtwdev))
		rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR,
				BIT_CLR_H2CQ_HOST_IDX | BIT_CLR_H2CQ_HW_IDX);
}

static void rtw_pci_reset_trx_ring(struct rtw_dev *rtwdev)
{
	rtw_pci_reset_buf_desc(rtwdev);
}

static void rtw_pci_enable_interrupt(struct rtw_dev *rtwdev,
				     struct rtw_pci *rtwpci, bool exclude_rx)
{
	unsigned long flags;
	u32 imr0_unmask = exclude_rx ? IMR_ROK : 0;

	spin_lock_irqsave(&rtwpci->hwirq_lock, flags);

	rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0] & ~imr0_unmask);
	rtw_write32(rtwdev, RTK_PCI_HIMR1, rtwpci->irq_mask[1]);
	if (rtw_chip_wcpu_11ac(rtwdev))
		rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]);

	rtwpci->irq_enabled = true;

	spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
}

static void rtw_pci_disable_interrupt(struct rtw_dev *rtwdev,
				      struct rtw_pci *rtwpci)
{
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->hwirq_lock, flags);

	if (!rtwpci->irq_enabled)
		goto out;

	rtw_write32(rtwdev, RTK_PCI_HIMR0, 0);
	rtw_write32(rtwdev, RTK_PCI_HIMR1, 0);
	if (rtw_chip_wcpu_11ac(rtwdev))
		rtw_write32(rtwdev, RTK_PCI_HIMR3, 0);

	rtwpci->irq_enabled = false;

out:
	spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
}

static void rtw_pci_dma_reset(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
{
	/* reset dma and rx tag */
	rtw_write32_set(rtwdev, RTK_PCI_CTRL,
			BIT_RST_TRXDMA_INTF | BIT_RX_TAG_EN);
	rtwpci->rx_tag = 0;
}

static int rtw_pci_setup(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	rtw_pci_reset_trx_ring(rtwdev);
	rtw_pci_dma_reset(rtwdev, rtwpci);

	return 0;
}

static void rtw_pci_dma_release(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
{
	struct rtw_pci_tx_ring *tx_ring;
	enum rtw_tx_queue_type queue;

	rtw_pci_reset_trx_ring(rtwdev);
	for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
		tx_ring = &rtwpci->tx_rings[queue];
		rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);
	}
}

static void rtw_pci_napi_start(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	if (test_and_set_bit(RTW_PCI_FLAG_NAPI_RUNNING, rtwpci->flags))
		return;

	napi_enable(&rtwpci->napi);
}

static void rtw_pci_napi_stop(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	if (!test_and_clear_bit(RTW_PCI_FLAG_NAPI_RUNNING, rtwpci->flags))
		return;

	napi_synchronize(&rtwpci->napi);
	napi_disable(&rtwpci->napi);
}

static int rtw_pci_start(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	rtw_pci_napi_start(rtwdev);

	spin_lock_bh(&rtwpci->irq_lock);
	rtwpci->running = true;
	rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
	spin_unlock_bh(&rtwpci->irq_lock);

	return 0;
}

static void rtw_pci_stop(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;

	spin_lock_bh(&rtwpci->irq_lock);
	rtwpci->running = false;
	rtw_pci_disable_interrupt(rtwdev, rtwpci);
	spin_unlock_bh(&rtwpci->irq_lock);

	synchronize_irq(pdev->irq);
	rtw_pci_napi_stop(rtwdev);

	spin_lock_bh(&rtwpci->irq_lock);
	rtw_pci_dma_release(rtwdev, rtwpci);
	spin_unlock_bh(&rtwpci->irq_lock);
}

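/* Enter deep power save only when no TX DMA can still be in flight, unless
 * the firmware is able to wake the chip for TX itself (FW_FEATURE_TX_WAKE).
 */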
static void rtw_pci_deep_ps_enter(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *tx_ring;
	enum rtw_tx_queue_type queue;
	bool tx_empty = true;

	if (rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE))
		goto enter_deep_ps;

	lockdep_assert_held(&rtwpci->irq_lock);

	/* TX DMA is not allowed while in deep PS state */
	for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
		/* BCN queue holds reserved pages and has no DMA interrupt;
		 * H2C queue is managed by the firmware
		 */
		if (queue == RTW_TX_QUEUE_BCN ||
		    queue == RTW_TX_QUEUE_H2C)
			continue;

		tx_ring = &rtwpci->tx_rings[queue];

		/* check if there is any skb DMAing */
		if (skb_queue_len(&tx_ring->queue)) {
			tx_empty = false;
			break;
		}
	}

	if (!tx_empty) {
		rtw_dbg(rtwdev, RTW_DBG_PS,
			"TX path not empty, cannot enter deep power save state\n");
		return;
	}
enter_deep_ps:
	set_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags);
	rtw_power_mode_change(rtwdev, true);
}

static void rtw_pci_deep_ps_leave(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	lockdep_assert_held(&rtwpci->irq_lock);

	if (test_and_clear_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
		rtw_power_mode_change(rtwdev, false);
}

static void rtw_pci_deep_ps(struct rtw_dev *rtwdev, bool enter)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	spin_lock_bh(&rtwpci->irq_lock);

	if (enter && !test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
		rtw_pci_deep_ps_enter(rtwdev);

	if (!enter && test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
		rtw_pci_deep_ps_leave(rtwdev);

	spin_unlock_bh(&rtwpci->irq_lock);
}

static void rtw_pci_release_rsvd_page(struct rtw_pci *rtwpci,
				      struct rtw_pci_tx_ring *ring)
{
	struct sk_buff *prev = skb_dequeue(&ring->queue);
	struct rtw_pci_tx_data *tx_data;
	dma_addr_t dma;

	if (!prev)
		return;

	tx_data = rtw_pci_get_tx_data(prev);
	dma = tx_data->dma;
	dma_unmap_single(&rtwpci->pdev->dev, dma, prev->len, DMA_TO_DEVICE);
	dev_kfree_skb_any(prev);
}

static void rtw_pci_dma_check(struct rtw_dev *rtwdev,
			      struct rtw_pci_rx_ring *rx_ring,
			      u32 idx)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pci_rx_buffer_desc *buf_desc;
	u32 desc_sz = chip->rx_buf_desc_sz;
	u16 total_pkt_size;

	buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
						     idx * desc_sz);
	total_pkt_size = le16_to_cpu(buf_desc->total_pkt_size);

	/* rx tag mismatch, throw a warning */
	if (total_pkt_size != rtwpci->rx_tag)
		rtw_warn(rtwdev, "pci bus timeout, check dma status\n");

	rtwpci->rx_tag = (rtwpci->rx_tag + 1) % RX_TAG_MAX;
}

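/* The hardware read pointer of a TX ring lives in the upper 16 bits of the
 * ring's index register, hence the "+ 2" byte offset of the 16-bit read.
 */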
static u32 __pci_get_hw_tx_ring_rp(struct rtw_dev *rtwdev, u8 pci_q)
{
	u32 bd_idx_addr = rtw_pci_tx_queue_idx_addr[pci_q];
	u32 bd_idx = rtw_read16(rtwdev, bd_idx_addr + 2);

	return FIELD_GET(TRX_BD_IDX_MASK, bd_idx);
}

static void __pci_flush_queue(struct rtw_dev *rtwdev, u8 pci_q, bool drop)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *ring = &rtwpci->tx_rings[pci_q];
	u32 cur_rp;
	u8 i;

	/* Because the time taken by the I/O in __pci_get_hw_tx_ring_rp is a
	 * bit dynamic, it's hard to define a reasonable fixed total timeout
	 * for the read_poll_timeout* helpers. Instead, bound the number of
	 * polling iterations, using a simple for loop with udelay here.
	 */
	for (i = 0; i < 30; i++) {
		cur_rp = __pci_get_hw_tx_ring_rp(rtwdev, pci_q);
		if (cur_rp == ring->r.wp)
			return;

		udelay(1);
	}

	if (!drop)
		rtw_dbg(rtwdev, RTW_DBG_UNEXP,
			"timed out to flush pci tx ring[%d]\n", pci_q);
}

static void __rtw_pci_flush_queues(struct rtw_dev *rtwdev, u32 pci_queues,
				   bool drop)
{
	u8 q;

	for (q = 0; q < RTK_MAX_TX_QUEUE_NUM; q++) {
		/* Unnecessary to flush BCN, H2C and HI tx queues. */
		if (q == RTW_TX_QUEUE_BCN || q == RTW_TX_QUEUE_H2C ||
		    q == RTW_TX_QUEUE_HI0)
			continue;

		if (pci_queues & BIT(q))
			__pci_flush_queue(rtwdev, q, drop);
	}
}

static void rtw_pci_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop)
{
	u32 pci_queues = 0;
	u8 i;

	/* If all of the hardware queues are requested to flush,
	 * flush all of the pci queues.
	 */
	if (queues == BIT(rtwdev->hw->queues) - 1) {
		pci_queues = BIT(RTK_MAX_TX_QUEUE_NUM) - 1;
	} else {
		for (i = 0; i < rtwdev->hw->queues; i++)
			if (queues & BIT(i))
				pci_queues |= BIT(rtw_tx_ac_to_hwq(i));
	}

	__rtw_pci_flush_queues(rtwdev, pci_queues, drop);
}

static void rtw_pci_tx_kick_off_queue(struct rtw_dev *rtwdev,
				      enum rtw_tx_queue_type queue)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *ring;
	u32 bd_idx;

	ring = &rtwpci->tx_rings[queue];
	bd_idx = rtw_pci_tx_queue_idx_addr[queue];

	spin_lock_bh(&rtwpci->irq_lock);
	if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE))
		rtw_pci_deep_ps_leave(rtwdev);
	rtw_write16(rtwdev, bd_idx, ring->r.wp & TRX_BD_IDX_MASK);
	spin_unlock_bh(&rtwpci->irq_lock);
}

static void rtw_pci_tx_kick_off(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	enum rtw_tx_queue_type queue;

	for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++)
		if (test_and_clear_bit(queue, rtwpci->tx_queued))
			rtw_pci_tx_kick_off_queue(rtwdev, queue);
}

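/* Queue one frame on a TX ring. Each frame takes a two-segment buffer
 * descriptor: segment 0 covers the TX packet descriptor, segment 1 the
 * payload that follows it within the same DMA mapping.
 */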
static int rtw_pci_tx_write_data(struct rtw_dev *rtwdev,
				 struct rtw_tx_pkt_info *pkt_info,
				 struct sk_buff *skb,
				 enum rtw_tx_queue_type queue)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pci_tx_ring *ring;
	struct rtw_pci_tx_data *tx_data;
	dma_addr_t dma;
	u32 tx_pkt_desc_sz = chip->tx_pkt_desc_sz;
	u32 tx_buf_desc_sz = chip->tx_buf_desc_sz;
	u32 size;
	u32 psb_len;
	u8 *pkt_desc;
	struct rtw_pci_tx_buffer_desc *buf_desc;

	ring = &rtwpci->tx_rings[queue];

	size = skb->len;

	if (queue == RTW_TX_QUEUE_BCN)
		rtw_pci_release_rsvd_page(rtwpci, ring);
	else if (!avail_desc(ring->r.wp, ring->r.rp, ring->r.len))
		return -ENOSPC;

	pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
	memset(pkt_desc, 0, tx_pkt_desc_sz);
	pkt_info->qsel = rtw_pci_get_tx_qsel(skb, queue);
	rtw_tx_fill_tx_desc(rtwdev, pkt_info, skb);
	dma = dma_map_single(&rtwpci->pdev->dev, skb->data, skb->len,
			     DMA_TO_DEVICE);
	if (dma_mapping_error(&rtwpci->pdev->dev, dma))
		return -EBUSY;

	/* after this we got dma mapped, there is no way back */
	buf_desc = get_tx_buffer_desc(ring, tx_buf_desc_sz);
	memset(buf_desc, 0, tx_buf_desc_sz);
	psb_len = (skb->len - 1) / 128 + 1;
	if (queue == RTW_TX_QUEUE_BCN)
		psb_len |= 1 << RTK_PCI_TXBD_OWN_OFFSET;

	buf_desc[0].psb_len = cpu_to_le16(psb_len);
	buf_desc[0].buf_size = cpu_to_le16(tx_pkt_desc_sz);
	buf_desc[0].dma = cpu_to_le32(dma);
	buf_desc[1].buf_size = cpu_to_le16(size);
	buf_desc[1].dma = cpu_to_le32(dma + tx_pkt_desc_sz);

	tx_data = rtw_pci_get_tx_data(skb);
	tx_data->dma = dma;
	tx_data->sn = pkt_info->sn;

	spin_lock_bh(&rtwpci->irq_lock);

	skb_queue_tail(&ring->queue, skb);

	if (queue == RTW_TX_QUEUE_BCN)
		goto out_unlock;

	/* update write-index, and kick it off later */
	set_bit(queue, rtwpci->tx_queued);
	if (++ring->r.wp >= ring->r.len)
		ring->r.wp = 0;

out_unlock:
	spin_unlock_bh(&rtwpci->irq_lock);

	return 0;
}

static int rtw_pci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf,
					u32 size)
{
	struct sk_buff *skb;
	struct rtw_tx_pkt_info pkt_info = {0};
	u8 reg_bcn_work;
	int ret;

	skb = rtw_tx_write_data_rsvd_page_get(rtwdev, &pkt_info, buf, size);
	if (!skb)
		return -ENOMEM;

	ret = rtw_pci_tx_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_BCN);
	if (ret) {
		rtw_err(rtwdev, "failed to write rsvd page data\n");
		return ret;
	}

	/* reserved pages go through beacon queue */
	reg_bcn_work = rtw_read8(rtwdev, RTK_PCI_TXBD_BCN_WORK);
	reg_bcn_work |= BIT_PCI_BCNQ_FLAG;
	rtw_write8(rtwdev, RTK_PCI_TXBD_BCN_WORK, reg_bcn_work);

	return 0;
}

static int rtw_pci_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
	struct sk_buff *skb;
	struct rtw_tx_pkt_info pkt_info = {0};
	int ret;

	skb = rtw_tx_write_data_h2c_get(rtwdev, &pkt_info, buf, size);
	if (!skb)
		return -ENOMEM;

	ret = rtw_pci_tx_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_H2C);
	if (ret) {
		rtw_err(rtwdev, "failed to write h2c data\n");
		return ret;
	}

	rtw_pci_tx_kick_off_queue(rtwdev, RTW_TX_QUEUE_H2C);

	return 0;
}

static int rtw_pci_tx_write(struct rtw_dev *rtwdev,
			    struct rtw_tx_pkt_info *pkt_info,
			    struct sk_buff *skb)
{
	enum rtw_tx_queue_type queue = rtw_tx_queue_mapping(skb);
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *ring;
	int ret;

	ret = rtw_pci_tx_write_data(rtwdev, pkt_info, skb, queue);
	if (ret)
		return ret;

	ring = &rtwpci->tx_rings[queue];
	spin_lock_bh(&rtwpci->irq_lock);
	if (avail_desc(ring->r.wp, ring->r.rp, ring->r.len) < 2) {
		ieee80211_stop_queue(rtwdev->hw, skb_get_queue_mapping(skb));
		ring->queue_stopped = true;
	}
	spin_unlock_bh(&rtwpci->irq_lock);

	return 0;
}

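/* TX completion: walk the ring from our previous read pointer up to the
 * hardware read pointer, unmap each completed skb, and report its TX
 * status to mac80211 (H2C command packets are simply freed).
 */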
static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
			   u8 hw_queue)
{
	struct ieee80211_hw *hw = rtwdev->hw;
	struct ieee80211_tx_info *info;
	struct rtw_pci_tx_ring *ring;
	struct rtw_pci_tx_data *tx_data;
	struct sk_buff *skb;
	u32 count;
	u32 bd_idx_addr;
	u32 bd_idx, cur_rp, rp_idx;
	u16 q_map;

	ring = &rtwpci->tx_rings[hw_queue];

	bd_idx_addr = rtw_pci_tx_queue_idx_addr[hw_queue];
	bd_idx = rtw_read32(rtwdev, bd_idx_addr);
	cur_rp = bd_idx >> 16;
	cur_rp &= TRX_BD_IDX_MASK;
	rp_idx = ring->r.rp;
	if (cur_rp >= ring->r.rp)
		count = cur_rp - ring->r.rp;
	else
		count = ring->r.len - (ring->r.rp - cur_rp);

	while (count--) {
		skb = skb_dequeue(&ring->queue);
		if (!skb) {
			rtw_err(rtwdev, "failed to dequeue %d skb TX queue %d, BD=0x%08x, rp %d -> %d\n",
				count, hw_queue, bd_idx, ring->r.rp, cur_rp);
			break;
		}
		tx_data = rtw_pci_get_tx_data(skb);
		dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
				 DMA_TO_DEVICE);

		/* just free command packets from host to card */
		if (hw_queue == RTW_TX_QUEUE_H2C) {
			dev_kfree_skb_irq(skb);
			continue;
		}

		if (ring->queue_stopped &&
		    avail_desc(ring->r.wp, rp_idx, ring->r.len) > 4) {
			q_map = skb_get_queue_mapping(skb);
			ieee80211_wake_queue(hw, q_map);
			ring->queue_stopped = false;
		}

		if (++rp_idx >= ring->r.len)
			rp_idx = 0;

		skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);

		info = IEEE80211_SKB_CB(skb);

		/* enqueue to wait for tx report */
		if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
			rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn);
			continue;
		}

		/* always ACK for others, then they won't be marked as drop */
		if (info->flags & IEEE80211_TX_CTL_NO_ACK)
			info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
		else
			info->flags |= IEEE80211_TX_STAT_ACK;

		ieee80211_tx_info_clear_status(info);
		ieee80211_tx_status_irqsafe(hw, skb);
	}

	ring->r.rp = cur_rp;
}

static void rtw_pci_rx_isr(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct napi_struct *napi = &rtwpci->napi;

	napi_schedule(napi);
}

static int rtw_pci_get_hw_rx_ring_nr(struct rtw_dev *rtwdev,
				     struct rtw_pci *rtwpci)
{
	struct rtw_pci_rx_ring *ring;
	int count = 0;
	u32 tmp, cur_wp;

	ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];
	tmp = rtw_read32(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ);
	cur_wp = u32_get_bits(tmp, TRX_BD_HW_IDX_MASK);
	if (cur_wp >= ring->r.wp)
		count = cur_wp - ring->r.wp;
	else
		count = ring->r.len - (ring->r.wp - cur_wp);

	return count;
}

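/* RX bottom half, run from NAPI context: copy each received frame out of
 * its DMA buffer into a fresh skb, deliver it to mac80211 (or to the C2H
 * handler), then hand the original buffer back to the device.
 */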
static u32 rtw_pci_rx_napi(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
			   u8 hw_queue, u32 limit)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct napi_struct *napi = &rtwpci->napi;
	struct rtw_pci_rx_ring *ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];
	struct rtw_rx_pkt_stat pkt_stat;
	struct ieee80211_rx_status rx_status;
	struct sk_buff *skb, *new;
	u32 cur_rp = ring->r.rp;
	u32 count, rx_done = 0;
	u32 pkt_offset;
	u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
	u32 buf_desc_sz = chip->rx_buf_desc_sz;
	u32 new_len;
	u8 *rx_desc;
	dma_addr_t dma;

	count = rtw_pci_get_hw_rx_ring_nr(rtwdev, rtwpci);
	count = min(count, limit);

	while (count--) {
		rtw_pci_dma_check(rtwdev, ring, cur_rp);
		skb = ring->buf[cur_rp];
		dma = *((dma_addr_t *)skb->cb);
		dma_sync_single_for_cpu(rtwdev->dev, dma, RTK_PCI_RX_BUF_SIZE,
					DMA_FROM_DEVICE);
		rx_desc = skb->data;
		rtw_rx_query_rx_desc(rtwdev, rx_desc, &pkt_stat, &rx_status);

		/* offset from rx_desc to payload */
		pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz +
			     pkt_stat.shift;

		/* allocate a new skb for this frame,
		 * discard the frame if none available
		 */
		new_len = pkt_stat.pkt_len + pkt_offset;
		new = dev_alloc_skb(new_len);
		if (WARN_ONCE(!new, "rx routine starvation\n"))
			goto next_rp;

		/* put the DMA data including rx_desc from phy to new skb */
		skb_put_data(new, skb->data, new_len);

		if (pkt_stat.is_c2h) {
			rtw_fw_c2h_cmd_rx_irqsafe(rtwdev, pkt_offset, new);
		} else {
			/* remove rx_desc */
			skb_pull(new, pkt_offset);

			rtw_update_rx_freq_for_invalid(rtwdev, new, &rx_status, &pkt_stat);
			rtw_rx_stats(rtwdev, pkt_stat.vif, new);
			memcpy(new->cb, &rx_status, sizeof(rx_status));
			ieee80211_rx_napi(rtwdev->hw, NULL, new, napi);
			rx_done++;
		}

next_rp:
		/* new skb delivered to mac80211, re-enable original skb DMA */
		rtw_pci_sync_rx_desc_device(rtwdev, dma, ring, cur_rp,
					    buf_desc_sz);

		/* host read next element in ring */
		if (++cur_rp >= ring->r.len)
			cur_rp = 0;
	}

	ring->r.rp = cur_rp;
	/* 'rp', the last position we have read, is seen as the previous
	 * position of 'wp' that is used to calculate 'count' next time.
	 */
	ring->r.wp = cur_rp;
	rtw_write16(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ, ring->r.rp);

	return rx_done;
}

static void rtw_pci_irq_recognized(struct rtw_dev *rtwdev,
				   struct rtw_pci *rtwpci, u32 *irq_status)
{
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->hwirq_lock, flags);

	irq_status[0] = rtw_read32(rtwdev, RTK_PCI_HISR0);
	irq_status[1] = rtw_read32(rtwdev, RTK_PCI_HISR1);
	if (rtw_chip_wcpu_11ac(rtwdev))
		irq_status[3] = rtw_read32(rtwdev, RTK_PCI_HISR3);
	else
		irq_status[3] = 0;
	irq_status[0] &= rtwpci->irq_mask[0];
	irq_status[1] &= rtwpci->irq_mask[1];
	irq_status[3] &= rtwpci->irq_mask[3];
	rtw_write32(rtwdev, RTK_PCI_HISR0, irq_status[0]);
	rtw_write32(rtwdev, RTK_PCI_HISR1, irq_status[1]);
	if (rtw_chip_wcpu_11ac(rtwdev))
		rtw_write32(rtwdev, RTK_PCI_HISR3, irq_status[3]);

	spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
}

static irqreturn_t rtw_pci_interrupt_handler(int irq, void *dev)
{
	struct rtw_dev *rtwdev = dev;
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	/* disable RTW PCI interrupt to avoid more interrupts before the end of
	 * thread function
	 *
	 * disable HIMR here to also avoid new HISR flag being raised before
	 * the HISRs have been Write-1-cleared for MSI. If not all of the HISRs
	 * are cleared, the edge-triggered interrupt will not be generated when
	 * a new HISR flag is set.
	 */
	rtw_pci_disable_interrupt(rtwdev, rtwpci);

	return IRQ_WAKE_THREAD;
}

static irqreturn_t rtw_pci_interrupt_threadfn(int irq, void *dev)
{
	struct rtw_dev *rtwdev = dev;
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	u32 irq_status[4];
	bool rx = false;

	spin_lock_bh(&rtwpci->irq_lock);
	rtw_pci_irq_recognized(rtwdev, rtwpci, irq_status);

	if (irq_status[0] & IMR_MGNTDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_MGMT);
	if (irq_status[0] & IMR_HIGHDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_HI0);
	if (irq_status[0] & IMR_BEDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BE);
	if (irq_status[0] & IMR_BKDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BK);
	if (irq_status[0] & IMR_VODOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VO);
	if (irq_status[0] & IMR_VIDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VI);
	if (irq_status[3] & IMR_H2CDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_H2C);
	if (irq_status[0] & IMR_ROK) {
		rtw_pci_rx_isr(rtwdev);
		rx = true;
	}
	if (unlikely(irq_status[0] & IMR_C2HCMD))
		rtw_fw_c2h_cmd_isr(rtwdev);

	/* all of the jobs for this interrupt have been done */
	if (rtwpci->running)
		rtw_pci_enable_interrupt(rtwdev, rtwpci, rx);
	spin_unlock_bh(&rtwpci->irq_lock);

	return IRQ_HANDLED;
}

static int rtw_pci_io_mapping(struct rtw_dev *rtwdev,
			      struct pci_dev *pdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	unsigned long len;
	u8 bar_id = 2;
	int ret;

	ret = pci_request_regions(pdev, KBUILD_MODNAME);
	if (ret) {
		rtw_err(rtwdev, "failed to request pci regions\n");
		return ret;
	}

	len = pci_resource_len(pdev, bar_id);
	rtwpci->mmap = pci_iomap(pdev, bar_id, len);
	if (!rtwpci->mmap) {
		pci_release_regions(pdev);
		rtw_err(rtwdev, "failed to map pci memory\n");
		return -ENOMEM;
	}

	return 0;
}

static void rtw_pci_io_unmapping(struct rtw_dev *rtwdev,
				 struct pci_dev *pdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	if (rtwpci->mmap) {
		pci_iounmap(pdev, rtwpci->mmap);
		pci_release_regions(pdev);
	}
}

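/* The DBI interface gives indirect byte-wide access to device link/PHY
 * configuration registers such as RTK_PCIE_LINK_CFG: a request is posted
 * through REG_DBI_FLAG_V1 and then polled for completion.
 */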
static void rtw_dbi_write8(struct rtw_dev *rtwdev, u16 addr, u8 data)
{
	u16 write_addr;
	u16 remainder = addr & ~(BITS_DBI_WREN | BITS_DBI_ADDR_MASK);
	u8 flag;
	u8 cnt;

	write_addr = addr & BITS_DBI_ADDR_MASK;
	write_addr |= u16_encode_bits(BIT(remainder), BITS_DBI_WREN);
	rtw_write8(rtwdev, REG_DBI_WDATA_V1 + remainder, data);
	rtw_write16(rtwdev, REG_DBI_FLAG_V1, write_addr);
	rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_WFLAG >> 16);

	for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
		flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
		if (flag == 0)
			return;

		udelay(10);
	}

	WARN(flag, "failed to write to DBI register, addr=0x%04x\n", addr);
}

static int rtw_dbi_read8(struct rtw_dev *rtwdev, u16 addr, u8 *value)
{
	u16 read_addr = addr & BITS_DBI_ADDR_MASK;
	u8 flag;
	u8 cnt;

	rtw_write16(rtwdev, REG_DBI_FLAG_V1, read_addr);
	rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_RFLAG >> 16);

	for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
		flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
		if (flag == 0) {
			read_addr = REG_DBI_RDATA_V1 + (addr & 3);
			*value = rtw_read8(rtwdev, read_addr);
			return 0;
		}

		udelay(10);
	}

	WARN(1, "failed to read DBI register, addr=0x%04x\n", addr);
	return -EIO;
}

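/* The MDIO interface carries the RTW_IP_SEL_PHY parameters programmed in
 * rtw_pci_phy_cfg(); @g1 selects between the gen1 and gen2 page offsets.
 */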
static void rtw_mdio_write(struct rtw_dev *rtwdev, u8 addr, u16 data, bool g1)
{
	u8 page;
	u8 wflag;
	u8 cnt;

	rtw_write16(rtwdev, REG_MDIO_V1, data);

	page = addr < RTW_PCI_MDIO_PG_SZ ? 0 : 1;
	page += g1 ? RTW_PCI_MDIO_PG_OFFS_G1 : RTW_PCI_MDIO_PG_OFFS_G2;
	rtw_write8(rtwdev, REG_PCIE_MIX_CFG, addr & BITS_MDIO_ADDR_MASK);
	rtw_write8(rtwdev, REG_PCIE_MIX_CFG + 3, page);
	rtw_write32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1, 1);

	for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
		wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG,
					BIT_MDIO_WFLAG_V1);
		if (wflag == 0)
			return;

		udelay(10);
	}

	WARN(wflag, "failed to write to MDIO register, addr=0x%02x\n", addr);
}

static void rtw_pci_clkreq_set(struct rtw_dev *rtwdev, bool enable)
{
	u8 value;
	int ret;

	if (rtw_pci_disable_aspm)
		return;

	ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
	if (ret) {
		rtw_err(rtwdev, "failed to read CLKREQ_L1, ret=%d", ret);
		return;
	}

	if (enable)
		value |= BIT_CLKREQ_SW_EN;
	else
		value &= ~BIT_CLKREQ_SW_EN;

	rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
}

static void rtw_pci_clkreq_pad_low(struct rtw_dev *rtwdev, bool enable)
{
	u8 value;
	int ret;

	ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
	if (ret) {
		rtw_err(rtwdev, "failed to read CLKREQ_L1, ret=%d", ret);
		return;
	}

	if (enable)
		value &= ~BIT_CLKREQ_N_PAD;
	else
		value |= BIT_CLKREQ_N_PAD;

	rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
}

static void rtw_pci_aspm_set(struct rtw_dev *rtwdev, bool enable)
{
	u8 value;
	int ret;

	if (rtw_pci_disable_aspm)
		return;

	ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
	if (ret) {
		rtw_err(rtwdev, "failed to read ASPM, ret=%d", ret);
		return;
	}

	if (enable)
		value |= BIT_L1_SW_EN;
	else
		value &= ~BIT_L1_SW_EN;

	rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
}

static void rtw_pci_link_ps(struct rtw_dev *rtwdev, bool enter)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	/* Like CLKREQ, ASPM is also implemented by two HW modules, and can
	 * only be enabled when the host supports it.
	 *
	 * The ASPM mechanism should be enabled when the driver/firmware
	 * enters power save mode without heavy traffic, because we've
	 * experienced interoperability issues where the link tends to enter
	 * the L1 state on the fly even while the driver is sustaining high
	 * throughput. This is probably because ASPM behavior varies slightly
	 * between SoCs.
	 */
	if (!(rtwpci->link_ctrl & PCI_EXP_LNKCTL_ASPM_L1))
		return;

	if ((enter && atomic_dec_if_positive(&rtwpci->link_usage) == 0) ||
	    (!enter && atomic_inc_return(&rtwpci->link_usage) == 1))
		rtw_pci_aspm_set(rtwdev, enter);
}

static void rtw_pci_link_cfg(struct rtw_dev *rtwdev)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;
	u16 link_ctrl;
	int ret;

	/* RTL8822CE has enabled REFCLK auto calibration, it does not need
	 * to add clock delay to cover the REFCLK timing gap.
	 */
	if (chip->id == RTW_CHIP_TYPE_8822C)
		rtw_dbi_write8(rtwdev, RTK_PCIE_CLKDLY_CTRL, 0);

	/* Though there is a standard PCIE configuration space in which to
	 * set the link control register, by Realtek's design the driver
	 * should first check whether the host supports CLKREQ/ASPM before
	 * enabling the HW module.
	 *
	 * These functions are implemented by two associated HW modules: one
	 * is responsible for accessing the PCIE configuration space to
	 * follow the host settings, the other is in charge of the actual
	 * CLKREQ/ASPM mechanisms and is disabled by default, because the
	 * host sometimes does not support them, and wrong settings
	 * (e.g. CLKREQ# not bi-directional) could lead to device loss if
	 * the HW misbehaves on the link.
	 *
	 * Hence the driver first checks that the PCIE configuration space
	 * is synced and enabled, and only then turns on the other module
	 * that actually implements the mechanism.
	 */
	ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl);
	if (ret) {
		rtw_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret);
		return;
	}

	if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN)
		rtw_pci_clkreq_set(rtwdev, true);

	rtwpci->link_ctrl = link_ctrl;
}

static void rtw_pci_interface_cfg(struct rtw_dev *rtwdev)
{
	const struct rtw_chip_info *chip = rtwdev->chip;

	switch (chip->id) {
	case RTW_CHIP_TYPE_8822C:
		if (rtwdev->hal.cut_version >= RTW_CHIP_VER_CUT_D)
			rtw_write32_mask(rtwdev, REG_HCI_MIX_CFG,
					 BIT_PCIE_EMAC_PDN_AUX_TO_FAST_CLK, 1);
		break;
	default:
		break;
	}
}

static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_efuse *efuse = &rtwdev->efuse;
	struct pci_dev *pdev = rtwpci->pdev;
	const struct rtw_intf_phy_para *para;
	u16 cut;
	u16 value;
	u16 offset;
	int i;
	int ret;

	cut = BIT(0) << rtwdev->hal.cut_version;

	for (i = 0; i < chip->intf_table->n_gen1_para; i++) {
		para = &chip->intf_table->gen1_para[i];
		if (!(para->cut_mask & cut))
			continue;
		if (para->offset == 0xffff)
			break;
		offset = para->offset;
		value = para->value;
		if (para->ip_sel == RTW_IP_SEL_PHY)
			rtw_mdio_write(rtwdev, offset, value, true);
		else
			rtw_dbi_write8(rtwdev, offset, value);
	}

	for (i = 0; i < chip->intf_table->n_gen2_para; i++) {
		para = &chip->intf_table->gen2_para[i];
		if (!(para->cut_mask & cut))
			continue;
		if (para->offset == 0xffff)
			break;
		offset = para->offset;
		value = para->value;
		if (para->ip_sel == RTW_IP_SEL_PHY)
			rtw_mdio_write(rtwdev, offset, value, false);
		else
			rtw_dbi_write8(rtwdev, offset, value);
	}

	rtw_pci_link_cfg(rtwdev);

	/* Disable 8821ce completion timeout by default */
	if (chip->id == RTW_CHIP_TYPE_8821C) {
		ret = pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
					       PCI_EXP_DEVCTL2_COMP_TMOUT_DIS);
		if (ret)
			rtw_err(rtwdev, "failed to set PCI cap, ret = %d\n",
				ret);
	}

	if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 5)
		rtw_write32_mask(rtwdev, REG_ANAPARSW_MAC_0, BIT_CF_L_V2, 0x1);
}

static int __maybe_unused rtw_pci_suspend(struct device *dev)
{
	struct ieee80211_hw *hw = dev_get_drvdata(dev);
	struct rtw_dev *rtwdev = hw->priv;
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_efuse *efuse = &rtwdev->efuse;

	if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 6)
		rtw_pci_clkreq_pad_low(rtwdev, true);
	return 0;
}

static int __maybe_unused rtw_pci_resume(struct device *dev)
{
	struct ieee80211_hw *hw = dev_get_drvdata(dev);
	struct rtw_dev *rtwdev = hw->priv;
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_efuse *efuse = &rtwdev->efuse;

	if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 6)
		rtw_pci_clkreq_pad_low(rtwdev, false);
	return 0;
}

SIMPLE_DEV_PM_OPS(rtw_pm_ops, rtw_pci_suspend, rtw_pci_resume);
EXPORT_SYMBOL(rtw_pm_ops);

static int rtw_pci_claim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to enable pci device\n");
		return ret;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, rtwdev->hw);
	SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);

	return 0;
}

static void rtw_pci_declaim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}

static int rtw_pci_setup_resource(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	struct rtw_pci *rtwpci;
	int ret;

	rtwpci = (struct rtw_pci *)rtwdev->priv;
	rtwpci->pdev = pdev;

	/* after this, the driver can access hw registers */
	ret = rtw_pci_io_mapping(rtwdev, pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to request pci io region\n");
		goto err_out;
	}

	ret = rtw_pci_init(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to allocate pci resources\n");
		goto err_io_unmap;
	}

	return 0;

err_io_unmap:
	rtw_pci_io_unmapping(rtwdev, pdev);

err_out:
	return ret;
}

static void rtw_pci_destroy(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	rtw_pci_deinit(rtwdev);
	rtw_pci_io_unmapping(rtwdev, pdev);
}

static struct rtw_hci_ops rtw_pci_ops = {
	.tx_write = rtw_pci_tx_write,
	.tx_kick_off = rtw_pci_tx_kick_off,
	.flush_queues = rtw_pci_flush_queues,
	.setup = rtw_pci_setup,
	.start = rtw_pci_start,
	.stop = rtw_pci_stop,
	.deep_ps = rtw_pci_deep_ps,
	.link_ps = rtw_pci_link_ps,
	.interface_cfg = rtw_pci_interface_cfg,
	.dynamic_rx_agg = NULL,

	.read8 = rtw_pci_read8,
	.read16 = rtw_pci_read16,
	.read32 = rtw_pci_read32,
	.write8 = rtw_pci_write8,
	.write16 = rtw_pci_write16,
	.write32 = rtw_pci_write32,
	.write_data_rsvd_page = rtw_pci_write_data_rsvd_page,
	.write_data_h2c = rtw_pci_write_data_h2c,
};

static int rtw_pci_request_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	unsigned int flags = PCI_IRQ_INTX;
	int ret;

	if (!rtw_disable_msi)
		flags |= PCI_IRQ_MSI;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, flags);
	if (ret < 0) {
		rtw_err(rtwdev, "failed to alloc PCI irq vectors\n");
		return ret;
	}

	ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq,
					rtw_pci_interrupt_handler,
					rtw_pci_interrupt_threadfn,
					IRQF_SHARED, KBUILD_MODNAME, rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to request irq %d\n", ret);
		pci_free_irq_vectors(pdev);
	}

	return ret;
}

static void rtw_pci_free_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	devm_free_irq(rtwdev->dev, pdev->irq, rtwdev);
	pci_free_irq_vectors(pdev);
}

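/* NAPI poll: drain the RX ring in budget-sized chunks, and re-arm the
 * interrupt once the ring is empty; the ring is re-checked afterwards to
 * close the race against an IRQ that fired during polling.
 */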
static int rtw_pci_napi_poll(struct napi_struct *napi, int budget)
{
	struct rtw_pci *rtwpci = container_of(napi, struct rtw_pci, napi);
	struct rtw_dev *rtwdev = container_of((void *)rtwpci, struct rtw_dev,
					      priv);
	int work_done = 0;

	if (rtwpci->rx_no_aspm)
		rtw_pci_link_ps(rtwdev, false);

	while (work_done < budget) {
		u32 work_done_once;

		work_done_once = rtw_pci_rx_napi(rtwdev, rtwpci, RTW_RX_QUEUE_MPDU,
						 budget - work_done);
		if (work_done_once == 0)
			break;
		work_done += work_done_once;
	}
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		spin_lock_bh(&rtwpci->irq_lock);
		if (rtwpci->running)
			rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
		spin_unlock_bh(&rtwpci->irq_lock);
		/* If an ISR fires during polling, before napi_complete, and
		 * no further data is received, data left on the dma_ring
		 * would not be processed immediately. Check whether the dma
		 * ring is empty and perform napi_schedule accordingly.
		 */
		if (rtw_pci_get_hw_rx_ring_nr(rtwdev, rtwpci))
			napi_schedule(napi);
	}
	if (rtwpci->rx_no_aspm)
		rtw_pci_link_ps(rtwdev, true);

	return work_done;
}

static int rtw_pci_napi_init(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	rtwpci->netdev = alloc_netdev_dummy(0);
	if (!rtwpci->netdev)
		return -ENOMEM;

	netif_napi_add(rtwpci->netdev, &rtwpci->napi, rtw_pci_napi_poll);
	return 0;
}

static void rtw_pci_napi_deinit(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	rtw_pci_napi_stop(rtwdev);
	netif_napi_del(&rtwpci->napi);
	free_netdev(rtwpci->netdev);
}

int rtw_pci_probe(struct pci_dev *pdev,
		  const struct pci_device_id *id)
{
	struct pci_dev *bridge = pci_upstream_bridge(pdev);
	struct ieee80211_hw *hw;
	struct rtw_dev *rtwdev;
	struct rtw_pci *rtwpci;
	int drv_data_size;
	int ret;

	drv_data_size = sizeof(struct rtw_dev) + sizeof(struct rtw_pci);
	hw = ieee80211_alloc_hw(drv_data_size, &rtw_ops);
	if (!hw) {
		dev_err(&pdev->dev, "failed to allocate hw\n");
		return -ENOMEM;
	}

	rtwdev = hw->priv;
	rtwdev->hw = hw;
	rtwdev->dev = &pdev->dev;
	rtwdev->chip = (struct rtw_chip_info *)id->driver_data;
	rtwdev->hci.ops = &rtw_pci_ops;
	rtwdev->hci.type = RTW_HCI_TYPE_PCIE;

	rtwpci = (struct rtw_pci *)rtwdev->priv;
	atomic_set(&rtwpci->link_usage, 1);

	ret = rtw_core_init(rtwdev);
	if (ret)
		goto err_release_hw;

	rtw_dbg(rtwdev, RTW_DBG_PCI,
		"rtw88 pci probe: vendor=0x%4.04X device=0x%4.04X rev=%d\n",
		pdev->vendor, pdev->device, pdev->revision);

	ret = rtw_pci_claim(rtwdev, pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to claim pci device\n");
		goto err_deinit_core;
	}

	ret = rtw_pci_setup_resource(rtwdev, pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to setup pci resources\n");
		goto err_pci_declaim;
	}

	ret = rtw_pci_napi_init(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to setup NAPI\n");
		goto err_pci_declaim;
	}

	ret = rtw_chip_info_setup(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to setup chip information\n");
		goto err_destroy_pci;
	}

	/* Disable PCIe ASPM L1 while doing NAPI poll for 8821CE */
	if (rtwdev->chip->id == RTW_CHIP_TYPE_8821C && bridge->vendor == PCI_VENDOR_ID_INTEL)
		rtwpci->rx_no_aspm = true;

	rtw_pci_phy_cfg(rtwdev);

	ret = rtw_register_hw(rtwdev, hw);
	if (ret) {
		rtw_err(rtwdev, "failed to register hw\n");
		goto err_destroy_pci;
	}

	ret = rtw_pci_request_irq(rtwdev, pdev);
	if (ret) {
		ieee80211_unregister_hw(hw);
		goto err_destroy_pci;
	}

	return 0;

err_destroy_pci:
	rtw_pci_napi_deinit(rtwdev);
	rtw_pci_destroy(rtwdev, pdev);

err_pci_declaim:
	rtw_pci_declaim(rtwdev, pdev);

err_deinit_core:
	rtw_core_deinit(rtwdev);

err_release_hw:
	ieee80211_free_hw(hw);

	return ret;
}
EXPORT_SYMBOL(rtw_pci_probe);

void rtw_pci_remove(struct pci_dev *pdev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
	struct rtw_dev *rtwdev;
	struct rtw_pci *rtwpci;

	if (!hw)
		return;

	rtwdev = hw->priv;
	rtwpci = (struct rtw_pci *)rtwdev->priv;

	rtw_unregister_hw(rtwdev, hw);
	rtw_pci_disable_interrupt(rtwdev, rtwpci);
	rtw_pci_napi_deinit(rtwdev);
	rtw_pci_destroy(rtwdev, pdev);
	rtw_pci_declaim(rtwdev, pdev);
	rtw_pci_free_irq(rtwdev, pdev);
	rtw_core_deinit(rtwdev);
	ieee80211_free_hw(hw);
}
EXPORT_SYMBOL(rtw_pci_remove);

void rtw_pci_shutdown(struct pci_dev *pdev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
	struct rtw_dev *rtwdev;
	const struct rtw_chip_info *chip;

	if (!hw)
		return;

	rtwdev = hw->priv;
	chip = rtwdev->chip;

	if (chip->ops->shutdown)
		chip->ops->shutdown(rtwdev);

	pci_set_power_state(pdev, PCI_D3hot);
}
EXPORT_SYMBOL(rtw_pci_shutdown);

MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek PCI 802.11ac wireless driver");
MODULE_LICENSE("Dual BSD/GPL");