1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright(c) 2018-2019 Realtek Corporation
3 */
4
5 #if defined(__FreeBSD__)
6 #define LINUXKPI_PARAM_PREFIX rtw88_pci_
7 #endif
8
9 #include <linux/module.h>
10 #include <linux/pci.h>
11 #include "main.h"
12 #include "pci.h"
13 #include "reg.h"
14 #include "tx.h"
15 #include "rx.h"
16 #include "fw.h"
17 #include "ps.h"
18 #include "debug.h"
19 #include "mac.h"
20 #if defined(__FreeBSD__)
21 #include <sys/rman.h>
22 #include <linux/pm.h>
23 #endif
24
25 static bool rtw_disable_msi;
26 static bool rtw_pci_disable_aspm;
27 module_param_named(disable_msi, rtw_disable_msi, bool, 0644);
28 module_param_named(disable_aspm, rtw_pci_disable_aspm, bool, 0644);
29 MODULE_PARM_DESC(disable_msi, "Set Y to disable MSI interrupt support");
30 MODULE_PARM_DESC(disable_aspm, "Set Y to disable PCI ASPM support");
31
32 static const u32 rtw_pci_tx_queue_idx_addr[] = {
33 [RTW_TX_QUEUE_BK] = RTK_PCI_TXBD_IDX_BKQ,
34 [RTW_TX_QUEUE_BE] = RTK_PCI_TXBD_IDX_BEQ,
35 [RTW_TX_QUEUE_VI] = RTK_PCI_TXBD_IDX_VIQ,
36 [RTW_TX_QUEUE_VO] = RTK_PCI_TXBD_IDX_VOQ,
37 [RTW_TX_QUEUE_MGMT] = RTK_PCI_TXBD_IDX_MGMTQ,
38 [RTW_TX_QUEUE_HI0] = RTK_PCI_TXBD_IDX_HI0Q,
39 [RTW_TX_QUEUE_H2C] = RTK_PCI_TXBD_IDX_H2CQ,
40 };
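/* The table above maps each software TX queue to the register holding
 * that queue's hardware buffer-descriptor read/write indexes; it is
 * used by the kick-off and TX-completion paths below.
 */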
41
42 static u8 rtw_pci_get_tx_qsel(struct sk_buff *skb,
43 enum rtw_tx_queue_type queue)
44 {
45 switch (queue) {
46 case RTW_TX_QUEUE_BCN:
47 return TX_DESC_QSEL_BEACON;
48 case RTW_TX_QUEUE_H2C:
49 return TX_DESC_QSEL_H2C;
50 case RTW_TX_QUEUE_MGMT:
51 return TX_DESC_QSEL_MGMT;
52 case RTW_TX_QUEUE_HI0:
53 return TX_DESC_QSEL_HIGH;
54 default:
55 return skb->priority;
56 }
57 }
58
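/* MMIO accessors: on Linux, rtwpci->mmap is an ioremapped pointer used
 * with readb/readw/readl and writeb/writew/writel; on FreeBSD it holds
 * a struct resource accessed through bus_read_*()/bus_write_*(), with
 * optional RTW_DBG_IO_RW tracing of every access.
 */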
59 static u8 rtw_pci_read8(struct rtw_dev *rtwdev, u32 addr)
60 {
61 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
62
63 #if defined(__linux__)
64 return readb(rtwpci->mmap + addr);
65 #elif defined(__FreeBSD__)
66 u8 val;
67
68 val = bus_read_1((struct resource *)rtwpci->mmap, addr);
69 rtw_dbg(rtwdev, RTW_DBG_IO_RW, "R08 (%#010x) -> %#04x\n", addr, val);
70 return (val);
71 #endif
72 }
73
74 static u16 rtw_pci_read16(struct rtw_dev *rtwdev, u32 addr)
75 {
76 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
77
78 #if defined(__linux__)
79 return readw(rtwpci->mmap + addr);
80 #elif defined(__FreeBSD__)
81 u16 val;
82
83 val = bus_read_2((struct resource *)rtwpci->mmap, addr);
84 rtw_dbg(rtwdev, RTW_DBG_IO_RW, "R16 (%#010x) -> %#06x\n", addr, val);
85 return (val);
86 #endif
87 }
88
89 static u32 rtw_pci_read32(struct rtw_dev *rtwdev, u32 addr)
90 {
91 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
92
93 #if defined(__linux__)
94 return readl(rtwpci->mmap + addr);
95 #elif defined(__FreeBSD__)
96 u32 val;
97
98 val = bus_read_4((struct resource *)rtwpci->mmap, addr);
99 rtw_dbg(rtwdev, RTW_DBG_IO_RW, "R32 (%#010x) -> %#010x\n", addr, val);
100 return (val);
101 #endif
102 }
103
104 static void rtw_pci_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
105 {
106 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
107
108 #if defined(__linux__)
109 writeb(val, rtwpci->mmap + addr);
110 #elif defined(__FreeBSD__)
111 rtw_dbg(rtwdev, RTW_DBG_IO_RW, "W08 (%#010x) <- %#04x\n", addr, val);
112 return (bus_write_1((struct resource *)rtwpci->mmap, addr, val));
113 #endif
114 }
115
116 static void rtw_pci_write16(struct rtw_dev *rtwdev, u32 addr, u16 val)
117 {
118 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
119
120 #if defined(__linux__)
121 writew(val, rtwpci->mmap + addr);
122 #elif defined(__FreeBSD__)
123 rtw_dbg(rtwdev, RTW_DBG_IO_RW, "W16 (%#010x) <- %#06x\n", addr, val);
124 return (bus_write_2((struct resource *)rtwpci->mmap, addr, val));
125 #endif
126 }
127
128 static void rtw_pci_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
129 {
130 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
131
132 #if defined(__linux__)
133 writel(val, rtwpci->mmap + addr);
134 #elif defined(__FreeBSD__)
135 rtw_dbg(rtwdev, RTW_DBG_IO_RW, "W32 (%#010x) <- %#010x\n", addr, val);
136 return (bus_write_4((struct resource *)rtwpci->mmap, addr, val));
137 #endif
138 }
139
140 static void rtw_pci_free_tx_ring_skbs(struct rtw_dev *rtwdev,
141 struct rtw_pci_tx_ring *tx_ring)
142 {
143 struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
144 struct rtw_pci_tx_data *tx_data;
145 struct sk_buff *skb, *tmp;
146 dma_addr_t dma;
147
148 /* free every skb remaining in the tx list */
149 skb_queue_walk_safe(&tx_ring->queue, skb, tmp) {
150 __skb_unlink(skb, &tx_ring->queue);
151 tx_data = rtw_pci_get_tx_data(skb);
152 dma = tx_data->dma;
153
154 dma_unmap_single(&pdev->dev, dma, skb->len, DMA_TO_DEVICE);
155 dev_kfree_skb_any(skb);
156 }
157 }
158
159 static void rtw_pci_free_tx_ring(struct rtw_dev *rtwdev,
160 struct rtw_pci_tx_ring *tx_ring)
161 {
162 struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
163 u8 *head = tx_ring->r.head;
164 u32 len = tx_ring->r.len;
165 int ring_sz = len * tx_ring->r.desc_size;
166
167 rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);
168
169 /* free the ring itself */
170 dma_free_coherent(&pdev->dev, ring_sz, head, tx_ring->r.dma);
171 tx_ring->r.head = NULL;
172 }
173
174 static void rtw_pci_free_rx_ring_skbs(struct rtw_dev *rtwdev,
175 struct rtw_pci_rx_ring *rx_ring)
176 {
177 struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
178 struct sk_buff *skb;
179 int buf_sz = RTK_PCI_RX_BUF_SIZE;
180 dma_addr_t dma;
181 int i;
182
183 for (i = 0; i < rx_ring->r.len; i++) {
184 skb = rx_ring->buf[i];
185 if (!skb)
186 continue;
187
188 dma = *((dma_addr_t *)skb->cb);
189 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
190 dev_kfree_skb(skb);
191 rx_ring->buf[i] = NULL;
192 }
193 }
194
195 static void rtw_pci_free_rx_ring(struct rtw_dev *rtwdev,
196 struct rtw_pci_rx_ring *rx_ring)
197 {
198 struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
199 u8 *head = rx_ring->r.head;
200 int ring_sz = rx_ring->r.desc_size * rx_ring->r.len;
201
202 rtw_pci_free_rx_ring_skbs(rtwdev, rx_ring);
203
204 dma_free_coherent(&pdev->dev, ring_sz, head, rx_ring->r.dma);
205 }
206
207 static void rtw_pci_free_trx_ring(struct rtw_dev *rtwdev)
208 {
209 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
210 struct rtw_pci_tx_ring *tx_ring;
211 struct rtw_pci_rx_ring *rx_ring;
212 int i;
213
214 for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
215 tx_ring = &rtwpci->tx_rings[i];
216 rtw_pci_free_tx_ring(rtwdev, tx_ring);
217 }
218
219 for (i = 0; i < RTK_MAX_RX_QUEUE_NUM; i++) {
220 rx_ring = &rtwpci->rx_rings[i];
221 rtw_pci_free_rx_ring(rtwdev, rx_ring);
222 }
223 }
224
225 static int rtw_pci_init_tx_ring(struct rtw_dev *rtwdev,
226 struct rtw_pci_tx_ring *tx_ring,
227 u8 desc_size, u32 len)
228 {
229 struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
230 int ring_sz = desc_size * len;
231 dma_addr_t dma;
232 u8 *head;
233
234 if (len > TRX_BD_IDX_MASK) {
235 rtw_err(rtwdev, "len %d exceeds maximum TX entries\n", len);
236 return -EINVAL;
237 }
238
239 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
240 if (!head) {
241 rtw_err(rtwdev, "failed to allocate tx ring\n");
242 return -ENOMEM;
243 }
244
245 skb_queue_head_init(&tx_ring->queue);
246 tx_ring->r.head = head;
247 tx_ring->r.dma = dma;
248 tx_ring->r.len = len;
249 tx_ring->r.desc_size = desc_size;
250 tx_ring->r.wp = 0;
251 tx_ring->r.rp = 0;
252
253 return 0;
254 }
255
256 static int rtw_pci_reset_rx_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
257 struct rtw_pci_rx_ring *rx_ring,
258 u32 idx, u32 desc_sz)
259 {
260 struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
261 struct rtw_pci_rx_buffer_desc *buf_desc;
262 int buf_sz = RTK_PCI_RX_BUF_SIZE;
263 dma_addr_t dma;
264
265 if (!skb)
266 return -EINVAL;
267
268 dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE);
269 if (dma_mapping_error(&pdev->dev, dma))
270 return -EBUSY;
271
272 *((dma_addr_t *)skb->cb) = dma;
273 buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
274 idx * desc_sz);
275 memset(buf_desc, 0, sizeof(*buf_desc));
276 buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
277 buf_desc->dma = cpu_to_le32(dma);
278
279 return 0;
280 }
281
282 static void rtw_pci_sync_rx_desc_device(struct rtw_dev *rtwdev, dma_addr_t dma,
283 struct rtw_pci_rx_ring *rx_ring,
284 u32 idx, u32 desc_sz)
285 {
286 struct device *dev = rtwdev->dev;
287 struct rtw_pci_rx_buffer_desc *buf_desc;
288 int buf_sz = RTK_PCI_RX_BUF_SIZE;
289
290 dma_sync_single_for_device(dev, dma, buf_sz, DMA_FROM_DEVICE);
291
292 buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
293 idx * desc_sz);
294 memset(buf_desc, 0, sizeof(*buf_desc));
295 buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
296 buf_desc->dma = cpu_to_le32(dma);
297 }
298
299 static int rtw_pci_init_rx_ring(struct rtw_dev *rtwdev,
300 struct rtw_pci_rx_ring *rx_ring,
301 u8 desc_size, u32 len)
302 {
303 struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
304 struct sk_buff *skb = NULL;
305 dma_addr_t dma;
306 u8 *head;
307 int ring_sz = desc_size * len;
308 int buf_sz = RTK_PCI_RX_BUF_SIZE;
309 int i, allocated;
310 int ret = 0;
311
312 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
313 if (!head) {
314 rtw_err(rtwdev, "failed to allocate rx ring\n");
315 return -ENOMEM;
316 }
317 rx_ring->r.head = head;
318
319 for (i = 0; i < len; i++) {
320 skb = dev_alloc_skb(buf_sz);
321 if (!skb) {
322 allocated = i;
323 ret = -ENOMEM;
324 goto err_out;
325 }
326
327 memset(skb->data, 0, buf_sz);
328 rx_ring->buf[i] = skb;
329 ret = rtw_pci_reset_rx_desc(rtwdev, skb, rx_ring, i, desc_size);
330 if (ret) {
331 allocated = i;
332 dev_kfree_skb_any(skb);
333 goto err_out;
334 }
335 }
336
337 rx_ring->r.dma = dma;
338 rx_ring->r.len = len;
339 rx_ring->r.desc_size = desc_size;
340 rx_ring->r.wp = 0;
341 rx_ring->r.rp = 0;
342
343 return 0;
344
345 err_out:
346 for (i = 0; i < allocated; i++) {
347 skb = rx_ring->buf[i];
348 if (!skb)
349 continue;
350 dma = *((dma_addr_t *)skb->cb);
351 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
352 dev_kfree_skb_any(skb);
353 rx_ring->buf[i] = NULL;
354 }
355 dma_free_coherent(&pdev->dev, ring_sz, head, dma);
356
357 rtw_err(rtwdev, "failed to init rx buffer\n");
358
359 return ret;
360 }
361
362 static int rtw_pci_init_trx_ring(struct rtw_dev *rtwdev)
363 {
364 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
365 struct rtw_pci_tx_ring *tx_ring;
366 struct rtw_pci_rx_ring *rx_ring;
367 const struct rtw_chip_info *chip = rtwdev->chip;
368 int i = 0, j = 0, tx_alloced = 0, rx_alloced = 0;
369 int tx_desc_size, rx_desc_size;
370 u32 len;
371 int ret;
372
373 tx_desc_size = chip->tx_buf_desc_sz;
374
375 for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
376 tx_ring = &rtwpci->tx_rings[i];
377 len = max_num_of_tx_queue(i);
378 ret = rtw_pci_init_tx_ring(rtwdev, tx_ring, tx_desc_size, len);
379 if (ret)
380 goto out;
381 }
382
383 rx_desc_size = chip->rx_buf_desc_sz;
384
385 for (j = 0; j < RTK_MAX_RX_QUEUE_NUM; j++) {
386 rx_ring = &rtwpci->rx_rings[j];
387 ret = rtw_pci_init_rx_ring(rtwdev, rx_ring, rx_desc_size,
388 RTK_MAX_RX_DESC_NUM);
389 if (ret)
390 goto out;
391 }
392
393 return 0;
394
395 out:
396 tx_alloced = i;
397 for (i = 0; i < tx_alloced; i++) {
398 tx_ring = &rtwpci->tx_rings[i];
399 rtw_pci_free_tx_ring(rtwdev, tx_ring);
400 }
401
402 rx_alloced = j;
403 for (j = 0; j < rx_alloced; j++) {
404 rx_ring = &rtwpci->rx_rings[j];
405 rtw_pci_free_rx_ring(rtwdev, rx_ring);
406 }
407
408 return ret;
409 }
410
411 static void rtw_pci_deinit(struct rtw_dev *rtwdev)
412 {
413 rtw_pci_free_trx_ring(rtwdev);
414 }
415
416 static int rtw_pci_init(struct rtw_dev *rtwdev)
417 {
418 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
419 int ret = 0;
420
421 rtwpci->irq_mask[0] = IMR_HIGHDOK |
422 IMR_MGNTDOK |
423 IMR_BKDOK |
424 IMR_BEDOK |
425 IMR_VIDOK |
426 IMR_VODOK |
427 IMR_ROK |
428 IMR_BCNDMAINT_E |
429 IMR_C2HCMD |
430 0;
431 rtwpci->irq_mask[1] = IMR_TXFOVW |
432 0;
433 rtwpci->irq_mask[3] = IMR_H2CDOK |
434 0;
435 spin_lock_init(&rtwpci->irq_lock);
436 spin_lock_init(&rtwpci->hwirq_lock);
437 ret = rtw_pci_init_trx_ring(rtwdev);
438
439 return ret;
440 }
441
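/* Program each ring's base DMA address and descriptor count into the
 * chip and zero the software read/write pointers.  The BCN queue only
 * needs its base address, and the H2C queue is only set up on parts
 * whose WLAN CPU is not the 8051.
 */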
442 static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev)
443 {
444 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
445 u32 len;
446 u8 tmp;
447 dma_addr_t dma;
448
449 tmp = rtw_read8(rtwdev, RTK_PCI_CTRL + 3);
450 rtw_write8(rtwdev, RTK_PCI_CTRL + 3, tmp | 0xf7);
451
452 dma = rtwpci->tx_rings[RTW_TX_QUEUE_BCN].r.dma;
453 rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BCNQ, dma);
454
455 if (!rtw_chip_wcpu_8051(rtwdev)) {
456 len = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.len;
457 dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma;
458 rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0;
459 rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.wp = 0;
460 rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_H2CQ, len & TRX_BD_IDX_MASK);
461 rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_H2CQ, dma);
462 }
463
464 len = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.len;
465 dma = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.dma;
466 rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.rp = 0;
467 rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.wp = 0;
468 rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BKQ, len & TRX_BD_IDX_MASK);
469 rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BKQ, dma);
470
471 len = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.len;
472 dma = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.dma;
473 rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.rp = 0;
474 rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.wp = 0;
475 rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BEQ, len & TRX_BD_IDX_MASK);
476 rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BEQ, dma);
477
478 len = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.len;
479 dma = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.dma;
480 rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.rp = 0;
481 rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.wp = 0;
482 rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VOQ, len & TRX_BD_IDX_MASK);
483 rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VOQ, dma);
484
485 len = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.len;
486 dma = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.dma;
487 rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.rp = 0;
488 rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.wp = 0;
489 rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VIQ, len & TRX_BD_IDX_MASK);
490 rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VIQ, dma);
491
492 len = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.len;
493 dma = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.dma;
494 rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.rp = 0;
495 rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.wp = 0;
496 rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_MGMTQ, len & TRX_BD_IDX_MASK);
497 rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_MGMTQ, dma);
498
499 len = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.len;
500 dma = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.dma;
501 rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.rp = 0;
502 rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.wp = 0;
503 rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_HI0Q, len & TRX_BD_IDX_MASK);
504 rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_HI0Q, dma);
505
506 len = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.len;
507 dma = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.dma;
508 rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.rp = 0;
509 rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.wp = 0;
510 rtw_write16(rtwdev, RTK_PCI_RXBD_NUM_MPDUQ, len & TRX_BD_IDX_MASK);
511 rtw_write32(rtwdev, RTK_PCI_RXBD_DESA_MPDUQ, dma);
512
513 /* reset read/write point */
514 rtw_write32(rtwdev, RTK_PCI_TXBD_RWPTR_CLR, 0xffffffff);
515
516 /* reset H2C Queue index in a single write */
517 if (rtw_chip_wcpu_3081(rtwdev))
518 rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR,
519 BIT_CLR_H2CQ_HOST_IDX | BIT_CLR_H2CQ_HW_IDX);
520 }
521
522 static void rtw_pci_reset_trx_ring(struct rtw_dev *rtwdev)
523 {
524 rtw_pci_reset_buf_desc(rtwdev);
525 }
526
527 static void rtw_pci_enable_interrupt(struct rtw_dev *rtwdev,
528 struct rtw_pci *rtwpci, bool exclude_rx)
529 {
530 unsigned long flags;
531 u32 imr0_unmask = exclude_rx ? IMR_ROK : 0;
532
533 spin_lock_irqsave(&rtwpci->hwirq_lock, flags);
534
535 rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0] & ~imr0_unmask);
536 rtw_write32(rtwdev, RTK_PCI_HIMR1, rtwpci->irq_mask[1]);
537 if (rtw_chip_wcpu_3081(rtwdev))
538 rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]);
539
540 rtwpci->irq_enabled = true;
541
542 spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
543 }
544
545 static void rtw_pci_disable_interrupt(struct rtw_dev *rtwdev,
546 struct rtw_pci *rtwpci)
547 {
548 unsigned long flags;
549
550 spin_lock_irqsave(&rtwpci->hwirq_lock, flags);
551
552 if (!rtwpci->irq_enabled)
553 goto out;
554
555 rtw_write32(rtwdev, RTK_PCI_HIMR0, 0);
556 rtw_write32(rtwdev, RTK_PCI_HIMR1, 0);
557 if (rtw_chip_wcpu_3081(rtwdev))
558 rtw_write32(rtwdev, RTK_PCI_HIMR3, 0);
559
560 rtwpci->irq_enabled = false;
561
562 out:
563 spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
564 }
565
566 static void rtw_pci_dma_reset(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
567 {
568 /* reset dma and rx tag */
569 rtw_write32_set(rtwdev, RTK_PCI_CTRL,
570 BIT_RST_TRXDMA_INTF | BIT_RX_TAG_EN);
571 rtwpci->rx_tag = 0;
572 }
573
574 static int rtw_pci_setup(struct rtw_dev *rtwdev)
575 {
576 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
577
578 rtw_pci_reset_trx_ring(rtwdev);
579 rtw_pci_dma_reset(rtwdev, rtwpci);
580
581 return 0;
582 }
583
584 static void rtw_pci_dma_release(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
585 {
586 struct rtw_pci_tx_ring *tx_ring;
587 enum rtw_tx_queue_type queue;
588
589 rtw_pci_reset_trx_ring(rtwdev);
590 for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
591 tx_ring = &rtwpci->tx_rings[queue];
592 rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);
593 }
594 }
595
596 static void rtw_pci_napi_start(struct rtw_dev *rtwdev)
597 {
598 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
599
600 if (test_and_set_bit(RTW_PCI_FLAG_NAPI_RUNNING, rtwpci->flags))
601 return;
602
603 napi_enable(&rtwpci->napi);
604 }
605
606 static void rtw_pci_napi_stop(struct rtw_dev *rtwdev)
607 {
608 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
609
610 if (!test_and_clear_bit(RTW_PCI_FLAG_NAPI_RUNNING, rtwpci->flags))
611 return;
612
613 napi_synchronize(&rtwpci->napi);
614 napi_disable(&rtwpci->napi);
615 }
616
617 static int rtw_pci_start(struct rtw_dev *rtwdev)
618 {
619 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
620
621 rtw_pci_napi_start(rtwdev);
622
623 spin_lock_bh(&rtwpci->irq_lock);
624 rtwpci->running = true;
625 rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
626 spin_unlock_bh(&rtwpci->irq_lock);
627
628 return 0;
629 }
630
631 static void rtw_pci_stop(struct rtw_dev *rtwdev)
632 {
633 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
634 struct pci_dev *pdev = rtwpci->pdev;
635
636 spin_lock_bh(&rtwpci->irq_lock);
637 rtwpci->running = false;
638 rtw_pci_disable_interrupt(rtwdev, rtwpci);
639 spin_unlock_bh(&rtwpci->irq_lock);
640
641 synchronize_irq(pdev->irq);
642 rtw_pci_napi_stop(rtwdev);
643
644 spin_lock_bh(&rtwpci->irq_lock);
645 rtw_pci_dma_release(rtwdev, rtwpci);
646 spin_unlock_bh(&rtwpci->irq_lock);
647 }
648
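/* Deep power save is entered only when every data TX ring is empty
 * (unless the firmware can wake for TX), since TX DMA must not run in
 * deep PS.  When the firmware lacks FW_FEATURE_TX_WAKE, deep PS is left
 * again before ringing a TX doorbell in rtw_pci_tx_kick_off_queue().
 */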
649 static void rtw_pci_deep_ps_enter(struct rtw_dev *rtwdev)
650 {
651 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
652 struct rtw_pci_tx_ring *tx_ring;
653 enum rtw_tx_queue_type queue;
654 bool tx_empty = true;
655
656 if (rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE))
657 goto enter_deep_ps;
658
659 lockdep_assert_held(&rtwpci->irq_lock);
660
661 /* TX DMA is not allowed while in deep PS state */
662 for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
663 /* The BCN queue holds reserved pages and has no DMA interrupt;
664 * the H2C queue is managed by the firmware.
665 */
666 if (queue == RTW_TX_QUEUE_BCN ||
667 queue == RTW_TX_QUEUE_H2C)
668 continue;
669
670 tx_ring = &rtwpci->tx_rings[queue];
671
672 /* check if there is any skb DMAing */
673 if (skb_queue_len(&tx_ring->queue)) {
674 tx_empty = false;
675 break;
676 }
677 }
678
679 if (!tx_empty) {
680 rtw_dbg(rtwdev, RTW_DBG_PS,
681 "TX path not empty, cannot enter deep power save state\n");
682 return;
683 }
684 enter_deep_ps:
685 set_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags);
686 rtw_power_mode_change(rtwdev, true);
687 }
688
689 static void rtw_pci_deep_ps_leave(struct rtw_dev *rtwdev)
690 {
691 #if defined(__linux__)
692 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
693
694 lockdep_assert_held(&rtwpci->irq_lock);
695 #elif defined(__FreeBSD__)
696 lockdep_assert_held(&((struct rtw_pci *)rtwdev->priv)->irq_lock);
697 #endif
698
699 if (test_and_clear_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
700 rtw_power_mode_change(rtwdev, false);
701 }
702
703 static void rtw_pci_deep_ps(struct rtw_dev *rtwdev, bool enter)
704 {
705 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
706
707 spin_lock_bh(&rtwpci->irq_lock);
708
709 if (enter && !test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
710 rtw_pci_deep_ps_enter(rtwdev);
711
712 if (!enter && test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
713 rtw_pci_deep_ps_leave(rtwdev);
714
715 spin_unlock_bh(&rtwpci->irq_lock);
716 }
717
718 static void rtw_pci_release_rsvd_page(struct rtw_pci *rtwpci,
719 struct rtw_pci_tx_ring *ring)
720 {
721 struct sk_buff *prev = skb_dequeue(&ring->queue);
722 struct rtw_pci_tx_data *tx_data;
723 dma_addr_t dma;
724
725 if (!prev)
726 return;
727
728 tx_data = rtw_pci_get_tx_data(prev);
729 dma = tx_data->dma;
730 dma_unmap_single(&rtwpci->pdev->dev, dma, prev->len, DMA_TO_DEVICE);
731 dev_kfree_skb_any(prev);
732 }
733
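/* Per-buffer RX sanity check: the value read back from the descriptor's
 * total_pkt_size field is compared against a software tag that
 * increments modulo RX_TAG_MAX for every buffer (BIT_RX_TAG_EN is set
 * in rtw_pci_dma_reset()); a mismatch is reported as a possible PCI bus
 * timeout.
 */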
734 static void rtw_pci_dma_check(struct rtw_dev *rtwdev,
735 struct rtw_pci_rx_ring *rx_ring,
736 u32 idx)
737 {
738 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
739 const struct rtw_chip_info *chip = rtwdev->chip;
740 struct rtw_pci_rx_buffer_desc *buf_desc;
741 u32 desc_sz = chip->rx_buf_desc_sz;
742 u16 total_pkt_size;
743
744 buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
745 idx * desc_sz);
746 total_pkt_size = le16_to_cpu(buf_desc->total_pkt_size);
747
748 /* rx tag mismatch, throw a warning */
749 if (total_pkt_size != rtwpci->rx_tag)
750 rtw_warn(rtwdev, "pci bus timeout, check dma status\n");
751
752 rtwpci->rx_tag = (rtwpci->rx_tag + 1) % RX_TAG_MAX;
753 }
754
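/* The 32-bit TXBD index register packs the host write index in its low
 * 16 bits and the hardware read index in its high 16 bits; reading a
 * u16 at offset +2 below therefore returns just the hardware read
 * index.
 */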
755 static u32 __pci_get_hw_tx_ring_rp(struct rtw_dev *rtwdev, u8 pci_q)
756 {
757 u32 bd_idx_addr = rtw_pci_tx_queue_idx_addr[pci_q];
758 u32 bd_idx = rtw_read16(rtwdev, bd_idx_addr + 2);
759
760 return FIELD_GET(TRX_BD_IDX_MASK, bd_idx);
761 }
762
763 static void __pci_flush_queue(struct rtw_dev *rtwdev, u8 pci_q, bool drop)
764 {
765 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
766 struct rtw_pci_tx_ring *ring = &rtwpci->tx_rings[pci_q];
767 u32 cur_rp;
768 u8 i;
769
770 /* Because the time taken by the I/O in __pci_get_hw_tx_ring_rp is a
771 * bit dynamic, it's hard to define a reasonable fixed total timeout for
772 * the read_poll_timeout* helpers. Instead, we ensure a reasonable number
773 * of polls, so we just use a for loop with udelay here.
774 */
775 for (i = 0; i < 30; i++) {
776 cur_rp = __pci_get_hw_tx_ring_rp(rtwdev, pci_q);
777 if (cur_rp == ring->r.wp)
778 return;
779
780 udelay(1);
781 }
782
783 if (!drop)
784 rtw_dbg(rtwdev, RTW_DBG_UNEXP,
785 "timed out to flush pci tx ring[%d]\n", pci_q);
786 }
787
788 static void __rtw_pci_flush_queues(struct rtw_dev *rtwdev, u32 pci_queues,
789 bool drop)
790 {
791 u8 q;
792
793 for (q = 0; q < RTK_MAX_TX_QUEUE_NUM; q++) {
794 /* Unnecessary to flush BCN, H2C and HI tx queues. */
795 if (q == RTW_TX_QUEUE_BCN || q == RTW_TX_QUEUE_H2C ||
796 q == RTW_TX_QUEUE_HI0)
797 continue;
798
799 if (pci_queues & BIT(q))
800 __pci_flush_queue(rtwdev, q, drop);
801 }
802 }
803
804 static void rtw_pci_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop)
805 {
806 u32 pci_queues = 0;
807 u8 i;
808
809 /* If all of the hardware queues are requested to flush,
810 * flush all of the pci queues.
811 */
812 if (queues == BIT(rtwdev->hw->queues) - 1) {
813 pci_queues = BIT(RTK_MAX_TX_QUEUE_NUM) - 1;
814 } else {
815 for (i = 0; i < rtwdev->hw->queues; i++)
816 if (queues & BIT(i))
817 pci_queues |= BIT(rtw_tx_ac_to_hwq(i));
818 }
819
820 __rtw_pci_flush_queues(rtwdev, pci_queues, drop);
821 }
822
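/* Ringing a queue's doorbell writes the ring's current write pointer
 * into that queue's TXBD index register.  Data queues are batched:
 * rtw_pci_tx_write() only marks the queue in tx_queued, and
 * rtw_pci_tx_kick_off() later rings every queue that was marked.
 */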
823 static void rtw_pci_tx_kick_off_queue(struct rtw_dev *rtwdev,
824 enum rtw_tx_queue_type queue)
825 {
826 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
827 struct rtw_pci_tx_ring *ring;
828 u32 bd_idx;
829
830 ring = &rtwpci->tx_rings[queue];
831 bd_idx = rtw_pci_tx_queue_idx_addr[queue];
832
833 spin_lock_bh(&rtwpci->irq_lock);
834 if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE))
835 rtw_pci_deep_ps_leave(rtwdev);
836 rtw_write16(rtwdev, bd_idx, ring->r.wp & TRX_BD_IDX_MASK);
837 spin_unlock_bh(&rtwpci->irq_lock);
838 }
839
840 static void rtw_pci_tx_kick_off(struct rtw_dev *rtwdev)
841 {
842 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
843 enum rtw_tx_queue_type queue;
844
845 for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++)
846 if (test_and_clear_bit(queue, rtwpci->tx_queued))
847 rtw_pci_tx_kick_off_queue(rtwdev, queue);
848 }
849
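/* Common TX path: push the chip's TX packet descriptor in front of the
 * frame, map the whole skb once, and fill two buffer descriptors -
 * buf_desc[0] covers the packet descriptor header and buf_desc[1] the
 * payload behind it.  For data queues only the write pointer is
 * advanced here; the doorbell is rung later.  The beacon queue instead
 * sets the OWN bit and recycles the previously queued reserved page.
 */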
850 static int rtw_pci_tx_write_data(struct rtw_dev *rtwdev,
851 struct rtw_tx_pkt_info *pkt_info,
852 struct sk_buff *skb,
853 enum rtw_tx_queue_type queue)
854 {
855 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
856 const struct rtw_chip_info *chip = rtwdev->chip;
857 struct rtw_pci_tx_ring *ring;
858 struct rtw_pci_tx_data *tx_data;
859 dma_addr_t dma;
860 u32 tx_pkt_desc_sz = chip->tx_pkt_desc_sz;
861 u32 tx_buf_desc_sz = chip->tx_buf_desc_sz;
862 u32 size;
863 u32 psb_len;
864 u8 *pkt_desc;
865 struct rtw_pci_tx_buffer_desc *buf_desc;
866
867 ring = &rtwpci->tx_rings[queue];
868
869 size = skb->len;
870
871 if (queue == RTW_TX_QUEUE_BCN)
872 rtw_pci_release_rsvd_page(rtwpci, ring);
873 else if (!avail_desc(ring->r.wp, ring->r.rp, ring->r.len))
874 return -ENOSPC;
875
876 pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
877 memset(pkt_desc, 0, tx_pkt_desc_sz);
878 pkt_info->qsel = rtw_pci_get_tx_qsel(skb, queue);
879 rtw_tx_fill_tx_desc(rtwdev, pkt_info, skb);
880 dma = dma_map_single(&rtwpci->pdev->dev, skb->data, skb->len,
881 DMA_TO_DEVICE);
882 if (dma_mapping_error(&rtwpci->pdev->dev, dma))
883 return -EBUSY;
884
885 /* after this we got dma mapped, there is no way back */
886 buf_desc = get_tx_buffer_desc(ring, tx_buf_desc_sz);
887 memset(buf_desc, 0, tx_buf_desc_sz);
888 psb_len = (skb->len - 1) / 128 + 1;
889 if (queue == RTW_TX_QUEUE_BCN)
890 psb_len |= 1 << RTK_PCI_TXBD_OWN_OFFSET;
891
892 buf_desc[0].psb_len = cpu_to_le16(psb_len);
893 buf_desc[0].buf_size = cpu_to_le16(tx_pkt_desc_sz);
894 buf_desc[0].dma = cpu_to_le32(dma);
895 buf_desc[1].buf_size = cpu_to_le16(size);
896 buf_desc[1].dma = cpu_to_le32(dma + tx_pkt_desc_sz);
897
898 tx_data = rtw_pci_get_tx_data(skb);
899 tx_data->dma = dma;
900 tx_data->sn = pkt_info->sn;
901
902 spin_lock_bh(&rtwpci->irq_lock);
903
904 skb_queue_tail(&ring->queue, skb);
905
906 if (queue == RTW_TX_QUEUE_BCN)
907 goto out_unlock;
908
909 /* update write-index, and kick it off later */
910 set_bit(queue, rtwpci->tx_queued);
911 if (++ring->r.wp >= ring->r.len)
912 ring->r.wp = 0;
913
914 out_unlock:
915 spin_unlock_bh(&rtwpci->irq_lock);
916
917 return 0;
918 }
919
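/* Reserved-page and H2C firmware transfers reuse the normal TX data
 * path: reserved pages are queued on the beacon ring and started by
 * setting BIT_PCI_BCNQ_FLAG, while H2C packets go through the H2C ring
 * and are kicked off immediately.
 */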
920 static int rtw_pci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf,
921 u32 size)
922 {
923 struct sk_buff *skb;
924 struct rtw_tx_pkt_info pkt_info = {0};
925 u8 reg_bcn_work;
926 int ret;
927
928 skb = rtw_tx_write_data_rsvd_page_get(rtwdev, &pkt_info, buf, size);
929 if (!skb)
930 return -ENOMEM;
931
932 ret = rtw_pci_tx_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_BCN);
933 if (ret) {
934 #if defined(__FreeBSD__)
935 dev_kfree_skb_any(skb);
936 #endif
937 rtw_err(rtwdev, "failed to write rsvd page data\n");
938 return ret;
939 }
940
941 /* reserved pages go through beacon queue */
942 reg_bcn_work = rtw_read8(rtwdev, RTK_PCI_TXBD_BCN_WORK);
943 reg_bcn_work |= BIT_PCI_BCNQ_FLAG;
944 rtw_write8(rtwdev, RTK_PCI_TXBD_BCN_WORK, reg_bcn_work);
945
946 return 0;
947 }
948
949 static int rtw_pci_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size)
950 {
951 struct sk_buff *skb;
952 struct rtw_tx_pkt_info pkt_info = {0};
953 int ret;
954
955 skb = rtw_tx_write_data_h2c_get(rtwdev, &pkt_info, buf, size);
956 if (!skb)
957 return -ENOMEM;
958
959 ret = rtw_pci_tx_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_H2C);
960 if (ret) {
961 #if defined(__FreeBSD__)
962 dev_kfree_skb_any(skb);
963 #endif
964 rtw_err(rtwdev, "failed to write h2c data\n");
965 return ret;
966 }
967
968 rtw_pci_tx_kick_off_queue(rtwdev, RTW_TX_QUEUE_H2C);
969
970 return 0;
971 }
972
973 static int rtw_pci_tx_write(struct rtw_dev *rtwdev,
974 struct rtw_tx_pkt_info *pkt_info,
975 struct sk_buff *skb)
976 {
977 enum rtw_tx_queue_type queue = rtw_tx_queue_mapping(skb);
978 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
979 struct rtw_pci_tx_ring *ring;
980 int ret;
981
982 ret = rtw_pci_tx_write_data(rtwdev, pkt_info, skb, queue);
983 if (ret)
984 return ret;
985
986 ring = &rtwpci->tx_rings[queue];
987 spin_lock_bh(&rtwpci->irq_lock);
988 if (avail_desc(ring->r.wp, ring->r.rp, ring->r.len) < 2) {
989 ieee80211_stop_queue(rtwdev->hw, skb_get_queue_mapping(skb));
990 ring->queue_stopped = true;
991 }
992 spin_unlock_bh(&rtwpci->irq_lock);
993
994 return 0;
995 }
996
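/* TX completion: the number of finished descriptors is the distance
 * between the hardware read index and the driver's cached read index,
 * accounting for ring wrap-around.  Completed frames are unmapped and
 * either handed to the TX-report machinery or their status is reported
 * to mac80211 directly; H2C buffers are simply freed.
 */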
997 static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
998 u8 hw_queue)
999 {
1000 struct ieee80211_hw *hw = rtwdev->hw;
1001 struct ieee80211_tx_info *info;
1002 struct rtw_pci_tx_ring *ring;
1003 struct rtw_pci_tx_data *tx_data;
1004 struct sk_buff *skb;
1005 u32 count;
1006 u32 bd_idx_addr;
1007 u32 bd_idx, cur_rp, rp_idx;
1008 u16 q_map;
1009
1010 ring = &rtwpci->tx_rings[hw_queue];
1011
1012 bd_idx_addr = rtw_pci_tx_queue_idx_addr[hw_queue];
1013 bd_idx = rtw_read32(rtwdev, bd_idx_addr);
1014 cur_rp = bd_idx >> 16;
1015 cur_rp &= TRX_BD_IDX_MASK;
1016 rp_idx = ring->r.rp;
1017 if (cur_rp >= ring->r.rp)
1018 count = cur_rp - ring->r.rp;
1019 else
1020 count = ring->r.len - (ring->r.rp - cur_rp);
1021
1022 while (count--) {
1023 skb = skb_dequeue(&ring->queue);
1024 if (!skb) {
1025 rtw_err(rtwdev, "failed to dequeue %d skb TX queue %d, BD=0x%08x, rp %d -> %d\n",
1026 count, hw_queue, bd_idx, ring->r.rp, cur_rp);
1027 break;
1028 }
1029 tx_data = rtw_pci_get_tx_data(skb);
1030 dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
1031 DMA_TO_DEVICE);
1032
1033 /* just free command packets from host to card */
1034 if (hw_queue == RTW_TX_QUEUE_H2C) {
1035 dev_kfree_skb_irq(skb);
1036 continue;
1037 }
1038
1039 if (ring->queue_stopped &&
1040 avail_desc(ring->r.wp, rp_idx, ring->r.len) > 4) {
1041 q_map = skb_get_queue_mapping(skb);
1042 ieee80211_wake_queue(hw, q_map);
1043 ring->queue_stopped = false;
1044 }
1045
1046 if (++rp_idx >= ring->r.len)
1047 rp_idx = 0;
1048
1049 skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);
1050
1051 info = IEEE80211_SKB_CB(skb);
1052
1053 /* enqueue to wait for tx report */
1054 if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
1055 rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn);
1056 continue;
1057 }
1058
1059 /* always ACK for others, then they won't be marked as drop */
1060 if (info->flags & IEEE80211_TX_CTL_NO_ACK)
1061 info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
1062 else
1063 info->flags |= IEEE80211_TX_STAT_ACK;
1064
1065 ieee80211_tx_info_clear_status(info);
1066 ieee80211_tx_status_irqsafe(hw, skb);
1067 }
1068
1069 ring->r.rp = cur_rp;
1070 }
1071
1072 static void rtw_pci_rx_isr(struct rtw_dev *rtwdev)
1073 {
1074 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1075 struct napi_struct *napi = &rtwpci->napi;
1076
1077 napi_schedule(napi);
1078 }
1079
1080 static int rtw_pci_get_hw_rx_ring_nr(struct rtw_dev *rtwdev,
1081 struct rtw_pci *rtwpci)
1082 {
1083 struct rtw_pci_rx_ring *ring;
1084 int count = 0;
1085 u32 tmp, cur_wp;
1086
1087 ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];
1088 tmp = rtw_read32(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ);
1089 cur_wp = u32_get_bits(tmp, TRX_BD_HW_IDX_MASK);
1090 if (cur_wp >= ring->r.wp)
1091 count = cur_wp - ring->r.wp;
1092 else
1093 count = ring->r.len - (ring->r.wp - cur_wp);
1094
1095 return count;
1096 }
1097
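/* NAPI RX: each received frame is copied out of its ring skb into a
 * freshly allocated skb, so the ring skb can immediately be handed back
 * to the device via rtw_pci_sync_rx_desc_device().  C2H firmware
 * packets are diverted to the firmware handler; everything else goes to
 * mac80211 through ieee80211_rx_napi().
 */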
1098 static u32 rtw_pci_rx_napi(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
1099 u8 hw_queue, u32 limit)
1100 {
1101 const struct rtw_chip_info *chip = rtwdev->chip;
1102 struct napi_struct *napi = &rtwpci->napi;
1103 struct rtw_pci_rx_ring *ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];
1104 struct rtw_rx_pkt_stat pkt_stat;
1105 struct ieee80211_rx_status rx_status;
1106 struct sk_buff *skb, *new;
1107 u32 cur_rp = ring->r.rp;
1108 u32 count, rx_done = 0;
1109 u32 pkt_offset;
1110 u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
1111 u32 buf_desc_sz = chip->rx_buf_desc_sz;
1112 u32 new_len;
1113 u8 *rx_desc;
1114 dma_addr_t dma;
1115
1116 count = rtw_pci_get_hw_rx_ring_nr(rtwdev, rtwpci);
1117 count = min(count, limit);
1118
1119 while (count--) {
1120 rtw_pci_dma_check(rtwdev, ring, cur_rp);
1121 skb = ring->buf[cur_rp];
1122 dma = *((dma_addr_t *)skb->cb);
1123 dma_sync_single_for_cpu(rtwdev->dev, dma, RTK_PCI_RX_BUF_SIZE,
1124 DMA_FROM_DEVICE);
1125 rx_desc = skb->data;
1126 rtw_rx_query_rx_desc(rtwdev, rx_desc, &pkt_stat, &rx_status);
1127
1128 /* offset from rx_desc to payload */
1129 pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz +
1130 pkt_stat.shift;
1131
1132 /* allocate a new skb for this frame,
1133 * discard the frame if none available
1134 */
1135 new_len = pkt_stat.pkt_len + pkt_offset;
1136 new = dev_alloc_skb(new_len);
1137 if (WARN_ONCE(!new, "rx routine starvation\n"))
1138 goto next_rp;
1139
1140 /* put the DMA data including rx_desc from phy to new skb */
1141 skb_put_data(new, skb->data, new_len);
1142
1143 if (pkt_stat.is_c2h) {
1144 rtw_fw_c2h_cmd_rx_irqsafe(rtwdev, pkt_offset, new);
1145 } else {
1146 /* remove rx_desc */
1147 skb_pull(new, pkt_offset);
1148
1149 rtw_update_rx_freq_for_invalid(rtwdev, new, &rx_status, &pkt_stat);
1150 rtw_rx_stats(rtwdev, pkt_stat.vif, new);
1151 memcpy(new->cb, &rx_status, sizeof(rx_status));
1152 ieee80211_rx_napi(rtwdev->hw, NULL, new, napi);
1153 rx_done++;
1154 }
1155
1156 next_rp:
1157 /* new skb delivered to mac80211, re-enable original skb DMA */
1158 rtw_pci_sync_rx_desc_device(rtwdev, dma, ring, cur_rp,
1159 buf_desc_sz);
1160
1161 /* host read next element in ring */
1162 if (++cur_rp >= ring->r.len)
1163 cur_rp = 0;
1164 }
1165
1166 ring->r.rp = cur_rp;
1167 /* 'rp', the last position we have read, is seen as the previous position
1168 * of 'wp' that is used to calculate 'count' next time.
1169 */
1170 ring->r.wp = cur_rp;
1171 rtw_write16(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ, ring->r.rp);
1172
1173 return rx_done;
1174 }
1175
1176 static void rtw_pci_irq_recognized(struct rtw_dev *rtwdev,
1177 struct rtw_pci *rtwpci, u32 *irq_status)
1178 {
1179 unsigned long flags;
1180
1181 spin_lock_irqsave(&rtwpci->hwirq_lock, flags);
1182
1183 irq_status[0] = rtw_read32(rtwdev, RTK_PCI_HISR0);
1184 irq_status[1] = rtw_read32(rtwdev, RTK_PCI_HISR1);
1185 if (rtw_chip_wcpu_3081(rtwdev))
1186 irq_status[3] = rtw_read32(rtwdev, RTK_PCI_HISR3);
1187 else
1188 irq_status[3] = 0;
1189 irq_status[0] &= rtwpci->irq_mask[0];
1190 irq_status[1] &= rtwpci->irq_mask[1];
1191 irq_status[3] &= rtwpci->irq_mask[3];
1192 rtw_write32(rtwdev, RTK_PCI_HISR0, irq_status[0]);
1193 rtw_write32(rtwdev, RTK_PCI_HISR1, irq_status[1]);
1194 if (rtw_chip_wcpu_3081(rtwdev))
1195 rtw_write32(rtwdev, RTK_PCI_HISR3, irq_status[3]);
1196
1197 spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
1198 }
1199
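/* Interrupt handling is split in two: the hard handler only masks HIMR
 * and wakes the thread, while the threaded handler reads and
 * write-1-clears HISR, dispatches per-queue TX completions, schedules
 * NAPI for RX, and finally re-enables the interrupt mask (keeping
 * IMR_ROK masked while NAPI owns the RX ring).
 */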
1200 static irqreturn_t rtw_pci_interrupt_handler(int irq, void *dev)
1201 {
1202 struct rtw_dev *rtwdev = dev;
1203 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1204
1205 /* disable the RTW PCI interrupt to avoid more interrupts before the end
1206 * of the thread function
1207 *
1208 * disable HIMR here to also avoid new HISR flag being raised before
1209 * the HISRs have been Write-1-cleared for MSI. If not all of the HISRs
1210 * are cleared, the edge-triggered interrupt will not be generated when
1211 * a new HISR flag is set.
1212 */
1213 rtw_pci_disable_interrupt(rtwdev, rtwpci);
1214
1215 return IRQ_WAKE_THREAD;
1216 }
1217
1218 static irqreturn_t rtw_pci_interrupt_threadfn(int irq, void *dev)
1219 {
1220 struct rtw_dev *rtwdev = dev;
1221 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1222 u32 irq_status[4];
1223 bool rx = false;
1224
1225 spin_lock_bh(&rtwpci->irq_lock);
1226 rtw_pci_irq_recognized(rtwdev, rtwpci, irq_status);
1227
1228 if (irq_status[0] & IMR_MGNTDOK)
1229 rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_MGMT);
1230 if (irq_status[0] & IMR_HIGHDOK)
1231 rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_HI0);
1232 if (irq_status[0] & IMR_BEDOK)
1233 rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BE);
1234 if (irq_status[0] & IMR_BKDOK)
1235 rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BK);
1236 if (irq_status[0] & IMR_VODOK)
1237 rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VO);
1238 if (irq_status[0] & IMR_VIDOK)
1239 rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VI);
1240 if (irq_status[3] & IMR_H2CDOK)
1241 rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_H2C);
1242 if (irq_status[0] & IMR_ROK) {
1243 rtw_pci_rx_isr(rtwdev);
1244 rx = true;
1245 }
1246 if (unlikely(irq_status[0] & IMR_C2HCMD))
1247 rtw_fw_c2h_cmd_isr(rtwdev);
1248
1249 /* all of the jobs for this interrupt have been done */
1250 if (rtwpci->running)
1251 rtw_pci_enable_interrupt(rtwdev, rtwpci, rx);
1252 spin_unlock_bh(&rtwpci->irq_lock);
1253
1254 return IRQ_HANDLED;
1255 }
1256
1257 static int rtw_pci_io_mapping(struct rtw_dev *rtwdev,
1258 struct pci_dev *pdev)
1259 {
1260 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1261 unsigned long len;
1262 u8 bar_id = 2;
1263 int ret;
1264
1265 ret = pci_request_regions(pdev, KBUILD_MODNAME);
1266 if (ret) {
1267 rtw_err(rtwdev, "failed to request pci regions\n");
1268 return ret;
1269 }
1270
1271 #if defined(__FreeBSD__)
1272 ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
1273 if (ret) {
1274 rtw_err(rtwdev, "failed to set dma mask to 32-bit\n");
1275 goto err_release_regions;
1276 }
1277
1278 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
1279 if (ret) {
1280 rtw_err(rtwdev, "failed to set consistent dma mask to 32-bit\n");
1281 goto err_release_regions;
1282 }
1283 #endif
1284
1285 len = pci_resource_len(pdev, bar_id);
1286 #if defined(__FreeBSD__)
1287 linuxkpi_pcim_want_to_use_bus_functions(pdev);
1288 #endif
1289 rtwpci->mmap = pci_iomap(pdev, bar_id, len);
1290 if (!rtwpci->mmap) {
1291 pci_release_regions(pdev);
1292 rtw_err(rtwdev, "failed to map pci memory\n");
1293 return -ENOMEM;
1294 }
1295
1296 return 0;
1297 #if defined(__FreeBSD__)
1298 err_release_regions:
1299 pci_release_regions(pdev);
1300 return ret;
1301 #endif
1302 }
1303
1304 static void rtw_pci_io_unmapping(struct rtw_dev *rtwdev,
1305 struct pci_dev *pdev)
1306 {
1307 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1308
1309 if (rtwpci->mmap) {
1310 pci_iounmap(pdev, rtwpci->mmap);
1311 pci_release_regions(pdev);
1312 }
1313 }
1314
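/* DBI accessors provide indirect access to the device's PCIe
 * configuration space: the target address and a read/write flag are
 * programmed through REG_DBI_FLAG_V1, and the flag is then polled (up
 * to RTW_PCI_WR_RETRY_CNT times with 10us delays) until the hardware
 * clears it.
 */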
1315 static void rtw_dbi_write8(struct rtw_dev *rtwdev, u16 addr, u8 data)
1316 {
1317 u16 write_addr;
1318 u16 remainder = addr & ~(BITS_DBI_WREN | BITS_DBI_ADDR_MASK);
1319 u8 flag;
1320 u8 cnt;
1321
1322 write_addr = addr & BITS_DBI_ADDR_MASK;
1323 write_addr |= u16_encode_bits(BIT(remainder), BITS_DBI_WREN);
1324 rtw_write8(rtwdev, REG_DBI_WDATA_V1 + remainder, data);
1325 rtw_write16(rtwdev, REG_DBI_FLAG_V1, write_addr);
1326 rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_WFLAG >> 16);
1327
1328 for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
1329 flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
1330 if (flag == 0)
1331 return;
1332
1333 udelay(10);
1334 }
1335
1336 WARN(flag, "failed to write to DBI register, addr=0x%04x\n", addr);
1337 }
1338
1339 static int rtw_dbi_read8(struct rtw_dev *rtwdev, u16 addr, u8 *value)
1340 {
1341 u16 read_addr = addr & BITS_DBI_ADDR_MASK;
1342 u8 flag;
1343 u8 cnt;
1344
1345 rtw_write16(rtwdev, REG_DBI_FLAG_V1, read_addr);
1346 rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_RFLAG >> 16);
1347
1348 for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
1349 flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
1350 if (flag == 0) {
1351 read_addr = REG_DBI_RDATA_V1 + (addr & 3);
1352 *value = rtw_read8(rtwdev, read_addr);
1353 return 0;
1354 }
1355
1356 udelay(10);
1357 }
1358
1359 WARN(1, "failed to read DBI register, addr=0x%04x\n", addr);
1360 return -EIO;
1361 }
1362
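/* MDIO accessor for the PCIe PHY: the register address is split into a
 * page (with separate page offsets for Gen1 and Gen2 parameter sets)
 * and an in-page offset programmed via REG_PCIE_MIX_CFG, and completion
 * is polled through BIT_MDIO_WFLAG_V1.
 */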
1363 static void rtw_mdio_write(struct rtw_dev *rtwdev, u8 addr, u16 data, bool g1)
1364 {
1365 u8 page;
1366 u8 wflag;
1367 u8 cnt;
1368
1369 rtw_write16(rtwdev, REG_MDIO_V1, data);
1370
1371 page = addr < RTW_PCI_MDIO_PG_SZ ? 0 : 1;
1372 page += g1 ? RTW_PCI_MDIO_PG_OFFS_G1 : RTW_PCI_MDIO_PG_OFFS_G2;
1373 rtw_write8(rtwdev, REG_PCIE_MIX_CFG, addr & BITS_MDIO_ADDR_MASK);
1374 rtw_write8(rtwdev, REG_PCIE_MIX_CFG + 3, page);
1375 rtw_write32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1, 1);
1376
1377 for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
1378 wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG,
1379 BIT_MDIO_WFLAG_V1);
1380 if (wflag == 0)
1381 return;
1382
1383 udelay(10);
1384 }
1385
1386 WARN(wflag, "failed to write to MDIO register, addr=0x%02x\n", addr);
1387 }
1388
1389 static void rtw_pci_clkreq_set(struct rtw_dev *rtwdev, bool enable)
1390 {
1391 u8 value;
1392 int ret;
1393
1394 if (rtw_pci_disable_aspm)
1395 return;
1396
1397 ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
1398 if (ret) {
1399 rtw_err(rtwdev, "failed to read CLKREQ_L1, ret=%d", ret);
1400 return;
1401 }
1402
1403 if (enable)
1404 value |= BIT_CLKREQ_SW_EN;
1405 else
1406 value &= ~BIT_CLKREQ_SW_EN;
1407
1408 rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
1409 }
1410
1411 static void rtw_pci_clkreq_pad_low(struct rtw_dev *rtwdev, bool enable)
1412 {
1413 u8 value;
1414 int ret;
1415
1416 ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
1417 if (ret) {
1418 rtw_err(rtwdev, "failed to read CLKREQ_L1, ret=%d", ret);
1419 return;
1420 }
1421
1422 if (enable)
1423 value &= ~BIT_CLKREQ_N_PAD;
1424 else
1425 value |= BIT_CLKREQ_N_PAD;
1426
1427 rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
1428 }
1429
1430 static void rtw_pci_aspm_set(struct rtw_dev *rtwdev, bool enable)
1431 {
1432 u8 value;
1433 int ret;
1434
1435 if (rtw_pci_disable_aspm)
1436 return;
1437
1438 ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
1439 if (ret) {
1440 rtw_err(rtwdev, "failed to read ASPM, ret=%d", ret);
1441 return;
1442 }
1443
1444 if (enable)
1445 value |= BIT_L1_SW_EN;
1446 else
1447 value &= ~BIT_L1_SW_EN;
1448
1449 rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
1450 }
1451
1452 static void rtw_pci_link_ps(struct rtw_dev *rtwdev, bool enter)
1453 {
1454 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1455
1456 /* Like CLKREQ, ASPM is also implemented by two HW modules, and can
1457 * only be enabled when host supports it.
1458 *
1459 * The ASPM mechanism should only be enabled when the driver/firmware
1460 * enters power save mode and there is no heavy traffic, because we've
1461 * seen inter-operability issues where the link tends to enter L1
1462 * state on the fly even while the driver is sustaining high
1463 * throughput. This is probably because ASPM behavior varies slightly
1464 * between SoCs.
1465 */
1466 if (!(rtwpci->link_ctrl & PCI_EXP_LNKCTL_ASPM_L1))
1467 return;
1468
1469 if ((enter && atomic_dec_if_positive(&rtwpci->link_usage) == 0) ||
1470 (!enter && atomic_inc_return(&rtwpci->link_usage) == 1))
1471 rtw_pci_aspm_set(rtwdev, enter);
1472 }
1473
1474 static void rtw_pci_link_cfg(struct rtw_dev *rtwdev)
1475 {
1476 const struct rtw_chip_info *chip = rtwdev->chip;
1477 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1478 struct pci_dev *pdev = rtwpci->pdev;
1479 u16 link_ctrl;
1480 int ret;
1481
1482 /* RTL8822CE has REFCLK auto-calibration enabled, so it does not need
1483 * an added clock delay to cover the REFCLK timing gap.
1484 */
1485 if (chip->id == RTW_CHIP_TYPE_8822C)
1486 rtw_dbi_write8(rtwdev, RTK_PCIE_CLKDLY_CTRL, 0);
1487
1488 /* Although the link control register could be set through the
1489 * standard PCIe configuration space, by Realtek's design the driver
1490 * should check whether the host supports CLKREQ/ASPM before enabling
1491 * the HW module.
1492 *
1493 * These features are implemented by two associated HW modules: one
1494 * accesses the PCIe configuration space to follow the host settings,
1495 * while the other performs the CLKREQ/ASPM mechanisms themselves and
1496 * is disabled by default, because sometimes the host does not support
1497 * them, and wrong settings (e.g. CLKREQ# not bi-directional) could
1498 * lead to losing the device if the HW misbehaves on the link.
1499 *
1500 * Hence the driver first checks that the PCIe configuration space is
1501 * synced and enabled, and only then turns on the module that actually
1502 * performs the mechanism.
1503 */
1504 ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl);
1505 if (ret) {
1506 rtw_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret);
1507 return;
1508 }
1509
1510 if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN)
1511 rtw_pci_clkreq_set(rtwdev, true);
1512
1513 rtwpci->link_ctrl = link_ctrl;
1514 }
1515
1516 static void rtw_pci_interface_cfg(struct rtw_dev *rtwdev)
1517 {
1518 const struct rtw_chip_info *chip = rtwdev->chip;
1519
1520 switch (chip->id) {
1521 case RTW_CHIP_TYPE_8822C:
1522 if (rtwdev->hal.cut_version >= RTW_CHIP_VER_CUT_D)
1523 rtw_write32_mask(rtwdev, REG_HCI_MIX_CFG,
1524 BIT_PCIE_EMAC_PDN_AUX_TO_FAST_CLK, 1);
1525 break;
1526 default:
1527 break;
1528 }
1529 }
1530
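/* Apply the chip's interface PHY parameter tables: entries are filtered
 * by the current cut version, RTW_IP_SEL_PHY entries go through MDIO
 * and all others through DBI, and an offset of 0xffff terminates a
 * table early.
 */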
1531 static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
1532 {
1533 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1534 const struct rtw_chip_info *chip = rtwdev->chip;
1535 struct rtw_efuse *efuse = &rtwdev->efuse;
1536 struct pci_dev *pdev = rtwpci->pdev;
1537 const struct rtw_intf_phy_para *para;
1538 u16 cut;
1539 u16 value;
1540 u16 offset;
1541 int i;
1542 int ret;
1543
1544 cut = BIT(0) << rtwdev->hal.cut_version;
1545
1546 for (i = 0; i < chip->intf_table->n_gen1_para; i++) {
1547 para = &chip->intf_table->gen1_para[i];
1548 if (!(para->cut_mask & cut))
1549 continue;
1550 if (para->offset == 0xffff)
1551 break;
1552 offset = para->offset;
1553 value = para->value;
1554 if (para->ip_sel == RTW_IP_SEL_PHY)
1555 rtw_mdio_write(rtwdev, offset, value, true);
1556 else
1557 rtw_dbi_write8(rtwdev, offset, value);
1558 }
1559
1560 for (i = 0; i < chip->intf_table->n_gen2_para; i++) {
1561 para = &chip->intf_table->gen2_para[i];
1562 if (!(para->cut_mask & cut))
1563 continue;
1564 if (para->offset == 0xffff)
1565 break;
1566 offset = para->offset;
1567 value = para->value;
1568 if (para->ip_sel == RTW_IP_SEL_PHY)
1569 rtw_mdio_write(rtwdev, offset, value, false);
1570 else
1571 rtw_dbi_write8(rtwdev, offset, value);
1572 }
1573
1574 rtw_pci_link_cfg(rtwdev);
1575
1576 /* Disable 8821ce completion timeout by default */
1577 if (chip->id == RTW_CHIP_TYPE_8821C) {
1578 ret = pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
1579 PCI_EXP_DEVCTL2_COMP_TMOUT_DIS);
1580 if (ret)
1581 rtw_err(rtwdev, "failed to set PCI cap, ret = %d\n",
1582 ret);
1583 }
1584
1585 if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 5)
1586 rtw_write32_mask(rtwdev, REG_ANAPARSW_MAC_0, BIT_CF_L_V2, 0x1);
1587 }
1588
1589 static int __maybe_unused rtw_pci_suspend(struct device *dev)
1590 {
1591 struct ieee80211_hw *hw = dev_get_drvdata(dev);
1592 struct rtw_dev *rtwdev = hw->priv;
1593 const struct rtw_chip_info *chip = rtwdev->chip;
1594 struct rtw_efuse *efuse = &rtwdev->efuse;
1595
1596 if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 6)
1597 rtw_pci_clkreq_pad_low(rtwdev, true);
1598 return 0;
1599 }
1600
1601 static int __maybe_unused rtw_pci_resume(struct device *dev)
1602 {
1603 struct ieee80211_hw *hw = dev_get_drvdata(dev);
1604 struct rtw_dev *rtwdev = hw->priv;
1605 const struct rtw_chip_info *chip = rtwdev->chip;
1606 struct rtw_efuse *efuse = &rtwdev->efuse;
1607
1608 if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 6)
1609 rtw_pci_clkreq_pad_low(rtwdev, false);
1610 return 0;
1611 }
1612
1613 SIMPLE_DEV_PM_OPS(rtw_pm_ops, rtw_pci_suspend, rtw_pci_resume);
1614 EXPORT_SYMBOL(rtw_pm_ops);
1615
1616 static int rtw_pci_claim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1617 {
1618 int ret;
1619
1620 ret = pci_enable_device(pdev);
1621 if (ret) {
1622 rtw_err(rtwdev, "failed to enable pci device\n");
1623 return ret;
1624 }
1625
1626 pci_set_master(pdev);
1627 pci_set_drvdata(pdev, rtwdev->hw);
1628 SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);
1629
1630 return 0;
1631 }
1632
1633 static void rtw_pci_declaim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1634 {
1635 pci_disable_device(pdev);
1636 }
1637
1638 static int rtw_pci_setup_resource(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1639 {
1640 struct rtw_pci *rtwpci;
1641 int ret;
1642
1643 rtwpci = (struct rtw_pci *)rtwdev->priv;
1644 rtwpci->pdev = pdev;
1645
1646 /* after this the driver can access hw registers */
1647 ret = rtw_pci_io_mapping(rtwdev, pdev);
1648 if (ret) {
1649 rtw_err(rtwdev, "failed to request pci io region\n");
1650 goto err_out;
1651 }
1652
1653 ret = rtw_pci_init(rtwdev);
1654 if (ret) {
1655 rtw_err(rtwdev, "failed to allocate pci resources\n");
1656 goto err_io_unmap;
1657 }
1658
1659 return 0;
1660
1661 err_io_unmap:
1662 rtw_pci_io_unmapping(rtwdev, pdev);
1663
1664 err_out:
1665 return ret;
1666 }
1667
1668 static void rtw_pci_destroy(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1669 {
1670 rtw_pci_deinit(rtwdev);
1671 rtw_pci_io_unmapping(rtwdev, pdev);
1672 }
1673
1674 static const struct rtw_hci_ops rtw_pci_ops = {
1675 .tx_write = rtw_pci_tx_write,
1676 .tx_kick_off = rtw_pci_tx_kick_off,
1677 .flush_queues = rtw_pci_flush_queues,
1678 .setup = rtw_pci_setup,
1679 .start = rtw_pci_start,
1680 .stop = rtw_pci_stop,
1681 .deep_ps = rtw_pci_deep_ps,
1682 .link_ps = rtw_pci_link_ps,
1683 .interface_cfg = rtw_pci_interface_cfg,
1684 .dynamic_rx_agg = NULL,
1685 .write_firmware_page = rtw_write_firmware_page,
1686
1687 .read8 = rtw_pci_read8,
1688 .read16 = rtw_pci_read16,
1689 .read32 = rtw_pci_read32,
1690 .write8 = rtw_pci_write8,
1691 .write16 = rtw_pci_write16,
1692 .write32 = rtw_pci_write32,
1693 .write_data_rsvd_page = rtw_pci_write_data_rsvd_page,
1694 .write_data_h2c = rtw_pci_write_data_h2c,
1695 };
1696
1697 static int rtw_pci_request_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1698 {
1699 unsigned int flags = PCI_IRQ_INTX;
1700 int ret;
1701
1702 if (!rtw_disable_msi)
1703 flags |= PCI_IRQ_MSI;
1704
1705 ret = pci_alloc_irq_vectors(pdev, 1, 1, flags);
1706 if (ret < 0) {
1707 rtw_err(rtwdev, "failed to alloc PCI irq vectors\n");
1708 return ret;
1709 }
1710
1711 ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq,
1712 rtw_pci_interrupt_handler,
1713 rtw_pci_interrupt_threadfn,
1714 IRQF_SHARED, KBUILD_MODNAME, rtwdev);
1715 if (ret) {
1716 rtw_err(rtwdev, "failed to request irq %d\n", ret);
1717 pci_free_irq_vectors(pdev);
1718 }
1719
1720 return ret;
1721 }
1722
1723 static void rtw_pci_free_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1724 {
1725 devm_free_irq(rtwdev->dev, pdev->irq, rtwdev);
1726 pci_free_irq_vectors(pdev);
1727 }
1728
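/* NAPI poll: drain the MPDU RX ring in budget-sized chunks.  If the
 * budget is not exhausted, the poll completes, RX interrupts are
 * re-enabled, and the ring is checked once more so frames that arrived
 * between the last pass and napi_complete_done() are not stranded.
 */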
1729 static int rtw_pci_napi_poll(struct napi_struct *napi, int budget)
1730 {
1731 struct rtw_pci *rtwpci = container_of(napi, struct rtw_pci, napi);
1732 struct rtw_dev *rtwdev = container_of((void *)rtwpci, struct rtw_dev,
1733 priv);
1734 int work_done = 0;
1735
1736 if (rtwpci->rx_no_aspm)
1737 rtw_pci_link_ps(rtwdev, false);
1738
1739 while (work_done < budget) {
1740 u32 work_done_once;
1741
1742 work_done_once = rtw_pci_rx_napi(rtwdev, rtwpci, RTW_RX_QUEUE_MPDU,
1743 budget - work_done);
1744 if (work_done_once == 0)
1745 break;
1746 work_done += work_done_once;
1747 }
1748 if (work_done < budget) {
1749 napi_complete_done(napi, work_done);
1750 spin_lock_bh(&rtwpci->irq_lock);
1751 if (rtwpci->running)
1752 rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
1753 spin_unlock_bh(&rtwpci->irq_lock);
1754 /* If an ISR happens during polling, before napi_complete, and no
1755 * further data is received, data on the dma_ring will not be
1756 * processed immediately. Check whether the dma ring is empty and
1757 * perform napi_schedule accordingly.
1758 */
1759 if (rtw_pci_get_hw_rx_ring_nr(rtwdev, rtwpci))
1760 napi_schedule(napi);
1761 }
1762 if (rtwpci->rx_no_aspm)
1763 rtw_pci_link_ps(rtwdev, true);
1764
1765 return work_done;
1766 }
1767
1768 static int rtw_pci_napi_init(struct rtw_dev *rtwdev)
1769 {
1770 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1771
1772 rtwpci->netdev = alloc_netdev_dummy(0);
1773 if (!rtwpci->netdev)
1774 return -ENOMEM;
1775
1776 netif_napi_add(rtwpci->netdev, &rtwpci->napi, rtw_pci_napi_poll);
1777 return 0;
1778 }
1779
1780 static void rtw_pci_napi_deinit(struct rtw_dev *rtwdev)
1781 {
1782 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1783
1784 rtw_pci_napi_stop(rtwdev);
1785 netif_napi_del(&rtwpci->napi);
1786 free_netdev(rtwpci->netdev);
1787 }
1788
1789 static pci_ers_result_t rtw_pci_io_err_detected(struct pci_dev *pdev,
1790 pci_channel_state_t state)
1791 {
1792 struct net_device *netdev = pci_get_drvdata(pdev);
1793
1794 netif_device_detach(netdev);
1795
1796 return PCI_ERS_RESULT_NEED_RESET;
1797 }
1798
1799 static pci_ers_result_t rtw_pci_io_slot_reset(struct pci_dev *pdev)
1800 {
1801 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
1802 struct rtw_dev *rtwdev = hw->priv;
1803
1804 rtw_fw_recovery(rtwdev);
1805
1806 return PCI_ERS_RESULT_RECOVERED;
1807 }
1808
1809 static void rtw_pci_io_resume(struct pci_dev *pdev)
1810 {
1811 struct net_device *netdev = pci_get_drvdata(pdev);
1812
1813 /* ack any pending wake events, disable PME */
1814 pci_enable_wake(pdev, PCI_D0, 0);
1815
1816 netif_device_attach(netdev);
1817 }
1818
1819 const struct pci_error_handlers rtw_pci_err_handler = {
1820 .error_detected = rtw_pci_io_err_detected,
1821 .slot_reset = rtw_pci_io_slot_reset,
1822 .resume = rtw_pci_io_resume,
1823 };
1824 EXPORT_SYMBOL(rtw_pci_err_handler);
1825
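/* Probe sequence: allocate ieee80211_hw with the driver's private data,
 * initialize the core, claim and map the PCI device, set up DMA rings
 * and NAPI, read the chip information, apply the PCIe PHY fixups, then
 * register with mac80211 and request the (MSI or INTx) interrupt.
 */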
1826 int rtw_pci_probe(struct pci_dev *pdev,
1827 const struct pci_device_id *id)
1828 {
1829 struct pci_dev *bridge = pci_upstream_bridge(pdev);
1830 struct ieee80211_hw *hw;
1831 struct rtw_dev *rtwdev;
1832 struct rtw_pci *rtwpci;
1833 int drv_data_size;
1834 int ret;
1835
1836 drv_data_size = sizeof(struct rtw_dev) + sizeof(struct rtw_pci);
1837 hw = ieee80211_alloc_hw(drv_data_size, &rtw_ops);
1838 if (!hw) {
1839 dev_err(&pdev->dev, "failed to allocate hw\n");
1840 return -ENOMEM;
1841 }
1842
1843 rtwdev = hw->priv;
1844 rtwdev->hw = hw;
1845 rtwdev->dev = &pdev->dev;
1846 rtwdev->chip = (struct rtw_chip_info *)id->driver_data;
1847 rtwdev->hci.ops = &rtw_pci_ops;
1848 rtwdev->hci.type = RTW_HCI_TYPE_PCIE;
1849
1850 rtwpci = (struct rtw_pci *)rtwdev->priv;
1851 atomic_set(&rtwpci->link_usage, 1);
1852
1853 ret = rtw_core_init(rtwdev);
1854 if (ret)
1855 goto err_release_hw;
1856
1857 rtw_dbg(rtwdev, RTW_DBG_PCI,
1858 "rtw88 pci probe: vendor=0x%4.04X device=0x%4.04X rev=%d\n",
1859 pdev->vendor, pdev->device, pdev->revision);
1860
1861 ret = rtw_pci_claim(rtwdev, pdev);
1862 if (ret) {
1863 rtw_err(rtwdev, "failed to claim pci device\n");
1864 goto err_deinit_core;
1865 }
1866
1867 ret = rtw_pci_setup_resource(rtwdev, pdev);
1868 if (ret) {
1869 rtw_err(rtwdev, "failed to setup pci resources\n");
1870 goto err_pci_declaim;
1871 }
1872
1873 ret = rtw_pci_napi_init(rtwdev);
1874 if (ret) {
1875 rtw_err(rtwdev, "failed to setup NAPI\n");
1876 goto err_pci_declaim;
1877 }
1878
1879 ret = rtw_chip_info_setup(rtwdev);
1880 if (ret) {
1881 rtw_err(rtwdev, "failed to setup chip information\n");
1882 goto err_destroy_pci;
1883 }
1884
1885 /* Disable PCIe ASPM L1 while doing NAPI poll for 8821CE */
1886 if (rtwdev->chip->id == RTW_CHIP_TYPE_8821C && bridge->vendor == PCI_VENDOR_ID_INTEL)
1887 rtwpci->rx_no_aspm = true;
1888
1889 rtw_pci_phy_cfg(rtwdev);
1890
1891 ret = rtw_register_hw(rtwdev, hw);
1892 if (ret) {
1893 rtw_err(rtwdev, "failed to register hw\n");
1894 goto err_destroy_pci;
1895 }
1896
1897 ret = rtw_pci_request_irq(rtwdev, pdev);
1898 if (ret) {
1899 ieee80211_unregister_hw(hw);
1900 goto err_destroy_pci;
1901 }
1902
1903 return 0;
1904
1905 err_destroy_pci:
1906 rtw_pci_napi_deinit(rtwdev);
1907 rtw_pci_destroy(rtwdev, pdev);
1908
1909 err_pci_declaim:
1910 rtw_pci_declaim(rtwdev, pdev);
1911
1912 err_deinit_core:
1913 rtw_core_deinit(rtwdev);
1914
1915 err_release_hw:
1916 ieee80211_free_hw(hw);
1917
1918 return ret;
1919 }
1920 EXPORT_SYMBOL(rtw_pci_probe);
1921
1922 void rtw_pci_remove(struct pci_dev *pdev)
1923 {
1924 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
1925 struct rtw_dev *rtwdev;
1926 struct rtw_pci *rtwpci;
1927
1928 if (!hw)
1929 return;
1930
1931 rtwdev = hw->priv;
1932 rtwpci = (struct rtw_pci *)rtwdev->priv;
1933
1934 rtw_unregister_hw(rtwdev, hw);
1935 rtw_pci_disable_interrupt(rtwdev, rtwpci);
1936 rtw_pci_napi_deinit(rtwdev);
1937 rtw_pci_destroy(rtwdev, pdev);
1938 rtw_pci_declaim(rtwdev, pdev);
1939 rtw_pci_free_irq(rtwdev, pdev);
1940 rtw_core_deinit(rtwdev);
1941 ieee80211_free_hw(hw);
1942 }
1943 EXPORT_SYMBOL(rtw_pci_remove);
1944
1945 void rtw_pci_shutdown(struct pci_dev *pdev)
1946 {
1947 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
1948 struct rtw_dev *rtwdev;
1949 const struct rtw_chip_info *chip;
1950
1951 if (!hw)
1952 return;
1953
1954 rtwdev = hw->priv;
1955 chip = rtwdev->chip;
1956
1957 if (chip->ops->shutdown)
1958 chip->ops->shutdown(rtwdev);
1959
1960 pci_set_power_state(pdev, PCI_D3hot);
1961 }
1962 EXPORT_SYMBOL(rtw_pci_shutdown);
1963
1964 MODULE_AUTHOR("Realtek Corporation");
1965 MODULE_DESCRIPTION("Realtek PCI 802.11ac wireless driver");
1966 MODULE_LICENSE("Dual BSD/GPL");
1967 #if defined(__FreeBSD__)
1968 MODULE_VERSION(rtw_pci, 1);
1969 MODULE_DEPEND(rtw_pci, linuxkpi, 1, 1, 1);
1970 MODULE_DEPEND(rtw_pci, linuxkpi_wlan, 1, 1, 1);
1971 #ifdef CONFIG_RTW88_DEBUGFS
1972 MODULE_DEPEND(rtw_pci, lindebugfs, 1, 1, 1);
1973 #endif
1974 #endif
1975