xref: /linux/drivers/net/wireless/realtek/rtw89/pci.c (revision 2bd87951de659df3381ce083342aaf5b1ea24689)
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright(c) 2020  Realtek Corporation
3  */
4 
5 #include <linux/pci.h>
6 
7 #include "mac.h"
8 #include "pci.h"
9 #include "reg.h"
10 #include "ser.h"
11 
12 static bool rtw89_pci_disable_clkreq;
13 static bool rtw89_pci_disable_aspm_l1;
14 static bool rtw89_pci_disable_l1ss;
15 module_param_named(disable_clkreq, rtw89_pci_disable_clkreq, bool, 0644);
16 module_param_named(disable_aspm_l1, rtw89_pci_disable_aspm_l1, bool, 0644);
17 module_param_named(disable_aspm_l1ss, rtw89_pci_disable_l1ss, bool, 0644);
18 MODULE_PARM_DESC(disable_clkreq, "Set Y to disable PCI clkreq support");
19 MODULE_PARM_DESC(disable_aspm_l1, "Set Y to disable PCI ASPM L1 support");
20 MODULE_PARM_DESC(disable_aspm_l1ss, "Set Y to disable PCI L1SS support");
21 
22 static int rtw89_pci_get_phy_offset_by_link_speed(struct rtw89_dev *rtwdev,
23 						  u32 *phy_offset)
24 {
25 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
26 	struct pci_dev *pdev = rtwpci->pdev;
27 	u32 val;
28 	int ret;
29 
30 	ret = pci_read_config_dword(pdev, RTW89_PCIE_L1_STS_V1, &val);
31 	if (ret)
32 		return ret;
33 
34 	val = u32_get_bits(val, RTW89_BCFG_LINK_SPEED_MASK);
35 	if (val == RTW89_PCIE_GEN1_SPEED) {
36 		*phy_offset = R_RAC_DIRECT_OFFSET_G1;
37 	} else if (val == RTW89_PCIE_GEN2_SPEED) {
38 		*phy_offset = R_RAC_DIRECT_OFFSET_G2;
39 	} else {
40 		rtw89_warn(rtwdev, "Unknown PCI link speed %d\n", val);
41 		return -EFAULT;
42 	}
43 
44 	return 0;
45 }
46 
47 static int rtw89_pci_rst_bdram_ax(struct rtw89_dev *rtwdev)
48 {
49 	u32 val;
50 	int ret;
51 
52 	rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RST_BDRAM);
53 
54 	ret = read_poll_timeout_atomic(rtw89_read32, val, !(val & B_AX_RST_BDRAM),
55 				       1, RTW89_PCI_POLL_BDRAM_RST_CNT, false,
56 				       rtwdev, R_AX_PCIE_INIT_CFG1);
57 
58 	return ret;
59 }
60 
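/* Derive, from the HW index register value, how many BDs are ready for the
 * host to process: TX BDs the HW has consumed, or RX BDs the HW has filled.
 * Handles ring wrap-around and records the current HW index in bd_ring->rp.
 */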
61 static u32 rtw89_pci_dma_recalc(struct rtw89_dev *rtwdev,
62 				struct rtw89_pci_dma_ring *bd_ring,
63 				u32 cur_idx, bool tx)
64 {
65 	const struct rtw89_pci_info *info = rtwdev->pci_info;
66 	u32 cnt, cur_rp, wp, rp, len;
67 
68 	rp = bd_ring->rp;
69 	wp = bd_ring->wp;
70 	len = bd_ring->len;
71 
72 	cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
73 	if (tx) {
74 		cnt = cur_rp >= rp ? cur_rp - rp : len - (rp - cur_rp);
75 	} else {
76 		if (info->rx_ring_eq_is_full)
77 			wp += 1;
78 
79 		cnt = cur_rp >= wp ? cur_rp - wp : len - (wp - cur_rp);
80 	}
81 
82 	bd_ring->rp = cur_rp;
83 
84 	return cnt;
85 }
86 
87 static u32 rtw89_pci_txbd_recalc(struct rtw89_dev *rtwdev,
88 				 struct rtw89_pci_tx_ring *tx_ring)
89 {
90 	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
91 	u32 addr_idx = bd_ring->addr.idx;
92 	u32 cnt, idx;
93 
94 	idx = rtw89_read32(rtwdev, addr_idx);
95 	cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, true);
96 
97 	return cnt;
98 }
99 
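/* Move completed H2C (fwcmd) skbs from h2c_queue to h2c_release_queue, then
 * unmap and free them. Unless release_all is set, the most recent
 * RTW89_PCI_MULTITAG entries are kept on the release queue for later.
 */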
100 static void rtw89_pci_release_fwcmd(struct rtw89_dev *rtwdev,
101 				    struct rtw89_pci *rtwpci,
102 				    u32 cnt, bool release_all)
103 {
104 	struct rtw89_pci_tx_data *tx_data;
105 	struct sk_buff *skb;
106 	u32 qlen;
107 
108 	while (cnt--) {
109 		skb = skb_dequeue(&rtwpci->h2c_queue);
110 		if (!skb) {
111 			rtw89_err(rtwdev, "failed to pre-release fwcmd\n");
112 			return;
113 		}
114 		skb_queue_tail(&rtwpci->h2c_release_queue, skb);
115 	}
116 
117 	qlen = skb_queue_len(&rtwpci->h2c_release_queue);
118 	if (!release_all)
119 		qlen = qlen > RTW89_PCI_MULTITAG ? qlen - RTW89_PCI_MULTITAG : 0;
120 
121 	while (qlen--) {
122 		skb = skb_dequeue(&rtwpci->h2c_release_queue);
123 		if (!skb) {
124 			rtw89_err(rtwdev, "failed to release fwcmd\n");
125 			return;
126 		}
127 		tx_data = RTW89_PCI_TX_SKB_CB(skb);
128 		dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
129 				 DMA_TO_DEVICE);
130 		dev_kfree_skb_any(skb);
131 	}
132 }
133 
134 static void rtw89_pci_reclaim_tx_fwcmd(struct rtw89_dev *rtwdev,
135 				       struct rtw89_pci *rtwpci)
136 {
137 	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];
138 	u32 cnt;
139 
140 	cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring);
141 	if (!cnt)
142 		return;
143 	rtw89_pci_release_fwcmd(rtwdev, rtwpci, cnt, false);
144 }
145 
146 static u32 rtw89_pci_rxbd_recalc(struct rtw89_dev *rtwdev,
147 				 struct rtw89_pci_rx_ring *rx_ring)
148 {
149 	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
150 	u32 addr_idx = bd_ring->addr.idx;
151 	u32 cnt, idx;
152 
153 	idx = rtw89_read32(rtwdev, addr_idx);
154 	cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, false);
155 
156 	return cnt;
157 }
158 
159 static void rtw89_pci_sync_skb_for_cpu(struct rtw89_dev *rtwdev,
160 				       struct sk_buff *skb)
161 {
162 	struct rtw89_pci_rx_info *rx_info;
163 	dma_addr_t dma;
164 
165 	rx_info = RTW89_PCI_RX_SKB_CB(skb);
166 	dma = rx_info->dma;
167 	dma_sync_single_for_cpu(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE,
168 				DMA_FROM_DEVICE);
169 }
170 
171 static void rtw89_pci_sync_skb_for_device(struct rtw89_dev *rtwdev,
172 					  struct sk_buff *skb)
173 {
174 	struct rtw89_pci_rx_info *rx_info;
175 	dma_addr_t dma;
176 
177 	rx_info = RTW89_PCI_RX_SKB_CB(skb);
178 	dma = rx_info->dma;
179 	dma_sync_single_for_device(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE,
180 				   DMA_FROM_DEVICE);
181 }
182 
183 static void rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev,
184 				       struct sk_buff *skb)
185 {
186 	struct rtw89_pci_rxbd_info *rxbd_info;
187 	struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
188 
189 	rxbd_info = (struct rtw89_pci_rxbd_info *)skb->data;
190 	rx_info->fs = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_FS);
191 	rx_info->ls = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_LS);
192 	rx_info->len = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_WRITE_SIZE);
193 	rx_info->tag = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_TAG);
194 }
195 
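/* When the chip requires RX tag checking (info->check_rx_tag), each RX BD
 * carries an incrementing tag in the valid range 1 ~ 0x1FFF. A tag that does
 * not match the expected target value is reported as -EAGAIN so the caller
 * can re-sync the buffer and retry.
 */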
196 static int rtw89_pci_validate_rx_tag(struct rtw89_dev *rtwdev,
197 				     struct rtw89_pci_rx_ring *rx_ring,
198 				     struct sk_buff *skb)
199 {
200 	struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
201 	const struct rtw89_pci_info *info = rtwdev->pci_info;
202 	u32 target_rx_tag;
203 
204 	if (!info->check_rx_tag)
205 		return 0;
206 
207 	/* valid range is 1 ~ 0x1FFF */
208 	if (rx_ring->target_rx_tag == 0)
209 		target_rx_tag = 1;
210 	else
211 		target_rx_tag = rx_ring->target_rx_tag;
212 
213 	if (rx_info->tag != target_rx_tag) {
214 		rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "mismatch RX tag 0x%x 0x%x\n",
215 			    rx_info->tag, target_rx_tag);
216 		return -EAGAIN;
217 	}
218 
219 	return 0;
220 }
221 
222 static
223 int rtw89_pci_sync_skb_for_device_and_validate_rx_info(struct rtw89_dev *rtwdev,
224 						       struct rtw89_pci_rx_ring *rx_ring,
225 						       struct sk_buff *skb)
226 {
227 	struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
228 	int rx_tag_retry = 100;
229 	int ret;
230 
231 	do {
232 		rtw89_pci_sync_skb_for_cpu(rtwdev, skb);
233 		rtw89_pci_rxbd_info_update(rtwdev, skb);
234 
235 		ret = rtw89_pci_validate_rx_tag(rtwdev, rx_ring, skb);
236 		if (ret != -EAGAIN)
237 			break;
238 	} while (rx_tag_retry--);
239 
240 	/* update target rx_tag for next RX */
241 	rx_ring->target_rx_tag = rx_info->tag + 1;
242 
243 	return ret;
244 }
245 
246 static void rtw89_pci_ctrl_txdma_ch_ax(struct rtw89_dev *rtwdev, bool enable)
247 {
248 	const struct rtw89_pci_info *info = rtwdev->pci_info;
249 	const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1;
250 	const struct rtw89_reg_def *dma_stop2 = &info->dma_stop2;
251 
252 	if (enable) {
253 		rtw89_write32_clr(rtwdev, dma_stop1->addr, dma_stop1->mask);
254 		if (dma_stop2->addr)
255 			rtw89_write32_clr(rtwdev, dma_stop2->addr, dma_stop2->mask);
256 	} else {
257 		rtw89_write32_set(rtwdev, dma_stop1->addr, dma_stop1->mask);
258 		if (dma_stop2->addr)
259 			rtw89_write32_set(rtwdev, dma_stop2->addr, dma_stop2->mask);
260 	}
261 }
262 
263 static void rtw89_pci_ctrl_txdma_fw_ch_ax(struct rtw89_dev *rtwdev, bool enable)
264 {
265 	const struct rtw89_pci_info *info = rtwdev->pci_info;
266 	const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1;
267 
268 	if (enable)
269 		rtw89_write32_clr(rtwdev, dma_stop1->addr, B_AX_STOP_CH12);
270 	else
271 		rtw89_write32_set(rtwdev, dma_stop1->addr, B_AX_STOP_CH12);
272 }
273 
274 static bool
275 rtw89_skb_put_rx_data(struct rtw89_dev *rtwdev, bool fs, bool ls,
276 		      struct sk_buff *new,
277 		      const struct sk_buff *skb, u32 offset,
278 		      const struct rtw89_pci_rx_info *rx_info,
279 		      const struct rtw89_rx_desc_info *desc_info)
280 {
281 	u32 copy_len = rx_info->len - offset;
282 
283 	if (unlikely(skb_tailroom(new) < copy_len)) {
284 		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
285 			    "invalid rx data length bd_len=%d desc_len=%d offset=%d (fs=%d ls=%d)\n",
286 			    rx_info->len, desc_info->pkt_size, offset, fs, ls);
287 		rtw89_hex_dump(rtwdev, RTW89_DBG_TXRX, "rx_data: ",
288 			       skb->data, rx_info->len);
289 		/* length of a single segment skb is desc_info->pkt_size */
290 		if (fs && ls) {
291 			copy_len = desc_info->pkt_size;
292 		} else {
293 			rtw89_info(rtwdev, "drop rx data due to invalid length\n");
294 			return false;
295 		}
296 	}
297 
298 	skb_put_data(new, skb->data + offset, copy_len);
299 
300 	return true;
301 }
302 
303 static u32 rtw89_pci_get_rx_skb_idx(struct rtw89_dev *rtwdev,
304 				    struct rtw89_pci_dma_ring *bd_ring)
305 {
306 	const struct rtw89_pci_info *info = rtwdev->pci_info;
307 	u32 wp = bd_ring->wp;
308 
309 	if (!info->rx_ring_eq_is_full)
310 		return wp;
311 
312 	if (++wp >= bd_ring->len)
313 		wp = 0;
314 
315 	return wp;
316 }
317 
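/* Reassemble one RX frame from ring buffers: an FS (first segment) BD carries
 * the RX descriptor and triggers allocation of the destination skb, following
 * segments are appended, and on LS (last segment) the frame is handed to
 * rtw89_core_rx(). Returns the number of RXBDs consumed.
 */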
318 static u32 rtw89_pci_rxbd_deliver_skbs(struct rtw89_dev *rtwdev,
319 				       struct rtw89_pci_rx_ring *rx_ring)
320 {
321 	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
322 	struct rtw89_pci_rx_info *rx_info;
323 	struct rtw89_rx_desc_info *desc_info = &rx_ring->diliver_desc;
324 	struct sk_buff *new = rx_ring->diliver_skb;
325 	struct sk_buff *skb;
326 	u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
327 	u32 skb_idx;
328 	u32 offset;
329 	u32 cnt = 1;
330 	bool fs, ls;
331 	int ret;
332 
333 	skb_idx = rtw89_pci_get_rx_skb_idx(rtwdev, bd_ring);
334 	skb = rx_ring->buf[skb_idx];
335 
336 	ret = rtw89_pci_sync_skb_for_device_and_validate_rx_info(rtwdev, rx_ring, skb);
337 	if (ret) {
338 		rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
339 			  bd_ring->wp, ret);
340 		goto err_sync_device;
341 	}
342 
343 	rx_info = RTW89_PCI_RX_SKB_CB(skb);
344 	fs = rx_info->fs;
345 	ls = rx_info->ls;
346 
347 	if (fs) {
348 		if (new) {
349 			rtw89_debug(rtwdev, RTW89_DBG_UNEXP,
350 				    "skb should not be ready before first segment start\n");
351 			goto err_sync_device;
352 		}
353 		if (desc_info->ready) {
354 			rtw89_warn(rtwdev, "desc info should not be ready before first segment start\n");
355 			goto err_sync_device;
356 		}
357 
358 		rtw89_chip_query_rxdesc(rtwdev, desc_info, skb->data, rxinfo_size);
359 
360 		new = rtw89_alloc_skb_for_rx(rtwdev, desc_info->pkt_size);
361 		if (!new)
362 			goto err_sync_device;
363 
364 		rx_ring->diliver_skb = new;
365 
366 		/* first segment has RX desc */
367 		offset = desc_info->offset + desc_info->rxd_len;
368 	} else {
369 		offset = sizeof(struct rtw89_pci_rxbd_info);
370 		if (!new) {
371 			rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "no last skb\n");
372 			goto err_sync_device;
373 		}
374 	}
375 	if (!rtw89_skb_put_rx_data(rtwdev, fs, ls, new, skb, offset, rx_info, desc_info))
376 		goto err_sync_device;
377 	rtw89_pci_sync_skb_for_device(rtwdev, skb);
378 	rtw89_pci_rxbd_increase(rx_ring, 1);
379 
380 	if (!desc_info->ready) {
381 		rtw89_warn(rtwdev, "no rx desc information\n");
382 		goto err_free_resource;
383 	}
384 	if (ls) {
385 		rtw89_core_rx(rtwdev, desc_info, new);
386 		rx_ring->diliver_skb = NULL;
387 		desc_info->ready = false;
388 	}
389 
390 	return cnt;
391 
392 err_sync_device:
393 	rtw89_pci_sync_skb_for_device(rtwdev, skb);
394 	rtw89_pci_rxbd_increase(rx_ring, 1);
395 err_free_resource:
396 	if (new)
397 		dev_kfree_skb_any(new);
398 	rx_ring->diliver_skb = NULL;
399 	desc_info->ready = false;
400 
401 	return cnt;
402 }
403 
404 static void rtw89_pci_rxbd_deliver(struct rtw89_dev *rtwdev,
405 				   struct rtw89_pci_rx_ring *rx_ring,
406 				   u32 cnt)
407 {
408 	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
409 	u32 rx_cnt;
410 
411 	while (cnt && rtwdev->napi_budget_countdown > 0) {
412 		rx_cnt = rtw89_pci_rxbd_deliver_skbs(rtwdev, rx_ring);
413 		if (!rx_cnt) {
414 			rtw89_err(rtwdev, "failed to deliver RXBD skb\n");
415 
416 			/* skip the remaining RXBD bufs */
417 			rtw89_pci_rxbd_increase(rx_ring, cnt);
418 			break;
419 		}
420 
421 		cnt -= rx_cnt;
422 	}
423 
424 	rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
425 }
426 
427 static int rtw89_pci_poll_rxq_dma(struct rtw89_dev *rtwdev,
428 				  struct rtw89_pci *rtwpci, int budget)
429 {
430 	struct rtw89_pci_rx_ring *rx_ring;
431 	int countdown = rtwdev->napi_budget_countdown;
432 	u32 cnt;
433 
434 	rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RXQ];
435 
436 	cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
437 	if (!cnt)
438 		return 0;
439 
440 	cnt = min_t(u32, budget, cnt);
441 
442 	rtw89_pci_rxbd_deliver(rtwdev, rx_ring, cnt);
443 
444 	/* When flushing pending SKBs, the countdown may drop to zero or below. */
445 	if (rtwdev->napi_budget_countdown <= 0)
446 		return budget;
447 
448 	return budget - countdown;
449 }
450 
451 static void rtw89_pci_tx_status(struct rtw89_dev *rtwdev,
452 				struct rtw89_pci_tx_ring *tx_ring,
453 				struct sk_buff *skb, u8 tx_status)
454 {
455 	struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
456 	struct ieee80211_tx_info *info;
457 
458 	rtw89_core_tx_wait_complete(rtwdev, skb_data, tx_status == RTW89_TX_DONE);
459 
460 	info = IEEE80211_SKB_CB(skb);
461 	ieee80211_tx_info_clear_status(info);
462 
463 	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
464 		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
465 	if (tx_status == RTW89_TX_DONE) {
466 		info->flags |= IEEE80211_TX_STAT_ACK;
467 		tx_ring->tx_acked++;
468 	} else {
469 		if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)
470 			rtw89_debug(rtwdev, RTW89_DBG_FW,
471 				    "failed to TX of status %x\n", tx_status);
472 		switch (tx_status) {
473 		case RTW89_TX_RETRY_LIMIT:
474 			tx_ring->tx_retry_lmt++;
475 			break;
476 		case RTW89_TX_LIFE_TIME:
477 			tx_ring->tx_life_time++;
478 			break;
479 		case RTW89_TX_MACID_DROP:
480 			tx_ring->tx_mac_id_drop++;
481 			break;
482 		default:
483 			rtw89_warn(rtwdev, "invalid TX status %x\n", tx_status);
484 			break;
485 		}
486 	}
487 
488 	ieee80211_tx_status_ni(rtwdev->hw, skb);
489 }
490 
491 static void rtw89_pci_reclaim_txbd(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring)
492 {
493 	struct rtw89_pci_tx_wd *txwd;
494 	u32 cnt;
495 
496 	cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring);
497 	while (cnt--) {
498 		txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list);
499 		if (!txwd) {
500 			rtw89_warn(rtwdev, "No busy txwd pages available\n");
501 			break;
502 		}
503 
504 		list_del_init(&txwd->list);
505 
506 		/* this skb has been freed by RPP */
507 		if (skb_queue_len(&txwd->queue) == 0)
508 			rtw89_pci_enqueue_txwd(tx_ring, txwd);
509 	}
510 }
511 
512 static void rtw89_pci_release_busy_txwd(struct rtw89_dev *rtwdev,
513 					struct rtw89_pci_tx_ring *tx_ring)
514 {
515 	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
516 	struct rtw89_pci_tx_wd *txwd;
517 	int i;
518 
519 	for (i = 0; i < wd_ring->page_num; i++) {
520 		txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list);
521 		if (!txwd)
522 			break;
523 
524 		list_del_init(&txwd->list);
525 	}
526 }
527 
528 static void rtw89_pci_release_txwd_skb(struct rtw89_dev *rtwdev,
529 				       struct rtw89_pci_tx_ring *tx_ring,
530 				       struct rtw89_pci_tx_wd *txwd, u16 seq,
531 				       u8 tx_status)
532 {
533 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
534 	struct rtw89_pci_tx_data *tx_data;
535 	struct sk_buff *skb, *tmp;
536 	u8 txch = tx_ring->txch;
537 
538 	if (!list_empty(&txwd->list)) {
539 		rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
540 		/* In low power mode, the RPP can arrive before the TX BD is updated.
541 		 * In normal mode this should not happen, so give a warning.
542 		 */
543 		if (!rtwpci->low_power && !list_empty(&txwd->list))
544 			rtw89_warn(rtwdev, "queue %d txwd %d is not idle\n",
545 				   txch, seq);
546 	}
547 
548 	skb_queue_walk_safe(&txwd->queue, skb, tmp) {
549 		skb_unlink(skb, &txwd->queue);
550 
551 		tx_data = RTW89_PCI_TX_SKB_CB(skb);
552 		dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
553 				 DMA_TO_DEVICE);
554 
555 		rtw89_pci_tx_status(rtwdev, tx_ring, skb, tx_status);
556 	}
557 
558 	if (list_empty(&txwd->list))
559 		rtw89_pci_enqueue_txwd(tx_ring, txwd);
560 }
561 
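/* Handle one RPP report from the RPQ: look up the TX WD page by the reported
 * sequence number and release its skbs with the reported TX status.
 */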
562 static void rtw89_pci_release_rpp(struct rtw89_dev *rtwdev,
563 				  struct rtw89_pci_rpp_fmt *rpp)
564 {
565 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
566 	struct rtw89_pci_tx_ring *tx_ring;
567 	struct rtw89_pci_tx_wd_ring *wd_ring;
568 	struct rtw89_pci_tx_wd *txwd;
569 	u16 seq;
570 	u8 qsel, tx_status, txch;
571 
572 	seq = le32_get_bits(rpp->dword, RTW89_PCI_RPP_SEQ);
573 	qsel = le32_get_bits(rpp->dword, RTW89_PCI_RPP_QSEL);
574 	tx_status = le32_get_bits(rpp->dword, RTW89_PCI_RPP_TX_STATUS);
575 	txch = rtw89_core_get_ch_dma(rtwdev, qsel);
576 
577 	if (txch == RTW89_TXCH_CH12) {
578 		rtw89_warn(rtwdev, "should no fwcmd release report\n");
579 		return;
580 	}
581 
582 	tx_ring = &rtwpci->tx_rings[txch];
583 	wd_ring = &tx_ring->wd_ring;
584 	txwd = &wd_ring->pages[seq];
585 
586 	rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, seq, tx_status);
587 }
588 
589 static void rtw89_pci_release_pending_txwd_skb(struct rtw89_dev *rtwdev,
590 					       struct rtw89_pci_tx_ring *tx_ring)
591 {
592 	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
593 	struct rtw89_pci_tx_wd *txwd;
594 	int i;
595 
596 	for (i = 0; i < wd_ring->page_num; i++) {
597 		txwd = &wd_ring->pages[i];
598 
599 		if (!list_empty(&txwd->list))
600 			continue;
601 
602 		rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, i, RTW89_TX_MACID_DROP);
603 	}
604 }
605 
606 static u32 rtw89_pci_release_tx_skbs(struct rtw89_dev *rtwdev,
607 				     struct rtw89_pci_rx_ring *rx_ring,
608 				     u32 max_cnt)
609 {
610 	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
611 	struct rtw89_pci_rx_info *rx_info;
612 	struct rtw89_pci_rpp_fmt *rpp;
613 	struct rtw89_rx_desc_info desc_info = {};
614 	struct sk_buff *skb;
615 	u32 cnt = 0;
616 	u32 rpp_size = sizeof(struct rtw89_pci_rpp_fmt);
617 	u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
618 	u32 skb_idx;
619 	u32 offset;
620 	int ret;
621 
622 	skb_idx = rtw89_pci_get_rx_skb_idx(rtwdev, bd_ring);
623 	skb = rx_ring->buf[skb_idx];
624 
625 	ret = rtw89_pci_sync_skb_for_device_and_validate_rx_info(rtwdev, rx_ring, skb);
626 	if (ret) {
627 		rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
628 			  bd_ring->wp, ret);
629 		goto err_sync_device;
630 	}
631 
632 	rx_info = RTW89_PCI_RX_SKB_CB(skb);
633 	if (!rx_info->fs || !rx_info->ls) {
634 		rtw89_err(rtwdev, "cannot process RP frame not set FS/LS\n");
635 		return cnt;
636 	}
637 
638 	rtw89_chip_query_rxdesc(rtwdev, &desc_info, skb->data, rxinfo_size);
639 
640 	/* first segment has RX desc */
641 	offset = desc_info.offset + desc_info.rxd_len;
642 	for (; offset + rpp_size <= rx_info->len; offset += rpp_size) {
643 		rpp = (struct rtw89_pci_rpp_fmt *)(skb->data + offset);
644 		rtw89_pci_release_rpp(rtwdev, rpp);
645 	}
646 
647 	rtw89_pci_sync_skb_for_device(rtwdev, skb);
648 	rtw89_pci_rxbd_increase(rx_ring, 1);
649 	cnt++;
650 
651 	return cnt;
652 
653 err_sync_device:
654 	rtw89_pci_sync_skb_for_device(rtwdev, skb);
655 	return 0;
656 }
657 
658 static void rtw89_pci_release_tx(struct rtw89_dev *rtwdev,
659 				 struct rtw89_pci_rx_ring *rx_ring,
660 				 u32 cnt)
661 {
662 	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
663 	u32 release_cnt;
664 
665 	while (cnt) {
666 		release_cnt = rtw89_pci_release_tx_skbs(rtwdev, rx_ring, cnt);
667 		if (!release_cnt) {
668 			rtw89_err(rtwdev, "failed to release TX skbs\n");
669 
670 			/* skip the remaining RXBD bufs */
671 			rtw89_pci_rxbd_increase(rx_ring, cnt);
672 			break;
673 		}
674 
675 		cnt -= release_cnt;
676 	}
677 
678 	rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
679 }
680 
681 static int rtw89_pci_poll_rpq_dma(struct rtw89_dev *rtwdev,
682 				  struct rtw89_pci *rtwpci, int budget)
683 {
684 	struct rtw89_pci_rx_ring *rx_ring;
685 	u32 cnt;
686 	int work_done;
687 
688 	rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];
689 
690 	spin_lock_bh(&rtwpci->trx_lock);
691 
692 	cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
693 	if (cnt == 0)
694 		goto out_unlock;
695 
696 	rtw89_pci_release_tx(rtwdev, rx_ring, cnt);
697 
698 out_unlock:
699 	spin_unlock_bh(&rtwpci->trx_lock);
700 
701 	/* always release all RPQ */
702 	work_done = min_t(int, cnt, budget);
703 	rtwdev->napi_budget_countdown -= work_done;
704 
705 	return work_done;
706 }
707 
708 static void rtw89_pci_isr_rxd_unavail(struct rtw89_dev *rtwdev,
709 				      struct rtw89_pci *rtwpci)
710 {
711 	struct rtw89_pci_rx_ring *rx_ring;
712 	struct rtw89_pci_dma_ring *bd_ring;
713 	u32 reg_idx;
714 	u16 hw_idx, hw_idx_next, host_idx;
715 	int i;
716 
717 	for (i = 0; i < RTW89_RXCH_NUM; i++) {
718 		rx_ring = &rtwpci->rx_rings[i];
719 		bd_ring = &rx_ring->bd_ring;
720 
721 		reg_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
722 		hw_idx = FIELD_GET(TXBD_HW_IDX_MASK, reg_idx);
723 		host_idx = FIELD_GET(TXBD_HOST_IDX_MASK, reg_idx);
724 		hw_idx_next = (hw_idx + 1) % bd_ring->len;
725 
726 		if (hw_idx_next == host_idx)
727 			rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "%d RXD unavailable\n", i);
728 
729 		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
730 			    "%d RXD unavailable, idx=0x%08x, len=%d\n",
731 			    i, reg_idx, bd_ring->len);
732 	}
733 }
734 
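/* Latch the pending interrupt status, masked by the currently enabled
 * interrupt bits, and write it back to acknowledge it. The _v1/_v2 variants
 * below do the same through their generation-specific indirect registers.
 */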
735 void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev,
736 			       struct rtw89_pci *rtwpci,
737 			       struct rtw89_pci_isrs *isrs)
738 {
739 	isrs->halt_c2h_isrs = rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs;
740 	isrs->isrs[0] = rtw89_read32(rtwdev, R_AX_PCIE_HISR00) & rtwpci->intrs[0];
741 	isrs->isrs[1] = rtw89_read32(rtwdev, R_AX_PCIE_HISR10) & rtwpci->intrs[1];
742 
743 	rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs);
744 	rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isrs->isrs[0]);
745 	rtw89_write32(rtwdev, R_AX_PCIE_HISR10, isrs->isrs[1]);
746 }
747 EXPORT_SYMBOL(rtw89_pci_recognize_intrs);
748 
749 void rtw89_pci_recognize_intrs_v1(struct rtw89_dev *rtwdev,
750 				  struct rtw89_pci *rtwpci,
751 				  struct rtw89_pci_isrs *isrs)
752 {
753 	isrs->ind_isrs = rtw89_read32(rtwdev, R_AX_PCIE_HISR00_V1) & rtwpci->ind_intrs;
754 	isrs->halt_c2h_isrs = isrs->ind_isrs & B_AX_HS0ISR_IND_INT_EN ?
755 			      rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs : 0;
756 	isrs->isrs[0] = isrs->ind_isrs & B_AX_HCI_AXIDMA_INT_EN ?
757 			rtw89_read32(rtwdev, R_AX_HAXI_HISR00) & rtwpci->intrs[0] : 0;
758 	isrs->isrs[1] = isrs->ind_isrs & B_AX_HS1ISR_IND_INT_EN ?
759 			rtw89_read32(rtwdev, R_AX_HISR1) & rtwpci->intrs[1] : 0;
760 
761 	if (isrs->halt_c2h_isrs)
762 		rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs);
763 	if (isrs->isrs[0])
764 		rtw89_write32(rtwdev, R_AX_HAXI_HISR00, isrs->isrs[0]);
765 	if (isrs->isrs[1])
766 		rtw89_write32(rtwdev, R_AX_HISR1, isrs->isrs[1]);
767 }
768 EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v1);
769 
770 void rtw89_pci_recognize_intrs_v2(struct rtw89_dev *rtwdev,
771 				  struct rtw89_pci *rtwpci,
772 				  struct rtw89_pci_isrs *isrs)
773 {
774 	isrs->ind_isrs = rtw89_read32(rtwdev, R_BE_PCIE_HISR) & rtwpci->ind_intrs;
775 	isrs->halt_c2h_isrs = isrs->ind_isrs & B_BE_HS0ISR_IND_INT ?
776 			      rtw89_read32(rtwdev, R_BE_HISR0) & rtwpci->halt_c2h_intrs : 0;
777 	isrs->isrs[0] = isrs->ind_isrs & B_BE_HCI_AXIDMA_INT ?
778 			rtw89_read32(rtwdev, R_BE_HAXI_HISR00) & rtwpci->intrs[0] : 0;
779 	isrs->isrs[1] = rtw89_read32(rtwdev, R_BE_PCIE_DMA_ISR) & rtwpci->intrs[1];
780 
781 	if (isrs->halt_c2h_isrs)
782 		rtw89_write32(rtwdev, R_BE_HISR0, isrs->halt_c2h_isrs);
783 	if (isrs->isrs[0])
784 		rtw89_write32(rtwdev, R_BE_HAXI_HISR00, isrs->isrs[0]);
785 	if (isrs->isrs[1])
786 		rtw89_write32(rtwdev, R_BE_PCIE_DMA_ISR, isrs->isrs[1]);
787 	rtw89_write32(rtwdev, R_BE_PCIE_HISR, isrs->ind_isrs);
788 }
789 EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v2);
790 
791 void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
792 {
793 	rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
794 	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, rtwpci->intrs[0]);
795 	rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, rtwpci->intrs[1]);
796 }
797 EXPORT_SYMBOL(rtw89_pci_enable_intr);
798 
799 void rtw89_pci_disable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
800 {
801 	rtw89_write32(rtwdev, R_AX_HIMR0, 0);
802 	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, 0);
803 	rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, 0);
804 }
805 EXPORT_SYMBOL(rtw89_pci_disable_intr);
806 
807 void rtw89_pci_enable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
808 {
809 	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, rtwpci->ind_intrs);
810 	rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
811 	rtw89_write32(rtwdev, R_AX_HAXI_HIMR00, rtwpci->intrs[0]);
812 	rtw89_write32(rtwdev, R_AX_HIMR1, rtwpci->intrs[1]);
813 }
814 EXPORT_SYMBOL(rtw89_pci_enable_intr_v1);
815 
816 void rtw89_pci_disable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
817 {
818 	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, 0);
819 }
820 EXPORT_SYMBOL(rtw89_pci_disable_intr_v1);
821 
822 void rtw89_pci_enable_intr_v2(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
823 {
824 	rtw89_write32(rtwdev, R_BE_HIMR0, rtwpci->halt_c2h_intrs);
825 	rtw89_write32(rtwdev, R_BE_HAXI_HIMR00, rtwpci->intrs[0]);
826 	rtw89_write32(rtwdev, R_BE_PCIE_DMA_IMR_0_V1, rtwpci->intrs[1]);
827 	rtw89_write32(rtwdev, R_BE_PCIE_HIMR0, rtwpci->ind_intrs);
828 }
829 EXPORT_SYMBOL(rtw89_pci_enable_intr_v2);
830 
831 void rtw89_pci_disable_intr_v2(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
832 {
833 	rtw89_write32(rtwdev, R_BE_PCIE_HIMR0, 0);
834 	rtw89_write32(rtwdev, R_BE_PCIE_DMA_IMR_0_V1, 0);
835 }
836 EXPORT_SYMBOL(rtw89_pci_disable_intr_v2);
837 
838 static void rtw89_pci_ops_recovery_start(struct rtw89_dev *rtwdev)
839 {
840 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
841 	unsigned long flags;
842 
843 	spin_lock_irqsave(&rtwpci->irq_lock, flags);
844 	rtw89_chip_disable_intr(rtwdev, rtwpci);
845 	rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_START);
846 	rtw89_chip_enable_intr(rtwdev, rtwpci);
847 	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
848 }
849 
850 static void rtw89_pci_ops_recovery_complete(struct rtw89_dev *rtwdev)
851 {
852 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
853 	unsigned long flags;
854 
855 	spin_lock_irqsave(&rtwpci->irq_lock, flags);
856 	rtw89_chip_disable_intr(rtwdev, rtwpci);
857 	rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_COMPLETE);
858 	rtw89_chip_enable_intr(rtwdev, rtwpci);
859 	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
860 }
861 
862 static void rtw89_pci_low_power_interrupt_handler(struct rtw89_dev *rtwdev)
863 {
864 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
865 	int budget = NAPI_POLL_WEIGHT;
866 
867 	/* Prevent the RXQ from getting stuck by running out of budget. */
868 	rtwdev->napi_budget_countdown = budget;
869 
870 	rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, budget);
871 	rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, budget);
872 }
873 
874 static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev)
875 {
876 	struct rtw89_dev *rtwdev = dev;
877 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
878 	const struct rtw89_pci_info *info = rtwdev->pci_info;
879 	const struct rtw89_pci_gen_def *gen_def = info->gen_def;
880 	struct rtw89_pci_isrs isrs;
881 	unsigned long flags;
882 
883 	spin_lock_irqsave(&rtwpci->irq_lock, flags);
884 	rtw89_chip_recognize_intrs(rtwdev, rtwpci, &isrs);
885 	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
886 
887 	if (unlikely(isrs.isrs[0] & gen_def->isr_rdu))
888 		rtw89_pci_isr_rxd_unavail(rtwdev, rtwpci);
889 
890 	if (unlikely(isrs.halt_c2h_isrs & gen_def->isr_halt_c2h))
891 		rtw89_ser_notify(rtwdev, rtw89_mac_get_err_status(rtwdev));
892 
893 	if (unlikely(isrs.halt_c2h_isrs & gen_def->isr_wdt_timeout))
894 		rtw89_ser_notify(rtwdev, MAC_AX_ERR_L2_ERR_WDT_TIMEOUT_INT);
895 
896 	if (unlikely(rtwpci->under_recovery))
897 		goto enable_intr;
898 
899 	if (unlikely(rtwpci->low_power)) {
900 		rtw89_pci_low_power_interrupt_handler(rtwdev);
901 		goto enable_intr;
902 	}
903 
904 	if (likely(rtwpci->running)) {
905 		local_bh_disable();
906 		napi_schedule(&rtwdev->napi);
907 		local_bh_enable();
908 	}
909 
910 	return IRQ_HANDLED;
911 
912 enable_intr:
913 	spin_lock_irqsave(&rtwpci->irq_lock, flags);
914 	if (likely(rtwpci->running))
915 		rtw89_chip_enable_intr(rtwdev, rtwpci);
916 	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
917 	return IRQ_HANDLED;
918 }
919 
920 static irqreturn_t rtw89_pci_interrupt_handler(int irq, void *dev)
921 {
922 	struct rtw89_dev *rtwdev = dev;
923 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
924 	unsigned long flags;
925 	irqreturn_t irqret = IRQ_WAKE_THREAD;
926 
927 	spin_lock_irqsave(&rtwpci->irq_lock, flags);
928 
929 	/* An interrupt event already in flight can still trigger this handler
930 	 * even though pci_stop() has been done to turn off the IMR.
931 	 */
932 	if (unlikely(!rtwpci->running)) {
933 		irqret = IRQ_HANDLED;
934 		goto exit;
935 	}
936 
937 	rtw89_chip_disable_intr(rtwdev, rtwpci);
938 exit:
939 	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
940 
941 	return irqret;
942 }
943 
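/* The DEF_*CHADDRS* macros below expand to per-channel entries collecting the
 * BD number/index, BDRAM control and descriptor base address registers,
 * parameterized by chip generation and an optional register suffix (e.g. _V1).
 */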
944 #define DEF_TXCHADDRS_TYPE2(gen, ch_idx, txch, v...) \
945 	[RTW89_TXCH_##ch_idx] = { \
946 		.num = R_##gen##_##txch##_TXBD_NUM ##v, \
947 		.idx = R_##gen##_##txch##_TXBD_IDX ##v, \
948 		.bdram = 0, \
949 		.desa_l = R_##gen##_##txch##_TXBD_DESA_L ##v, \
950 		.desa_h = R_##gen##_##txch##_TXBD_DESA_H ##v, \
951 	}
952 
953 #define DEF_TXCHADDRS_TYPE1(info, txch, v...) \
954 	[RTW89_TXCH_##txch] = { \
955 		.num = R_AX_##txch##_TXBD_NUM ##v, \
956 		.idx = R_AX_##txch##_TXBD_IDX ##v, \
957 		.bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
958 		.desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
959 		.desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
960 	}
961 
962 #define DEF_TXCHADDRS(info, txch, v...) \
963 	[RTW89_TXCH_##txch] = { \
964 		.num = R_AX_##txch##_TXBD_NUM, \
965 		.idx = R_AX_##txch##_TXBD_IDX, \
966 		.bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
967 		.desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
968 		.desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
969 	}
970 
971 #define DEF_RXCHADDRS(gen, ch_idx, rxch, v...) \
972 	[RTW89_RXCH_##ch_idx] = { \
973 		.num = R_##gen##_##rxch##_RXBD_NUM ##v, \
974 		.idx = R_##gen##_##rxch##_RXBD_IDX ##v, \
975 		.desa_l = R_##gen##_##rxch##_RXBD_DESA_L ##v, \
976 		.desa_h = R_##gen##_##rxch##_RXBD_DESA_H ##v, \
977 	}
978 
979 const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set = {
980 	.tx = {
981 		DEF_TXCHADDRS(info, ACH0),
982 		DEF_TXCHADDRS(info, ACH1),
983 		DEF_TXCHADDRS(info, ACH2),
984 		DEF_TXCHADDRS(info, ACH3),
985 		DEF_TXCHADDRS(info, ACH4),
986 		DEF_TXCHADDRS(info, ACH5),
987 		DEF_TXCHADDRS(info, ACH6),
988 		DEF_TXCHADDRS(info, ACH7),
989 		DEF_TXCHADDRS(info, CH8),
990 		DEF_TXCHADDRS(info, CH9),
991 		DEF_TXCHADDRS_TYPE1(info, CH10),
992 		DEF_TXCHADDRS_TYPE1(info, CH11),
993 		DEF_TXCHADDRS(info, CH12),
994 	},
995 	.rx = {
996 		DEF_RXCHADDRS(AX, RXQ, RXQ),
997 		DEF_RXCHADDRS(AX, RPQ, RPQ),
998 	},
999 };
1000 EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set);
1001 
1002 const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1 = {
1003 	.tx = {
1004 		DEF_TXCHADDRS(info, ACH0, _V1),
1005 		DEF_TXCHADDRS(info, ACH1, _V1),
1006 		DEF_TXCHADDRS(info, ACH2, _V1),
1007 		DEF_TXCHADDRS(info, ACH3, _V1),
1008 		DEF_TXCHADDRS(info, ACH4, _V1),
1009 		DEF_TXCHADDRS(info, ACH5, _V1),
1010 		DEF_TXCHADDRS(info, ACH6, _V1),
1011 		DEF_TXCHADDRS(info, ACH7, _V1),
1012 		DEF_TXCHADDRS(info, CH8, _V1),
1013 		DEF_TXCHADDRS(info, CH9, _V1),
1014 		DEF_TXCHADDRS_TYPE1(info, CH10, _V1),
1015 		DEF_TXCHADDRS_TYPE1(info, CH11, _V1),
1016 		DEF_TXCHADDRS(info, CH12, _V1),
1017 	},
1018 	.rx = {
1019 		DEF_RXCHADDRS(AX, RXQ, RXQ, _V1),
1020 		DEF_RXCHADDRS(AX, RPQ, RPQ, _V1),
1021 	},
1022 };
1023 EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_v1);
1024 
1025 const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_be = {
1026 	.tx = {
1027 		DEF_TXCHADDRS_TYPE2(BE, ACH0, CH0, _V1),
1028 		DEF_TXCHADDRS_TYPE2(BE, ACH1, CH1, _V1),
1029 		DEF_TXCHADDRS_TYPE2(BE, ACH2, CH2, _V1),
1030 		DEF_TXCHADDRS_TYPE2(BE, ACH3, CH3, _V1),
1031 		DEF_TXCHADDRS_TYPE2(BE, ACH4, CH4, _V1),
1032 		DEF_TXCHADDRS_TYPE2(BE, ACH5, CH5, _V1),
1033 		DEF_TXCHADDRS_TYPE2(BE, ACH6, CH6, _V1),
1034 		DEF_TXCHADDRS_TYPE2(BE, ACH7, CH7, _V1),
1035 		DEF_TXCHADDRS_TYPE2(BE, CH8, CH8, _V1),
1036 		DEF_TXCHADDRS_TYPE2(BE, CH9, CH9, _V1),
1037 		DEF_TXCHADDRS_TYPE2(BE, CH10, CH10, _V1),
1038 		DEF_TXCHADDRS_TYPE2(BE, CH11, CH11, _V1),
1039 		DEF_TXCHADDRS_TYPE2(BE, CH12, CH12, _V1),
1040 	},
1041 	.rx = {
1042 		DEF_RXCHADDRS(BE, RXQ, RXQ0, _V1),
1043 		DEF_RXCHADDRS(BE, RPQ, RPQ0, _V1),
1044 	},
1045 };
1046 EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_be);
1047 
1048 #undef DEF_TXCHADDRS_TYPE1
1049 #undef DEF_TXCHADDRS
1050 #undef DEF_RXCHADDRS
1051 
1052 static int rtw89_pci_get_txch_addrs(struct rtw89_dev *rtwdev,
1053 				    enum rtw89_tx_channel txch,
1054 				    const struct rtw89_pci_ch_dma_addr **addr)
1055 {
1056 	const struct rtw89_pci_info *info = rtwdev->pci_info;
1057 
1058 	if (txch >= RTW89_TXCH_NUM)
1059 		return -EINVAL;
1060 
1061 	*addr = &info->dma_addr_set->tx[txch];
1062 
1063 	return 0;
1064 }
1065 
1066 static int rtw89_pci_get_rxch_addrs(struct rtw89_dev *rtwdev,
1067 				    enum rtw89_rx_channel rxch,
1068 				    const struct rtw89_pci_ch_dma_addr **addr)
1069 {
1070 	const struct rtw89_pci_info *info = rtwdev->pci_info;
1071 
1072 	if (rxch >= RTW89_RXCH_NUM)
1073 		return -EINVAL;
1074 
1075 	*addr = &info->dma_addr_set->rx[rxch];
1076 
1077 	return 0;
1078 }
1079 
1080 static u32 rtw89_pci_get_avail_txbd_num(struct rtw89_pci_tx_ring *ring)
1081 {
1082 	struct rtw89_pci_dma_ring *bd_ring = &ring->bd_ring;
1083 
1084 	/* one descriptor is reserved to tell a full ring from an empty one */
1085 	if (bd_ring->rp > bd_ring->wp)
1086 		return bd_ring->rp - bd_ring->wp - 1;
1087 
1088 	return bd_ring->len - (bd_ring->wp - bd_ring->rp) - 1;
1089 }
1090 
1091 static
1092 u32 __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(struct rtw89_dev *rtwdev)
1093 {
1094 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1095 	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];
1096 	u32 cnt;
1097 
1098 	spin_lock_bh(&rtwpci->trx_lock);
1099 	rtw89_pci_reclaim_tx_fwcmd(rtwdev, rtwpci);
1100 	cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
1101 	spin_unlock_bh(&rtwpci->trx_lock);
1102 
1103 	return cnt;
1104 }
1105 
1106 static
1107 u32 __rtw89_pci_check_and_reclaim_tx_resource_noio(struct rtw89_dev *rtwdev,
1108 						   u8 txch)
1109 {
1110 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1111 	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
1112 	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
1113 	u32 cnt;
1114 
1115 	spin_lock_bh(&rtwpci->trx_lock);
1116 	cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
1117 	cnt = min(cnt, wd_ring->curr_num);
1118 	spin_unlock_bh(&rtwpci->trx_lock);
1119 
1120 	return cnt;
1121 }
1122 
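/* If either TX BDs or WD pages have run out, poll the RPQ to release completed
 * TX skbs and reclaim TX BDs, then return how many packets can still be
 * queued, i.e. the minimum of available BDs and WD pages.
 */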
1123 static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
1124 						     u8 txch)
1125 {
1126 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1127 	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
1128 	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
1129 	const struct rtw89_chip_info *chip = rtwdev->chip;
1130 	u32 bd_cnt, wd_cnt, min_cnt = 0;
1131 	struct rtw89_pci_rx_ring *rx_ring;
1132 	enum rtw89_debug_mask debug_mask;
1133 	u32 cnt;
1134 
1135 	rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];
1136 
1137 	spin_lock_bh(&rtwpci->trx_lock);
1138 	bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
1139 	wd_cnt = wd_ring->curr_num;
1140 
1141 	if (wd_cnt == 0 || bd_cnt == 0) {
1142 		cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
1143 		if (cnt)
1144 			rtw89_pci_release_tx(rtwdev, rx_ring, cnt);
1145 		else if (wd_cnt == 0)
1146 			goto out_unlock;
1147 
1148 		bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
1149 		if (bd_cnt == 0)
1150 			rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
1151 	}
1152 
1153 	bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
1154 	wd_cnt = wd_ring->curr_num;
1155 	min_cnt = min(bd_cnt, wd_cnt);
1156 	if (min_cnt == 0) {
1157 		/* This message can show up frequently in low power mode or under
1158 		 * high traffic on chips with a small FIFO. It is recognized as normal
1159 		 * behavior, so print it with the RTW89_DBG_TXRX mask in these situations.
1160 		 */
1161 		if (rtwpci->low_power || chip->small_fifo_size)
1162 			debug_mask = RTW89_DBG_TXRX;
1163 		else
1164 			debug_mask = RTW89_DBG_UNEXP;
1165 
1166 		rtw89_debug(rtwdev, debug_mask,
1167 			    "still no tx resource after reclaim: wd_cnt=%d bd_cnt=%d\n",
1168 			    wd_cnt, bd_cnt);
1169 	}
1170 
1171 out_unlock:
1172 	spin_unlock_bh(&rtwpci->trx_lock);
1173 
1174 	return min_cnt;
1175 }
1176 
1177 static u32 rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
1178 						   u8 txch)
1179 {
1180 	if (rtwdev->hci.paused)
1181 		return __rtw89_pci_check_and_reclaim_tx_resource_noio(rtwdev, txch);
1182 
1183 	if (txch == RTW89_TXCH_CH12)
1184 		return __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(rtwdev);
1185 
1186 	return __rtw89_pci_check_and_reclaim_tx_resource(rtwdev, txch);
1187 }
1188 
1189 static void __rtw89_pci_tx_kick_off(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring)
1190 {
1191 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1192 	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
1193 	u32 host_idx, addr;
1194 
1195 	spin_lock_bh(&rtwpci->trx_lock);
1196 
1197 	addr = bd_ring->addr.idx;
1198 	host_idx = bd_ring->wp;
1199 	rtw89_write16(rtwdev, addr, host_idx);
1200 
1201 	spin_unlock_bh(&rtwpci->trx_lock);
1202 }
1203 
1204 static void rtw89_pci_tx_bd_ring_update(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring,
1205 					int n_txbd)
1206 {
1207 	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
1208 	u32 host_idx, len;
1209 
1210 	len = bd_ring->len;
1211 	host_idx = bd_ring->wp + n_txbd;
1212 	host_idx = host_idx < len ? host_idx : host_idx - len;
1213 
1214 	bd_ring->wp = host_idx;
1215 }
1216 
1217 static void rtw89_pci_ops_tx_kick_off(struct rtw89_dev *rtwdev, u8 txch)
1218 {
1219 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1220 	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
1221 
1222 	if (rtwdev->hci.paused) {
1223 		set_bit(txch, rtwpci->kick_map);
1224 		return;
1225 	}
1226 
1227 	__rtw89_pci_tx_kick_off(rtwdev, tx_ring);
1228 }
1229 
1230 static void rtw89_pci_tx_kick_off_pending(struct rtw89_dev *rtwdev)
1231 {
1232 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1233 	struct rtw89_pci_tx_ring *tx_ring;
1234 	int txch;
1235 
1236 	for (txch = 0; txch < RTW89_TXCH_NUM; txch++) {
1237 		if (!test_and_clear_bit(txch, rtwpci->kick_map))
1238 			continue;
1239 
1240 		tx_ring = &rtwpci->tx_rings[txch];
1241 		__rtw89_pci_tx_kick_off(rtwdev, tx_ring);
1242 	}
1243 }
1244 
1245 static void __pci_flush_txch(struct rtw89_dev *rtwdev, u8 txch, bool drop)
1246 {
1247 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1248 	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
1249 	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
1250 	u32 cur_idx, cur_rp;
1251 	u8 i;
1252 
1253 	/* Because the time taken by the I/O is somewhat dynamic, it's hard to
1254 	 * define a reasonable fixed total timeout for the read_poll_timeout*
1255 	 * helpers. Instead, we ensure a reasonable number of polling iterations,
1256 	 * so we just use a for loop with udelay here.
1257 	 */
1258 	for (i = 0; i < 60; i++) {
1259 		cur_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
1260 		cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
1261 		if (cur_rp == bd_ring->wp)
1262 			return;
1263 
1264 		udelay(1);
1265 	}
1266 
1267 	if (!drop)
1268 		rtw89_info(rtwdev, "timed out to flush pci txch: %d\n", txch);
1269 }
1270 
1271 static void __rtw89_pci_ops_flush_txchs(struct rtw89_dev *rtwdev, u32 txchs,
1272 					bool drop)
1273 {
1274 	const struct rtw89_pci_info *info = rtwdev->pci_info;
1275 	u8 i;
1276 
1277 	for (i = 0; i < RTW89_TXCH_NUM; i++) {
1278 		/* It may be unnecessary to flush FWCMD queue. */
1279 		if (i == RTW89_TXCH_CH12)
1280 			continue;
1281 		if (info->tx_dma_ch_mask & BIT(i))
1282 			continue;
1283 
1284 		if (txchs & BIT(i))
1285 			__pci_flush_txch(rtwdev, i, drop);
1286 	}
1287 }
1288 
1289 static void rtw89_pci_ops_flush_queues(struct rtw89_dev *rtwdev, u32 queues,
1290 				       bool drop)
1291 {
1292 	__rtw89_pci_ops_flush_txchs(rtwdev, BIT(RTW89_TXCH_NUM) - 1, drop);
1293 }
1294 
1295 u32 rtw89_pci_fill_txaddr_info(struct rtw89_dev *rtwdev,
1296 			       void *txaddr_info_addr, u32 total_len,
1297 			       dma_addr_t dma, u8 *add_info_nr)
1298 {
1299 	struct rtw89_pci_tx_addr_info_32 *txaddr_info = txaddr_info_addr;
1300 
1301 	txaddr_info->length = cpu_to_le16(total_len);
1302 	txaddr_info->option = cpu_to_le16(RTW89_PCI_ADDR_MSDU_LS |
1303 					  RTW89_PCI_ADDR_NUM(1));
1304 	txaddr_info->dma = cpu_to_le32(dma);
1305 
1306 	*add_info_nr = 1;
1307 
1308 	return sizeof(*txaddr_info);
1309 }
1310 EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info);
1311 
1312 u32 rtw89_pci_fill_txaddr_info_v1(struct rtw89_dev *rtwdev,
1313 				  void *txaddr_info_addr, u32 total_len,
1314 				  dma_addr_t dma, u8 *add_info_nr)
1315 {
1316 	struct rtw89_pci_tx_addr_info_32_v1 *txaddr_info = txaddr_info_addr;
1317 	u32 remain = total_len;
1318 	u32 len;
1319 	u16 length_option;
1320 	int n;
1321 
1322 	for (n = 0; n < RTW89_TXADDR_INFO_NR_V1 && remain; n++) {
1323 		len = remain >= TXADDR_INFO_LENTHG_V1_MAX ?
1324 		      TXADDR_INFO_LENTHG_V1_MAX : remain;
1325 		remain -= len;
1326 
1327 		length_option = FIELD_PREP(B_PCIADDR_LEN_V1_MASK, len) |
1328 				FIELD_PREP(B_PCIADDR_HIGH_SEL_V1_MASK, 0) |
1329 				FIELD_PREP(B_PCIADDR_LS_V1_MASK, remain == 0);
1330 		txaddr_info->length_opt = cpu_to_le16(length_option);
1331 		txaddr_info->dma_low_lsb = cpu_to_le16(FIELD_GET(GENMASK(15, 0), dma));
1332 		txaddr_info->dma_low_msb = cpu_to_le16(FIELD_GET(GENMASK(31, 16), dma));
1333 
1334 		dma += len;
1335 		txaddr_info++;
1336 	}
1337 
1338 	WARN_ONCE(remain, "length overflow remain=%u total_len=%u",
1339 		  remain, total_len);
1340 
1341 	*add_info_nr = n;
1342 
1343 	return n * sizeof(*txaddr_info);
1344 }
1345 EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info_v1);
1346 
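/* Map the skb for DMA and lay out the WD page buffer: the TXWD body (plus WD
 * info when enabled), the TXWP sequence words, and the TX address info entries
 * pointing at the skb data. The skb is then queued on the txwd.
 */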
1347 static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev,
1348 				 struct rtw89_pci_tx_ring *tx_ring,
1349 				 struct rtw89_pci_tx_wd *txwd,
1350 				 struct rtw89_core_tx_request *tx_req)
1351 {
1352 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1353 	const struct rtw89_chip_info *chip = rtwdev->chip;
1354 	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
1355 	struct rtw89_pci_tx_wp_info *txwp_info;
1356 	void *txaddr_info_addr;
1357 	struct pci_dev *pdev = rtwpci->pdev;
1358 	struct sk_buff *skb = tx_req->skb;
1359 	struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
1360 	struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
1361 	bool en_wd_info = desc_info->en_wd_info;
1362 	u32 txwd_len;
1363 	u32 txwp_len;
1364 	u32 txaddr_info_len;
1365 	dma_addr_t dma;
1366 	int ret;
1367 
1368 	dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
1369 	if (dma_mapping_error(&pdev->dev, dma)) {
1370 		rtw89_err(rtwdev, "failed to map skb dma data\n");
1371 		ret = -EBUSY;
1372 		goto err;
1373 	}
1374 
1375 	tx_data->dma = dma;
1376 	rcu_assign_pointer(skb_data->wait, NULL);
1377 
1378 	txwp_len = sizeof(*txwp_info);
1379 	txwd_len = chip->txwd_body_size;
1380 	txwd_len += en_wd_info ? chip->txwd_info_size : 0;
1381 
1382 	txwp_info = txwd->vaddr + txwd_len;
1383 	txwp_info->seq0 = cpu_to_le16(txwd->seq | RTW89_PCI_TXWP_VALID);
1384 	txwp_info->seq1 = 0;
1385 	txwp_info->seq2 = 0;
1386 	txwp_info->seq3 = 0;
1387 
1388 	tx_ring->tx_cnt++;
1389 	txaddr_info_addr = txwd->vaddr + txwd_len + txwp_len;
1390 	txaddr_info_len =
1391 		rtw89_chip_fill_txaddr_info(rtwdev, txaddr_info_addr, skb->len,
1392 					    dma, &desc_info->addr_info_nr);
1393 
1394 	txwd->len = txwd_len + txwp_len + txaddr_info_len;
1395 
1396 	rtw89_chip_fill_txdesc(rtwdev, desc_info, txwd->vaddr);
1397 
1398 	skb_queue_tail(&txwd->queue, skb);
1399 
1400 	return 0;
1401 
1402 err:
1403 	return ret;
1404 }
1405 
1406 static int rtw89_pci_fwcmd_submit(struct rtw89_dev *rtwdev,
1407 				  struct rtw89_pci_tx_ring *tx_ring,
1408 				  struct rtw89_pci_tx_bd_32 *txbd,
1409 				  struct rtw89_core_tx_request *tx_req)
1410 {
1411 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1412 	const struct rtw89_chip_info *chip = rtwdev->chip;
1413 	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
1414 	void *txdesc;
1415 	int txdesc_size = chip->h2c_desc_size;
1416 	struct pci_dev *pdev = rtwpci->pdev;
1417 	struct sk_buff *skb = tx_req->skb;
1418 	struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
1419 	dma_addr_t dma;
1420 
1421 	txdesc = skb_push(skb, txdesc_size);
1422 	memset(txdesc, 0, txdesc_size);
1423 	rtw89_chip_fill_txdesc_fwcmd(rtwdev, desc_info, txdesc);
1424 
1425 	dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
1426 	if (dma_mapping_error(&pdev->dev, dma)) {
1427 		rtw89_err(rtwdev, "failed to map fwcmd dma data\n");
1428 		return -EBUSY;
1429 	}
1430 
1431 	tx_data->dma = dma;
1432 	txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS);
1433 	txbd->length = cpu_to_le16(skb->len);
1434 	txbd->dma = cpu_to_le32(tx_data->dma);
1435 	skb_queue_tail(&rtwpci->h2c_queue, skb);
1436 
1437 	rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1);
1438 
1439 	return 0;
1440 }
1441 
1442 static int rtw89_pci_txbd_submit(struct rtw89_dev *rtwdev,
1443 				 struct rtw89_pci_tx_ring *tx_ring,
1444 				 struct rtw89_pci_tx_bd_32 *txbd,
1445 				 struct rtw89_core_tx_request *tx_req)
1446 {
1447 	struct rtw89_pci_tx_wd *txwd;
1448 	int ret;
1449 
1450 	/* FWCMD queue doesn't have wd pages. Instead, it submits the CMD
1451 	 * buffer with WD BODY only. So here we don't need to check the free
1452 	 * pages of the wd ring.
1453 	 */
1454 	if (tx_ring->txch == RTW89_TXCH_CH12)
1455 		return rtw89_pci_fwcmd_submit(rtwdev, tx_ring, txbd, tx_req);
1456 
1457 	txwd = rtw89_pci_dequeue_txwd(tx_ring);
1458 	if (!txwd) {
1459 		rtw89_err(rtwdev, "no available TXWD\n");
1460 		ret = -ENOSPC;
1461 		goto err;
1462 	}
1463 
1464 	ret = rtw89_pci_txwd_submit(rtwdev, tx_ring, txwd, tx_req);
1465 	if (ret) {
1466 		rtw89_err(rtwdev, "failed to submit TXWD %d\n", txwd->seq);
1467 		goto err_enqueue_wd;
1468 	}
1469 
1470 	list_add_tail(&txwd->list, &tx_ring->busy_pages);
1471 
1472 	txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS);
1473 	txbd->length = cpu_to_le16(txwd->len);
1474 	txbd->dma = cpu_to_le32(txwd->paddr);
1475 
1476 	rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1);
1477 
1478 	return 0;
1479 
1480 err_enqueue_wd:
1481 	rtw89_pci_enqueue_txwd(tx_ring, txwd);
1482 err:
1483 	return ret;
1484 }
1485 
1486 static int rtw89_pci_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req,
1487 			      u8 txch)
1488 {
1489 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1490 	struct rtw89_pci_tx_ring *tx_ring;
1491 	struct rtw89_pci_tx_bd_32 *txbd;
1492 	u32 n_avail_txbd;
1493 	int ret = 0;
1494 
1495 	/* check the tx type and dma channel for fw cmd queue */
1496 	if ((txch == RTW89_TXCH_CH12 ||
1497 	     tx_req->tx_type == RTW89_CORE_TX_TYPE_FWCMD) &&
1498 	    (txch != RTW89_TXCH_CH12 ||
1499 	     tx_req->tx_type != RTW89_CORE_TX_TYPE_FWCMD)) {
1500 		rtw89_err(rtwdev, "only fw cmd uses dma channel 12\n");
1501 		return -EINVAL;
1502 	}
1503 
1504 	tx_ring = &rtwpci->tx_rings[txch];
1505 	spin_lock_bh(&rtwpci->trx_lock);
1506 
1507 	n_avail_txbd = rtw89_pci_get_avail_txbd_num(tx_ring);
1508 	if (n_avail_txbd == 0) {
1509 		rtw89_err(rtwdev, "no available TXBD\n");
1510 		ret = -ENOSPC;
1511 		goto err_unlock;
1512 	}
1513 
1514 	txbd = rtw89_pci_get_next_txbd(tx_ring);
1515 	ret = rtw89_pci_txbd_submit(rtwdev, tx_ring, txbd, tx_req);
1516 	if (ret) {
1517 		rtw89_err(rtwdev, "failed to submit TXBD\n");
1518 		goto err_unlock;
1519 	}
1520 
1521 	spin_unlock_bh(&rtwpci->trx_lock);
1522 	return 0;
1523 
1524 err_unlock:
1525 	spin_unlock_bh(&rtwpci->trx_lock);
1526 	return ret;
1527 }
1528 
1529 static int rtw89_pci_ops_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req)
1530 {
1531 	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
1532 	int ret;
1533 
1534 	ret = rtw89_pci_tx_write(rtwdev, tx_req, desc_info->ch_dma);
1535 	if (ret) {
1536 		rtw89_err(rtwdev, "failed to TX Queue %d\n", desc_info->ch_dma);
1537 		return ret;
1538 	}
1539 
1540 	return 0;
1541 }
1542 
1543 const struct rtw89_pci_bd_ram rtw89_bd_ram_table_dual[RTW89_TXCH_NUM] = {
1544 	[RTW89_TXCH_ACH0] = {.start_idx = 0,  .max_num = 5, .min_num = 2},
1545 	[RTW89_TXCH_ACH1] = {.start_idx = 5,  .max_num = 5, .min_num = 2},
1546 	[RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2},
1547 	[RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2},
1548 	[RTW89_TXCH_ACH4] = {.start_idx = 20, .max_num = 5, .min_num = 2},
1549 	[RTW89_TXCH_ACH5] = {.start_idx = 25, .max_num = 5, .min_num = 2},
1550 	[RTW89_TXCH_ACH6] = {.start_idx = 30, .max_num = 5, .min_num = 2},
1551 	[RTW89_TXCH_ACH7] = {.start_idx = 35, .max_num = 5, .min_num = 2},
1552 	[RTW89_TXCH_CH8]  = {.start_idx = 40, .max_num = 5, .min_num = 1},
1553 	[RTW89_TXCH_CH9]  = {.start_idx = 45, .max_num = 5, .min_num = 1},
1554 	[RTW89_TXCH_CH10] = {.start_idx = 50, .max_num = 5, .min_num = 1},
1555 	[RTW89_TXCH_CH11] = {.start_idx = 55, .max_num = 5, .min_num = 1},
1556 	[RTW89_TXCH_CH12] = {.start_idx = 60, .max_num = 4, .min_num = 1},
1557 };
1558 EXPORT_SYMBOL(rtw89_bd_ram_table_dual);
1559 
1560 const struct rtw89_pci_bd_ram rtw89_bd_ram_table_single[RTW89_TXCH_NUM] = {
1561 	[RTW89_TXCH_ACH0] = {.start_idx = 0,  .max_num = 5, .min_num = 2},
1562 	[RTW89_TXCH_ACH1] = {.start_idx = 5,  .max_num = 5, .min_num = 2},
1563 	[RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2},
1564 	[RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2},
1565 	[RTW89_TXCH_CH8]  = {.start_idx = 20, .max_num = 4, .min_num = 1},
1566 	[RTW89_TXCH_CH9]  = {.start_idx = 24, .max_num = 4, .min_num = 1},
1567 	[RTW89_TXCH_CH12] = {.start_idx = 28, .max_num = 4, .min_num = 1},
1568 };
1569 EXPORT_SYMBOL(rtw89_bd_ram_table_single);
1570 
1571 static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
1572 {
1573 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1574 	const struct rtw89_pci_info *info = rtwdev->pci_info;
1575 	const struct rtw89_pci_bd_ram *bd_ram_table = *info->bd_ram_table;
1576 	struct rtw89_pci_tx_ring *tx_ring;
1577 	struct rtw89_pci_rx_ring *rx_ring;
1578 	struct rtw89_pci_dma_ring *bd_ring;
1579 	const struct rtw89_pci_bd_ram *bd_ram;
1580 	u32 addr_num;
1581 	u32 addr_idx;
1582 	u32 addr_bdram;
1583 	u32 addr_desa_l;
1584 	u32 val32;
1585 	int i;
1586 
1587 	for (i = 0; i < RTW89_TXCH_NUM; i++) {
1588 		if (info->tx_dma_ch_mask & BIT(i))
1589 			continue;
1590 
1591 		tx_ring = &rtwpci->tx_rings[i];
1592 		bd_ring = &tx_ring->bd_ring;
1593 		bd_ram = bd_ram_table ? &bd_ram_table[i] : NULL;
1594 		addr_num = bd_ring->addr.num;
1595 		addr_bdram = bd_ring->addr.bdram;
1596 		addr_desa_l = bd_ring->addr.desa_l;
1597 		bd_ring->wp = 0;
1598 		bd_ring->rp = 0;
1599 
1600 		rtw89_write16(rtwdev, addr_num, bd_ring->len);
1601 		if (addr_bdram && bd_ram) {
1602 			val32 = FIELD_PREP(BDRAM_SIDX_MASK, bd_ram->start_idx) |
1603 				FIELD_PREP(BDRAM_MAX_MASK, bd_ram->max_num) |
1604 				FIELD_PREP(BDRAM_MIN_MASK, bd_ram->min_num);
1605 
1606 			rtw89_write32(rtwdev, addr_bdram, val32);
1607 		}
1608 		rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
1609 	}
1610 
1611 	for (i = 0; i < RTW89_RXCH_NUM; i++) {
1612 		rx_ring = &rtwpci->rx_rings[i];
1613 		bd_ring = &rx_ring->bd_ring;
1614 		addr_num = bd_ring->addr.num;
1615 		addr_idx = bd_ring->addr.idx;
1616 		addr_desa_l = bd_ring->addr.desa_l;
1617 		if (info->rx_ring_eq_is_full)
1618 			bd_ring->wp = bd_ring->len - 1;
1619 		else
1620 			bd_ring->wp = 0;
1621 		bd_ring->rp = 0;
1622 		rx_ring->diliver_skb = NULL;
1623 		rx_ring->diliver_desc.ready = false;
1624 		rx_ring->target_rx_tag = 0;
1625 
1626 		rtw89_write16(rtwdev, addr_num, bd_ring->len);
1627 		rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
1628 
1629 		if (info->rx_ring_eq_is_full)
1630 			rtw89_write16(rtwdev, addr_idx, bd_ring->wp);
1631 	}
1632 }
1633 
1634 static void rtw89_pci_release_tx_ring(struct rtw89_dev *rtwdev,
1635 				      struct rtw89_pci_tx_ring *tx_ring)
1636 {
1637 	rtw89_pci_release_busy_txwd(rtwdev, tx_ring);
1638 	rtw89_pci_release_pending_txwd_skb(rtwdev, tx_ring);
1639 }
1640 
1641 void rtw89_pci_ops_reset(struct rtw89_dev *rtwdev)
1642 {
1643 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1644 	const struct rtw89_pci_info *info = rtwdev->pci_info;
1645 	int txch;
1646 
1647 	rtw89_pci_reset_trx_rings(rtwdev);
1648 
1649 	spin_lock_bh(&rtwpci->trx_lock);
1650 	for (txch = 0; txch < RTW89_TXCH_NUM; txch++) {
1651 		if (info->tx_dma_ch_mask & BIT(txch))
1652 			continue;
1653 		if (txch == RTW89_TXCH_CH12) {
1654 			rtw89_pci_release_fwcmd(rtwdev, rtwpci,
1655 						skb_queue_len(&rtwpci->h2c_queue), true);
1656 			continue;
1657 		}
1658 		rtw89_pci_release_tx_ring(rtwdev, &rtwpci->tx_rings[txch]);
1659 	}
1660 	spin_unlock_bh(&rtwpci->trx_lock);
1661 }
1662 
1663 static void rtw89_pci_enable_intr_lock(struct rtw89_dev *rtwdev)
1664 {
1665 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1666 	unsigned long flags;
1667 
1668 	spin_lock_irqsave(&rtwpci->irq_lock, flags);
1669 	rtwpci->running = true;
1670 	rtw89_chip_enable_intr(rtwdev, rtwpci);
1671 	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
1672 }
1673 
1674 static void rtw89_pci_disable_intr_lock(struct rtw89_dev *rtwdev)
1675 {
1676 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1677 	unsigned long flags;
1678 
1679 	spin_lock_irqsave(&rtwpci->irq_lock, flags);
1680 	rtwpci->running = false;
1681 	rtw89_chip_disable_intr(rtwdev, rtwpci);
1682 	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
1683 }
1684 
1685 static int rtw89_pci_ops_start(struct rtw89_dev *rtwdev)
1686 {
1687 	rtw89_core_napi_start(rtwdev);
1688 	rtw89_pci_enable_intr_lock(rtwdev);
1689 
1690 	return 0;
1691 }
1692 
1693 static void rtw89_pci_ops_stop(struct rtw89_dev *rtwdev)
1694 {
1695 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1696 	struct pci_dev *pdev = rtwpci->pdev;
1697 
1698 	rtw89_pci_disable_intr_lock(rtwdev);
1699 	synchronize_irq(pdev->irq);
1700 	rtw89_core_napi_stop(rtwdev);
1701 }
1702 
1703 static void rtw89_pci_ops_pause(struct rtw89_dev *rtwdev, bool pause)
1704 {
1705 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1706 	struct pci_dev *pdev = rtwpci->pdev;
1707 
1708 	if (pause) {
1709 		rtw89_pci_disable_intr_lock(rtwdev);
1710 		synchronize_irq(pdev->irq);
1711 		if (test_bit(RTW89_FLAG_NAPI_RUNNING, rtwdev->flags))
1712 			napi_synchronize(&rtwdev->napi);
1713 	} else {
1714 		rtw89_pci_enable_intr_lock(rtwdev);
1715 		rtw89_pci_tx_kick_off_pending(rtwdev);
1716 	}
1717 }
1718 
1719 static
1720 void rtw89_pci_switch_bd_idx_addr(struct rtw89_dev *rtwdev, bool low_power)
1721 {
1722 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1723 	const struct rtw89_pci_info *info = rtwdev->pci_info;
1724 	const struct rtw89_pci_bd_idx_addr *bd_idx_addr = info->bd_idx_addr_low_power;
1725 	const struct rtw89_pci_ch_dma_addr_set *dma_addr_set = info->dma_addr_set;
1726 	struct rtw89_pci_tx_ring *tx_ring;
1727 	struct rtw89_pci_rx_ring *rx_ring;
1728 	int i;
1729 
1730 	if (WARN(!bd_idx_addr, "only HCI with low power mode needs this\n"))
1731 		return;
1732 
1733 	for (i = 0; i < RTW89_TXCH_NUM; i++) {
1734 		tx_ring = &rtwpci->tx_rings[i];
1735 		tx_ring->bd_ring.addr.idx = low_power ?
1736 					    bd_idx_addr->tx_bd_addrs[i] :
1737 					    dma_addr_set->tx[i].idx;
1738 	}
1739 
1740 	for (i = 0; i < RTW89_RXCH_NUM; i++) {
1741 		rx_ring = &rtwpci->rx_rings[i];
1742 		rx_ring->bd_ring.addr.idx = low_power ?
1743 					    bd_idx_addr->rx_bd_addrs[i] :
1744 					    dma_addr_set->rx[i].idx;
1745 	}
1746 }
1747 
1748 static void rtw89_pci_ops_switch_mode(struct rtw89_dev *rtwdev, bool low_power)
1749 {
1750 	enum rtw89_pci_intr_mask_cfg cfg;
1751 
1752 	WARN(!rtwdev->hci.paused, "HCI isn't paused\n");
1753 
1754 	cfg = low_power ? RTW89_PCI_INTR_MASK_LOW_POWER : RTW89_PCI_INTR_MASK_NORMAL;
1755 	rtw89_chip_config_intr_mask(rtwdev, cfg);
1756 	rtw89_pci_switch_bd_idx_addr(rtwdev, low_power);
1757 }
1758 
1759 static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data);
1760 
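/* A CMAC register that reads back as RTW89_R32_DEAD is re-read after
 * forcing all CMAC clocks on; give up after MAC_REG_POOL_COUNT retries.
 */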
1761 static u32 rtw89_pci_ops_read32_cmac(struct rtw89_dev *rtwdev, u32 addr)
1762 {
1763 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1764 	u32 val = readl(rtwpci->mmap + addr);
1765 	int count;
1766 
1767 	for (count = 0; ; count++) {
1768 		if (val != RTW89_R32_DEAD)
1769 			return val;
1770 		if (count >= MAC_REG_POOL_COUNT) {
1771 			rtw89_warn(rtwdev, "read dead CMAC register, addr %#x = %#x\n", addr, val);
1772 			return RTW89_R32_DEAD;
1773 		}
1774 		rtw89_pci_ops_write32(rtwdev, R_AX_CK_EN, B_AX_CMAC_ALLCKEN);
1775 		val = readl(rtwpci->mmap + addr);
1776 	}
1777 
1778 	return val;
1779 }
1780 
1781 static u8 rtw89_pci_ops_read8(struct rtw89_dev *rtwdev, u32 addr)
1782 {
1783 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1784 	u32 addr32, val32, shift;
1785 
1786 	if (!ACCESS_CMAC(addr))
1787 		return readb(rtwpci->mmap + addr);
1788 
1789 	addr32 = addr & ~0x3;
1790 	shift = (addr & 0x3) * 8;
1791 	val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32);
1792 	return val32 >> shift;
1793 }
1794 
1795 static u16 rtw89_pci_ops_read16(struct rtw89_dev *rtwdev, u32 addr)
1796 {
1797 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1798 	u32 addr32, val32, shift;
1799 
1800 	if (!ACCESS_CMAC(addr))
1801 		return readw(rtwpci->mmap + addr);
1802 
1803 	addr32 = addr & ~0x3;
1804 	shift = (addr & 0x3) * 8;
1805 	val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32);
1806 	return val32 >> shift;
1807 }
1808 
1809 static u32 rtw89_pci_ops_read32(struct rtw89_dev *rtwdev, u32 addr)
1810 {
1811 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1812 
1813 	if (!ACCESS_CMAC(addr))
1814 		return readl(rtwpci->mmap + addr);
1815 
1816 	return rtw89_pci_ops_read32_cmac(rtwdev, addr);
1817 }
1818 
1819 static void rtw89_pci_ops_write8(struct rtw89_dev *rtwdev, u32 addr, u8 data)
1820 {
1821 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1822 
1823 	writeb(data, rtwpci->mmap + addr);
1824 }
1825 
1826 static void rtw89_pci_ops_write16(struct rtw89_dev *rtwdev, u32 addr, u16 data)
1827 {
1828 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1829 
1830 	writew(data, rtwpci->mmap + addr);
1831 }
1832 
1833 static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data)
1834 {
1835 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1836 
1837 	writel(data, rtwpci->mmap + addr);
1838 }
1839 
1840 static void rtw89_pci_ctrl_dma_trx(struct rtw89_dev *rtwdev, bool enable)
1841 {
1842 	const struct rtw89_pci_info *info = rtwdev->pci_info;
1843 
1844 	if (enable)
1845 		rtw89_write32_set(rtwdev, info->init_cfg_reg,
1846 				  info->rxhci_en_bit | info->txhci_en_bit);
1847 	else
1848 		rtw89_write32_clr(rtwdev, info->init_cfg_reg,
1849 				  info->rxhci_en_bit | info->txhci_en_bit);
1850 }
1851 
1852 static void rtw89_pci_ctrl_dma_io(struct rtw89_dev *rtwdev, bool enable)
1853 {
1854 	const struct rtw89_pci_info *info = rtwdev->pci_info;
1855 	const struct rtw89_reg_def *reg = &info->dma_io_stop;
1856 
1857 	if (enable)
1858 		rtw89_write32_clr(rtwdev, reg->addr, reg->mask);
1859 	else
1860 		rtw89_write32_set(rtwdev, reg->addr, reg->mask);
1861 }
1862 
1863 void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable)
1864 {
1865 	rtw89_pci_ctrl_dma_io(rtwdev, enable);
1866 	rtw89_pci_ctrl_dma_trx(rtwdev, enable);
1867 }
1868 
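/* Prepare an MDIO access to the PCIe PHY: select the PHY register page
 * from the address and link speed (GEN1/GEN2), kick the read/write flag
 * and poll until the HW clears it.
 */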
1869 static int rtw89_pci_check_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 rw_bit)
1870 {
1871 	u16 val;
1872 
1873 	rtw89_write8(rtwdev, R_AX_MDIO_CFG, addr & 0x1F);
1874 
1875 	val = rtw89_read16(rtwdev, R_AX_MDIO_CFG);
1876 	switch (speed) {
1877 	case PCIE_PHY_GEN1:
1878 		if (addr < 0x20)
1879 			val = u16_replace_bits(val, MDIO_PG0_G1, B_AX_MDIO_PHY_ADDR_MASK);
1880 		else
1881 			val = u16_replace_bits(val, MDIO_PG1_G1, B_AX_MDIO_PHY_ADDR_MASK);
1882 		break;
1883 	case PCIE_PHY_GEN2:
1884 		if (addr < 0x20)
1885 			val = u16_replace_bits(val, MDIO_PG0_G2, B_AX_MDIO_PHY_ADDR_MASK);
1886 		else
1887 			val = u16_replace_bits(val, MDIO_PG1_G2, B_AX_MDIO_PHY_ADDR_MASK);
1888 		break;
1889 	default:
1890 		rtw89_err(rtwdev, "[ERR]Unsupported MDIO speed %d!\n", speed);
1891 		return -EINVAL;
1892 	}
1893 	rtw89_write16(rtwdev, R_AX_MDIO_CFG, val);
1894 	rtw89_write16_set(rtwdev, R_AX_MDIO_CFG, rw_bit);
1895 
1896 	return read_poll_timeout(rtw89_read16, val, !(val & rw_bit), 10, 2000,
1897 				 false, rtwdev, R_AX_MDIO_CFG);
1898 }
1899 
1900 static int
1901 rtw89_read16_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 *val)
1902 {
1903 	int ret;
1904 
1905 	ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_RFLAG);
1906 	if (ret) {
1907 		rtw89_err(rtwdev, "[ERR]MDIO R16 0x%X fail ret=%d!\n", addr, ret);
1908 		return ret;
1909 	}
1910 	*val = rtw89_read16(rtwdev, R_AX_MDIO_RDATA);
1911 
1912 	return 0;
1913 }
1914 
1915 static int
1916 rtw89_write16_mdio(struct rtw89_dev *rtwdev, u8 addr, u16 data, u8 speed)
1917 {
1918 	int ret;
1919 
1920 	rtw89_write16(rtwdev, R_AX_MDIO_WDATA, data);
1921 	ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_WFLAG);
1922 	if (ret) {
1923 		rtw89_err(rtwdev, "[ERR]MDIO W16 0x%X = %x fail ret=%d!\n", addr, data, ret);
1924 		return ret;
1925 	}
1926 
1927 	return 0;
1928 }
1929 
1930 static int
1931 rtw89_write16_mdio_mask(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u16 data, u8 speed)
1932 {
1933 	u32 shift;
1934 	int ret;
1935 	u16 val;
1936 
1937 	ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
1938 	if (ret)
1939 		return ret;
1940 
1941 	shift = __ffs(mask);
1942 	val &= ~mask;
1943 	val |= ((data << shift) & mask);
1944 
1945 	ret = rtw89_write16_mdio(rtwdev, addr, val, speed);
1946 	if (ret)
1947 		return ret;
1948 
1949 	return 0;
1950 }
1951 
1952 static int rtw89_write16_mdio_set(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed)
1953 {
1954 	int ret;
1955 	u16 val;
1956 
1957 	ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
1958 	if (ret)
1959 		return ret;
1960 	ret = rtw89_write16_mdio(rtwdev, addr, val | mask, speed);
1961 	if (ret)
1962 		return ret;
1963 
1964 	return 0;
1965 }
1966 
1967 static int rtw89_write16_mdio_clr(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed)
1968 {
1969 	int ret;
1970 	u16 val;
1971 
1972 	ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
1973 	if (ret)
1974 		return ret;
1975 	ret = rtw89_write16_mdio(rtwdev, addr, val & ~mask, speed);
1976 	if (ret)
1977 		return ret;
1978 
1979 	return 0;
1980 }
1981 
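/* DBI accessors: write/read single bytes of the device's PCI config space
 * through MAC registers, selecting the byte lane from the two LSBs of the
 * address and polling the flag register for completion.
 */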
1982 static int rtw89_dbi_write8(struct rtw89_dev *rtwdev, u16 addr, u8 data)
1983 {
1984 	u16 addr_2lsb = addr & B_AX_DBI_2LSB;
1985 	u16 write_addr;
1986 	u8 flag;
1987 	int ret;
1988 
1989 	write_addr = addr & B_AX_DBI_ADDR_MSK;
1990 	write_addr |= u16_encode_bits(BIT(addr_2lsb), B_AX_DBI_WREN_MSK);
1991 	rtw89_write8(rtwdev, R_AX_DBI_WDATA + addr_2lsb, data);
1992 	rtw89_write16(rtwdev, R_AX_DBI_FLAG, write_addr);
1993 	rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_WFLAG >> 16);
1994 
1995 	ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10,
1996 				       10 * RTW89_PCI_WR_RETRY_CNT, false,
1997 				       rtwdev, R_AX_DBI_FLAG + 2);
1998 	if (ret)
1999 		rtw89_err(rtwdev, "failed to write DBI register, addr=0x%X\n",
2000 			  addr);
2001 
2002 	return ret;
2003 }
2004 
2005 static int rtw89_dbi_read8(struct rtw89_dev *rtwdev, u16 addr, u8 *value)
2006 {
2007 	u16 read_addr = addr & B_AX_DBI_ADDR_MSK;
2008 	u8 flag;
2009 	int ret;
2010 
2011 	rtw89_write16(rtwdev, R_AX_DBI_FLAG, read_addr);
2012 	rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_RFLAG >> 16);
2013 
2014 	ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10,
2015 				       10 * RTW89_PCI_WR_RETRY_CNT, false,
2016 				       rtwdev, R_AX_DBI_FLAG + 2);
2017 	if (ret) {
2018 		rtw89_err(rtwdev, "failed to read DBI register, addr=0x%X\n",
2019 			  addr);
2020 		return ret;
2021 	}
2022 
2023 	read_addr = R_AX_DBI_RDATA + (addr & 3);
2024 	*value = rtw89_read8(rtwdev, read_addr);
2025 
2026 	return 0;
2027 }
2028 
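/* Config space accessors that try the host's PCI config accessors first
 * and fall back to the DBI interface on RTL8852A/RTL8852B/RTL8851B.
 */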
2029 static int rtw89_pci_write_config_byte(struct rtw89_dev *rtwdev, u16 addr,
2030 				       u8 data)
2031 {
2032 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2033 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2034 	struct pci_dev *pdev = rtwpci->pdev;
2035 	int ret;
2036 
2037 	ret = pci_write_config_byte(pdev, addr, data);
2038 	if (!ret)
2039 		return 0;
2040 
2041 	if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B)
2042 		ret = rtw89_dbi_write8(rtwdev, addr, data);
2043 
2044 	return ret;
2045 }
2046 
2047 static int rtw89_pci_read_config_byte(struct rtw89_dev *rtwdev, u16 addr,
2048 				      u8 *value)
2049 {
2050 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2051 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2052 	struct pci_dev *pdev = rtwpci->pdev;
2053 	int ret;
2054 
2055 	ret = pci_read_config_byte(pdev, addr, value);
2056 	if (!ret)
2057 		return 0;
2058 
2059 	if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B)
2060 		ret = rtw89_dbi_read8(rtwdev, addr, value);
2061 
2062 	return ret;
2063 }
2064 
2065 static int rtw89_pci_config_byte_set(struct rtw89_dev *rtwdev, u16 addr,
2066 				     u8 bit)
2067 {
2068 	u8 value;
2069 	int ret;
2070 
2071 	ret = rtw89_pci_read_config_byte(rtwdev, addr, &value);
2072 	if (ret)
2073 		return ret;
2074 
2075 	value |= bit;
2076 	ret = rtw89_pci_write_config_byte(rtwdev, addr, value);
2077 
2078 	return ret;
2079 }
2080 
2081 static int rtw89_pci_config_byte_clr(struct rtw89_dev *rtwdev, u16 addr,
2082 				     u8 bit)
2083 {
2084 	u8 value;
2085 	int ret;
2086 
2087 	ret = rtw89_pci_read_config_byte(rtwdev, addr, &value);
2088 	if (ret)
2089 		return ret;
2090 
2091 	value &= ~bit;
2092 	ret = rtw89_pci_write_config_byte(rtwdev, addr, value);
2093 
2094 	return ret;
2095 }
2096 
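/* Sample the reference clock calibration counter: restart it, let it run
 * for 300us, then read the 12-bit result. All-zero or all-ones readings
 * are rejected as invalid.
 */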
2097 static int
2098 __get_target(struct rtw89_dev *rtwdev, u16 *target, enum rtw89_pcie_phy phy_rate)
2099 {
2100 	u16 val, tar;
2101 	int ret;
2102 
2103 	/* Restart the calibration counter: clear then set its enable bit */
2104 	ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val);
2105 	if (ret)
2106 		return ret;
2107 	ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN,
2108 				 phy_rate);
2109 	if (ret)
2110 		return ret;
2111 	ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val | B_AX_CLK_CALIB_EN,
2112 				 phy_rate);
2113 	if (ret)
2114 		return ret;
2115 
2116 	fsleep(300);
2117 
2118 	ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &tar);
2119 	if (ret)
2120 		return ret;
2121 	ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN,
2122 				 phy_rate);
2123 	if (ret)
2124 		return ret;
2125 
2126 	tar = tar & 0x0FFF;
2127 	if (tar == 0 || tar == 0x0FFF) {
2128 		rtw89_err(rtwdev, "[ERR]Get target failed.\n");
2129 		return -EINVAL;
2130 	}
2131 
2132 	*target = tar;
2133 
2134 	return 0;
2135 }
2136 
2137 static int rtw89_pci_autok_x(struct rtw89_dev *rtwdev)
2138 {
2139 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2140 	int ret;
2141 
2142 	if (chip_id != RTL8852B && chip_id != RTL8851B)
2143 		return 0;
2144 
2145 	ret = rtw89_write16_mdio_mask(rtwdev, RAC_REG_FLD_0, BAC_AUTOK_N_MASK,
2146 				      PCIE_AUTOK_4, PCIE_PHY_GEN1);
2147 	return ret;
2148 }
2149 
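/* Auto reference clock calibration for RTL8852B/RTL8851B. With PCIe L1
 * temporarily disabled, make sure calibration is off; when autook_en is
 * set, additionally measure the calibration target, derive a divider and
 * margin, program them into the PHY and re-enable calibration. The
 * original L1 setting is restored on exit.
 */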
2150 static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en)
2151 {
2152 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2153 	enum rtw89_pcie_phy phy_rate;
2154 	u16 val16, mgn_set, div_set, tar;
2155 	u8 val8, bdr_ori;
2156 	bool l1_flag = false;
2157 	int ret = 0;
2158 
2159 	if (chip_id != RTL8852B && chip_id != RTL8851B)
2160 		return 0;
2161 
2162 	ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_PHY_RATE, &val8);
2163 	if (ret) {
2164 		rtw89_err(rtwdev, "[ERR]pci config read %X\n",
2165 			  RTW89_PCIE_PHY_RATE);
2166 		return ret;
2167 	}
2168 
2169 	if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x1) {
2170 		phy_rate = PCIE_PHY_GEN1;
2171 	} else if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x2) {
2172 		phy_rate = PCIE_PHY_GEN2;
2173 	} else {
2174 		rtw89_err(rtwdev, "[ERR]PCIe PHY rate %#x not supported\n", val8);
2175 		return -EOPNOTSUPP;
2176 	}
2177 	/* Disable L1BD */
2178 	ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, &bdr_ori);
2179 	if (ret) {
2180 		rtw89_err(rtwdev, "[ERR]pci config read %X\n", RTW89_PCIE_L1_CTRL);
2181 		return ret;
2182 	}
2183 
2184 	if (bdr_ori & RTW89_PCIE_BIT_L1) {
2185 		ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL,
2186 						  bdr_ori & ~RTW89_PCIE_BIT_L1);
2187 		if (ret) {
2188 			rtw89_err(rtwdev, "[ERR]pci config write %X\n",
2189 				  RTW89_PCIE_L1_CTRL);
2190 			return ret;
2191 		}
2192 		l1_flag = true;
2193 	}
2194 
2195 	ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16);
2196 	if (ret) {
2197 		rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1);
2198 		goto end;
2199 	}
2200 
2201 	if (val16 & B_AX_CALIB_EN) {
2202 		ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1,
2203 					 val16 & ~B_AX_CALIB_EN, phy_rate);
2204 		if (ret) {
2205 			rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
2206 			goto end;
2207 		}
2208 	}
2209 
2210 	if (!autook_en)
2211 		goto end;
2212 	/* Set div */
2213 	ret = rtw89_write16_mdio_clr(rtwdev, RAC_CTRL_PPR_V1, B_AX_DIV, phy_rate);
2214 	if (ret) {
2215 		rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
2216 		goto end;
2217 	}
2218 
2219 	/* Obtain div and margin */
2220 	ret = __get_target(rtwdev, &tar, phy_rate);
2221 	if (ret) {
2222 		rtw89_err(rtwdev, "[ERR]1st get target fail %d\n", ret);
2223 		goto end;
2224 	}
2225 
2226 	mgn_set = tar * INTF_INTGRA_HOSTREF_V1 / INTF_INTGRA_MINREF_V1 - tar;
2227 
2228 	if (mgn_set >= 128) {
2229 		div_set = 0x0003;
2230 		mgn_set = 0x000F;
2231 	} else if (mgn_set >= 64) {
2232 		div_set = 0x0003;
2233 		mgn_set >>= 3;
2234 	} else if (mgn_set >= 32) {
2235 		div_set = 0x0002;
2236 		mgn_set >>= 2;
2237 	} else if (mgn_set >= 16) {
2238 		div_set = 0x0001;
2239 		mgn_set >>= 1;
2240 	} else if (mgn_set == 0) {
2241 		rtw89_err(rtwdev, "[ERR]cal mgn is 0, tar = %d\n", tar);
2242 		goto end;
2243 	} else {
2244 		div_set = 0x0000;
2245 	}
2246 
2247 	ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16);
2248 	if (ret) {
2249 		rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1);
2250 		goto end;
2251 	}
2252 
2253 	val16 |= u16_encode_bits(div_set, B_AX_DIV);
2254 
2255 	ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val16, phy_rate);
2256 	if (ret) {
2257 		rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
2258 		goto end;
2259 	}
2260 
2261 	ret = __get_target(rtwdev, &tar, phy_rate);
2262 	if (ret) {
2263 		rtw89_err(rtwdev, "[ERR]2nd get target fail %d\n", ret);
2264 		goto end;
2265 	}
2266 
2267 	rtw89_debug(rtwdev, RTW89_DBG_HCI, "[TRACE]target = 0x%X, div = 0x%X, margin = 0x%X\n",
2268 		    tar, div_set, mgn_set);
2269 	ret = rtw89_write16_mdio(rtwdev, RAC_SET_PPR_V1,
2270 				 (tar & 0x0FFF) | (mgn_set << 12), phy_rate);
2271 	if (ret) {
2272 		rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_SET_PPR_V1);
2273 		goto end;
2274 	}
2275 
2276 	/* Enable the auto calibration function */
2277 	ret = rtw89_write16_mdio_set(rtwdev, RAC_CTRL_PPR_V1, B_AX_CALIB_EN, phy_rate);
2278 	if (ret) {
2279 		rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
2280 		goto end;
2281 	}
2282 
2283 	/* CLK delay = 0 */
2284 	ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL,
2285 					  PCIE_CLKDLY_HW_0);
2286 
2287 end:
2288 	/* Restore L1BD to its original value */
2289 	if (l1_flag) {
2290 		ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL,
2291 						  bdr_ori);
2292 		if (ret) {
2293 			rtw89_err(rtwdev, "[ERR]pci config write %X\n",
2294 				  RTW89_PCIE_L1_CTRL);
2295 			return ret;
2296 		}
2297 	}
2298 
2299 	return ret;
2300 }
2301 
2302 static int rtw89_pci_deglitch_setting(struct rtw89_dev *rtwdev)
2303 {
2304 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2305 	int ret;
2306 
2307 	if (chip_id == RTL8852A) {
2308 		ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH,
2309 					     PCIE_PHY_GEN1);
2310 		if (ret)
2311 			return ret;
2312 		ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH,
2313 					     PCIE_PHY_GEN2);
2314 		if (ret)
2315 			return ret;
2316 	} else if (chip_id == RTL8852C) {
2317 		rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA24 * 2,
2318 				  B_AX_DEGLITCH);
2319 		rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA24 * 2,
2320 				  B_AX_DEGLITCH);
2321 	}
2322 
2323 	return 0;
2324 }
2325 
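/* RTL8852C only: if OOBS is not yet selected on both PHY generations,
 * measure the OOBS level on the PHY of the current link speed and program
 * the derived sensitivity into both the GEN1 and GEN2 PHYs. ASPM control
 * is masked while the sequence runs and restored afterwards.
 */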
2326 static void rtw89_pci_disable_eq(struct rtw89_dev *rtwdev)
2327 {
2328 	u16 g1_oobs, g2_oobs;
2329 	u32 backup_aspm;
2330 	u32 phy_offset;
2331 	u16 oobs_val;
2332 	u16 val16;
2333 	int ret;
2334 
2335 	if (rtwdev->chip->chip_id != RTL8852C)
2336 		return;
2337 
2338 	backup_aspm = rtw89_read32(rtwdev, R_AX_PCIE_MIX_CFG_V1);
2339 	rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK);
2340 
2341 	g1_oobs = rtw89_read16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G1 +
2342 					    RAC_ANA09 * RAC_MULT, BAC_OOBS_SEL);
2343 	g2_oobs = rtw89_read16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G2 +
2344 					    RAC_ANA09 * RAC_MULT, BAC_OOBS_SEL);
2345 	if (g1_oobs && g2_oobs)
2346 		goto out;
2347 
2348 	ret = rtw89_pci_get_phy_offset_by_link_speed(rtwdev, &phy_offset);
2349 	if (ret)
2350 		goto out;
2351 
2352 	rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0D * RAC_MULT, BAC_RX_TEST_EN);
2353 	rtw89_write16(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT, ADDR_SEL_PINOUT_DIS_VAL);
2354 	rtw89_write16_set(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT, B_PCIE_BIT_RD_SEL);
2355 
2356 	val16 = rtw89_read16_mask(rtwdev, phy_offset + RAC_ANA1F * RAC_MULT,
2357 				  OOBS_LEVEL_MASK);
2358 	oobs_val = u16_encode_bits(val16, OOBS_SEN_MASK);
2359 
2360 	rtw89_write16(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA03 * RAC_MULT, oobs_val);
2361 	rtw89_write16_set(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA09 * RAC_MULT,
2362 			  BAC_OOBS_SEL);
2363 
2364 	rtw89_write16(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA03 * RAC_MULT, oobs_val);
2365 	rtw89_write16_set(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA09 * RAC_MULT,
2366 			  BAC_OOBS_SEL);
2367 
2368 out:
2369 	rtw89_write32(rtwdev, R_AX_PCIE_MIX_CFG_V1, backup_aspm);
2370 }
2371 
2372 static void rtw89_pci_ber(struct rtw89_dev *rtwdev)
2373 {
2374 	u32 phy_offset;
2375 
2376 	if (!test_bit(RTW89_QUIRK_PCI_BER, rtwdev->quirks))
2377 		return;
2378 
2379 	phy_offset = R_RAC_DIRECT_OFFSET_G1;
2380 	rtw89_write16(rtwdev, phy_offset + RAC_ANA1E * RAC_MULT, RAC_ANA1E_G1_VAL);
2381 	rtw89_write16(rtwdev, phy_offset + RAC_ANA2E * RAC_MULT, RAC_ANA2E_VAL);
2382 
2383 	phy_offset = R_RAC_DIRECT_OFFSET_G2;
2384 	rtw89_write16(rtwdev, phy_offset + RAC_ANA1E * RAC_MULT, RAC_ANA1E_G2_VAL);
2385 	rtw89_write16(rtwdev, phy_offset + RAC_ANA2E * RAC_MULT, RAC_ANA2E_VAL);
2386 }
2387 
2388 static void rtw89_pci_rxdma_prefth(struct rtw89_dev *rtwdev)
2389 {
2390 	if (rtwdev->chip->chip_id != RTL8852A)
2391 		return;
2392 
2393 	rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_DIS_RXDMA_PRE);
2394 }
2395 
2396 static void rtw89_pci_l1off_pwroff(struct rtw89_dev *rtwdev)
2397 {
2398 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2399 
2400 	if (chip_id != RTL8852A && chip_id != RTL8852B && chip_id != RTL8851B)
2401 		return;
2402 
2403 	rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL, B_AX_L1OFF_PWR_OFF_EN);
2404 }
2405 
2406 static int rtw89_pci_l2_rxen_lat(struct rtw89_dev *rtwdev)
2407 {
2408 	int ret;
2409 
2410 	if (rtwdev->chip->chip_id != RTL8852A)
2411 		return 0;
2412 
2413 	ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN,
2414 				     PCIE_PHY_GEN1);
2415 	if (ret)
2416 		return ret;
2417 
2418 	ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN,
2419 				     PCIE_PHY_GEN2);
2420 	if (ret)
2421 		return ret;
2422 
2423 	return 0;
2424 }
2425 
2426 static void rtw89_pci_aphy_pwrcut(struct rtw89_dev *rtwdev)
2427 {
2428 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2429 
2430 	if (chip_id != RTL8852A && chip_id != RTL8852B && chip_id != RTL8851B)
2431 		return;
2432 
2433 	rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_PSUS_OFF_CAPC_EN);
2434 }
2435 
2436 static void rtw89_pci_hci_ldo(struct rtw89_dev *rtwdev)
2437 {
2438 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2439 
2440 	if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) {
2441 		rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL,
2442 				  B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
2443 		rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
2444 				  B_AX_PCIE_DIS_WLSUS_AFT_PDN);
2445 	} else if (rtwdev->chip->chip_id == RTL8852C) {
2446 		rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
2447 				  B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
2448 	}
2449 }
2450 
2451 static int rtw89_pci_dphy_delay(struct rtw89_dev *rtwdev)
2452 {
2453 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2454 
2455 	if (chip_id != RTL8852B && chip_id != RTL8851B)
2456 		return 0;
2457 
2458 	return rtw89_write16_mdio_mask(rtwdev, RAC_REG_REV2, BAC_CMU_EN_DLY_MASK,
2459 				       PCIE_DPHY_DLY_25US, PCIE_PHY_GEN1);
2460 }
2461 
2462 static void rtw89_pci_power_wake(struct rtw89_dev *rtwdev, bool pwr_up)
2463 {
2464 	if (pwr_up)
2465 		rtw89_write32_set(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL);
2466 	else
2467 		rtw89_write32_clr(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL);
2468 }
2469 
2470 static void rtw89_pci_autoload_hang(struct rtw89_dev *rtwdev)
2471 {
2472 	if (rtwdev->chip->chip_id != RTL8852C)
2473 		return;
2474 
2475 	rtw89_write32_set(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3);
2476 	rtw89_write32_clr(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3);
2477 }
2478 
2479 static void rtw89_pci_l12_vmain(struct rtw89_dev *rtwdev)
2480 {
2481 	if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV))
2482 		return;
2483 
2484 	rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_FORCE_PWR_NGAT);
2485 }
2486 
2487 static void rtw89_pci_gen2_force_ib(struct rtw89_dev *rtwdev)
2488 {
2489 	if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV))
2490 		return;
2491 
2492 	rtw89_write32_set(rtwdev, R_AX_PMC_DBG_CTRL2,
2493 			  B_AX_SYSON_DIS_PMCR_AX_WRMSK);
2494 	rtw89_write32_set(rtwdev, R_AX_HCI_BG_CTRL, B_AX_BG_CLR_ASYNC_M3);
2495 	rtw89_write32_clr(rtwdev, R_AX_PMC_DBG_CTRL2,
2496 			  B_AX_SYSON_DIS_PMCR_AX_WRMSK);
2497 }
2498 
2499 static void rtw89_pci_l1_ent_lat(struct rtw89_dev *rtwdev)
2500 {
2501 	if (rtwdev->chip->chip_id != RTL8852C)
2502 		return;
2503 
2504 	rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_SEL_REQ_ENTR_L1);
2505 }
2506 
2507 static void rtw89_pci_wd_exit_l1(struct rtw89_dev *rtwdev)
2508 {
2509 	if (rtwdev->chip->chip_id != RTL8852C)
2510 		return;
2511 
2512 	rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_DMAC0_EXIT_L1_EN);
2513 }
2514 
2515 static void rtw89_pci_set_sic(struct rtw89_dev *rtwdev)
2516 {
2517 	if (rtwdev->chip->chip_id == RTL8852C)
2518 		return;
2519 
2520 	rtw89_write32_clr(rtwdev, R_AX_PCIE_EXP_CTRL,
2521 			  B_AX_SIC_EN_FORCE_CLKREQ);
2522 }
2523 
2524 static void rtw89_pci_set_lbc(struct rtw89_dev *rtwdev)
2525 {
2526 	const struct rtw89_pci_info *info = rtwdev->pci_info;
2527 	u32 lbc;
2528 
2529 	if (rtwdev->chip->chip_id == RTL8852C)
2530 		return;
2531 
2532 	lbc = rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG);
2533 	if (info->lbc_en == MAC_AX_PCIE_ENABLE) {
2534 		lbc = u32_replace_bits(lbc, info->lbc_tmr, B_AX_LBC_TIMER);
2535 		lbc |= B_AX_LBC_FLAG | B_AX_LBC_EN;
2536 		rtw89_write32(rtwdev, R_AX_LBC_WATCHDOG, lbc);
2537 	} else {
2538 		lbc &= ~B_AX_LBC_EN;
2539 	}
2540 	rtw89_write32_set(rtwdev, R_AX_LBC_WATCHDOG, lbc);
2541 }
2542 
2543 static void rtw89_pci_set_io_rcy(struct rtw89_dev *rtwdev)
2544 {
2545 	const struct rtw89_pci_info *info = rtwdev->pci_info;
2546 	u32 val32;
2547 
2548 	if (rtwdev->chip->chip_id != RTL8852C)
2549 		return;
2550 
2551 	if (info->io_rcy_en == MAC_AX_PCIE_ENABLE) {
2552 		val32 = FIELD_PREP(B_AX_PCIE_WDT_TIMER_M1_MASK,
2553 				   info->io_rcy_tmr);
2554 		rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M1, val32);
2555 		rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M2, val32);
2556 		rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_E0, val32);
2557 
2558 		rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1);
2559 		rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2);
2560 		rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0);
2561 	} else {
2562 		rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1);
2563 		rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2);
2564 		rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0);
2565 	}
2566 
2567 	rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_S1, B_AX_PCIE_IO_RCY_WDT_MODE_S1);
2568 }
2569 
2570 static void rtw89_pci_set_dbg(struct rtw89_dev *rtwdev)
2571 {
2572 	if (rtwdev->chip->chip_id == RTL8852C)
2573 		return;
2574 
2575 	rtw89_write32_set(rtwdev, R_AX_PCIE_DBG_CTRL,
2576 			  B_AX_ASFF_FULL_NO_STK | B_AX_EN_STUCK_DBG);
2577 
2578 	if (rtwdev->chip->chip_id == RTL8852A)
2579 		rtw89_write32_set(rtwdev, R_AX_PCIE_EXP_CTRL,
2580 				  B_AX_EN_CHKDSC_NO_RX_STUCK);
2581 }
2582 
2583 static void rtw89_pci_set_keep_reg(struct rtw89_dev *rtwdev)
2584 {
2585 	if (rtwdev->chip->chip_id == RTL8852C)
2586 		return;
2587 
2588 	rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
2589 			  B_AX_PCIE_TXRST_KEEP_REG | B_AX_PCIE_RXRST_KEEP_REG);
2590 }
2591 
2592 static void rtw89_pci_clr_idx_all_ax(struct rtw89_dev *rtwdev)
2593 {
2594 	const struct rtw89_pci_info *info = rtwdev->pci_info;
2595 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2596 	u32 val = B_AX_CLR_ACH0_IDX | B_AX_CLR_ACH1_IDX | B_AX_CLR_ACH2_IDX |
2597 		  B_AX_CLR_ACH3_IDX | B_AX_CLR_CH8_IDX | B_AX_CLR_CH9_IDX |
2598 		  B_AX_CLR_CH12_IDX;
2599 	u32 rxbd_rwptr_clr = info->rxbd_rwptr_clr_reg;
2600 	u32 txbd_rwptr_clr2 = info->txbd_rwptr_clr2_reg;
2601 
2602 	if (chip_id == RTL8852A || chip_id == RTL8852C)
2603 		val |= B_AX_CLR_ACH4_IDX | B_AX_CLR_ACH5_IDX |
2604 		       B_AX_CLR_ACH6_IDX | B_AX_CLR_ACH7_IDX;
2605 	/* clear DMA indexes */
2606 	rtw89_write32_set(rtwdev, R_AX_TXBD_RWPTR_CLR1, val);
2607 	if (chip_id == RTL8852A || chip_id == RTL8852C)
2608 		rtw89_write32_set(rtwdev, txbd_rwptr_clr2,
2609 				  B_AX_CLR_CH10_IDX | B_AX_CLR_CH11_IDX);
2610 	rtw89_write32_set(rtwdev, rxbd_rwptr_clr,
2611 			  B_AX_CLR_RXQ_IDX | B_AX_CLR_RPQ_IDX);
2612 }
2613 
2614 static int rtw89_pci_poll_txdma_ch_idle_ax(struct rtw89_dev *rtwdev)
2615 {
2616 	const struct rtw89_pci_info *info = rtwdev->pci_info;
2617 	u32 ret, check, dma_busy;
2618 	u32 dma_busy1 = info->dma_busy1.addr;
2619 	u32 dma_busy2 = info->dma_busy2_reg;
2620 
2621 	check = info->dma_busy1.mask;
2622 
2623 	ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0,
2624 				10, 100, false, rtwdev, dma_busy1);
2625 	if (ret)
2626 		return ret;
2627 
2628 	if (!dma_busy2)
2629 		return 0;
2630 
2631 	check = B_AX_CH10_BUSY | B_AX_CH11_BUSY;
2632 
2633 	ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0,
2634 				10, 100, false, rtwdev, dma_busy2);
2635 	if (ret)
2636 		return ret;
2637 
2638 	return 0;
2639 }
2640 
2641 static int rtw89_pci_poll_rxdma_ch_idle_ax(struct rtw89_dev *rtwdev)
2642 {
2643 	const struct rtw89_pci_info *info = rtwdev->pci_info;
2644 	u32 ret, check, dma_busy;
2645 	u32 dma_busy3 = info->dma_busy3_reg;
2646 
2647 	check = B_AX_RXQ_BUSY | B_AX_RPQ_BUSY;
2648 
2649 	ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0,
2650 				10, 100, false, rtwdev, dma_busy3);
2651 	if (ret)
2652 		return ret;
2653 
2654 	return 0;
2655 }
2656 
2657 static int rtw89_pci_poll_dma_all_idle(struct rtw89_dev *rtwdev)
2658 {
2659 	u32 ret;
2660 
2661 	ret = rtw89_pci_poll_txdma_ch_idle_ax(rtwdev);
2662 	if (ret) {
2663 		rtw89_err(rtwdev, "txdma ch busy\n");
2664 		return ret;
2665 	}
2666 
2667 	ret = rtw89_pci_poll_rxdma_ch_idle_ax(rtwdev);
2668 	if (ret) {
2669 		rtw89_err(rtwdev, "rxdma ch busy\n");
2670 		return ret;
2671 	}
2672 
2673 	return 0;
2674 }
2675 
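/* Apply the DMA operating modes from pci_info: TX/RX BD truncation, RXBD
 * packet vs. separate mode, DMA burst sizes, tag mode, multi-tag count and
 * WD DMA intervals, using the per-chip register layout.
 */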
2676 static int rtw89_pci_mode_op(struct rtw89_dev *rtwdev)
2677 {
2678 	const struct rtw89_pci_info *info = rtwdev->pci_info;
2679 	enum mac_ax_bd_trunc_mode txbd_trunc_mode = info->txbd_trunc_mode;
2680 	enum mac_ax_bd_trunc_mode rxbd_trunc_mode = info->rxbd_trunc_mode;
2681 	enum mac_ax_rxbd_mode rxbd_mode = info->rxbd_mode;
2682 	enum mac_ax_tag_mode tag_mode = info->tag_mode;
2683 	enum mac_ax_wd_dma_intvl wd_dma_idle_intvl = info->wd_dma_idle_intvl;
2684 	enum mac_ax_wd_dma_intvl wd_dma_act_intvl = info->wd_dma_act_intvl;
2685 	enum mac_ax_tx_burst tx_burst = info->tx_burst;
2686 	enum mac_ax_rx_burst rx_burst = info->rx_burst;
2687 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2688 	u8 cv = rtwdev->hal.cv;
2689 	u32 val32;
2690 
2691 	if (txbd_trunc_mode == MAC_AX_BD_TRUNC) {
2692 		if (chip_id == RTL8852A && cv == CHIP_CBV)
2693 			rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE);
2694 	} else if (txbd_trunc_mode == MAC_AX_BD_NORM) {
2695 		if (chip_id == RTL8852A || chip_id == RTL8852B)
2696 			rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE);
2697 	}
2698 
2699 	if (rxbd_trunc_mode == MAC_AX_BD_TRUNC) {
2700 		if (chip_id == RTL8852A && cv == CHIP_CBV)
2701 			rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE);
2702 	} else if (rxbd_trunc_mode == MAC_AX_BD_NORM) {
2703 		if (chip_id == RTL8852A || chip_id == RTL8852B)
2704 			rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE);
2705 	}
2706 
2707 	if (rxbd_mode == MAC_AX_RXBD_PKT) {
2708 		rtw89_write32_clr(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit);
2709 	} else if (rxbd_mode == MAC_AX_RXBD_SEP) {
2710 		rtw89_write32_set(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit);
2711 
2712 		if (chip_id == RTL8852A || chip_id == RTL8852B)
2713 			rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2,
2714 					   B_AX_PCIE_RX_APPLEN_MASK, 0);
2715 	}
2716 
2717 	if (chip_id == RTL8852A || chip_id == RTL8852B) {
2718 		rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_TXDMA_MASK, tx_burst);
2719 		rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_RXDMA_MASK, rx_burst);
2720 	} else if (chip_id == RTL8852C) {
2721 		rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_TXDMA_MASK, tx_burst);
2722 		rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_RXDMA_MASK, rx_burst);
2723 	}
2724 
2725 	if (chip_id == RTL8852A || chip_id == RTL8852B) {
2726 		if (tag_mode == MAC_AX_TAG_SGL) {
2727 			val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) &
2728 					    ~B_AX_LATENCY_CONTROL;
2729 			rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32);
2730 		} else if (tag_mode == MAC_AX_TAG_MULTI) {
2731 			val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) |
2732 					    B_AX_LATENCY_CONTROL;
2733 			rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32);
2734 		}
2735 	}
2736 
2737 	rtw89_write32_mask(rtwdev, info->exp_ctrl_reg, info->max_tag_num_mask,
2738 			   info->multi_tag_num);
2739 
2740 	if (chip_id == RTL8852A || chip_id == RTL8852B) {
2741 		rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_IDLE,
2742 				   wd_dma_idle_intvl);
2743 		rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_ACT,
2744 				   wd_dma_act_intvl);
2745 	} else if (chip_id == RTL8852C) {
2746 		rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_IDLE_V1_MASK,
2747 				   wd_dma_idle_intvl);
2748 		rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_ACT_V1_MASK,
2749 				   wd_dma_act_intvl);
2750 	}
2751 
2752 	if (txbd_trunc_mode == MAC_AX_BD_TRUNC) {
2753 		rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING,
2754 				  B_AX_HOST_ADDR_INFO_8B_SEL);
2755 		rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH);
2756 	} else if (txbd_trunc_mode == MAC_AX_BD_NORM) {
2757 		rtw89_write32_clr(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING,
2758 				  B_AX_HOST_ADDR_INFO_8B_SEL);
2759 		rtw89_write32_set(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH);
2760 	}
2761 
2762 	return 0;
2763 }
2764 
2765 static int rtw89_pci_ops_deinit(struct rtw89_dev *rtwdev)
2766 {
2767 	const struct rtw89_pci_info *info = rtwdev->pci_info;
2768 
2769 	if (rtwdev->chip->chip_id == RTL8852A) {
2770 		/* ltr sw trigger */
2771 		rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_IDLE);
2772 	}
2773 	info->ltr_set(rtwdev, false);
2774 	rtw89_pci_ctrl_dma_all(rtwdev, false);
2775 	rtw89_pci_clr_idx_all(rtwdev);
2776 
2777 	return 0;
2778 }
2779 
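/* HCI pre-init: apply the PCIe PHY/ASPM workarounds and calibrations,
 * stop all DMA, reset the BD rings and indexes, then restart DMA with only
 * the FW CMD TX channel enabled so firmware can be downloaded.
 */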
2780 static int rtw89_pci_ops_mac_pre_init_ax(struct rtw89_dev *rtwdev)
2781 {
2782 	const struct rtw89_pci_info *info = rtwdev->pci_info;
2783 	int ret;
2784 
2785 	rtw89_pci_disable_eq(rtwdev);
2786 	rtw89_pci_ber(rtwdev);
2787 	rtw89_pci_rxdma_prefth(rtwdev);
2788 	rtw89_pci_l1off_pwroff(rtwdev);
2789 	rtw89_pci_deglitch_setting(rtwdev);
2790 	ret = rtw89_pci_l2_rxen_lat(rtwdev);
2791 	if (ret) {
2792 		rtw89_err(rtwdev, "[ERR] pcie l2 rxen lat %d\n", ret);
2793 		return ret;
2794 	}
2795 
2796 	rtw89_pci_aphy_pwrcut(rtwdev);
2797 	rtw89_pci_hci_ldo(rtwdev);
2798 	rtw89_pci_dphy_delay(rtwdev);
2799 
2800 	ret = rtw89_pci_autok_x(rtwdev);
2801 	if (ret) {
2802 		rtw89_err(rtwdev, "[ERR] pcie autok_x fail %d\n", ret);
2803 		return ret;
2804 	}
2805 
2806 	ret = rtw89_pci_auto_refclk_cal(rtwdev, false);
2807 	if (ret) {
2808 		rtw89_err(rtwdev, "[ERR] pcie autok fail %d\n", ret);
2809 		return ret;
2810 	}
2811 
2812 	rtw89_pci_power_wake(rtwdev, true);
2813 	rtw89_pci_autoload_hang(rtwdev);
2814 	rtw89_pci_l12_vmain(rtwdev);
2815 	rtw89_pci_gen2_force_ib(rtwdev);
2816 	rtw89_pci_l1_ent_lat(rtwdev);
2817 	rtw89_pci_wd_exit_l1(rtwdev);
2818 	rtw89_pci_set_sic(rtwdev);
2819 	rtw89_pci_set_lbc(rtwdev);
2820 	rtw89_pci_set_io_rcy(rtwdev);
2821 	rtw89_pci_set_dbg(rtwdev);
2822 	rtw89_pci_set_keep_reg(rtwdev);
2823 
2824 	rtw89_write32_set(rtwdev, info->dma_stop1.addr, B_AX_STOP_WPDMA);
2825 
2826 	/* stop DMA activities */
2827 	rtw89_pci_ctrl_dma_all(rtwdev, false);
2828 
2829 	ret = rtw89_pci_poll_dma_all_idle(rtwdev);
2830 	if (ret) {
2831 		rtw89_err(rtwdev, "[ERR] poll pcie dma all idle\n");
2832 		return ret;
2833 	}
2834 
2835 	rtw89_pci_clr_idx_all(rtwdev);
2836 	rtw89_pci_mode_op(rtwdev);
2837 
2838 	/* fill TRX BD indexes */
2839 	rtw89_pci_ops_reset(rtwdev);
2840 
2841 	ret = rtw89_pci_rst_bdram_ax(rtwdev);
2842 	if (ret) {
2843 		rtw89_warn(rtwdev, "reset bdram busy\n");
2844 		return ret;
2845 	}
2846 
2847 	/* disable all TX channels except the FW CMD channel used to download firmware */
2848 	rtw89_pci_ctrl_txdma_ch_ax(rtwdev, false);
2849 	rtw89_pci_ctrl_txdma_fw_ch_ax(rtwdev, true);
2850 
2851 	/* start DMA activities */
2852 	rtw89_pci_ctrl_dma_all(rtwdev, true);
2853 
2854 	return 0;
2855 }
2856 
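/* Configure PCIe LTR reporting: verify the LTR registers read back sane
 * values, then enable HW-controlled LTR with a 500us space index, a 3.2ms
 * idle timer, RX thresholds of 0x28 and the idle/active latency values.
 */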
2857 int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev, bool en)
2858 {
2859 	u32 val;
2860 
2861 	if (!en)
2862 		return 0;
2863 
2864 	val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0);
2865 	if (rtw89_pci_ltr_is_err_reg_val(val))
2866 		return -EINVAL;
2867 	val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1);
2868 	if (rtw89_pci_ltr_is_err_reg_val(val))
2869 		return -EINVAL;
2870 	val = rtw89_read32(rtwdev, R_AX_LTR_IDLE_LATENCY);
2871 	if (rtw89_pci_ltr_is_err_reg_val(val))
2872 		return -EINVAL;
2873 	val = rtw89_read32(rtwdev, R_AX_LTR_ACTIVE_LATENCY);
2874 	if (rtw89_pci_ltr_is_err_reg_val(val))
2875 		return -EINVAL;
2876 
2877 	rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_HW_EN | B_AX_LTR_EN |
2878 						   B_AX_LTR_WD_NOEMP_CHK);
2879 	rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_SPACE_IDX_MASK,
2880 			   PCI_LTR_SPC_500US);
2881 	rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK,
2882 			   PCI_LTR_IDLE_TIMER_3_2MS);
2883 	rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28);
2884 	rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28);
2885 	rtw89_write32(rtwdev, R_AX_LTR_IDLE_LATENCY, 0x90039003);
2886 	rtw89_write32(rtwdev, R_AX_LTR_ACTIVE_LATENCY, 0x880b880b);
2887 
2888 	return 0;
2889 }
2890 EXPORT_SYMBOL(rtw89_pci_ltr_set);
2891 
2892 int rtw89_pci_ltr_set_v1(struct rtw89_dev *rtwdev, bool en)
2893 {
2894 	u32 dec_ctrl;
2895 	u32 val32;
2896 
2897 	val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0);
2898 	if (rtw89_pci_ltr_is_err_reg_val(val32))
2899 		return -EINVAL;
2900 	val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1);
2901 	if (rtw89_pci_ltr_is_err_reg_val(val32))
2902 		return -EINVAL;
2903 	dec_ctrl = rtw89_read32(rtwdev, R_AX_LTR_DEC_CTRL);
2904 	if (rtw89_pci_ltr_is_err_reg_val(dec_ctrl))
2905 		return -EINVAL;
2906 	val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX3);
2907 	if (rtw89_pci_ltr_is_err_reg_val(val32))
2908 		return -EINVAL;
2909 	val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX0);
2910 	if (rtw89_pci_ltr_is_err_reg_val(val32))
2911 		return -EINVAL;
2912 
2913 	if (!en) {
2914 		dec_ctrl &= ~(LTR_EN_BITS | B_AX_LTR_IDX_DRV_MASK | B_AX_LTR_HW_DEC_EN);
2915 		dec_ctrl |= FIELD_PREP(B_AX_LTR_IDX_DRV_MASK, PCIE_LTR_IDX_IDLE) |
2916 			    B_AX_LTR_REQ_DRV;
2917 	} else {
2918 		dec_ctrl |= B_AX_LTR_HW_DEC_EN;
2919 	}
2920 
2921 	dec_ctrl &= ~B_AX_LTR_SPACE_IDX_V1_MASK;
2922 	dec_ctrl |= FIELD_PREP(B_AX_LTR_SPACE_IDX_V1_MASK, PCI_LTR_SPC_500US);
2923 
2924 	if (en)
2925 		rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0,
2926 				  B_AX_LTR_WD_NOEMP_CHK_V1 | B_AX_LTR_HW_EN);
2927 	rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK,
2928 			   PCI_LTR_IDLE_TIMER_3_2MS);
2929 	rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28);
2930 	rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28);
2931 	rtw89_write32(rtwdev, R_AX_LTR_DEC_CTRL, dec_ctrl);
2932 	rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX3, 0x90039003);
2933 	rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX0, 0x880b880b);
2934 
2935 	return 0;
2936 }
2937 EXPORT_SYMBOL(rtw89_pci_ltr_set_v1);
2938 
2939 static int rtw89_pci_ops_mac_post_init_ax(struct rtw89_dev *rtwdev)
2940 {
2941 	const struct rtw89_pci_info *info = rtwdev->pci_info;
2942 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2943 	int ret;
2944 
2945 	ret = info->ltr_set(rtwdev, true);
2946 	if (ret) {
2947 		rtw89_err(rtwdev, "pci ltr set fail\n");
2948 		return ret;
2949 	}
2950 	if (chip_id == RTL8852A) {
2951 		/* ltr sw trigger */
2952 		rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_ACT);
2953 	}
2954 	if (chip_id == RTL8852A || chip_id == RTL8852B) {
2955 		/* ADDR info 8-byte mode */
2956 		rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING,
2957 				  B_AX_HOST_ADDR_INFO_8B_SEL);
2958 		rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH);
2959 	}
2960 
2961 	/* enable DMA for all queues */
2962 	rtw89_pci_ctrl_txdma_ch_ax(rtwdev, true);
2963 
2964 	/* Release PCI IO */
2965 	rtw89_write32_clr(rtwdev, info->dma_stop1.addr,
2966 			  B_AX_STOP_WPDMA | B_AX_STOP_PCIEIO);
2967 
2968 	return 0;
2969 }
2970 
2971 static int rtw89_pci_claim_device(struct rtw89_dev *rtwdev,
2972 				  struct pci_dev *pdev)
2973 {
2974 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2975 	int ret;
2976 
2977 	ret = pci_enable_device(pdev);
2978 	if (ret) {
2979 		rtw89_err(rtwdev, "failed to enable pci device\n");
2980 		return ret;
2981 	}
2982 
2983 	pci_set_master(pdev);
2984 	pci_set_drvdata(pdev, rtwdev->hw);
2985 
2986 	rtwpci->pdev = pdev;
2987 
2988 	return 0;
2989 }
2990 
2991 static void rtw89_pci_declaim_device(struct rtw89_dev *rtwdev,
2992 				     struct pci_dev *pdev)
2993 {
2994 	pci_disable_device(pdev);
2995 }
2996 
2997 static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev,
2998 				   struct pci_dev *pdev)
2999 {
3000 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3001 	unsigned long resource_len;
3002 	u8 bar_id = 2;
3003 	int ret;
3004 
3005 	ret = pci_request_regions(pdev, KBUILD_MODNAME);
3006 	if (ret) {
3007 		rtw89_err(rtwdev, "failed to request pci regions\n");
3008 		goto err;
3009 	}
3010 
3011 	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3012 	if (ret) {
3013 		rtw89_err(rtwdev, "failed to set dma mask to 32-bit\n");
3014 		goto err_release_regions;
3015 	}
3016 
3017 	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
3018 	if (ret) {
3019 		rtw89_err(rtwdev, "failed to set consistent dma mask to 32-bit\n");
3020 		goto err_release_regions;
3021 	}
3022 
3023 	resource_len = pci_resource_len(pdev, bar_id);
3024 	rtwpci->mmap = pci_iomap(pdev, bar_id, resource_len);
3025 	if (!rtwpci->mmap) {
3026 		rtw89_err(rtwdev, "failed to map pci io\n");
3027 		ret = -EIO;
3028 		goto err_release_regions;
3029 	}
3030 
3031 	return 0;
3032 
3033 err_release_regions:
3034 	pci_release_regions(pdev);
3035 err:
3036 	return ret;
3037 }
3038 
3039 static void rtw89_pci_clear_mapping(struct rtw89_dev *rtwdev,
3040 				    struct pci_dev *pdev)
3041 {
3042 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3043 
3044 	if (rtwpci->mmap) {
3045 		pci_iounmap(pdev, rtwpci->mmap);
3046 		pci_release_regions(pdev);
3047 	}
3048 }
3049 
3050 static void rtw89_pci_free_tx_wd_ring(struct rtw89_dev *rtwdev,
3051 				      struct pci_dev *pdev,
3052 				      struct rtw89_pci_tx_ring *tx_ring)
3053 {
3054 	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
3055 	u8 *head = wd_ring->head;
3056 	dma_addr_t dma = wd_ring->dma;
3057 	u32 page_size = wd_ring->page_size;
3058 	u32 page_num = wd_ring->page_num;
3059 	u32 ring_sz = page_size * page_num;
3060 
3061 	dma_free_coherent(&pdev->dev, ring_sz, head, dma);
3062 	wd_ring->head = NULL;
3063 }
3064 
3065 static void rtw89_pci_free_tx_ring(struct rtw89_dev *rtwdev,
3066 				   struct pci_dev *pdev,
3067 				   struct rtw89_pci_tx_ring *tx_ring)
3068 {
3069 	int ring_sz;
3070 	u8 *head;
3071 	dma_addr_t dma;
3072 
3073 	head = tx_ring->bd_ring.head;
3074 	dma = tx_ring->bd_ring.dma;
3075 	ring_sz = tx_ring->bd_ring.desc_size * tx_ring->bd_ring.len;
3076 	dma_free_coherent(&pdev->dev, ring_sz, head, dma);
3077 
3078 	tx_ring->bd_ring.head = NULL;
3079 }
3080 
3081 static void rtw89_pci_free_tx_rings(struct rtw89_dev *rtwdev,
3082 				    struct pci_dev *pdev)
3083 {
3084 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3085 	const struct rtw89_pci_info *info = rtwdev->pci_info;
3086 	struct rtw89_pci_tx_ring *tx_ring;
3087 	int i;
3088 
3089 	for (i = 0; i < RTW89_TXCH_NUM; i++) {
3090 		if (info->tx_dma_ch_mask & BIT(i))
3091 			continue;
3092 		tx_ring = &rtwpci->tx_rings[i];
3093 		rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring);
3094 		rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring);
3095 	}
3096 }
3097 
3098 static void rtw89_pci_free_rx_ring(struct rtw89_dev *rtwdev,
3099 				   struct pci_dev *pdev,
3100 				   struct rtw89_pci_rx_ring *rx_ring)
3101 {
3102 	struct rtw89_pci_rx_info *rx_info;
3103 	struct sk_buff *skb;
3104 	dma_addr_t dma;
3105 	u32 buf_sz;
3106 	u8 *head;
3107 	int ring_sz = rx_ring->bd_ring.desc_size * rx_ring->bd_ring.len;
3108 	int i;
3109 
3110 	buf_sz = rx_ring->buf_sz;
3111 	for (i = 0; i < rx_ring->bd_ring.len; i++) {
3112 		skb = rx_ring->buf[i];
3113 		if (!skb)
3114 			continue;
3115 
3116 		rx_info = RTW89_PCI_RX_SKB_CB(skb);
3117 		dma = rx_info->dma;
3118 		dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
3119 		dev_kfree_skb(skb);
3120 		rx_ring->buf[i] = NULL;
3121 	}
3122 
3123 	head = rx_ring->bd_ring.head;
3124 	dma = rx_ring->bd_ring.dma;
3125 	dma_free_coherent(&pdev->dev, ring_sz, head, dma);
3126 
3127 	rx_ring->bd_ring.head = NULL;
3128 }
3129 
3130 static void rtw89_pci_free_rx_rings(struct rtw89_dev *rtwdev,
3131 				    struct pci_dev *pdev)
3132 {
3133 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3134 	struct rtw89_pci_rx_ring *rx_ring;
3135 	int i;
3136 
3137 	for (i = 0; i < RTW89_RXCH_NUM; i++) {
3138 		rx_ring = &rtwpci->rx_rings[i];
3139 		rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring);
3140 	}
3141 }
3142 
3143 static void rtw89_pci_free_trx_rings(struct rtw89_dev *rtwdev,
3144 				     struct pci_dev *pdev)
3145 {
3146 	rtw89_pci_free_rx_rings(rtwdev, pdev);
3147 	rtw89_pci_free_tx_rings(rtwdev, pdev);
3148 }
3149 
3150 static int rtw89_pci_init_rx_bd(struct rtw89_dev *rtwdev, struct pci_dev *pdev,
3151 				struct rtw89_pci_rx_ring *rx_ring,
3152 				struct sk_buff *skb, int buf_sz, u32 idx)
3153 {
3154 	struct rtw89_pci_rx_info *rx_info;
3155 	struct rtw89_pci_rx_bd_32 *rx_bd;
3156 	dma_addr_t dma;
3157 
3158 	if (!skb)
3159 		return -EINVAL;
3160 
3161 	dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE);
3162 	if (dma_mapping_error(&pdev->dev, dma))
3163 		return -EBUSY;
3164 
3165 	rx_info = RTW89_PCI_RX_SKB_CB(skb);
3166 	rx_bd = RTW89_PCI_RX_BD(rx_ring, idx);
3167 
3168 	memset(rx_bd, 0, sizeof(*rx_bd));
3169 	rx_bd->buf_size = cpu_to_le16(buf_sz);
3170 	rx_bd->dma = cpu_to_le32(dma);
3171 	rx_info->dma = dma;
3172 
3173 	return 0;
3174 }
3175 
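/* Allocate the txwd page pool for one TX channel: a single coherent DMA
 * ring split into fixed-size pages, each tracked by a rtw89_pci_tx_wd
 * entry and placed on the ring's free list.
 */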
3176 static int rtw89_pci_alloc_tx_wd_ring(struct rtw89_dev *rtwdev,
3177 				      struct pci_dev *pdev,
3178 				      struct rtw89_pci_tx_ring *tx_ring,
3179 				      enum rtw89_tx_channel txch)
3180 {
3181 	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
3182 	struct rtw89_pci_tx_wd *txwd;
3183 	dma_addr_t dma;
3184 	dma_addr_t cur_paddr;
3185 	u8 *head;
3186 	u8 *cur_vaddr;
3187 	u32 page_size = RTW89_PCI_TXWD_PAGE_SIZE;
3188 	u32 page_num = RTW89_PCI_TXWD_NUM_MAX;
3189 	u32 ring_sz = page_size * page_num;
3190 	u32 page_offset;
3191 	int i;
3192 
3193 	/* FWCMD queue doesn't use txwd as pages */
3194 	if (txch == RTW89_TXCH_CH12)
3195 		return 0;
3196 
3197 	head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
3198 	if (!head)
3199 		return -ENOMEM;
3200 
3201 	INIT_LIST_HEAD(&wd_ring->free_pages);
3202 	wd_ring->head = head;
3203 	wd_ring->dma = dma;
3204 	wd_ring->page_size = page_size;
3205 	wd_ring->page_num = page_num;
3206 
3207 	page_offset = 0;
3208 	for (i = 0; i < page_num; i++) {
3209 		txwd = &wd_ring->pages[i];
3210 		cur_paddr = dma + page_offset;
3211 		cur_vaddr = head + page_offset;
3212 
3213 		skb_queue_head_init(&txwd->queue);
3214 		INIT_LIST_HEAD(&txwd->list);
3215 		txwd->paddr = cur_paddr;
3216 		txwd->vaddr = cur_vaddr;
3217 		txwd->len = page_size;
3218 		txwd->seq = i;
3219 		rtw89_pci_enqueue_txwd(tx_ring, txwd);
3220 
3221 		page_offset += page_size;
3222 	}
3223 
3224 	return 0;
3225 }
3226 
3227 static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev,
3228 				   struct pci_dev *pdev,
3229 				   struct rtw89_pci_tx_ring *tx_ring,
3230 				   u32 desc_size, u32 len,
3231 				   enum rtw89_tx_channel txch)
3232 {
3233 	const struct rtw89_pci_ch_dma_addr *txch_addr;
3234 	int ring_sz = desc_size * len;
3235 	u8 *head;
3236 	dma_addr_t dma;
3237 	int ret;
3238 
3239 	ret = rtw89_pci_alloc_tx_wd_ring(rtwdev, pdev, tx_ring, txch);
3240 	if (ret) {
3241 		rtw89_err(rtwdev, "failed to alloc txwd ring of txch %d\n", txch);
3242 		goto err;
3243 	}
3244 
3245 	ret = rtw89_pci_get_txch_addrs(rtwdev, txch, &txch_addr);
3246 	if (ret) {
3247 		rtw89_err(rtwdev, "failed to get address of txch %d\n", txch);
3248 		goto err_free_wd_ring;
3249 	}
3250 
3251 	head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
3252 	if (!head) {
3253 		ret = -ENOMEM;
3254 		goto err_free_wd_ring;
3255 	}
3256 
3257 	INIT_LIST_HEAD(&tx_ring->busy_pages);
3258 	tx_ring->bd_ring.head = head;
3259 	tx_ring->bd_ring.dma = dma;
3260 	tx_ring->bd_ring.len = len;
3261 	tx_ring->bd_ring.desc_size = desc_size;
3262 	tx_ring->bd_ring.addr = *txch_addr;
3263 	tx_ring->bd_ring.wp = 0;
3264 	tx_ring->bd_ring.rp = 0;
3265 	tx_ring->txch = txch;
3266 
3267 	return 0;
3268 
3269 err_free_wd_ring:
3270 	rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring);
3271 err:
3272 	return ret;
3273 }
3274 
3275 static int rtw89_pci_alloc_tx_rings(struct rtw89_dev *rtwdev,
3276 				    struct pci_dev *pdev)
3277 {
3278 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3279 	const struct rtw89_pci_info *info = rtwdev->pci_info;
3280 	struct rtw89_pci_tx_ring *tx_ring;
3281 	u32 desc_size;
3282 	u32 len;
3283 	u32 i, tx_allocated;
3284 	int ret;
3285 
3286 	for (i = 0; i < RTW89_TXCH_NUM; i++) {
3287 		if (info->tx_dma_ch_mask & BIT(i))
3288 			continue;
3289 		tx_ring = &rtwpci->tx_rings[i];
3290 		desc_size = sizeof(struct rtw89_pci_tx_bd_32);
3291 		len = RTW89_PCI_TXBD_NUM_MAX;
3292 		ret = rtw89_pci_alloc_tx_ring(rtwdev, pdev, tx_ring,
3293 					      desc_size, len, i);
3294 		if (ret) {
3295 			rtw89_err(rtwdev, "failed to alloc tx ring %d\n", i);
3296 			goto err_free;
3297 		}
3298 	}
3299 
3300 	return 0;
3301 
3302 err_free:
3303 	tx_allocated = i;
3304 	for (i = 0; i < tx_allocated; i++) {
3305 		tx_ring = &rtwpci->tx_rings[i];
3306 		rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring);
3307 	}
3308 
3309 	return ret;
3310 }
3311 
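/* Allocate one RX BD ring plus a DMA-mapped skb for every descriptor.
 * When the HW treats equal read/write pointers as a full ring
 * (rx_ring_eq_is_full), the initial write pointer is len - 1 instead of 0.
 */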
3312 static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev,
3313 				   struct pci_dev *pdev,
3314 				   struct rtw89_pci_rx_ring *rx_ring,
3315 				   u32 desc_size, u32 len, u32 rxch)
3316 {
3317 	const struct rtw89_pci_info *info = rtwdev->pci_info;
3318 	const struct rtw89_pci_ch_dma_addr *rxch_addr;
3319 	struct sk_buff *skb;
3320 	u8 *head;
3321 	dma_addr_t dma;
3322 	int ring_sz = desc_size * len;
3323 	int buf_sz = RTW89_PCI_RX_BUF_SIZE;
3324 	int i, allocated;
3325 	int ret;
3326 
3327 	ret = rtw89_pci_get_rxch_addrs(rtwdev, rxch, &rxch_addr);
3328 	if (ret) {
3329 		rtw89_err(rtwdev, "failed to get address of rxch %d\n", rxch);
3330 		return ret;
3331 	}
3332 
3333 	head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
3334 	if (!head) {
3335 		ret = -ENOMEM;
3336 		goto err;
3337 	}
3338 
3339 	rx_ring->bd_ring.head = head;
3340 	rx_ring->bd_ring.dma = dma;
3341 	rx_ring->bd_ring.len = len;
3342 	rx_ring->bd_ring.desc_size = desc_size;
3343 	rx_ring->bd_ring.addr = *rxch_addr;
3344 	if (info->rx_ring_eq_is_full)
3345 		rx_ring->bd_ring.wp = len - 1;
3346 	else
3347 		rx_ring->bd_ring.wp = 0;
3348 	rx_ring->bd_ring.rp = 0;
3349 	rx_ring->buf_sz = buf_sz;
3350 	rx_ring->diliver_skb = NULL;
3351 	rx_ring->diliver_desc.ready = false;
3352 	rx_ring->target_rx_tag = 0;
3353 
3354 	for (i = 0; i < len; i++) {
3355 		skb = dev_alloc_skb(buf_sz);
3356 		if (!skb) {
3357 			ret = -ENOMEM;
3358 			goto err_free;
3359 		}
3360 
3361 		memset(skb->data, 0, buf_sz);
3362 		rx_ring->buf[i] = skb;
3363 		ret = rtw89_pci_init_rx_bd(rtwdev, pdev, rx_ring, skb,
3364 					   buf_sz, i);
3365 		if (ret) {
3366 			rtw89_err(rtwdev, "failed to init rx buf %d\n", i);
3367 			dev_kfree_skb_any(skb);
3368 			rx_ring->buf[i] = NULL;
3369 			goto err_free;
3370 		}
3371 	}
3372 
3373 	return 0;
3374 
3375 err_free:
3376 	allocated = i;
3377 	for (i = 0; i < allocated; i++) {
3378 		skb = rx_ring->buf[i];
3379 		if (!skb)
3380 			continue;
3381 		dma = *((dma_addr_t *)skb->cb);
3382 		dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
3383 		dev_kfree_skb(skb);
3384 		rx_ring->buf[i] = NULL;
3385 	}
3386 
3387 	head = rx_ring->bd_ring.head;
3388 	dma = rx_ring->bd_ring.dma;
3389 	dma_free_coherent(&pdev->dev, ring_sz, head, dma);
3390 
3391 	rx_ring->bd_ring.head = NULL;
3392 err:
3393 	return ret;
3394 }
3395 
3396 static int rtw89_pci_alloc_rx_rings(struct rtw89_dev *rtwdev,
3397 				    struct pci_dev *pdev)
3398 {
3399 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3400 	struct rtw89_pci_rx_ring *rx_ring;
3401 	u32 desc_size;
3402 	u32 len;
3403 	int i, rx_allocated;
3404 	int ret;
3405 
3406 	for (i = 0; i < RTW89_RXCH_NUM; i++) {
3407 		rx_ring = &rtwpci->rx_rings[i];
3408 		desc_size = sizeof(struct rtw89_pci_rx_bd_32);
3409 		len = RTW89_PCI_RXBD_NUM_MAX;
3410 		ret = rtw89_pci_alloc_rx_ring(rtwdev, pdev, rx_ring,
3411 					      desc_size, len, i);
3412 		if (ret) {
3413 			rtw89_err(rtwdev, "failed to alloc rx ring %d\n", i);
3414 			goto err_free;
3415 		}
3416 	}
3417 
3418 	return 0;
3419 
3420 err_free:
3421 	rx_allocated = i;
3422 	for (i = 0; i < rx_allocated; i++) {
3423 		rx_ring = &rtwpci->rx_rings[i];
3424 		rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring);
3425 	}
3426 
3427 	return ret;
3428 }
3429 
3430 static int rtw89_pci_alloc_trx_rings(struct rtw89_dev *rtwdev,
3431 				     struct pci_dev *pdev)
3432 {
3433 	int ret;
3434 
3435 	ret = rtw89_pci_alloc_tx_rings(rtwdev, pdev);
3436 	if (ret) {
3437 		rtw89_err(rtwdev, "failed to alloc dma tx rings\n");
3438 		goto err;
3439 	}
3440 
3441 	ret = rtw89_pci_alloc_rx_rings(rtwdev, pdev);
3442 	if (ret) {
3443 		rtw89_err(rtwdev, "failed to alloc dma rx rings\n");
3444 		goto err_free_tx_rings;
3445 	}
3446 
3447 	return 0;
3448 
3449 err_free_tx_rings:
3450 	rtw89_pci_free_tx_rings(rtwdev, pdev);
3451 err:
3452 	return ret;
3453 }
3454 
3455 static void rtw89_pci_h2c_init(struct rtw89_dev *rtwdev,
3456 			       struct rtw89_pci *rtwpci)
3457 {
3458 	skb_queue_head_init(&rtwpci->h2c_queue);
3459 	skb_queue_head_init(&rtwpci->h2c_release_queue);
3460 }
3461 
3462 static int rtw89_pci_setup_resource(struct rtw89_dev *rtwdev,
3463 				    struct pci_dev *pdev)
3464 {
3465 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3466 	int ret;
3467 
3468 	ret = rtw89_pci_setup_mapping(rtwdev, pdev);
3469 	if (ret) {
3470 		rtw89_err(rtwdev, "failed to setup pci mapping\n");
3471 		goto err;
3472 	}
3473 
3474 	ret = rtw89_pci_alloc_trx_rings(rtwdev, pdev);
3475 	if (ret) {
3476 		rtw89_err(rtwdev, "failed to alloc pci trx rings\n");
3477 		goto err_pci_unmap;
3478 	}
3479 
3480 	rtw89_pci_h2c_init(rtwdev, rtwpci);
3481 
3482 	spin_lock_init(&rtwpci->irq_lock);
3483 	spin_lock_init(&rtwpci->trx_lock);
3484 
3485 	return 0;
3486 
3487 err_pci_unmap:
3488 	rtw89_pci_clear_mapping(rtwdev, pdev);
3489 err:
3490 	return ret;
3491 }
3492 
3493 static void rtw89_pci_clear_resource(struct rtw89_dev *rtwdev,
3494 				     struct pci_dev *pdev)
3495 {
3496 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3497 
3498 	rtw89_pci_free_trx_rings(rtwdev, pdev);
3499 	rtw89_pci_clear_mapping(rtwdev, pdev);
3500 	rtw89_pci_release_fwcmd(rtwdev, rtwpci,
3501 				skb_queue_len(&rtwpci->h2c_queue), true);
3502 }
3503 
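/* Interrupt mask layout for the original interrupt scheme: while recovery
 * is in progress only the HS0 indirect interrupt stays enabled; otherwise
 * the RX/TX DMA, RDU and stuck-detection interrupts are unmasked as well.
 */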
3504 void rtw89_pci_config_intr_mask(struct rtw89_dev *rtwdev)
3505 {
3506 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3507 	const struct rtw89_chip_info *chip = rtwdev->chip;
3508 	u32 hs0isr_ind_int_en = B_AX_HS0ISR_IND_INT_EN;
3509 
3510 	if (chip->chip_id == RTL8851B)
3511 		hs0isr_ind_int_en = B_AX_HS0ISR_IND_INT_EN_WKARND;
3512 
3513 	rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | 0;
3514 
3515 	if (rtwpci->under_recovery) {
3516 		rtwpci->intrs[0] = hs0isr_ind_int_en;
3517 		rtwpci->intrs[1] = 0;
3518 	} else {
3519 		rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN |
3520 				   B_AX_RXDMA_INT_EN |
3521 				   B_AX_RXP1DMA_INT_EN |
3522 				   B_AX_RPQDMA_INT_EN |
3523 				   B_AX_RXDMA_STUCK_INT_EN |
3524 				   B_AX_RDU_INT_EN |
3525 				   B_AX_RPQBD_FULL_INT_EN |
3526 				   hs0isr_ind_int_en;
3527 
3528 		rtwpci->intrs[1] = B_AX_HC10ISR_IND_INT_EN;
3529 	}
3530 }
3531 EXPORT_SYMBOL(rtw89_pci_config_intr_mask);
3532 
3533 static void rtw89_pci_recovery_intr_mask_v1(struct rtw89_dev *rtwdev)
3534 {
3535 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3536 
3537 	rtwpci->ind_intrs = B_AX_HS0ISR_IND_INT_EN;
3538 	rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN;
3539 	rtwpci->intrs[0] = 0;
3540 	rtwpci->intrs[1] = 0;
3541 }
3542 
3543 static void rtw89_pci_default_intr_mask_v1(struct rtw89_dev *rtwdev)
3544 {
3545 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3546 
3547 	rtwpci->ind_intrs = B_AX_HCI_AXIDMA_INT_EN |
3548 			    B_AX_HS1ISR_IND_INT_EN |
3549 			    B_AX_HS0ISR_IND_INT_EN;
3550 	rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN;
3551 	rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN |
3552 			   B_AX_RXDMA_INT_EN |
3553 			   B_AX_RXP1DMA_INT_EN |
3554 			   B_AX_RPQDMA_INT_EN |
3555 			   B_AX_RXDMA_STUCK_INT_EN |
3556 			   B_AX_RDU_INT_EN |
3557 			   B_AX_RPQBD_FULL_INT_EN;
3558 	rtwpci->intrs[1] = B_AX_GPIO18_INT_EN;
3559 }
3560 
3561 static void rtw89_pci_low_power_intr_mask_v1(struct rtw89_dev *rtwdev)
3562 {
3563 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3564 
3565 	rtwpci->ind_intrs = B_AX_HS1ISR_IND_INT_EN |
3566 			    B_AX_HS0ISR_IND_INT_EN;
3567 	rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN;
3568 	rtwpci->intrs[0] = 0;
3569 	rtwpci->intrs[1] = B_AX_GPIO18_INT_EN;
3570 }
3571 
3572 void rtw89_pci_config_intr_mask_v1(struct rtw89_dev *rtwdev)
3573 {
3574 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3575 
3576 	if (rtwpci->under_recovery)
3577 		rtw89_pci_recovery_intr_mask_v1(rtwdev);
3578 	else if (rtwpci->low_power)
3579 		rtw89_pci_low_power_intr_mask_v1(rtwdev);
3580 	else
3581 		rtw89_pci_default_intr_mask_v1(rtwdev);
3582 }
3583 EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v1);
3584 
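/* V2 counterparts of the interrupt mask variants above, using the
 * BE-generation register bits; selected by rtw89_pci_config_intr_mask_v2().
 */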
3585 static void rtw89_pci_recovery_intr_mask_v2(struct rtw89_dev *rtwdev)
3586 {
3587 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3588 
3589 	rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0;
3590 	rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN;
3591 	rtwpci->intrs[0] = 0;
3592 	rtwpci->intrs[1] = 0;
3593 }
3594 
3595 static void rtw89_pci_default_intr_mask_v2(struct rtw89_dev *rtwdev)
3596 {
3597 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3598 
3599 	rtwpci->ind_intrs = B_BE_HCI_AXIDMA_INT_EN0 |
3600 			    B_BE_HS0_IND_INT_EN0;
3601 	rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN;
3602 	rtwpci->intrs[0] = B_BE_RDU_CH1_INT_IMR_V1 |
3603 			   B_BE_RDU_CH0_INT_IMR_V1;
3604 	rtwpci->intrs[1] = B_BE_PCIE_RX_RX0P2_IMR0_V1 |
3605 			   B_BE_PCIE_RX_RPQ0_IMR0_V1;
3606 }
3607 
3608 static void rtw89_pci_low_power_intr_mask_v2(struct rtw89_dev *rtwdev)
3609 {
3610 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3611 
3612 	rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0 |
3613 			    B_BE_HS1_IND_INT_EN0;
3614 	rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN;
3615 	rtwpci->intrs[0] = 0;
3616 	rtwpci->intrs[1] = B_BE_PCIE_RX_RX0P2_IMR0_V1 |
3617 			   B_BE_PCIE_RX_RPQ0_IMR0_V1;
3618 }
3619 
3620 void rtw89_pci_config_intr_mask_v2(struct rtw89_dev *rtwdev)
3621 {
3622 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3623 
3624 	if (rtwpci->under_recovery)
3625 		rtw89_pci_recovery_intr_mask_v2(rtwdev);
3626 	else if (rtwpci->low_power)
3627 		rtw89_pci_low_power_intr_mask_v2(rtwdev);
3628 	else
3629 		rtw89_pci_default_intr_mask_v2(rtwdev);
3630 }
3631 EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v2);
3632 
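/* Allocate a single interrupt vector (MSI if available, falling back to
 * legacy INTx), install the hard and threaded IRQ handlers, and program
 * the reset-time interrupt mask.
 */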
3633 static int rtw89_pci_request_irq(struct rtw89_dev *rtwdev,
3634 				 struct pci_dev *pdev)
3635 {
3636 	unsigned long flags = 0;
3637 	int ret;
3638 
3639 	flags |= PCI_IRQ_LEGACY | PCI_IRQ_MSI;
3640 	ret = pci_alloc_irq_vectors(pdev, 1, 1, flags);
3641 	if (ret < 0) {
3642 		rtw89_err(rtwdev, "failed to alloc irq vectors, ret %d\n", ret);
3643 		goto err;
3644 	}
3645 
3646 	ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq,
3647 					rtw89_pci_interrupt_handler,
3648 					rtw89_pci_interrupt_threadfn,
3649 					IRQF_SHARED, KBUILD_MODNAME, rtwdev);
3650 	if (ret) {
3651 		rtw89_err(rtwdev, "failed to request threaded irq\n");
3652 		goto err_free_vector;
3653 	}
3654 
3655 	rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RESET);
3656 
3657 	return 0;
3658 
3659 err_free_vector:
3660 	pci_free_irq_vectors(pdev);
3661 err:
3662 	return ret;
3663 }
3664 
3665 static void rtw89_pci_free_irq(struct rtw89_dev *rtwdev,
3666 			       struct pci_dev *pdev)
3667 {
3668 	devm_free_irq(rtwdev->dev, pdev->irq, rtwdev);
3669 	pci_free_irq_vectors(pdev);
3670 }
3671 
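/* Helper to convert a Gray-coded value of @bit_num bits to binary; used by
 * rtw89_pci_filter_out() below to decode the FILTER_OUT_EQ field read back
 * from the PCIe PHY.
 */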
3672 static u16 gray_code_to_bin(u16 gray_code, u32 bit_num)
3673 {
3674 	u16 bin = 0, gray_bit;
3675 	u32 bit_idx;
3676 
3677 	for (bit_idx = 0; bit_idx < bit_num; bit_idx++) {
3678 		gray_bit = (gray_code >> bit_idx) & 0x1;
3679 		if (bit_num - bit_idx > 1)
3680 			gray_bit ^= (gray_code >> (bit_idx + 1)) & 0x1;
3681 		bin |= (gray_bit << bit_idx);
3682 	}
3683 
3684 	return bin;
3685 }
3686 
3687 static int rtw89_pci_filter_out(struct rtw89_dev *rtwdev)
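/* RTL8852C only: unless the ASPM control field already selects L1, tune
 * the PCIe PHY according to the negotiated link speed. On Gen2 links the
 * Gray-coded equalizer value is read back, decoded and written into the
 * filter-out register before the power-saving bit is set; on Gen1 links
 * only the power-saving bit is set.
 */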
3688 {
3689 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3690 	struct pci_dev *pdev = rtwpci->pdev;
3691 	u16 val16, filter_out_val;
3692 	u32 val, phy_offset;
3693 	int ret;
3694 
3695 	if (rtwdev->chip->chip_id != RTL8852C)
3696 		return 0;
3697 
3698 	val = rtw89_read32_mask(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK);
3699 	if (val == B_AX_ASPM_CTRL_L1)
3700 		return 0;
3701 
3702 	ret = pci_read_config_dword(pdev, RTW89_PCIE_L1_STS_V1, &val);
3703 	if (ret)
3704 		return ret;
3705 
3706 	val = FIELD_GET(RTW89_BCFG_LINK_SPEED_MASK, val);
3707 	if (val == RTW89_PCIE_GEN1_SPEED) {
3708 		phy_offset = R_RAC_DIRECT_OFFSET_G1;
3709 	} else if (val == RTW89_PCIE_GEN2_SPEED) {
3710 		phy_offset = R_RAC_DIRECT_OFFSET_G2;
3711 		val16 = rtw89_read16(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT);
3712 		rtw89_write16_set(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT,
3713 				  val16 | B_PCIE_BIT_PINOUT_DIS);
3714 		rtw89_write16_set(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT,
3715 				  val16 & ~B_PCIE_BIT_RD_SEL);
3716 
3717 		val16 = rtw89_read16_mask(rtwdev,
3718 					  phy_offset + RAC_ANA1F * RAC_MULT,
3719 					  FILTER_OUT_EQ_MASK);
3720 		val16 = gray_code_to_bin(val16, hweight16(val16));
3721 		filter_out_val = rtw89_read16(rtwdev, phy_offset + RAC_ANA24 *
3722 					      RAC_MULT);
3723 		filter_out_val &= ~REG_FILTER_OUT_MASK;
3724 		filter_out_val |= FIELD_PREP(REG_FILTER_OUT_MASK, val16);
3725 
3726 		rtw89_write16(rtwdev, phy_offset + RAC_ANA24 * RAC_MULT,
3727 			      filter_out_val);
3728 		rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0A * RAC_MULT,
3729 				  B_BAC_EQ_SEL);
3730 		rtw89_write16_set(rtwdev,
3731 				  R_RAC_DIRECT_OFFSET_G1 + RAC_ANA0C * RAC_MULT,
3732 				  B_PCIE_BIT_PSAVE);
3733 	} else {
3734 		return -EOPNOTSUPP;
3735 	}
3736 	rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0C * RAC_MULT,
3737 			  B_PCIE_BIT_PSAVE);
3738 
3739 	return 0;
3740 }
3741 
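/* CLKREQ, ASPM L1 and L1SS control below all follow the same pattern:
 * honour the corresponding module parameter kill switch, then hand off to
 * the generation-specific setter in gen_def.
 */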
3742 static void rtw89_pci_clkreq_set(struct rtw89_dev *rtwdev, bool enable)
3743 {
3744 	const struct rtw89_pci_info *info = rtwdev->pci_info;
3745 	const struct rtw89_pci_gen_def *gen_def = info->gen_def;
3746 
3747 	if (rtw89_pci_disable_clkreq)
3748 		return;
3749 
3750 	gen_def->clkreq_set(rtwdev, enable);
3751 }
3752 
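/* AX-generation CLKREQ control: program the hardware CLKREQ delay, then
 * enable or disable CLKREQ via the RTW89_PCIE_L1_CTRL clock bit
 * (8852A/8852B/8851B) or R_AX_L1_CLK_CTRL (8852C).
 */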
3753 static void rtw89_pci_clkreq_set_ax(struct rtw89_dev *rtwdev, bool enable)
3754 {
3755 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
3756 	int ret;
3757 
3758 	ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL,
3759 					  PCIE_CLKDLY_HW_30US);
3760 	if (ret)
3761 		rtw89_err(rtwdev, "failed to set CLKREQ Delay\n");
3762 
3763 	if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) {
3764 		if (enable)
3765 			ret = rtw89_pci_config_byte_set(rtwdev,
3766 							RTW89_PCIE_L1_CTRL,
3767 							RTW89_PCIE_BIT_CLK);
3768 		else
3769 			ret = rtw89_pci_config_byte_clr(rtwdev,
3770 							RTW89_PCIE_L1_CTRL,
3771 							RTW89_PCIE_BIT_CLK);
3772 		if (ret)
3773 			rtw89_err(rtwdev, "failed to %s CLKREQ_L1, ret=%d\n",
3774 				  enable ? "set" : "unset", ret);
3775 	} else if (chip_id == RTL8852C) {
3776 		rtw89_write32_set(rtwdev, R_AX_PCIE_LAT_CTRL,
3777 				  B_AX_CLK_REQ_SEL_OPT | B_AX_CLK_REQ_SEL);
3778 		if (enable)
3779 			rtw89_write32_set(rtwdev, R_AX_L1_CLK_CTRL,
3780 					  B_AX_CLK_REQ_N);
3781 		else
3782 			rtw89_write32_clr(rtwdev, R_AX_L1_CLK_CTRL,
3783 					  B_AX_CLK_REQ_N);
3784 	}
3785 }
3786 
3787 static void rtw89_pci_aspm_set(struct rtw89_dev *rtwdev, bool enable)
3788 {
3789 	const struct rtw89_pci_info *info = rtwdev->pci_info;
3790 	const struct rtw89_pci_gen_def *gen_def = info->gen_def;
3791 
3792 	if (rtw89_pci_disable_aspm_l1)
3793 		return;
3794 
3795 	gen_def->aspm_set(rtwdev, enable);
3796 }
3797 
3798 static void rtw89_pci_aspm_set_ax(struct rtw89_dev *rtwdev, bool enable)
3799 {
3800 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
3801 	u8 value = 0;
3802 	int ret;
3803 
3804 	ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, &value);
3805 	if (ret)
3806 		rtw89_warn(rtwdev, "failed to read ASPM Delay\n");
3807 
3808 	u8p_replace_bits(&value, PCIE_L1DLY_16US, RTW89_L1DLY_MASK);
3809 	u8p_replace_bits(&value, PCIE_L0SDLY_4US, RTW89_L0DLY_MASK);
3810 
3811 	ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, value);
3812 	if (ret)
3813 		rtw89_warn(rtwdev, "failed to write ASPM Delay\n");
3814 
3815 	if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) {
3816 		if (enable)
3817 			ret = rtw89_pci_config_byte_set(rtwdev,
3818 							RTW89_PCIE_L1_CTRL,
3819 							RTW89_PCIE_BIT_L1);
3820 		else
3821 			ret = rtw89_pci_config_byte_clr(rtwdev,
3822 							RTW89_PCIE_L1_CTRL,
3823 							RTW89_PCIE_BIT_L1);
3824 	} else if (chip_id == RTL8852C) {
3825 		if (enable)
3826 			rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1,
3827 					  B_AX_ASPM_CTRL_L1);
3828 		else
3829 			rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1,
3830 					  B_AX_ASPM_CTRL_L1);
3831 	}
3832 	if (ret)
3833 		rtw89_err(rtwdev, "failed to %s ASPM L1, ret=%d\n",
3834 			  enable ? "set" : "unset", ret);
3835 }
3836 
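/* Recalculate RX interrupt mitigation: it is enabled only when not
 * scanning and either TX or RX traffic is at a high level. AX chips also
 * program the RX counter and timer match thresholds; BE chips only toggle
 * the per-queue mitigation enables.
 */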
3837 static void rtw89_pci_recalc_int_mit(struct rtw89_dev *rtwdev)
3838 {
3839 	enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
3840 	const struct rtw89_pci_info *info = rtwdev->pci_info;
3841 	struct rtw89_traffic_stats *stats = &rtwdev->stats;
3842 	enum rtw89_tfc_lv tx_tfc_lv = stats->tx_tfc_lv;
3843 	enum rtw89_tfc_lv rx_tfc_lv = stats->rx_tfc_lv;
3844 	u32 val = 0;
3845 
3846 	if (rtwdev->scanning ||
3847 	    (tx_tfc_lv < RTW89_TFC_HIGH && rx_tfc_lv < RTW89_TFC_HIGH))
3848 		goto out;
3849 
3850 	if (chip_gen == RTW89_CHIP_BE)
3851 		val = B_BE_PCIE_MIT_RX0P2_EN | B_BE_PCIE_MIT_RX0P1_EN;
3852 	else
3853 		val = B_AX_RXMIT_RXP2_SEL | B_AX_RXMIT_RXP1_SEL |
3854 		      FIELD_PREP(B_AX_RXCOUNTER_MATCH_MASK, RTW89_PCI_RXBD_NUM_MAX / 2) |
3855 		      FIELD_PREP(B_AX_RXTIMER_UNIT_MASK, AX_RXTIMER_UNIT_64US) |
3856 		      FIELD_PREP(B_AX_RXTIMER_MATCH_MASK, 2048 / 64);
3857 
3858 out:
3859 	rtw89_write32(rtwdev, info->mit_addr, val);
3860 }
3861 
3862 static void rtw89_pci_link_cfg(struct rtw89_dev *rtwdev)
3863 {
3864 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3865 	struct pci_dev *pdev = rtwpci->pdev;
3866 	u16 link_ctrl;
3867 	int ret;
3868 
3869 	/* Although CLKREQ/ASPM can be enabled through the link control
3870 	 * register in standard PCIe configuration space, by Realtek's design
3871 	 * the driver must also check whether the host enabled them before
3872 	 * turning on the corresponding HW module.
3873 	 *
3874 	 * These features are implemented by two associated HW modules: one
3875 	 * accesses PCIe configuration space to follow the host settings, and
3876 	 * the other performs the CLKREQ/ASPM mechanisms themselves. The latter
3877 	 * is disabled by default, because some hosts do not support it, and
3878 	 * wrong settings (e.g. CLKREQ# not bi-directional) could cause the
3879 	 * device to be lost if the HW misbehaves on the link.
3880 	 *
3881 	 * Hence the driver first checks that PCIe configuration space is
3882 	 * synced and the feature is enabled there, and only then turns on the
3883 	 * module that actually implements the mechanism.
3884 	 */
3885 	ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl);
3886 	if (ret) {
3887 		rtw89_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret);
3888 		return;
3889 	}
3890 
3891 	if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN)
3892 		rtw89_pci_clkreq_set(rtwdev, true);
3893 
3894 	if (link_ctrl & PCI_EXP_LNKCTL_ASPM_L1)
3895 		rtw89_pci_aspm_set(rtwdev, true);
3896 }
3897 
3898 static void rtw89_pci_l1ss_set(struct rtw89_dev *rtwdev, bool enable)
3899 {
3900 	const struct rtw89_pci_info *info = rtwdev->pci_info;
3901 	const struct rtw89_pci_gen_def *gen_def = info->gen_def;
3902 
3903 	if (rtw89_pci_disable_l1ss)
3904 		return;
3905 
3906 	gen_def->l1ss_set(rtwdev, enable);
3907 }
3908 
3909 static void rtw89_pci_l1ss_set_ax(struct rtw89_dev *rtwdev, bool enable)
3910 {
3911 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
3912 	int ret;
3913 
3914 	if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) {
3915 		if (enable)
3916 			ret = rtw89_pci_config_byte_set(rtwdev,
3917 							RTW89_PCIE_TIMER_CTRL,
3918 							RTW89_PCIE_BIT_L1SUB);
3919 		else
3920 			ret = rtw89_pci_config_byte_clr(rtwdev,
3921 							RTW89_PCIE_TIMER_CTRL,
3922 							RTW89_PCIE_BIT_L1SUB);
3923 		if (ret)
3924 			rtw89_err(rtwdev, "failed to %s L1SS, ret=%d\n",
3925 				  enable ? "set" : "unset", ret);
3926 	} else if (chip_id == RTL8852C) {
3927 		ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_L1SS_STS_V1,
3928 						RTW89_PCIE_BIT_ASPM_L11 |
3929 						RTW89_PCIE_BIT_PCI_L11);
3930 		if (ret)
3931 			rtw89_warn(rtwdev, "failed to unset ASPM L1.1, ret=%d\n", ret);
3932 		if (enable)
3933 			rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1,
3934 					  B_AX_L1SUB_DISABLE);
3935 		else
3936 			rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1,
3937 					  B_AX_L1SUB_DISABLE);
3938 	}
3939 }
3940 
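/* Enable L1 substate handling only if the host exposes the L1SS extended
 * capability and has at least one L1 substate enabled in its control
 * register.
 */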
3941 static void rtw89_pci_l1ss_cfg(struct rtw89_dev *rtwdev)
3942 {
3943 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3944 	struct pci_dev *pdev = rtwpci->pdev;
3945 	u32 l1ss_cap_ptr, l1ss_ctrl;
3946 
3947 	if (rtw89_pci_disable_l1ss)
3948 		return;
3949 
3950 	l1ss_cap_ptr = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
3951 	if (!l1ss_cap_ptr)
3952 		return;
3953 
3954 	pci_read_config_dword(pdev, l1ss_cap_ptr + PCI_L1SS_CTL1, &l1ss_ctrl);
3955 
3956 	if (l1ss_ctrl & PCI_L1SS_CTL1_L1SS_MASK)
3957 		rtw89_pci_l1ss_set(rtwdev, true);
3958 }
3959 
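/* Poll (up to 1 ms) for the PCIe IO/TX/RX DMA busy flags to clear. */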
3960 static int rtw89_pci_poll_io_idle_ax(struct rtw89_dev *rtwdev)
3961 {
3962 	int ret = 0;
3963 	u32 sts;
3964 	u32 busy = B_AX_PCIEIO_BUSY | B_AX_PCIEIO_TX_BUSY | B_AX_PCIEIO_RX_BUSY;
3965 
3966 	ret = read_poll_timeout_atomic(rtw89_read32, sts, (sts & busy) == 0x0,
3967 				       10, 1000, false, rtwdev,
3968 				       R_AX_PCIE_DMA_BUSY1);
3969 	if (ret) {
3970 		rtw89_err(rtwdev, "pci dmach busy1 0x%X\n",
3971 			  rtw89_read32(rtwdev, R_AX_PCIE_DMA_BUSY1));
3972 		return -EINVAL;
3973 	}
3974 	return ret;
3975 }
3976 
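/* Level-1 recovery, step 1: stop all PCIe DMA and wait for IO idle. If the
 * engine is stuck, turn off the affected HCI DMA direction(s), re-enable
 * HCI DMA and poll again. Skipped on RTL8852C.
 */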
3977 static int rtw89_pci_lv1rst_stop_dma_ax(struct rtw89_dev *rtwdev)
3978 {
3979 	u32 val;
3980 	int ret;
3981 
3982 	if (rtwdev->chip->chip_id == RTL8852C)
3983 		return 0;
3984 
3985 	rtw89_pci_ctrl_dma_all(rtwdev, false);
3986 	ret = rtw89_pci_poll_io_idle_ax(rtwdev);
3987 	if (ret) {
3988 		val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG);
3989 		rtw89_debug(rtwdev, RTW89_DBG_HCI,
3990 			    "[PCIe] poll_io_idle fail, before 0x%08x: 0x%08x\n",
3991 			    R_AX_DBG_ERR_FLAG, val);
3992 		if ((val & B_AX_TX_STUCK) || (val & B_AX_PCIE_TXBD_LEN0))
3993 			rtw89_mac_ctrl_hci_dma_tx(rtwdev, false);
3994 		if (val & B_AX_RX_STUCK)
3995 			rtw89_mac_ctrl_hci_dma_rx(rtwdev, false);
3996 		rtw89_mac_ctrl_hci_dma_trx(rtwdev, true);
3997 		ret = rtw89_pci_poll_io_idle_ax(rtwdev);
3998 		val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG);
3999 		rtw89_debug(rtwdev, RTW89_DBG_HCI,
4000 			    "[PCIe] poll_io_idle fail, after 0x%08x: 0x%08x\n",
4001 			    R_AX_DBG_ERR_FLAG, val);
4002 	}
4003 
4004 	return ret;
4005 }
4006 
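/* Level-1 recovery, step 2: cycle HCI DMA, clear all ring indexes, reset
 * the buffer descriptor RAM and re-enable PCIe DMA. Skipped on RTL8852C.
 */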
4007 static int rtw89_pci_lv1rst_start_dma_ax(struct rtw89_dev *rtwdev)
4008 {
4009 	int ret;
4010 
4011 	if (rtwdev->chip->chip_id == RTL8852C)
4012 		return 0;
4013 
4014 	rtw89_mac_ctrl_hci_dma_trx(rtwdev, false);
4015 	rtw89_mac_ctrl_hci_dma_trx(rtwdev, true);
4016 	rtw89_pci_clr_idx_all(rtwdev);
4017 
4018 	ret = rtw89_pci_rst_bdram_ax(rtwdev);
4019 	if (ret)
4020 		return ret;
4021 
4022 	rtw89_pci_ctrl_dma_all(rtwdev, true);
4023 	return ret;
4024 }
4025 
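/* Level-1 SER recovery callback: step 1 stops PCIe DMA, step 2 restarts
 * it, using the generation-specific hooks.
 */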
4026 static int rtw89_pci_ops_mac_lv1_recovery(struct rtw89_dev *rtwdev,
4027 					  enum rtw89_lv1_rcvy_step step)
4028 {
4029 	const struct rtw89_pci_info *info = rtwdev->pci_info;
4030 	const struct rtw89_pci_gen_def *gen_def = info->gen_def;
4031 	int ret;
4032 
4033 	switch (step) {
4034 	case RTW89_LV1_RCVY_STEP_1:
4035 		ret = gen_def->lv1rst_stop_dma(rtwdev);
4036 		if (ret)
4037 			rtw89_err(rtwdev, "lv1 rcvy pci stop dma fail\n");
4038 
4039 		break;
4040 
4041 	case RTW89_LV1_RCVY_STEP_2:
4042 		ret = gen_def->lv1rst_start_dma(rtwdev);
4043 		if (ret)
4044 			rtw89_err(rtwdev, "lv1 rcvy pci start dma fail\n");
4045 		break;
4046 
4047 	default:
4048 		return -EINVAL;
4049 	}
4050 
4051 	return ret;
4052 }
4053 
4054 static void rtw89_pci_ops_dump_err_status(struct rtw89_dev *rtwdev)
4055 {
4056 	if (rtwdev->chip->chip_gen == RTW89_CHIP_BE)
4057 		return;
4058 
4059 	if (rtwdev->chip->chip_id == RTL8852C) {
4060 		rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n",
4061 			   rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG_V1));
4062 		rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n",
4063 			   rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG_V1));
4064 	} else {
4065 		rtw89_info(rtwdev, "R_AX_RPQ_RXBD_IDX=0x%08x\n",
4066 			   rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX));
4067 		rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n",
4068 			   rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG));
4069 		rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n",
4070 			   rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG));
4071 	}
4072 }
4073 
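/* NAPI poll: clear and service the RPQ interrupts first and, if budget
 * remains, the RX queue interrupts; the device interrupt is re-enabled
 * only once NAPI completes within budget and the interface is running.
 */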
4074 static int rtw89_pci_napi_poll(struct napi_struct *napi, int budget)
4075 {
4076 	struct rtw89_dev *rtwdev = container_of(napi, struct rtw89_dev, napi);
4077 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
4078 	const struct rtw89_pci_info *info = rtwdev->pci_info;
4079 	const struct rtw89_pci_gen_def *gen_def = info->gen_def;
4080 	unsigned long flags;
4081 	int work_done;
4082 
4083 	rtwdev->napi_budget_countdown = budget;
4084 
4085 	rtw89_write32(rtwdev, gen_def->isr_clear_rpq.addr, gen_def->isr_clear_rpq.data);
4086 	work_done = rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown);
4087 	if (work_done == budget)
4088 		return budget;
4089 
4090 	rtw89_write32(rtwdev, gen_def->isr_clear_rxq.addr, gen_def->isr_clear_rxq.data);
4091 	work_done += rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown);
4092 	if (work_done < budget && napi_complete_done(napi, work_done)) {
4093 		spin_lock_irqsave(&rtwpci->irq_lock, flags);
4094 		if (likely(rtwpci->running))
4095 			rtw89_chip_enable_intr(rtwdev, rtwpci);
4096 		spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
4097 	}
4098 
4099 	return work_done;
4100 }
4101 
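/* System suspend/resume do not tear down rings or IRQs; they only adjust
 * the PCIe power-related bits (PERST/training keep, LDO control, L1 exit
 * enables) that have to differ across the suspend cycle, with per-chip
 * variations.
 */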
4102 static int __maybe_unused rtw89_pci_suspend(struct device *dev)
4103 {
4104 	struct ieee80211_hw *hw = dev_get_drvdata(dev);
4105 	struct rtw89_dev *rtwdev = hw->priv;
4106 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
4107 
4108 	rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
4109 	rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST);
4110 	rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
4111 	if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) {
4112 		rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
4113 				  B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
4114 		rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
4115 				  B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG);
4116 	} else {
4117 		rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1,
4118 				  B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN);
4119 	}
4120 
4121 	return 0;
4122 }
4123 
4124 static void rtw89_pci_l2_hci_ldo(struct rtw89_dev *rtwdev)
4125 {
4126 	if (rtwdev->chip->chip_id == RTL8852C)
4127 		return;
4128 
4129 	/* The hardware needs this register written twice for the setting to take effect */
4130 	rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE,
4131 				    RTW89_PCIE_BIT_CFG_RST_MSTATE);
4132 	rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE,
4133 				    RTW89_PCIE_BIT_CFG_RST_MSTATE);
4134 }
4135 
4136 static int __maybe_unused rtw89_pci_resume(struct device *dev)
4137 {
4138 	struct ieee80211_hw *hw = dev_get_drvdata(dev);
4139 	struct rtw89_dev *rtwdev = hw->priv;
4140 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
4141 
4142 	rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
4143 	rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST);
4144 	rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
4145 	if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) {
4146 		rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL,
4147 				  B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
4148 		rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1,
4149 				  B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG);
4150 	} else {
4151 		rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1,
4152 				  B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN);
4153 		rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1,
4154 				  B_AX_SEL_REQ_ENTR_L1);
4155 	}
4156 	rtw89_pci_l2_hci_ldo(rtwdev);
4157 	rtw89_pci_filter_out(rtwdev);
4158 	rtw89_pci_link_cfg(rtwdev);
4159 	rtw89_pci_l1ss_cfg(rtwdev);
4160 
4161 	return 0;
4162 }
4163 
4164 SIMPLE_DEV_PM_OPS(rtw89_pm_ops, rtw89_pci_suspend, rtw89_pci_resume);
4165 EXPORT_SYMBOL(rtw89_pm_ops);
4166 
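/* Generation-specific PCIe hooks used by the AX-generation chips. */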
4167 const struct rtw89_pci_gen_def rtw89_pci_gen_ax = {
4168 	.isr_rdu = B_AX_RDU_INT,
4169 	.isr_halt_c2h = B_AX_HALT_C2H_INT_EN,
4170 	.isr_wdt_timeout = B_AX_WDT_TIMEOUT_INT_EN,
4171 	.isr_clear_rpq = {R_AX_PCIE_HISR00, B_AX_RPQDMA_INT | B_AX_RPQBD_FULL_INT},
4172 	.isr_clear_rxq = {R_AX_PCIE_HISR00, B_AX_RXP1DMA_INT | B_AX_RXDMA_INT |
4173 					    B_AX_RDU_INT},
4174 
4175 	.mac_pre_init = rtw89_pci_ops_mac_pre_init_ax,
4176 	.mac_pre_deinit = NULL,
4177 	.mac_post_init = rtw89_pci_ops_mac_post_init_ax,
4178 
4179 	.clr_idx_all = rtw89_pci_clr_idx_all_ax,
4180 	.rst_bdram = rtw89_pci_rst_bdram_ax,
4181 
4182 	.lv1rst_stop_dma = rtw89_pci_lv1rst_stop_dma_ax,
4183 	.lv1rst_start_dma = rtw89_pci_lv1rst_start_dma_ax,
4184 
4185 	.ctrl_txdma_ch = rtw89_pci_ctrl_txdma_ch_ax,
4186 	.ctrl_txdma_fw_ch = rtw89_pci_ctrl_txdma_fw_ch_ax,
4187 	.poll_txdma_ch_idle = rtw89_pci_poll_txdma_ch_idle_ax,
4188 
4189 	.aspm_set = rtw89_pci_aspm_set_ax,
4190 	.clkreq_set = rtw89_pci_clkreq_set_ax,
4191 	.l1ss_set = rtw89_pci_l1ss_set_ax,
4192 };
4193 EXPORT_SYMBOL(rtw89_pci_gen_ax);
4194 
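/* HCI backend operations the common rtw89 core uses to drive the PCIe bus. */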
4195 static const struct rtw89_hci_ops rtw89_pci_ops = {
4196 	.tx_write	= rtw89_pci_ops_tx_write,
4197 	.tx_kick_off	= rtw89_pci_ops_tx_kick_off,
4198 	.flush_queues	= rtw89_pci_ops_flush_queues,
4199 	.reset		= rtw89_pci_ops_reset,
4200 	.start		= rtw89_pci_ops_start,
4201 	.stop		= rtw89_pci_ops_stop,
4202 	.pause		= rtw89_pci_ops_pause,
4203 	.switch_mode	= rtw89_pci_ops_switch_mode,
4204 	.recalc_int_mit = rtw89_pci_recalc_int_mit,
4205 
4206 	.read8		= rtw89_pci_ops_read8,
4207 	.read16		= rtw89_pci_ops_read16,
4208 	.read32		= rtw89_pci_ops_read32,
4209 	.write8		= rtw89_pci_ops_write8,
4210 	.write16	= rtw89_pci_ops_write16,
4211 	.write32	= rtw89_pci_ops_write32,
4212 
4213 	.mac_pre_init	= rtw89_pci_ops_mac_pre_init,
4214 	.mac_pre_deinit	= rtw89_pci_ops_mac_pre_deinit,
4215 	.mac_post_init	= rtw89_pci_ops_mac_post_init,
4216 	.deinit		= rtw89_pci_ops_deinit,
4217 
4218 	.check_and_reclaim_tx_resource = rtw89_pci_check_and_reclaim_tx_resource,
4219 	.mac_lv1_rcvy	= rtw89_pci_ops_mac_lv1_recovery,
4220 	.dump_err_status = rtw89_pci_ops_dump_err_status,
4221 	.napi_poll	= rtw89_pci_napi_poll,
4222 
4223 	.recovery_start = rtw89_pci_ops_recovery_start,
4224 	.recovery_complete = rtw89_pci_ops_recovery_complete,
4225 
4226 	.ctrl_txdma_ch	= rtw89_pci_ctrl_txdma_ch,
4227 	.ctrl_txdma_fw_ch = rtw89_pci_ctrl_txdma_fw_ch,
4228 	.ctrl_trxhci	= rtw89_pci_ctrl_dma_trx,
4229 	.poll_txdma_ch_idle = rtw89_pci_poll_txdma_ch_idle,
4230 
4231 	.clr_idx_all	= rtw89_pci_clr_idx_all,
4232 	.clear		= rtw89_pci_clear_resource,
4233 	.disable_intr	= rtw89_pci_disable_intr_lock,
4234 	.enable_intr	= rtw89_pci_enable_intr_lock,
4235 	.rst_bdram	= rtw89_pci_reset_bdram,
4236 };
4237 
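/* Common probe path for all rtw89 PCIe devices: allocate the ieee80211 hw,
 * initialise the core, claim the PCI device, set up DMA resources, read
 * chip information, apply PCIe link settings, initialise NAPI, request the
 * IRQ and register the core. Errors unwind in reverse order.
 */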
4238 int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
4239 {
4240 	struct rtw89_dev *rtwdev;
4241 	const struct rtw89_driver_info *info;
4242 	const struct rtw89_pci_info *pci_info;
4243 	int ret;
4244 
4245 	info = (const struct rtw89_driver_info *)id->driver_data;
4246 
4247 	rtwdev = rtw89_alloc_ieee80211_hw(&pdev->dev,
4248 					  sizeof(struct rtw89_pci),
4249 					  info->chip);
4250 	if (!rtwdev) {
4251 		dev_err(&pdev->dev, "failed to allocate hw\n");
4252 		return -ENOMEM;
4253 	}
4254 
4255 	pci_info = info->bus.pci;
4256 
4257 	rtwdev->pci_info = pci_info;
4258 	rtwdev->hci.ops = &rtw89_pci_ops;
4259 	rtwdev->hci.type = RTW89_HCI_TYPE_PCIE;
4260 	rtwdev->hci.rpwm_addr = pci_info->rpwm_addr;
4261 	rtwdev->hci.cpwm_addr = pci_info->cpwm_addr;
4262 
4263 	rtw89_check_quirks(rtwdev, info->quirks);
4264 
4265 	SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);
4266 
4267 	ret = rtw89_core_init(rtwdev);
4268 	if (ret) {
4269 		rtw89_err(rtwdev, "failed to initialise core\n");
4270 		goto err_release_hw;
4271 	}
4272 
4273 	ret = rtw89_pci_claim_device(rtwdev, pdev);
4274 	if (ret) {
4275 		rtw89_err(rtwdev, "failed to claim pci device\n");
4276 		goto err_core_deinit;
4277 	}
4278 
4279 	ret = rtw89_pci_setup_resource(rtwdev, pdev);
4280 	if (ret) {
4281 		rtw89_err(rtwdev, "failed to setup pci resource\n");
4282 		goto err_declaim_pci;
4283 	}
4284 
4285 	ret = rtw89_chip_info_setup(rtwdev);
4286 	if (ret) {
4287 		rtw89_err(rtwdev, "failed to setup chip information\n");
4288 		goto err_clear_resource;
4289 	}
4290 
4291 	rtw89_pci_filter_out(rtwdev);
4292 	rtw89_pci_link_cfg(rtwdev);
4293 	rtw89_pci_l1ss_cfg(rtwdev);
4294 
4295 	rtw89_core_napi_init(rtwdev);
4296 
4297 	ret = rtw89_pci_request_irq(rtwdev, pdev);
4298 	if (ret) {
4299 		rtw89_err(rtwdev, "failed to request pci irq\n");
4300 		goto err_deinit_napi;
4301 	}
4302 
4303 	ret = rtw89_core_register(rtwdev);
4304 	if (ret) {
4305 		rtw89_err(rtwdev, "failed to register core\n");
4306 		goto err_free_irq;
4307 	}
4308 
4309 	set_bit(RTW89_FLAG_PROBE_DONE, rtwdev->flags);
4310 
4311 	return 0;
4312 
4313 err_free_irq:
4314 	rtw89_pci_free_irq(rtwdev, pdev);
4315 err_deinit_napi:
4316 	rtw89_core_napi_deinit(rtwdev);
4317 err_clear_resource:
4318 	rtw89_pci_clear_resource(rtwdev, pdev);
4319 err_declaim_pci:
4320 	rtw89_pci_declaim_device(rtwdev, pdev);
4321 err_core_deinit:
4322 	rtw89_core_deinit(rtwdev);
4323 err_release_hw:
4324 	rtw89_free_ieee80211_hw(rtwdev);
4325 
4326 	return ret;
4327 }
4328 EXPORT_SYMBOL(rtw89_pci_probe);
4329 
4330 void rtw89_pci_remove(struct pci_dev *pdev)
4331 {
4332 	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
4333 	struct rtw89_dev *rtwdev;
4334 
4335 	rtwdev = hw->priv;
4336 
4337 	rtw89_pci_free_irq(rtwdev, pdev);
4338 	rtw89_core_napi_deinit(rtwdev);
4339 	rtw89_core_unregister(rtwdev);
4340 	rtw89_pci_clear_resource(rtwdev, pdev);
4341 	rtw89_pci_declaim_device(rtwdev, pdev);
4342 	rtw89_core_deinit(rtwdev);
4343 	rtw89_free_ieee80211_hw(rtwdev);
4344 }
4345 EXPORT_SYMBOL(rtw89_pci_remove);
4346 
4347 MODULE_AUTHOR("Realtek Corporation");
4348 MODULE_DESCRIPTION("Realtek PCI 802.11ax wireless driver");
4349 MODULE_LICENSE("Dual BSD/GPL");
4350