// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2020  Realtek Corporation
 */

#include <linux/pci.h>

#include "mac.h"
#include "pci.h"
#include "reg.h"
#include "ser.h"

static bool rtw89_pci_disable_clkreq;
static bool rtw89_pci_disable_aspm_l1;
static bool rtw89_pci_disable_l1ss;
module_param_named(disable_clkreq, rtw89_pci_disable_clkreq, bool, 0644);
module_param_named(disable_aspm_l1, rtw89_pci_disable_aspm_l1, bool, 0644);
module_param_named(disable_aspm_l1ss, rtw89_pci_disable_l1ss, bool, 0644);
MODULE_PARM_DESC(disable_clkreq, "Set Y to disable PCI clkreq support");
MODULE_PARM_DESC(disable_aspm_l1, "Set Y to disable PCI ASPM L1 support");
MODULE_PARM_DESC(disable_aspm_l1ss, "Set Y to disable PCI L1SS support");
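
/* Example usage (the module name depends on the build; rtw89_pci is
 * assumed here):
 *
 *   modprobe rtw89_pci disable_aspm_l1=1
 *
 * With mode 0644 the parameters are also readable/writable at runtime
 * under /sys/module/<module>/parameters/.
 */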

static int rtw89_pci_get_phy_offset_by_link_speed(struct rtw89_dev *rtwdev,
						  u32 *phy_offset)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;
	u32 val;
	int ret;

	ret = pci_read_config_dword(pdev, RTW89_PCIE_L1_STS_V1, &val);
	if (ret)
		return ret;

	val = u32_get_bits(val, RTW89_BCFG_LINK_SPEED_MASK);
	if (val == RTW89_PCIE_GEN1_SPEED) {
		*phy_offset = R_RAC_DIRECT_OFFSET_G1;
	} else if (val == RTW89_PCIE_GEN2_SPEED) {
		*phy_offset = R_RAC_DIRECT_OFFSET_G2;
	} else {
		rtw89_warn(rtwdev, "Unknown PCI link speed %d\n", val);
		return -EFAULT;
	}

	return 0;
}

static int rtw89_pci_rst_bdram_ax(struct rtw89_dev *rtwdev)
{
	u32 val;
	int ret;

	rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RST_BDRAM);

	ret = read_poll_timeout_atomic(rtw89_read32, val, !(val & B_AX_RST_BDRAM),
				       1, RTW89_PCI_POLL_BDRAM_RST_CNT, false,
				       rtwdev, R_AX_PCIE_INIT_CFG1);

	return ret;
}

static u32 rtw89_pci_dma_recalc(struct rtw89_dev *rtwdev,
				struct rtw89_pci_dma_ring *bd_ring,
				u32 cur_idx, bool tx)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	u32 cnt, cur_rp, wp, rp, len;

	rp = bd_ring->rp;
	wp = bd_ring->wp;
	len = bd_ring->len;

	cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
	if (tx) {
		cnt = cur_rp >= rp ? cur_rp - rp : len - (rp - cur_rp);
	} else {
		if (info->rx_ring_eq_is_full)
			wp += 1;

		cnt = cur_rp >= wp ? cur_rp - wp : len - (wp - cur_rp);
	}

	bd_ring->rp = cur_rp;

	return cnt;
}
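
/* Worked example of the wrap-around arithmetic above, with illustrative
 * values: on a TX ring of len = 256, a cached rp = 250 and a hardware
 * index cur_rp = 4 mean the hardware wrapped past the ring end, so
 * cnt = len - (rp - cur_rp) = 256 - 246 = 10 descriptors have completed
 * since the last poll.
 */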

static u32 rtw89_pci_txbd_recalc(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 addr_idx = bd_ring->addr.idx;
	u32 cnt, idx;

	idx = rtw89_read32(rtwdev, addr_idx);
	cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, true);

	return cnt;
}

static void rtw89_pci_release_fwcmd(struct rtw89_dev *rtwdev,
				    struct rtw89_pci *rtwpci,
				    u32 cnt, bool release_all)
{
	struct rtw89_pci_tx_data *tx_data;
	struct sk_buff *skb;
	u32 qlen;

	while (cnt--) {
		skb = skb_dequeue(&rtwpci->h2c_queue);
		if (!skb) {
			rtw89_err(rtwdev, "failed to pre-release fwcmd\n");
			return;
		}
		skb_queue_tail(&rtwpci->h2c_release_queue, skb);
	}

	qlen = skb_queue_len(&rtwpci->h2c_release_queue);
	if (!release_all)
		qlen = qlen > RTW89_PCI_MULTITAG ? qlen - RTW89_PCI_MULTITAG : 0;

	while (qlen--) {
		skb = skb_dequeue(&rtwpci->h2c_release_queue);
		if (!skb) {
			rtw89_err(rtwdev, "failed to release fwcmd\n");
			return;
		}
		tx_data = RTW89_PCI_TX_SKB_CB(skb);
		dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	}
}
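
/* Summary of the flow above: completed H2C skbs are first parked on
 * h2c_release_queue. Unless release_all is set, the newest
 * RTW89_PCI_MULTITAG entries are held back and only the older ones are
 * unmapped and freed; the rationale is not spelled out in the source,
 * but the deferral keeps the most recently submitted commands alive a
 * little longer.
 */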

static void rtw89_pci_reclaim_tx_fwcmd(struct rtw89_dev *rtwdev,
				       struct rtw89_pci *rtwpci)
{
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];
	u32 cnt;

	cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring);
	if (!cnt)
		return;
	rtw89_pci_release_fwcmd(rtwdev, rtwpci, cnt, false);
}

static u32 rtw89_pci_rxbd_recalc(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_rx_ring *rx_ring)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	u32 addr_idx = bd_ring->addr.idx;
	u32 cnt, idx;

	idx = rtw89_read32(rtwdev, addr_idx);
	cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, false);

	return cnt;
}

static void rtw89_pci_sync_skb_for_cpu(struct rtw89_dev *rtwdev,
				       struct sk_buff *skb)
{
	struct rtw89_pci_rx_info *rx_info;
	dma_addr_t dma;

	rx_info = RTW89_PCI_RX_SKB_CB(skb);
	dma = rx_info->dma;
	dma_sync_single_for_cpu(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE,
				DMA_FROM_DEVICE);
}

static void rtw89_pci_sync_skb_for_device(struct rtw89_dev *rtwdev,
					  struct sk_buff *skb)
{
	struct rtw89_pci_rx_info *rx_info;
	dma_addr_t dma;

	rx_info = RTW89_PCI_RX_SKB_CB(skb);
	dma = rx_info->dma;
	dma_sync_single_for_device(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE,
				   DMA_FROM_DEVICE);
}

static void rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev,
				       struct sk_buff *skb)
{
	struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
	struct rtw89_pci_rxbd_info *rxbd_info;
	__le32 info;

	rxbd_info = (struct rtw89_pci_rxbd_info *)skb->data;
	info = rxbd_info->dword;

	rx_info->fs = le32_get_bits(info, RTW89_PCI_RXBD_FS);
	rx_info->ls = le32_get_bits(info, RTW89_PCI_RXBD_LS);
	rx_info->len = le32_get_bits(info, RTW89_PCI_RXBD_WRITE_SIZE);
	rx_info->tag = le32_get_bits(info, RTW89_PCI_RXBD_TAG);
}

static int rtw89_pci_validate_rx_tag(struct rtw89_dev *rtwdev,
				     struct rtw89_pci_rx_ring *rx_ring,
				     struct sk_buff *skb)
{
	struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	u32 target_rx_tag;

	if (!info->check_rx_tag)
		return 0;

	/* valid range is 1 ~ 0x1FFF */
	if (rx_ring->target_rx_tag == 0)
		target_rx_tag = 1;
	else
		target_rx_tag = rx_ring->target_rx_tag;

	if (rx_info->tag != target_rx_tag) {
		rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "mismatch RX tag 0x%x 0x%x\n",
			    rx_info->tag, target_rx_tag);
		return -EAGAIN;
	}

	return 0;
}
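
/* Note on the tag check above: the device stamps each RXBD with an
 * increasing tag in the range 1..0x1FFF. A mismatch against
 * target_rx_tag suggests the DMA write has not fully landed yet, so the
 * caller (rtw89_pci_sync_skb_for_device_and_validate_rx_info() below)
 * re-syncs the buffer for the CPU and retries on -EAGAIN.
 */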

static
int rtw89_pci_sync_skb_for_device_and_validate_rx_info(struct rtw89_dev *rtwdev,
						       struct rtw89_pci_rx_ring *rx_ring,
						       struct sk_buff *skb)
{
	struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
	int rx_tag_retry = 1000;
	int ret;

	do {
		rtw89_pci_sync_skb_for_cpu(rtwdev, skb);
		rtw89_pci_rxbd_info_update(rtwdev, skb);

		ret = rtw89_pci_validate_rx_tag(rtwdev, rx_ring, skb);
		if (ret != -EAGAIN)
			break;
	} while (rx_tag_retry--);

	/* update target rx_tag for next RX */
	rx_ring->target_rx_tag = rx_info->tag + 1;

	return ret;
}

static void rtw89_pci_ctrl_txdma_ch_ax(struct rtw89_dev *rtwdev, bool enable)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1;
	const struct rtw89_reg_def *dma_stop2 = &info->dma_stop2;

	if (enable) {
		rtw89_write32_clr(rtwdev, dma_stop1->addr, dma_stop1->mask);
		if (dma_stop2->addr)
			rtw89_write32_clr(rtwdev, dma_stop2->addr, dma_stop2->mask);
	} else {
		rtw89_write32_set(rtwdev, dma_stop1->addr, dma_stop1->mask);
		if (dma_stop2->addr)
			rtw89_write32_set(rtwdev, dma_stop2->addr, dma_stop2->mask);
	}
}

static void rtw89_pci_ctrl_txdma_fw_ch_ax(struct rtw89_dev *rtwdev, bool enable)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	const struct rtw89_reg_def *dma_stop1 = &info->dma_stop1;

	if (enable)
		rtw89_write32_clr(rtwdev, dma_stop1->addr, B_AX_STOP_CH12);
	else
		rtw89_write32_set(rtwdev, dma_stop1->addr, B_AX_STOP_CH12);
}

static bool
rtw89_skb_put_rx_data(struct rtw89_dev *rtwdev, bool fs, bool ls,
		      struct sk_buff *new,
		      const struct sk_buff *skb, u32 offset,
		      const struct rtw89_pci_rx_info *rx_info,
		      const struct rtw89_rx_desc_info *desc_info)
{
	u32 copy_len = rx_info->len - offset;

	if (unlikely(skb_tailroom(new) < copy_len)) {
		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
			    "invalid rx data length bd_len=%d desc_len=%d offset=%d (fs=%d ls=%d)\n",
			    rx_info->len, desc_info->pkt_size, offset, fs, ls);
		rtw89_hex_dump(rtwdev, RTW89_DBG_TXRX, "rx_data: ",
			       skb->data, rx_info->len);
		/* length of a single segment skb is desc_info->pkt_size */
		if (fs && ls) {
			copy_len = desc_info->pkt_size;
		} else {
			rtw89_info(rtwdev, "drop rx data due to invalid length\n");
			return false;
		}
	}

	skb_put_data(new, skb->data + offset, copy_len);

	return true;
}

static u32 rtw89_pci_get_rx_skb_idx(struct rtw89_dev *rtwdev,
				    struct rtw89_pci_dma_ring *bd_ring)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	u32 wp = bd_ring->wp;

	if (!info->rx_ring_eq_is_full)
		return wp;

	if (++wp >= bd_ring->len)
		wp = 0;

	return wp;
}
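
/* Note: on chips with rx_ring_eq_is_full set, a write pointer equal to
 * the read pointer means "full" rather than "empty", so the next buffer
 * to inspect is one slot past wp (wrapping at bd_ring->len). This
 * matches the wp += 1 adjustment in rtw89_pci_dma_recalc() for RX.
 */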

static u32 rtw89_pci_rxbd_deliver_skbs(struct rtw89_dev *rtwdev,
				       struct rtw89_pci_rx_ring *rx_ring)
{
	struct rtw89_rx_desc_info *desc_info = &rx_ring->diliver_desc;
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	struct sk_buff *new = rx_ring->diliver_skb;
	struct rtw89_pci_rx_info *rx_info;
	struct sk_buff *skb;
	u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
	u32 skb_idx;
	u32 offset;
	u32 cnt = 1;
	bool fs, ls;
	int ret;

	skb_idx = rtw89_pci_get_rx_skb_idx(rtwdev, bd_ring);
	skb = rx_ring->buf[skb_idx];

	ret = rtw89_pci_sync_skb_for_device_and_validate_rx_info(rtwdev, rx_ring, skb);
	if (ret) {
		rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
			  bd_ring->wp, ret);
		goto err_sync_device;
	}

	rx_info = RTW89_PCI_RX_SKB_CB(skb);
	fs = info->no_rxbd_fs ? !new : rx_info->fs;
	ls = rx_info->ls;

	if (unlikely(!fs || !ls))
		rtw89_debug(rtwdev, RTW89_DBG_UNEXP,
			    "unexpected fs/ls=%d/%d tag=%u len=%u new->len=%u\n",
			    fs, ls, rx_info->tag, rx_info->len, new ? new->len : 0);

	if (fs) {
		if (new) {
			rtw89_debug(rtwdev, RTW89_DBG_UNEXP,
				    "skb should not be ready before first segment start\n");
			goto err_sync_device;
		}
		if (desc_info->ready) {
			rtw89_warn(rtwdev, "desc info should not be ready before first segment start\n");
			goto err_sync_device;
		}

		rtw89_chip_query_rxdesc(rtwdev, desc_info, skb->data, rxinfo_size);

		new = rtw89_alloc_skb_for_rx(rtwdev, desc_info->pkt_size);
		if (!new)
			goto err_sync_device;

		rx_ring->diliver_skb = new;

		/* first segment has RX desc */
		offset = desc_info->offset + desc_info->rxd_len;
	} else {
		offset = sizeof(struct rtw89_pci_rxbd_info);
		if (!new) {
			rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "no last skb\n");
			goto err_sync_device;
		}
	}
	if (!rtw89_skb_put_rx_data(rtwdev, fs, ls, new, skb, offset, rx_info, desc_info))
		goto err_sync_device;
	rtw89_pci_sync_skb_for_device(rtwdev, skb);
	rtw89_pci_rxbd_increase(rx_ring, 1);

	if (!desc_info->ready) {
		rtw89_warn(rtwdev, "no rx desc information\n");
		goto err_free_resource;
	}
	if (ls) {
		rtw89_core_rx(rtwdev, desc_info, new);
		rx_ring->diliver_skb = NULL;
		desc_info->ready = false;
	}

	return cnt;

err_sync_device:
	rtw89_pci_sync_skb_for_device(rtwdev, skb);
	rtw89_pci_rxbd_increase(rx_ring, 1);
err_free_resource:
	if (new)
		dev_kfree_skb_any(new);
	rx_ring->diliver_skb = NULL;
	desc_info->ready = false;

	return cnt;
}
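
/* Sketch of the reassembly logic above: a frame may span several RX
 * buffer descriptors. The first segment (fs) carries the RX descriptor
 * and triggers allocation of the delivery skb; later segments are
 * copied in past their rtw89_pci_rxbd_info header; the last segment
 * (ls) hands the assembled skb to rtw89_core_rx(). The partial state
 * lives in diliver_skb/diliver_desc (spelling follows the existing
 * struct field names).
 */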

static void rtw89_pci_rxbd_deliver(struct rtw89_dev *rtwdev,
				   struct rtw89_pci_rx_ring *rx_ring,
				   u32 cnt)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	u32 rx_cnt;

	while (cnt && rtwdev->napi_budget_countdown > 0) {
		rx_cnt = rtw89_pci_rxbd_deliver_skbs(rtwdev, rx_ring);
		if (!rx_cnt) {
			rtw89_err(rtwdev, "failed to deliver RXBD skb\n");

			/* skip the remaining RXBD buffers */
			rtw89_pci_rxbd_increase(rx_ring, cnt);
			break;
		}

		cnt -= rx_cnt;
	}

	rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
}

static int rtw89_pci_poll_rxq_dma(struct rtw89_dev *rtwdev,
				  struct rtw89_pci *rtwpci, int budget)
{
	struct rtw89_pci_rx_ring *rx_ring;
	int countdown = rtwdev->napi_budget_countdown;
	u32 cnt;

	rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RXQ];

	cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
	if (!cnt)
		return 0;

	cnt = min_t(u32, budget, cnt);

	rtw89_pci_rxbd_deliver(rtwdev, rx_ring, cnt);

	/* When flushing pending SKBs, the countdown may be exceeded. */
	if (rtwdev->napi_budget_countdown <= 0)
		return budget;

	return budget - countdown;
}

static void rtw89_pci_tx_status(struct rtw89_dev *rtwdev,
				struct rtw89_pci_tx_ring *tx_ring,
				struct sk_buff *skb, u8 tx_status)
{
	struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
	struct ieee80211_tx_info *info;

	rtw89_core_tx_wait_complete(rtwdev, skb_data, tx_status == RTW89_TX_DONE);

	info = IEEE80211_SKB_CB(skb);
	ieee80211_tx_info_clear_status(info);

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
	if (tx_status == RTW89_TX_DONE) {
		info->flags |= IEEE80211_TX_STAT_ACK;
		tx_ring->tx_acked++;
	} else {
		if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)
			rtw89_debug(rtwdev, RTW89_DBG_FW,
				    "TX failed with status %x\n", tx_status);
		switch (tx_status) {
		case RTW89_TX_RETRY_LIMIT:
			tx_ring->tx_retry_lmt++;
			break;
		case RTW89_TX_LIFE_TIME:
			tx_ring->tx_life_time++;
			break;
		case RTW89_TX_MACID_DROP:
			tx_ring->tx_mac_id_drop++;
			break;
		default:
			rtw89_warn(rtwdev, "invalid TX status %x\n", tx_status);
			break;
		}
	}

	ieee80211_tx_status_ni(rtwdev->hw, skb);
}

static void rtw89_pci_reclaim_txbd(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_tx_wd *txwd;
	u32 cnt;

	cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring);
	while (cnt--) {
		txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list);
		if (!txwd) {
			rtw89_warn(rtwdev, "No busy txwd pages available\n");
			break;
		}

		list_del_init(&txwd->list);

		/* this skb has been freed by RPP */
		if (skb_queue_len(&txwd->queue) == 0)
			rtw89_pci_enqueue_txwd(tx_ring, txwd);
	}
}

static void rtw89_pci_release_busy_txwd(struct rtw89_dev *rtwdev,
					struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
	struct rtw89_pci_tx_wd *txwd;
	int i;

	for (i = 0; i < wd_ring->page_num; i++) {
		txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list);
		if (!txwd)
			break;

		list_del_init(&txwd->list);
	}
}

static void rtw89_pci_release_txwd_skb(struct rtw89_dev *rtwdev,
				       struct rtw89_pci_tx_ring *tx_ring,
				       struct rtw89_pci_tx_wd *txwd, u16 seq,
				       u8 tx_status)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_data *tx_data;
	struct sk_buff *skb, *tmp;
	u8 txch = tx_ring->txch;

	if (!list_empty(&txwd->list)) {
		rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
		/* In low power mode, an RPP can be received before the TX BD
		 * is updated. In normal mode, that should not happen, so give
		 * it a warning.
		 */
		if (!rtwpci->low_power && !list_empty(&txwd->list))
			rtw89_warn(rtwdev, "queue %d txwd %d is not idle\n",
				   txch, seq);
	}

	skb_queue_walk_safe(&txwd->queue, skb, tmp) {
		skb_unlink(skb, &txwd->queue);

		tx_data = RTW89_PCI_TX_SKB_CB(skb);
		dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
				 DMA_TO_DEVICE);

		rtw89_pci_tx_status(rtwdev, tx_ring, skb, tx_status);
	}

	if (list_empty(&txwd->list))
		rtw89_pci_enqueue_txwd(tx_ring, txwd);
}

static void rtw89_pci_release_rpp(struct rtw89_dev *rtwdev,
				  struct rtw89_pci_rpp_fmt *rpp)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring;
	struct rtw89_pci_tx_wd_ring *wd_ring;
	struct rtw89_pci_tx_wd *txwd;
	u16 seq;
	u8 qsel, tx_status, txch;

	seq = le32_get_bits(rpp->dword, RTW89_PCI_RPP_SEQ);
	qsel = le32_get_bits(rpp->dword, RTW89_PCI_RPP_QSEL);
	tx_status = le32_get_bits(rpp->dword, RTW89_PCI_RPP_TX_STATUS);
	txch = rtw89_core_get_ch_dma(rtwdev, qsel);

	if (txch == RTW89_TXCH_CH12) {
		rtw89_warn(rtwdev, "should be no fwcmd release report\n");
		return;
	}

	tx_ring = &rtwpci->tx_rings[txch];
	wd_ring = &tx_ring->wd_ring;
	txwd = &wd_ring->pages[seq];

	rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, seq, tx_status);
}

static void rtw89_pci_release_pending_txwd_skb(struct rtw89_dev *rtwdev,
					       struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
	struct rtw89_pci_tx_wd *txwd;
	int i;

	for (i = 0; i < wd_ring->page_num; i++) {
		txwd = &wd_ring->pages[i];

		if (!list_empty(&txwd->list))
			continue;

		rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, i, RTW89_TX_MACID_DROP);
	}
}

static u32 rtw89_pci_release_tx_skbs(struct rtw89_dev *rtwdev,
				     struct rtw89_pci_rx_ring *rx_ring,
				     u32 max_cnt)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	struct rtw89_pci_rx_info *rx_info;
	struct rtw89_pci_rpp_fmt *rpp;
	struct rtw89_rx_desc_info desc_info = {};
	struct sk_buff *skb;
	u32 cnt = 0;
	u32 rpp_size = sizeof(struct rtw89_pci_rpp_fmt);
	u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
	u32 skb_idx;
	u32 offset;
	int ret;

	skb_idx = rtw89_pci_get_rx_skb_idx(rtwdev, bd_ring);
	skb = rx_ring->buf[skb_idx];

	ret = rtw89_pci_sync_skb_for_device_and_validate_rx_info(rtwdev, rx_ring, skb);
	if (ret) {
		rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
			  bd_ring->wp, ret);
		goto err_sync_device;
	}

	rx_info = RTW89_PCI_RX_SKB_CB(skb);
	if (!rx_info->fs || !rx_info->ls) {
		rtw89_err(rtwdev, "cannot process RP frame without FS/LS set\n");
		return cnt;
	}

	rtw89_chip_query_rxdesc(rtwdev, &desc_info, skb->data, rxinfo_size);

	/* first segment has RX desc */
	offset = desc_info.offset + desc_info.rxd_len;
	for (; offset + rpp_size <= rx_info->len; offset += rpp_size) {
		rpp = (struct rtw89_pci_rpp_fmt *)(skb->data + offset);
		rtw89_pci_release_rpp(rtwdev, rpp);
	}

	rtw89_pci_sync_skb_for_device(rtwdev, skb);
	rtw89_pci_rxbd_increase(rx_ring, 1);
	cnt++;

	return cnt;

err_sync_device:
	rtw89_pci_sync_skb_for_device(rtwdev, skb);
	return 0;
}

static void rtw89_pci_release_tx(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_rx_ring *rx_ring,
				 u32 cnt)
{
	struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
	u32 release_cnt;

	while (cnt) {
		release_cnt = rtw89_pci_release_tx_skbs(rtwdev, rx_ring, cnt);
		if (!release_cnt) {
			rtw89_err(rtwdev, "failed to release TX skbs\n");

			/* skip the remaining RXBD buffers */
			rtw89_pci_rxbd_increase(rx_ring, cnt);
			break;
		}

		cnt -= release_cnt;
	}

	rtw89_write16(rtwdev, bd_ring->addr.idx, bd_ring->wp);
}

static int rtw89_pci_poll_rpq_dma(struct rtw89_dev *rtwdev,
				  struct rtw89_pci *rtwpci, int budget)
{
	struct rtw89_pci_rx_ring *rx_ring;
	u32 cnt;
	int work_done;

	rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];

	spin_lock_bh(&rtwpci->trx_lock);

	cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
	if (cnt == 0)
		goto out_unlock;

	rtw89_pci_release_tx(rtwdev, rx_ring, cnt);

out_unlock:
	spin_unlock_bh(&rtwpci->trx_lock);

	/* always release all RPQ */
	work_done = min_t(int, cnt, budget);
	rtwdev->napi_budget_countdown -= work_done;

	return work_done;
}

static void rtw89_pci_isr_rxd_unavail(struct rtw89_dev *rtwdev,
				      struct rtw89_pci *rtwpci)
{
	struct rtw89_pci_rx_ring *rx_ring;
	struct rtw89_pci_dma_ring *bd_ring;
	u32 reg_idx;
	u16 hw_idx, hw_idx_next, host_idx;
	int i;

	for (i = 0; i < RTW89_RXCH_NUM; i++) {
		rx_ring = &rtwpci->rx_rings[i];
		bd_ring = &rx_ring->bd_ring;

		reg_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
		hw_idx = FIELD_GET(TXBD_HW_IDX_MASK, reg_idx);
		host_idx = FIELD_GET(TXBD_HOST_IDX_MASK, reg_idx);
		hw_idx_next = (hw_idx + 1) % bd_ring->len;

		if (hw_idx_next == host_idx)
			rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "%d RXD unavailable\n", i);

		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
			    "%d RXD unavailable, idx=0x%08x, len=%d\n",
			    i, reg_idx, bd_ring->len);
	}
}

void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev,
			       struct rtw89_pci *rtwpci,
			       struct rtw89_pci_isrs *isrs)
{
	isrs->halt_c2h_isrs = rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs;
	isrs->isrs[0] = rtw89_read32(rtwdev, R_AX_PCIE_HISR00) & rtwpci->intrs[0];
	isrs->isrs[1] = rtw89_read32(rtwdev, R_AX_PCIE_HISR10) & rtwpci->intrs[1];

	rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs);
	rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isrs->isrs[0]);
	rtw89_write32(rtwdev, R_AX_PCIE_HISR10, isrs->isrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_recognize_intrs);

void rtw89_pci_recognize_intrs_v1(struct rtw89_dev *rtwdev,
				  struct rtw89_pci *rtwpci,
				  struct rtw89_pci_isrs *isrs)
{
	isrs->ind_isrs = rtw89_read32(rtwdev, R_AX_PCIE_HISR00_V1) & rtwpci->ind_intrs;
	isrs->halt_c2h_isrs = isrs->ind_isrs & B_AX_HS0ISR_IND_INT_EN ?
			      rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs : 0;
	isrs->isrs[0] = isrs->ind_isrs & B_AX_HCI_AXIDMA_INT_EN ?
			rtw89_read32(rtwdev, R_AX_HAXI_HISR00) & rtwpci->intrs[0] : 0;
	isrs->isrs[1] = isrs->ind_isrs & B_AX_HS1ISR_IND_INT_EN ?
			rtw89_read32(rtwdev, R_AX_HISR1) & rtwpci->intrs[1] : 0;

	if (isrs->halt_c2h_isrs)
		rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs);
	if (isrs->isrs[0])
		rtw89_write32(rtwdev, R_AX_HAXI_HISR00, isrs->isrs[0]);
	if (isrs->isrs[1])
		rtw89_write32(rtwdev, R_AX_HISR1, isrs->isrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v1);

void rtw89_pci_recognize_intrs_v2(struct rtw89_dev *rtwdev,
				  struct rtw89_pci *rtwpci,
				  struct rtw89_pci_isrs *isrs)
{
	isrs->ind_isrs = rtw89_read32(rtwdev, R_BE_PCIE_HISR) & rtwpci->ind_intrs;
	isrs->halt_c2h_isrs = isrs->ind_isrs & B_BE_HS0ISR_IND_INT ?
			      rtw89_read32(rtwdev, R_BE_HISR0) & rtwpci->halt_c2h_intrs : 0;
	isrs->isrs[0] = isrs->ind_isrs & B_BE_HCI_AXIDMA_INT ?
			rtw89_read32(rtwdev, R_BE_HAXI_HISR00) & rtwpci->intrs[0] : 0;
	isrs->isrs[1] = rtw89_read32(rtwdev, R_BE_PCIE_DMA_ISR) & rtwpci->intrs[1];

	if (isrs->halt_c2h_isrs)
		rtw89_write32(rtwdev, R_BE_HISR0, isrs->halt_c2h_isrs);
	if (isrs->isrs[0])
		rtw89_write32(rtwdev, R_BE_HAXI_HISR00, isrs->isrs[0]);
	if (isrs->isrs[1])
		rtw89_write32(rtwdev, R_BE_PCIE_DMA_ISR, isrs->isrs[1]);
	rtw89_write32(rtwdev, R_BE_PCIE_HISR, isrs->ind_isrs);
}
EXPORT_SYMBOL(rtw89_pci_recognize_intrs_v2);

void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, rtwpci->intrs[0]);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, rtwpci->intrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_enable_intr);

void rtw89_pci_disable_intr(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_HIMR0, 0);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, 0);
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, 0);
}
EXPORT_SYMBOL(rtw89_pci_disable_intr);

void rtw89_pci_enable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, rtwpci->ind_intrs);
	rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
	rtw89_write32(rtwdev, R_AX_HAXI_HIMR00, rtwpci->intrs[0]);
	rtw89_write32(rtwdev, R_AX_HIMR1, rtwpci->intrs[1]);
}
EXPORT_SYMBOL(rtw89_pci_enable_intr_v1);

void rtw89_pci_disable_intr_v1(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_AX_PCIE_HIMR00_V1, 0);
}
EXPORT_SYMBOL(rtw89_pci_disable_intr_v1);

void rtw89_pci_enable_intr_v2(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_BE_HIMR0, rtwpci->halt_c2h_intrs);
	rtw89_write32(rtwdev, R_BE_HAXI_HIMR00, rtwpci->intrs[0]);
	rtw89_write32(rtwdev, R_BE_PCIE_DMA_IMR_0_V1, rtwpci->intrs[1]);
	rtw89_write32(rtwdev, R_BE_PCIE_HIMR0, rtwpci->ind_intrs);
}
EXPORT_SYMBOL(rtw89_pci_enable_intr_v2);

void rtw89_pci_disable_intr_v2(struct rtw89_dev *rtwdev, struct rtw89_pci *rtwpci)
{
	rtw89_write32(rtwdev, R_BE_PCIE_HIMR0, 0);
	rtw89_write32(rtwdev, R_BE_PCIE_DMA_IMR_0_V1, 0);
}
EXPORT_SYMBOL(rtw89_pci_disable_intr_v2);

static void rtw89_pci_ops_recovery_start(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw89_chip_disable_intr(rtwdev, rtwpci);
	rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_START);
	rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static void rtw89_pci_ops_recovery_complete(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw89_chip_disable_intr(rtwdev, rtwpci);
	rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RECOVERY_COMPLETE);
	rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
}

static void rtw89_pci_low_power_interrupt_handler(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	int budget = NAPI_POLL_WEIGHT;

	/* To prevent the RXQ from getting stuck when the budget runs out. */
	rtwdev->napi_budget_countdown = budget;

	rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, budget);
	rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, budget);
}

static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev)
{
	struct rtw89_dev *rtwdev = dev;
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	const struct rtw89_pci_gen_def *gen_def = info->gen_def;
	struct rtw89_pci_isrs isrs;
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	rtw89_chip_recognize_intrs(rtwdev, rtwpci, &isrs);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);

	if (unlikely(isrs.isrs[0] & gen_def->isr_rdu))
		rtw89_pci_isr_rxd_unavail(rtwdev, rtwpci);

	if (unlikely(isrs.halt_c2h_isrs & gen_def->isr_halt_c2h))
		rtw89_ser_notify(rtwdev, rtw89_mac_get_err_status(rtwdev));

	if (unlikely(isrs.halt_c2h_isrs & gen_def->isr_wdt_timeout))
		rtw89_ser_notify(rtwdev, MAC_AX_ERR_L2_ERR_WDT_TIMEOUT_INT);

	if (unlikely(rtwpci->under_recovery))
		goto enable_intr;

	if (unlikely(rtwpci->low_power)) {
		rtw89_pci_low_power_interrupt_handler(rtwdev);
		goto enable_intr;
	}

	if (likely(rtwpci->running)) {
		local_bh_disable();
		napi_schedule(&rtwdev->napi);
		local_bh_enable();
	}

	return IRQ_HANDLED;

enable_intr:
	spin_lock_irqsave(&rtwpci->irq_lock, flags);
	if (likely(rtwpci->running))
		rtw89_chip_enable_intr(rtwdev, rtwpci);
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
	return IRQ_HANDLED;
}

static irqreturn_t rtw89_pci_interrupt_handler(int irq, void *dev)
{
	struct rtw89_dev *rtwdev = dev;
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	unsigned long flags;
	irqreturn_t irqret = IRQ_WAKE_THREAD;

	spin_lock_irqsave(&rtwpci->irq_lock, flags);

	/* An interrupt event that is already in flight can still fire even
	 * after pci_stop() has turned off the IMR.
	 */
	if (unlikely(!rtwpci->running)) {
		irqret = IRQ_HANDLED;
		goto exit;
	}

	rtw89_chip_disable_intr(rtwdev, rtwpci);
exit:
	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);

	return irqret;
}
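
/* Note on the IRQ split above: the hard handler
 * (rtw89_pci_interrupt_handler) only masks further interrupts and
 * returns IRQ_WAKE_THREAD; the threaded handler
 * (rtw89_pci_interrupt_threadfn) reads and acks the ISR bits, then
 * either schedules NAPI (normal mode) or polls the rings directly (low
 * power / recovery), re-enabling interrupts itself on the paths that
 * bypass NAPI.
 */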

#define DEF_TXCHADDRS_TYPE2(gen, ch_idx, txch, v...) \
	[RTW89_TXCH_##ch_idx] = { \
		.num = R_##gen##_##txch##_TXBD_NUM ##v, \
		.idx = R_##gen##_##txch##_TXBD_IDX ##v, \
		.bdram = 0, \
		.desa_l = R_##gen##_##txch##_TXBD_DESA_L ##v, \
		.desa_h = R_##gen##_##txch##_TXBD_DESA_H ##v, \
	}

#define DEF_TXCHADDRS_TYPE1(info, txch, v...) \
	[RTW89_TXCH_##txch] = { \
		.num = R_AX_##txch##_TXBD_NUM ##v, \
		.idx = R_AX_##txch##_TXBD_IDX ##v, \
		.bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
		.desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
		.desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
	}

#define DEF_TXCHADDRS(info, txch, v...) \
	[RTW89_TXCH_##txch] = { \
		.num = R_AX_##txch##_TXBD_NUM, \
		.idx = R_AX_##txch##_TXBD_IDX, \
		.bdram = R_AX_##txch##_BDRAM_CTRL ##v, \
		.desa_l = R_AX_##txch##_TXBD_DESA_L ##v, \
		.desa_h = R_AX_##txch##_TXBD_DESA_H ##v, \
	}

#define DEF_RXCHADDRS(gen, ch_idx, rxch, v...) \
	[RTW89_RXCH_##ch_idx] = { \
		.num = R_##gen##_##rxch##_RXBD_NUM ##v, \
		.idx = R_##gen##_##rxch##_RXBD_IDX ##v, \
		.desa_l = R_##gen##_##rxch##_RXBD_DESA_L ##v, \
		.desa_h = R_##gen##_##rxch##_RXBD_DESA_H ##v, \
	}

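/* Illustrative expansion, derived from the macros above:
 * DEF_TXCHADDRS(info, ACH0, _V1) produces
 *
 *   [RTW89_TXCH_ACH0] = {
 *           .num = R_AX_ACH0_TXBD_NUM,
 *           .idx = R_AX_ACH0_TXBD_IDX,
 *           .bdram = R_AX_ACH0_BDRAM_CTRL_V1,
 *           .desa_l = R_AX_ACH0_TXBD_DESA_L_V1,
 *           .desa_h = R_AX_ACH0_TXBD_DESA_H_V1,
 *   }
 *
 * i.e. the variadic suffix applies only to the bdram/desa registers,
 * while DEF_TXCHADDRS_TYPE1 appends it to all five fields.
 */
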
const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set = {
	.tx = {
		DEF_TXCHADDRS(info, ACH0),
		DEF_TXCHADDRS(info, ACH1),
		DEF_TXCHADDRS(info, ACH2),
		DEF_TXCHADDRS(info, ACH3),
		DEF_TXCHADDRS(info, ACH4),
		DEF_TXCHADDRS(info, ACH5),
		DEF_TXCHADDRS(info, ACH6),
		DEF_TXCHADDRS(info, ACH7),
		DEF_TXCHADDRS(info, CH8),
		DEF_TXCHADDRS(info, CH9),
		DEF_TXCHADDRS_TYPE1(info, CH10),
		DEF_TXCHADDRS_TYPE1(info, CH11),
		DEF_TXCHADDRS(info, CH12),
	},
	.rx = {
		DEF_RXCHADDRS(AX, RXQ, RXQ),
		DEF_RXCHADDRS(AX, RPQ, RPQ),
	},
};
EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set);

const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1 = {
	.tx = {
		DEF_TXCHADDRS(info, ACH0, _V1),
		DEF_TXCHADDRS(info, ACH1, _V1),
		DEF_TXCHADDRS(info, ACH2, _V1),
		DEF_TXCHADDRS(info, ACH3, _V1),
		DEF_TXCHADDRS(info, ACH4, _V1),
		DEF_TXCHADDRS(info, ACH5, _V1),
		DEF_TXCHADDRS(info, ACH6, _V1),
		DEF_TXCHADDRS(info, ACH7, _V1),
		DEF_TXCHADDRS(info, CH8, _V1),
		DEF_TXCHADDRS(info, CH9, _V1),
		DEF_TXCHADDRS_TYPE1(info, CH10, _V1),
		DEF_TXCHADDRS_TYPE1(info, CH11, _V1),
		DEF_TXCHADDRS(info, CH12, _V1),
	},
	.rx = {
		DEF_RXCHADDRS(AX, RXQ, RXQ, _V1),
		DEF_RXCHADDRS(AX, RPQ, RPQ, _V1),
	},
};
EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_v1);

const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_be = {
	.tx = {
		DEF_TXCHADDRS_TYPE2(BE, ACH0, CH0, _V1),
		DEF_TXCHADDRS_TYPE2(BE, ACH1, CH1, _V1),
		DEF_TXCHADDRS_TYPE2(BE, ACH2, CH2, _V1),
		DEF_TXCHADDRS_TYPE2(BE, ACH3, CH3, _V1),
		DEF_TXCHADDRS_TYPE2(BE, ACH4, CH4, _V1),
		DEF_TXCHADDRS_TYPE2(BE, ACH5, CH5, _V1),
		DEF_TXCHADDRS_TYPE2(BE, ACH6, CH6, _V1),
		DEF_TXCHADDRS_TYPE2(BE, ACH7, CH7, _V1),
		DEF_TXCHADDRS_TYPE2(BE, CH8, CH8, _V1),
		DEF_TXCHADDRS_TYPE2(BE, CH9, CH9, _V1),
		DEF_TXCHADDRS_TYPE2(BE, CH10, CH10, _V1),
		DEF_TXCHADDRS_TYPE2(BE, CH11, CH11, _V1),
		DEF_TXCHADDRS_TYPE2(BE, CH12, CH12, _V1),
	},
	.rx = {
		DEF_RXCHADDRS(BE, RXQ, RXQ0, _V1),
		DEF_RXCHADDRS(BE, RPQ, RPQ0, _V1),
	},
};
EXPORT_SYMBOL(rtw89_pci_ch_dma_addr_set_be);

#undef DEF_TXCHADDRS_TYPE1
#undef DEF_TXCHADDRS
#undef DEF_RXCHADDRS

static int rtw89_pci_get_txch_addrs(struct rtw89_dev *rtwdev,
				    enum rtw89_tx_channel txch,
				    const struct rtw89_pci_ch_dma_addr **addr)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;

	if (txch >= RTW89_TXCH_NUM)
		return -EINVAL;

	*addr = &info->dma_addr_set->tx[txch];

	return 0;
}

static int rtw89_pci_get_rxch_addrs(struct rtw89_dev *rtwdev,
				    enum rtw89_rx_channel rxch,
				    const struct rtw89_pci_ch_dma_addr **addr)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;

	if (rxch >= RTW89_RXCH_NUM)
		return -EINVAL;

	*addr = &info->dma_addr_set->rx[rxch];

	return 0;
}

static u32 rtw89_pci_get_avail_txbd_num(struct rtw89_pci_tx_ring *ring)
{
	struct rtw89_pci_dma_ring *bd_ring = &ring->bd_ring;

	/* one descriptor is reserved to distinguish a full ring from an
	 * empty one
	 */
	if (bd_ring->rp > bd_ring->wp)
		return bd_ring->rp - bd_ring->wp - 1;

	return bd_ring->len - (bd_ring->wp - bd_ring->rp) - 1;
}
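
/* Worked example with illustrative values: for len = 128, wp = 100 and
 * rp = 10 there are 90 descriptors in flight, so
 * len - (wp - rp) - 1 = 128 - 90 - 1 = 37 TXBDs remain usable. The one
 * reserved slot keeps wp == rp unambiguous (it always means "empty").
 */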

static
u32 __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];
	u32 cnt;

	spin_lock_bh(&rtwpci->trx_lock);
	rtw89_pci_reclaim_tx_fwcmd(rtwdev, rtwpci);
	cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	spin_unlock_bh(&rtwpci->trx_lock);

	return cnt;
}

static
u32 __rtw89_pci_check_and_reclaim_tx_resource_noio(struct rtw89_dev *rtwdev,
						   u8 txch)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
	u32 cnt;

	spin_lock_bh(&rtwpci->trx_lock);
	cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	if (txch != RTW89_TXCH_CH12)
		cnt = min(cnt, wd_ring->curr_num);
	spin_unlock_bh(&rtwpci->trx_lock);

	return cnt;
}

static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
						     u8 txch)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 bd_cnt, wd_cnt, min_cnt = 0;
	struct rtw89_pci_rx_ring *rx_ring;
	enum rtw89_debug_mask debug_mask;
	u32 cnt;

	rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];

	spin_lock_bh(&rtwpci->trx_lock);
	bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	wd_cnt = wd_ring->curr_num;

	if (wd_cnt == 0 || bd_cnt == 0) {
		cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
		if (cnt)
			rtw89_pci_release_tx(rtwdev, rx_ring, cnt);
		else if (wd_cnt == 0)
			goto out_unlock;

		bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
		if (bd_cnt == 0)
			rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
	}

	bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
	wd_cnt = wd_ring->curr_num;
	min_cnt = min(bd_cnt, wd_cnt);
	if (min_cnt == 0) {
		/* This message can show up frequently in low power mode or
		 * under high traffic on chips with a small FIFO, and we have
		 * recognized it as normal behavior, so print it with mask
		 * RTW89_DBG_TXRX in those situations.
		 */
		if (rtwpci->low_power || chip->small_fifo_size)
			debug_mask = RTW89_DBG_TXRX;
		else
			debug_mask = RTW89_DBG_UNEXP;

		rtw89_debug(rtwdev, debug_mask,
			    "still no tx resource after reclaim: wd_cnt=%d bd_cnt=%d\n",
			    wd_cnt, bd_cnt);
	}

out_unlock:
	spin_unlock_bh(&rtwpci->trx_lock);

	return min_cnt;
}

static u32 rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
						   u8 txch)
{
	if (rtwdev->hci.paused)
		return __rtw89_pci_check_and_reclaim_tx_resource_noio(rtwdev, txch);

	if (txch == RTW89_TXCH_CH12)
		return __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(rtwdev);

	return __rtw89_pci_check_and_reclaim_tx_resource(rtwdev, txch);
}

static void __rtw89_pci_tx_kick_off(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 host_idx, addr;

	spin_lock_bh(&rtwpci->trx_lock);

	addr = bd_ring->addr.idx;
	host_idx = bd_ring->wp;
	rtw89_write16(rtwdev, addr, host_idx);

	spin_unlock_bh(&rtwpci->trx_lock);
}

static void rtw89_pci_tx_bd_ring_update(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring,
					int n_txbd)
{
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 host_idx, len;

	len = bd_ring->len;
	host_idx = bd_ring->wp + n_txbd;
	host_idx = host_idx < len ? host_idx : host_idx - len;

	bd_ring->wp = host_idx;
}
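
/* Note: rtw89_pci_tx_bd_ring_update() only advances the software write
 * pointer, with wraparound (e.g. len = 256, wp = 255, n_txbd = 2 yields
 * wp = 1); the hardware sees the new descriptors only once
 * __rtw89_pci_tx_kick_off() writes wp to the ring's idx register.
 */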

static void rtw89_pci_ops_tx_kick_off(struct rtw89_dev *rtwdev, u8 txch)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];

	if (rtwdev->hci.paused) {
		set_bit(txch, rtwpci->kick_map);
		return;
	}

	__rtw89_pci_tx_kick_off(rtwdev, tx_ring);
}

static void rtw89_pci_tx_kick_off_pending(struct rtw89_dev *rtwdev)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring;
	int txch;

	for (txch = 0; txch < RTW89_TXCH_NUM; txch++) {
		if (!test_and_clear_bit(txch, rtwpci->kick_map))
			continue;

		tx_ring = &rtwpci->tx_rings[txch];
		__rtw89_pci_tx_kick_off(rtwdev, tx_ring);
	}
}

static void __pci_flush_txch(struct rtw89_dev *rtwdev, u8 txch, bool drop)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
	struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
	u32 cur_idx, cur_rp;
	u8 i;

	/* Because the time taken by the I/O is somewhat dynamic, it's hard
	 * to define a reasonable fixed total timeout for the
	 * read_poll_timeout* helpers. Instead, we can bound the number of
	 * polling attempts, so we just use a for loop with udelay() here.
	 */
	for (i = 0; i < 60; i++) {
		cur_idx = rtw89_read32(rtwdev, bd_ring->addr.idx);
		cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
		if (cur_rp == bd_ring->wp)
			return;

		udelay(1);
	}

	if (!drop)
		rtw89_info(rtwdev, "timed out flushing pci txch: %d\n", txch);
}

static void __rtw89_pci_ops_flush_txchs(struct rtw89_dev *rtwdev, u32 txchs,
					bool drop)
{
	const struct rtw89_pci_info *info = rtwdev->pci_info;
	u8 i;

	for (i = 0; i < RTW89_TXCH_NUM; i++) {
		/* It may be unnecessary to flush the FWCMD queue. */
		if (i == RTW89_TXCH_CH12)
			continue;
		if (info->tx_dma_ch_mask & BIT(i))
			continue;

		if (txchs & BIT(i))
			__pci_flush_txch(rtwdev, i, drop);
	}
}

static void rtw89_pci_ops_flush_queues(struct rtw89_dev *rtwdev, u32 queues,
				       bool drop)
{
	__rtw89_pci_ops_flush_txchs(rtwdev, BIT(RTW89_TXCH_NUM) - 1, drop);
}
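
/* Note: the mac80211 'queues' bitmap is ignored here; since
 * BIT(RTW89_TXCH_NUM) - 1 selects every channel, all TX DMA channels
 * (except FWCMD and those masked by tx_dma_ch_mask) are flushed
 * regardless of which queues were requested.
 */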

u32 rtw89_pci_fill_txaddr_info(struct rtw89_dev *rtwdev,
			       void *txaddr_info_addr, u32 total_len,
			       dma_addr_t dma, u8 *add_info_nr)
{
	struct rtw89_pci_tx_addr_info_32 *txaddr_info = txaddr_info_addr;
	__le16 option;

	txaddr_info->length = cpu_to_le16(total_len);
	option = cpu_to_le16(RTW89_PCI_ADDR_MSDU_LS | RTW89_PCI_ADDR_NUM(1));
	option |= le16_encode_bits(upper_32_bits(dma), RTW89_PCI_ADDR_HIGH_MASK);
	txaddr_info->option = option;
	txaddr_info->dma = cpu_to_le32(dma);

	*add_info_nr = 1;

	return sizeof(*txaddr_info);
}
EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info);

u32 rtw89_pci_fill_txaddr_info_v1(struct rtw89_dev *rtwdev,
				  void *txaddr_info_addr, u32 total_len,
				  dma_addr_t dma, u8 *add_info_nr)
{
	struct rtw89_pci_tx_addr_info_32_v1 *txaddr_info = txaddr_info_addr;
	u32 remain = total_len;
	u32 len;
	u16 length_option;
	int n;

	for (n = 0; n < RTW89_TXADDR_INFO_NR_V1 && remain; n++) {
		len = remain >= TXADDR_INFO_LENTHG_V1_MAX ?
		      TXADDR_INFO_LENTHG_V1_MAX : remain;
		remain -= len;

		length_option = FIELD_PREP(B_PCIADDR_LEN_V1_MASK, len) |
				FIELD_PREP(B_PCIADDR_HIGH_SEL_V1_MASK, 0) |
				FIELD_PREP(B_PCIADDR_LS_V1_MASK, remain == 0);
		length_option |= u16_encode_bits(upper_32_bits(dma),
						 B_PCIADDR_HIGH_SEL_V1_MASK);
		txaddr_info->length_opt = cpu_to_le16(length_option);
		txaddr_info->dma_low_lsb = cpu_to_le16(FIELD_GET(GENMASK(15, 0), dma));
		txaddr_info->dma_low_msb = cpu_to_le16(FIELD_GET(GENMASK(31, 16), dma));

		dma += len;
		txaddr_info++;
	}

	WARN_ONCE(remain, "length overflow remain=%u total_len=%u",
		  remain, total_len);

	*add_info_nr = n;

	return n * sizeof(*txaddr_info);
}
EXPORT_SYMBOL(rtw89_pci_fill_txaddr_info_v1);
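
/* Worked example for the v1 variant, assuming RTW89_TXADDR_INFO_NR_V1
 * is at least 3: a buffer of 2 * TXADDR_INFO_LENTHG_V1_MAX + 100 bytes
 * is split into three entries of MAX, MAX and 100 bytes; only the last
 * entry gets its LS bit set. The WARN_ONCE fires only when total_len
 * needs more entries than RTW89_TXADDR_INFO_NR_V1 allows.
 */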

static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_tx_ring *tx_ring,
				 struct rtw89_pci_tx_wd *txwd,
				 struct rtw89_core_tx_request *tx_req)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
	struct rtw89_pci_tx_wp_info *txwp_info;
	void *txaddr_info_addr;
	struct pci_dev *pdev = rtwpci->pdev;
	struct sk_buff *skb = tx_req->skb;
	struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
	struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
	bool en_wd_info = desc_info->en_wd_info;
	u32 txwd_len;
	u32 txwp_len;
	u32 txaddr_info_len;
	dma_addr_t dma;
	int ret;

	dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma)) {
		rtw89_err(rtwdev, "failed to map skb dma data\n");
		ret = -EBUSY;
		goto err;
	}

	tx_data->dma = dma;
	rcu_assign_pointer(skb_data->wait, NULL);

	txwp_len = sizeof(*txwp_info);
	txwd_len = chip->txwd_body_size;
	txwd_len += en_wd_info ? chip->txwd_info_size : 0;

	txwp_info = txwd->vaddr + txwd_len;
	txwp_info->seq0 = cpu_to_le16(txwd->seq | RTW89_PCI_TXWP_VALID);
	txwp_info->seq1 = 0;
	txwp_info->seq2 = 0;
	txwp_info->seq3 = 0;

	tx_ring->tx_cnt++;
	txaddr_info_addr = txwd->vaddr + txwd_len + txwp_len;
	txaddr_info_len =
		rtw89_chip_fill_txaddr_info(rtwdev, txaddr_info_addr, skb->len,
					    dma, &desc_info->addr_info_nr);

	txwd->len = txwd_len + txwp_len + txaddr_info_len;

	rtw89_chip_fill_txdesc(rtwdev, desc_info, txwd->vaddr);

	skb_queue_tail(&txwd->queue, skb);

	return 0;

err:
	return ret;
}

static int rtw89_pci_fwcmd_submit(struct rtw89_dev *rtwdev,
				  struct rtw89_pci_tx_ring *tx_ring,
				  struct rtw89_pci_tx_bd_32 *txbd,
				  struct rtw89_core_tx_request *tx_req)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
	void *txdesc;
	int txdesc_size = chip->h2c_desc_size;
	struct pci_dev *pdev = rtwpci->pdev;
	struct sk_buff *skb = tx_req->skb;
	struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
	dma_addr_t dma;
	__le16 opt;

	txdesc = skb_push(skb, txdesc_size);
	memset(txdesc, 0, txdesc_size);
	rtw89_chip_fill_txdesc_fwcmd(rtwdev, desc_info, txdesc);

	dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma)) {
		rtw89_err(rtwdev, "failed to map fwcmd dma data\n");
		return -EBUSY;
	}

	tx_data->dma = dma;
	opt = cpu_to_le16(RTW89_PCI_TXBD_OPT_LS);
	opt |= le16_encode_bits(upper_32_bits(dma), RTW89_PCI_TXBD_OPT_DMA_HI);
	txbd->opt = opt;
	txbd->length = cpu_to_le16(skb->len);
	txbd->dma = cpu_to_le32(tx_data->dma);
	skb_queue_tail(&rtwpci->h2c_queue, skb);

	rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1);

	return 0;
}

static int rtw89_pci_txbd_submit(struct rtw89_dev *rtwdev,
				 struct rtw89_pci_tx_ring *tx_ring,
				 struct rtw89_pci_tx_bd_32 *txbd,
				 struct rtw89_core_tx_request *tx_req)
{
	struct rtw89_pci_tx_wd *txwd;
	__le16 opt;
	int ret;

	/* FWCMD queue doesn't have wd pages. Instead, it submits the CMD
	 * buffer with WD BODY only. So here we don't need to check the free
	 * pages of the wd ring.
	 */
	if (tx_ring->txch == RTW89_TXCH_CH12)
		return rtw89_pci_fwcmd_submit(rtwdev, tx_ring, txbd, tx_req);

	txwd = rtw89_pci_dequeue_txwd(tx_ring);
	if (!txwd) {
		rtw89_err(rtwdev, "no available TXWD\n");
		ret = -ENOSPC;
		goto err;
	}

	ret = rtw89_pci_txwd_submit(rtwdev, tx_ring, txwd, tx_req);
	if (ret) {
		rtw89_err(rtwdev, "failed to submit TXWD %d\n", txwd->seq);
		goto err_enqueue_wd;
	}

	list_add_tail(&txwd->list, &tx_ring->busy_pages);

	opt = cpu_to_le16(RTW89_PCI_TXBD_OPT_LS);
	opt |= le16_encode_bits(upper_32_bits(txwd->paddr), RTW89_PCI_TXBD_OPT_DMA_HI);
	txbd->opt = opt;
	txbd->length = cpu_to_le16(txwd->len);
	txbd->dma = cpu_to_le32(txwd->paddr);

	rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1);

	return 0;

err_enqueue_wd:
	rtw89_pci_enqueue_txwd(tx_ring, txwd);
err:
	return ret;
}
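
/* Rough sketch of the TXWD page laid out by rtw89_pci_txwd_submit()
 * above (sizes are chip-dependent):
 *
 *   txwd->vaddr --> +-------------------------------+
 *                   | WD BODY (+ optional WD INFO)  |
 *                   +-------------------------------+
 *                   | TX WP info (seq0..seq3)       |
 *                   +-------------------------------+
 *                   | TX addr info entries          |
 *                   +-------------------------------+
 *
 * The TXBD points at this page (txwd->paddr, txwd->len), not at the skb
 * itself; the skb data is referenced via the addr info entries.
 */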

static int rtw89_pci_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req,
			      u8 txch)
{
	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
	struct rtw89_pci_tx_ring *tx_ring;
	struct rtw89_pci_tx_bd_32 *txbd;
	u32 n_avail_txbd;
	int ret = 0;

	/* The FW command TX type and DMA channel 12 must be used together;
	 * reject a request where only one of the two holds.
	 */
	if ((txch == RTW89_TXCH_CH12 ||
	     tx_req->tx_type == RTW89_CORE_TX_TYPE_FWCMD) &&
	    (txch != RTW89_TXCH_CH12 ||
	     tx_req->tx_type != RTW89_CORE_TX_TYPE_FWCMD)) {
		rtw89_err(rtwdev, "only fw cmd uses dma channel 12\n");
		return -EINVAL;
	}

	tx_ring = &rtwpci->tx_rings[txch];
	spin_lock_bh(&rtwpci->trx_lock);

	n_avail_txbd = rtw89_pci_get_avail_txbd_num(tx_ring);
	if (n_avail_txbd == 0) {
		rtw89_err(rtwdev, "no available TXBD\n");
		ret = -ENOSPC;
		goto err_unlock;
	}

	txbd = rtw89_pci_get_next_txbd(tx_ring);
	ret = rtw89_pci_txbd_submit(rtwdev, tx_ring, txbd, tx_req);
	if (ret) {
		rtw89_err(rtwdev, "failed to submit TXBD\n");
		goto err_unlock;
	}

	spin_unlock_bh(&rtwpci->trx_lock);
	return 0;

err_unlock:
	spin_unlock_bh(&rtwpci->trx_lock);
	return ret;
}

static int rtw89_pci_ops_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req)
{
	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
	int ret;

	ret = rtw89_pci_tx_write(rtwdev, tx_req, desc_info->ch_dma);
	if (ret) {
		rtw89_err(rtwdev, "failed to write to TX queue %d\n", desc_info->ch_dma);
		return ret;
	}

	return 0;
}
const struct rtw89_pci_bd_ram rtw89_bd_ram_table_dual[RTW89_TXCH_NUM] = {
	[RTW89_TXCH_ACH0] = {.start_idx = 0,  .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH1] = {.start_idx = 5,  .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH4] = {.start_idx = 20, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH5] = {.start_idx = 25, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH6] = {.start_idx = 30, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH7] = {.start_idx = 35, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_CH8]  = {.start_idx = 40, .max_num = 5, .min_num = 1},
	[RTW89_TXCH_CH9]  = {.start_idx = 45, .max_num = 5, .min_num = 1},
	[RTW89_TXCH_CH10] = {.start_idx = 50, .max_num = 5, .min_num = 1},
	[RTW89_TXCH_CH11] = {.start_idx = 55, .max_num = 5, .min_num = 1},
	[RTW89_TXCH_CH12] = {.start_idx = 60, .max_num = 4, .min_num = 1},
};
EXPORT_SYMBOL(rtw89_bd_ram_table_dual);

const struct rtw89_pci_bd_ram rtw89_bd_ram_table_single[RTW89_TXCH_NUM] = {
	[RTW89_TXCH_ACH0] = {.start_idx = 0,  .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH1] = {.start_idx = 5,  .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2},
	[RTW89_TXCH_CH8]  = {.start_idx = 20, .max_num = 4, .min_num = 1},
	[RTW89_TXCH_CH9]  = {.start_idx = 24, .max_num = 4, .min_num = 1},
	[RTW89_TXCH_CH12] = {.start_idx = 28, .max_num = 4, .min_num = 1},
};
EXPORT_SYMBOL(rtw89_bd_ram_table_single);
1590 
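/* Fill the write-pointer selector table: each 32-bit register at
 * wp_sel_addr + i takes four consecutive byte-wide indexes (0..15),
 * one index per byte lane. Chips without a wp_sel_addr skip this.
 */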
1591 static void rtw89_pci_init_wp_16sel(struct rtw89_dev *rtwdev)
1592 {
1593 	const struct rtw89_pci_info *info = rtwdev->pci_info;
1594 	u32 addr = info->wp_sel_addr;
1595 	u32 val;
1596 	int i;
1597 
1598 	if (!info->wp_sel_addr)
1599 		return;
1600 
1601 	for (i = 0; i < 16; i += 4) {
1602 		val = u32_encode_bits(i + 0, MASKBYTE0) |
1603 		      u32_encode_bits(i + 1, MASKBYTE1) |
1604 		      u32_encode_bits(i + 2, MASKBYTE2) |
1605 		      u32_encode_bits(i + 3, MASKBYTE3);
1606 		rtw89_write32(rtwdev, addr + i, val);
1607 	}
1608 }
1609 
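/* Reprogram every enabled TX/RX ring from scratch: reset the software
 * read/write pointers, then write the ring length, the BDRAM slice
 * (start index and max/min buffer counts) and the 64-bit ring base
 * address. RX rings on chips where "read == write" means full start
 * with the write pointer at len - 1 rather than 0.
 */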
1610 static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
1611 {
1612 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1613 	const struct rtw89_pci_info *info = rtwdev->pci_info;
1614 	const struct rtw89_pci_bd_ram *bd_ram_table = *info->bd_ram_table;
1615 	struct rtw89_pci_tx_ring *tx_ring;
1616 	struct rtw89_pci_rx_ring *rx_ring;
1617 	struct rtw89_pci_dma_ring *bd_ring;
1618 	const struct rtw89_pci_bd_ram *bd_ram;
1619 	u32 addr_num;
1620 	u32 addr_idx;
1621 	u32 addr_bdram;
1622 	u32 addr_desa_l;
1623 	u32 val32;
1624 	int i;
1625 
1626 	for (i = 0; i < RTW89_TXCH_NUM; i++) {
1627 		if (info->tx_dma_ch_mask & BIT(i))
1628 			continue;
1629 
1630 		tx_ring = &rtwpci->tx_rings[i];
1631 		bd_ring = &tx_ring->bd_ring;
1632 		bd_ram = bd_ram_table ? &bd_ram_table[i] : NULL;
1633 		addr_num = bd_ring->addr.num;
1634 		addr_bdram = bd_ring->addr.bdram;
1635 		addr_desa_l = bd_ring->addr.desa_l;
1636 		bd_ring->wp = 0;
1637 		bd_ring->rp = 0;
1638 
1639 		rtw89_write16(rtwdev, addr_num, bd_ring->len);
1640 		if (addr_bdram && bd_ram) {
1641 			val32 = FIELD_PREP(BDRAM_SIDX_MASK, bd_ram->start_idx) |
1642 				FIELD_PREP(BDRAM_MAX_MASK, bd_ram->max_num) |
1643 				FIELD_PREP(BDRAM_MIN_MASK, bd_ram->min_num);
1644 
1645 			rtw89_write32(rtwdev, addr_bdram, val32);
1646 		}
1647 		rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
1648 		rtw89_write32(rtwdev, addr_desa_l + 4, upper_32_bits(bd_ring->dma));
1649 	}
1650 
1651 	for (i = 0; i < RTW89_RXCH_NUM; i++) {
1652 		rx_ring = &rtwpci->rx_rings[i];
1653 		bd_ring = &rx_ring->bd_ring;
1654 		addr_num = bd_ring->addr.num;
1655 		addr_idx = bd_ring->addr.idx;
1656 		addr_desa_l = bd_ring->addr.desa_l;
1657 		if (info->rx_ring_eq_is_full)
1658 			bd_ring->wp = bd_ring->len - 1;
1659 		else
1660 			bd_ring->wp = 0;
1661 		bd_ring->rp = 0;
1662 		rx_ring->diliver_skb = NULL;
1663 		rx_ring->diliver_desc.ready = false;
1664 		rx_ring->target_rx_tag = 0;
1665 
1666 		rtw89_write16(rtwdev, addr_num, bd_ring->len);
1667 		rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
1668 		rtw89_write32(rtwdev, addr_desa_l + 4, upper_32_bits(bd_ring->dma));
1669 
1670 		if (info->rx_ring_eq_is_full)
1671 			rtw89_write16(rtwdev, addr_idx, bd_ring->wp);
1672 	}
1673 
1674 	rtw89_pci_init_wp_16sel(rtwdev);
1675 }
1676 
1677 static void rtw89_pci_release_tx_ring(struct rtw89_dev *rtwdev,
1678 				      struct rtw89_pci_tx_ring *tx_ring)
1679 {
1680 	rtw89_pci_release_busy_txwd(rtwdev, tx_ring);
1681 	rtw89_pci_release_pending_txwd_skb(rtwdev, tx_ring);
1682 }
1683 
1684 void rtw89_pci_ops_reset(struct rtw89_dev *rtwdev)
1685 {
1686 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1687 	const struct rtw89_pci_info *info = rtwdev->pci_info;
1688 	int txch;
1689 
1690 	rtw89_pci_reset_trx_rings(rtwdev);
1691 
1692 	spin_lock_bh(&rtwpci->trx_lock);
1693 	for (txch = 0; txch < RTW89_TXCH_NUM; txch++) {
1694 		if (info->tx_dma_ch_mask & BIT(txch))
1695 			continue;
1696 		if (txch == RTW89_TXCH_CH12) {
1697 			rtw89_pci_release_fwcmd(rtwdev, rtwpci,
1698 						skb_queue_len(&rtwpci->h2c_queue), true);
1699 			continue;
1700 		}
1701 		rtw89_pci_release_tx_ring(rtwdev, &rtwpci->tx_rings[txch]);
1702 	}
1703 	spin_unlock_bh(&rtwpci->trx_lock);
1704 }
1705 
1706 static void rtw89_pci_enable_intr_lock(struct rtw89_dev *rtwdev)
1707 {
1708 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1709 	unsigned long flags;
1710 
1711 	spin_lock_irqsave(&rtwpci->irq_lock, flags);
1712 	rtwpci->running = true;
1713 	rtw89_chip_enable_intr(rtwdev, rtwpci);
1714 	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
1715 }
1716 
1717 static void rtw89_pci_disable_intr_lock(struct rtw89_dev *rtwdev)
1718 {
1719 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1720 	unsigned long flags;
1721 
1722 	spin_lock_irqsave(&rtwpci->irq_lock, flags);
1723 	rtwpci->running = false;
1724 	rtw89_chip_disable_intr(rtwdev, rtwpci);
1725 	spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
1726 }
1727 
1728 static int rtw89_pci_ops_start(struct rtw89_dev *rtwdev)
1729 {
1730 	rtw89_core_napi_start(rtwdev);
1731 	rtw89_pci_enable_intr_lock(rtwdev);
1732 
1733 	return 0;
1734 }
1735 
1736 static void rtw89_pci_ops_stop(struct rtw89_dev *rtwdev)
1737 {
1738 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1739 	struct pci_dev *pdev = rtwpci->pdev;
1740 
1741 	rtw89_pci_disable_intr_lock(rtwdev);
1742 	synchronize_irq(pdev->irq);
1743 	rtw89_core_napi_stop(rtwdev);
1744 }
1745 
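/* Pause quiesces the whole interrupt path: mask interrupts, wait out
 * any running handler, then let NAPI drain. Unpause re-enables the
 * interrupts and kicks TX rings that queued work while paused.
 */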
1746 static void rtw89_pci_ops_pause(struct rtw89_dev *rtwdev, bool pause)
1747 {
1748 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1749 	struct pci_dev *pdev = rtwpci->pdev;
1750 
1751 	if (pause) {
1752 		rtw89_pci_disable_intr_lock(rtwdev);
1753 		synchronize_irq(pdev->irq);
1754 		if (test_bit(RTW89_FLAG_NAPI_RUNNING, rtwdev->flags))
1755 			napi_synchronize(&rtwdev->napi);
1756 	} else {
1757 		rtw89_pci_enable_intr_lock(rtwdev);
1758 		rtw89_pci_tx_kick_off_pending(rtwdev);
1759 	}
1760 }
1761 
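/* In low power mode the BD index registers are reachable at alternate
 * offsets only; swap each ring's idx address between the normal DMA
 * address set and the low-power table from the HCI info.
 */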
1762 static
1763 void rtw89_pci_switch_bd_idx_addr(struct rtw89_dev *rtwdev, bool low_power)
1764 {
1765 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1766 	const struct rtw89_pci_info *info = rtwdev->pci_info;
1767 	const struct rtw89_pci_bd_idx_addr *bd_idx_addr = info->bd_idx_addr_low_power;
1768 	const struct rtw89_pci_ch_dma_addr_set *dma_addr_set = info->dma_addr_set;
1769 	struct rtw89_pci_tx_ring *tx_ring;
1770 	struct rtw89_pci_rx_ring *rx_ring;
1771 	int i;
1772 
1773 	if (WARN(!bd_idx_addr, "only HCI with low power mode needs this\n"))
1774 		return;
1775 
1776 	for (i = 0; i < RTW89_TXCH_NUM; i++) {
1777 		tx_ring = &rtwpci->tx_rings[i];
1778 		tx_ring->bd_ring.addr.idx = low_power ?
1779 					    bd_idx_addr->tx_bd_addrs[i] :
1780 					    dma_addr_set->tx[i].idx;
1781 	}
1782 
1783 	for (i = 0; i < RTW89_RXCH_NUM; i++) {
1784 		rx_ring = &rtwpci->rx_rings[i];
1785 		rx_ring->bd_ring.addr.idx = low_power ?
1786 					    bd_idx_addr->rx_bd_addrs[i] :
1787 					    dma_addr_set->rx[i].idx;
1788 	}
1789 }
1790 
1791 static void rtw89_pci_ops_switch_mode(struct rtw89_dev *rtwdev, bool low_power)
1792 {
1793 	enum rtw89_pci_intr_mask_cfg cfg;
1794 
1795 	WARN(!rtwdev->hci.paused, "HCI isn't paused\n");
1796 
1797 	cfg = low_power ? RTW89_PCI_INTR_MASK_LOW_POWER : RTW89_PCI_INTR_MASK_NORMAL;
1798 	rtw89_chip_config_intr_mask(rtwdev, cfg);
1799 	rtw89_pci_switch_bd_idx_addr(rtwdev, low_power);
1800 }
1801 
1802 static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data);
1803 
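/* CMAC registers read back as RTW89_R32_DEAD while the CMAC clocks are
 * gated. Retry a bounded number of times, forcing the clocks back on
 * through R_AX_CK_EN between attempts.
 */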
1804 static u32 rtw89_pci_ops_read32_cmac(struct rtw89_dev *rtwdev, u32 addr)
1805 {
1806 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1807 	u32 val = readl(rtwpci->mmap + addr);
1808 	int count;
1809 
1810 	for (count = 0; ; count++) {
1811 		if (val != RTW89_R32_DEAD)
1812 			return val;
1813 		if (count >= MAC_REG_POOL_COUNT) {
1814 			rtw89_warn(rtwdev, "addr %#x = %#x\n", addr, val);
1815 			return RTW89_R32_DEAD;
1816 		}
1817 		rtw89_pci_ops_write32(rtwdev, R_AX_CK_EN, B_AX_CMAC_ALLCKEN);
1818 		val = readl(rtwpci->mmap + addr);
1819 	}
1820 
1821 	return val;
1822 }
1823 
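/* Sub-word reads inside the CMAC range go through the CMAC-safe 32-bit
 * reader: fetch the aligned dword, then shift the wanted byte or word
 * down into place.
 */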
1824 static u8 rtw89_pci_ops_read8(struct rtw89_dev *rtwdev, u32 addr)
1825 {
1826 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1827 	u32 addr32, val32, shift;
1828 
1829 	if (!ACCESS_CMAC(addr))
1830 		return readb(rtwpci->mmap + addr);
1831 
1832 	addr32 = addr & ~0x3;
1833 	shift = (addr & 0x3) * 8;
1834 	val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32);
1835 	return val32 >> shift;
1836 }
1837 
1838 static u16 rtw89_pci_ops_read16(struct rtw89_dev *rtwdev, u32 addr)
1839 {
1840 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1841 	u32 addr32, val32, shift;
1842 
1843 	if (!ACCESS_CMAC(addr))
1844 		return readw(rtwpci->mmap + addr);
1845 
1846 	addr32 = addr & ~0x3;
1847 	shift = (addr & 0x3) * 8;
1848 	val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32);
1849 	return val32 >> shift;
1850 }
1851 
1852 static u32 rtw89_pci_ops_read32(struct rtw89_dev *rtwdev, u32 addr)
1853 {
1854 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1855 
1856 	if (!ACCESS_CMAC(addr))
1857 		return readl(rtwpci->mmap + addr);
1858 
1859 	return rtw89_pci_ops_read32_cmac(rtwdev, addr);
1860 }
1861 
1862 static void rtw89_pci_ops_write8(struct rtw89_dev *rtwdev, u32 addr, u8 data)
1863 {
1864 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1865 
1866 	writeb(data, rtwpci->mmap + addr);
1867 }
1868 
1869 static void rtw89_pci_ops_write16(struct rtw89_dev *rtwdev, u32 addr, u16 data)
1870 {
1871 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1872 
1873 	writew(data, rtwpci->mmap + addr);
1874 }
1875 
1876 static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data)
1877 {
1878 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
1879 
1880 	writel(data, rtwpci->mmap + addr);
1881 }
1882 
1883 static void rtw89_pci_ctrl_dma_trx(struct rtw89_dev *rtwdev, bool enable)
1884 {
1885 	const struct rtw89_pci_info *info = rtwdev->pci_info;
1886 
1887 	if (enable)
1888 		rtw89_write32_set(rtwdev, info->init_cfg_reg,
1889 				  info->rxhci_en_bit | info->txhci_en_bit);
1890 	else
1891 		rtw89_write32_clr(rtwdev, info->init_cfg_reg,
1892 				  info->rxhci_en_bit | info->txhci_en_bit);
1893 }
1894 
1895 static void rtw89_pci_ctrl_dma_io(struct rtw89_dev *rtwdev, bool enable)
1896 {
1897 	const struct rtw89_pci_info *info = rtwdev->pci_info;
1898 	const struct rtw89_reg_def *reg = &info->dma_io_stop;
1899 
1900 	if (enable)
1901 		rtw89_write32_clr(rtwdev, reg->addr, reg->mask);
1902 	else
1903 		rtw89_write32_set(rtwdev, reg->addr, reg->mask);
1904 }
1905 
1906 void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable)
1907 {
1908 	rtw89_pci_ctrl_dma_io(rtwdev, enable);
1909 	rtw89_pci_ctrl_dma_trx(rtwdev, enable);
1910 }
1911 
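/* MDIO access to the PCIe PHY: latch the 5-bit register address, pick
 * the register page from the address range and PHY generation, raise
 * the read/write flag, then poll up to 2 ms for the hardware to clear
 * it.
 */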
1912 static int rtw89_pci_check_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 rw_bit)
1913 {
1914 	u16 val;
1915 
1916 	rtw89_write8(rtwdev, R_AX_MDIO_CFG, addr & 0x1F);
1917 
1918 	val = rtw89_read16(rtwdev, R_AX_MDIO_CFG);
1919 	switch (speed) {
1920 	case PCIE_PHY_GEN1:
1921 		if (addr < 0x20)
1922 			val = u16_replace_bits(val, MDIO_PG0_G1, B_AX_MDIO_PHY_ADDR_MASK);
1923 		else
1924 			val = u16_replace_bits(val, MDIO_PG1_G1, B_AX_MDIO_PHY_ADDR_MASK);
1925 		break;
1926 	case PCIE_PHY_GEN2:
1927 		if (addr < 0x20)
1928 			val = u16_replace_bits(val, MDIO_PG0_G2, B_AX_MDIO_PHY_ADDR_MASK);
1929 		else
1930 			val = u16_replace_bits(val, MDIO_PG1_G2, B_AX_MDIO_PHY_ADDR_MASK);
1931 		break;
1932 	default:
1933 		rtw89_err(rtwdev, "[ERR]Error Speed %d!\n", speed);
1934 		return -EINVAL;
1935 	}
1936 	rtw89_write16(rtwdev, R_AX_MDIO_CFG, val);
1937 	rtw89_write16_set(rtwdev, R_AX_MDIO_CFG, rw_bit);
1938 
1939 	return read_poll_timeout(rtw89_read16, val, !(val & rw_bit), 10, 2000,
1940 				 false, rtwdev, R_AX_MDIO_CFG);
1941 }
1942 
1943 static int
1944 rtw89_read16_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 *val)
1945 {
1946 	int ret;
1947 
1948 	ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_RFLAG);
1949 	if (ret) {
1950 		rtw89_err(rtwdev, "[ERR]MDIO R16 0x%X fail ret=%d!\n", addr, ret);
1951 		return ret;
1952 	}
1953 	*val = rtw89_read16(rtwdev, R_AX_MDIO_RDATA);
1954 
1955 	return 0;
1956 }
1957 
1958 static int
1959 rtw89_write16_mdio(struct rtw89_dev *rtwdev, u8 addr, u16 data, u8 speed)
1960 {
1961 	int ret;
1962 
1963 	rtw89_write16(rtwdev, R_AX_MDIO_WDATA, data);
1964 	ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_WFLAG);
1965 	if (ret) {
1966 		rtw89_err(rtwdev, "[ERR]MDIO W16 0x%X = %x fail ret=%d!\n", addr, data, ret);
1967 		return ret;
1968 	}
1969 
1970 	return 0;
1971 }
1972 
1973 static int
1974 rtw89_write16_mdio_mask(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u16 data, u8 speed)
1975 {
1976 	u32 shift;
1977 	int ret;
1978 	u16 val;
1979 
1980 	ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
1981 	if (ret)
1982 		return ret;
1983 
1984 	shift = __ffs(mask);
1985 	val &= ~mask;
1986 	val |= ((data << shift) & mask);
1987 
1988 	ret = rtw89_write16_mdio(rtwdev, addr, val, speed);
1989 	if (ret)
1990 		return ret;
1991 
1992 	return 0;
1993 }
1994 
1995 static int rtw89_write16_mdio_set(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed)
1996 {
1997 	int ret;
1998 	u16 val;
1999 
2000 	ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
2001 	if (ret)
2002 		return ret;
2003 	ret = rtw89_write16_mdio(rtwdev, addr, val | mask, speed);
2004 	if (ret)
2005 		return ret;
2006 
2007 	return 0;
2008 }
2009 
2010 static int rtw89_write16_mdio_clr(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed)
2011 {
2012 	int ret;
2013 	u16 val;
2014 
2015 	ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
2016 	if (ret)
2017 		return ret;
2018 	ret = rtw89_write16_mdio(rtwdev, addr, val & ~mask, speed);
2019 	if (ret)
2020 		return ret;
2021 
2022 	return 0;
2023 }
2024 
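/* DBI acts as a backdoor into PCIe configuration space. A byte write
 * encodes the byte lane (addr & 3) as a write-enable bit, places the
 * data in the matching R_AX_DBI_WDATA lane, raises the write flag and
 * polls until it self-clears.
 */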
2025 static int rtw89_dbi_write8(struct rtw89_dev *rtwdev, u16 addr, u8 data)
2026 {
2027 	u16 addr_2lsb = addr & B_AX_DBI_2LSB;
2028 	u16 write_addr;
2029 	u8 flag;
2030 	int ret;
2031 
2032 	write_addr = addr & B_AX_DBI_ADDR_MSK;
2033 	write_addr |= u16_encode_bits(BIT(addr_2lsb), B_AX_DBI_WREN_MSK);
2034 	rtw89_write8(rtwdev, R_AX_DBI_WDATA + addr_2lsb, data);
2035 	rtw89_write16(rtwdev, R_AX_DBI_FLAG, write_addr);
2036 	rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_WFLAG >> 16);
2037 
2038 	ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10,
2039 				       10 * RTW89_PCI_WR_RETRY_CNT, false,
2040 				       rtwdev, R_AX_DBI_FLAG + 2);
2041 	if (ret)
2042 		rtw89_err(rtwdev, "failed to write DBI register, addr=0x%X\n",
2043 			  addr);
2044 
2045 	return ret;
2046 }
2047 
2048 static int rtw89_dbi_read8(struct rtw89_dev *rtwdev, u16 addr, u8 *value)
2049 {
2050 	u16 read_addr = addr & B_AX_DBI_ADDR_MSK;
2051 	u8 flag;
2052 	int ret;
2053 
2054 	rtw89_write16(rtwdev, R_AX_DBI_FLAG, read_addr);
2055 	rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_RFLAG >> 16);
2056 
2057 	ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10,
2058 				       10 * RTW89_PCI_WR_RETRY_CNT, false,
2059 				       rtwdev, R_AX_DBI_FLAG + 2);
2060 	if (ret) {
2061 		rtw89_err(rtwdev, "failed to read DBI register, addr=0x%X\n",
2062 			  addr);
2063 		return ret;
2064 	}
2065 
2066 	read_addr = R_AX_DBI_RDATA + (addr & 3);
2067 	*value = rtw89_read8(rtwdev, read_addr);
2068 
2069 	return 0;
2070 }
2071 
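/* Prefer the standard config-space accessors; if they fail, presumably
 * because config cycles cannot complete in the current device state,
 * fall back to the DBI backdoor on chips that expose one.
 */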
2072 static int rtw89_pci_write_config_byte(struct rtw89_dev *rtwdev, u16 addr,
2073 				       u8 data)
2074 {
2075 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2076 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2077 	struct pci_dev *pdev = rtwpci->pdev;
2078 	int ret;
2079 
2080 	ret = pci_write_config_byte(pdev, addr, data);
2081 	if (!ret)
2082 		return 0;
2083 
2084 	if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev))
2085 		ret = rtw89_dbi_write8(rtwdev, addr, data);
2086 
2087 	return ret;
2088 }
2089 
2090 static int rtw89_pci_read_config_byte(struct rtw89_dev *rtwdev, u16 addr,
2091 				      u8 *value)
2092 {
2093 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
2094 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2095 	struct pci_dev *pdev = rtwpci->pdev;
2096 	int ret;
2097 
2098 	ret = pci_read_config_byte(pdev, addr, value);
2099 	if (!ret)
2100 		return 0;
2101 
2102 	if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev))
2103 		ret = rtw89_dbi_read8(rtwdev, addr, value);
2104 
2105 	return ret;
2106 }
2107 
2108 static int rtw89_pci_config_byte_set(struct rtw89_dev *rtwdev, u16 addr,
2109 				     u8 bit)
2110 {
2111 	u8 value;
2112 	int ret;
2113 
2114 	ret = rtw89_pci_read_config_byte(rtwdev, addr, &value);
2115 	if (ret)
2116 		return ret;
2117 
2118 	value |= bit;
2119 	ret = rtw89_pci_write_config_byte(rtwdev, addr, value);
2120 
2121 	return ret;
2122 }
2123 
2124 static int rtw89_pci_config_byte_clr(struct rtw89_dev *rtwdev, u16 addr,
2125 				     u8 bit)
2126 {
2127 	u8 value;
2128 	int ret;
2129 
2130 	ret = rtw89_pci_read_config_byte(rtwdev, addr, &value);
2131 	if (ret)
2132 		return ret;
2133 
2134 	value &= ~bit;
2135 	ret = rtw89_pci_write_config_byte(rtwdev, addr, value);
2136 
2137 	return ret;
2138 }
2139 
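/* Sample the reference clock calibration counter: pulse B_AX_CLK_CALIB_EN
 * off and on to restart it, wait 300 us, then read the 12-bit result.
 * All-zeros or all-ones is treated as an invalid measurement.
 */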
2140 static int
2141 __get_target(struct rtw89_dev *rtwdev, u16 *target, enum rtw89_pcie_phy phy_rate)
2142 {
2143 	u16 val, tar;
2144 	int ret;
2145 
2146 	/* Enable counter */
2147 	ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val);
2148 	if (ret)
2149 		return ret;
2150 	ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN,
2151 				 phy_rate);
2152 	if (ret)
2153 		return ret;
2154 	ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val | B_AX_CLK_CALIB_EN,
2155 				 phy_rate);
2156 	if (ret)
2157 		return ret;
2158 
2159 	fsleep(300);
2160 
2161 	ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &tar);
2162 	if (ret)
2163 		return ret;
2164 	ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN,
2165 				 phy_rate);
2166 	if (ret)
2167 		return ret;
2168 
2169 	tar = tar & 0x0FFF;
2170 	if (tar == 0 || tar == 0x0FFF) {
2171 		rtw89_err(rtwdev, "[ERR]Get target failed.\n");
2172 		return -EINVAL;
2173 	}
2174 
2175 	*target = tar;
2176 
2177 	return 0;
2178 }
2179 
2180 static int rtw89_pci_autok_x(struct rtw89_dev *rtwdev)
2181 {
2182 	int ret;
2183 
2184 	if (!rtw89_is_rtl885xb(rtwdev))
2185 		return 0;
2186 
2187 	ret = rtw89_write16_mdio_mask(rtwdev, RAC_REG_FLD_0, BAC_AUTOK_N_MASK,
2188 				      PCIE_AUTOK_4, PCIE_PHY_GEN1);
2189 	return ret;
2190 }
2191 
2192 static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en)
2193 {
2194 	enum rtw89_pcie_phy phy_rate;
2195 	u16 val16, mgn_set, div_set, tar;
2196 	u8 val8, bdr_ori;
2197 	bool l1_flag = false;
2198 	int ret = 0;
2199 
2200 	if (!rtw89_is_rtl885xb(rtwdev))
2201 		return 0;
2202 
2203 	ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_PHY_RATE, &val8);
2204 	if (ret) {
2205 		rtw89_err(rtwdev, "[ERR]pci config read %X\n",
2206 			  RTW89_PCIE_PHY_RATE);
2207 		return ret;
2208 	}
2209 
2210 	if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x1) {
2211 		phy_rate = PCIE_PHY_GEN1;
2212 	} else if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x2) {
2213 		phy_rate = PCIE_PHY_GEN2;
2214 	} else {
2215 		rtw89_err(rtwdev, "[ERR]PCIe PHY rate %#x not supported\n", val8);
2216 		return -EOPNOTSUPP;
2217 	}
2218 	/* Disable L1BD */
2219 	ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, &bdr_ori);
2220 	if (ret) {
2221 		rtw89_err(rtwdev, "[ERR]pci config read %X\n", RTW89_PCIE_L1_CTRL);
2222 		return ret;
2223 	}
2224 
2225 	if (bdr_ori & RTW89_PCIE_BIT_L1) {
2226 		ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL,
2227 						  bdr_ori & ~RTW89_PCIE_BIT_L1);
2228 		if (ret) {
2229 			rtw89_err(rtwdev, "[ERR]pci config write %X\n",
2230 				  RTW89_PCIE_L1_CTRL);
2231 			return ret;
2232 		}
2233 		l1_flag = true;
2234 	}
2235 
2236 	ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16);
2237 	if (ret) {
2238 		rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1);
2239 		goto end;
2240 	}
2241 
2242 	if (val16 & B_AX_CALIB_EN) {
2243 		ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1,
2244 					 val16 & ~B_AX_CALIB_EN, phy_rate);
2245 		if (ret) {
2246 			rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
2247 			goto end;
2248 		}
2249 	}
2250 
2251 	if (!autook_en)
2252 		goto end;
2253 	/* Set div */
2254 	ret = rtw89_write16_mdio_clr(rtwdev, RAC_CTRL_PPR_V1, B_AX_DIV, phy_rate);
2255 	if (ret) {
2256 		rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
2257 		goto end;
2258 	}
2259 
2260 	/* Obtain div and margin */
2261 	ret = __get_target(rtwdev, &tar, phy_rate);
2262 	if (ret) {
2263 		rtw89_err(rtwdev, "[ERR]1st get target fail %d\n", ret);
2264 		goto end;
2265 	}
2266 
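	/* Margin is the headroom between host and minimum reference:
	 * tar * (HOSTREF / MINREF - 1). The ladder below picks a divider
	 * so that (margin >> div) fits the 4-bit field written next to
	 * the 12-bit target in RAC_SET_PPR_V1.
	 */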
2267 	mgn_set = tar * INTF_INTGRA_HOSTREF_V1 / INTF_INTGRA_MINREF_V1 - tar;
2268 
2269 	if (mgn_set >= 128) {
2270 		div_set = 0x0003;
2271 		mgn_set = 0x000F;
2272 	} else if (mgn_set >= 64) {
2273 		div_set = 0x0003;
2274 		mgn_set >>= 3;
2275 	} else if (mgn_set >= 32) {
2276 		div_set = 0x0002;
2277 		mgn_set >>= 2;
2278 	} else if (mgn_set >= 16) {
2279 		div_set = 0x0001;
2280 		mgn_set >>= 1;
2281 	} else if (mgn_set == 0) {
2282 		rtw89_err(rtwdev, "[ERR]cal mgn is 0, tar = %d\n", tar);
2283 		goto end;
2284 	} else {
2285 		div_set = 0x0000;
2286 	}
2287 
2288 	ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16);
2289 	if (ret) {
2290 		rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1);
2291 		goto end;
2292 	}
2293 
2294 	val16 |= u16_encode_bits(div_set, B_AX_DIV);
2295 
2296 	ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val16, phy_rate);
2297 	if (ret) {
2298 		rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
2299 		goto end;
2300 	}
2301 
2302 	ret = __get_target(rtwdev, &tar, phy_rate);
2303 	if (ret) {
2304 		rtw89_err(rtwdev, "[ERR]2nd get target fail %d\n", ret);
2305 		goto end;
2306 	}
2307 
2308 	rtw89_debug(rtwdev, RTW89_DBG_HCI, "[TRACE]target = 0x%X, div = 0x%X, margin = 0x%X\n",
2309 		    tar, div_set, mgn_set);
2310 	ret = rtw89_write16_mdio(rtwdev, RAC_SET_PPR_V1,
2311 				 (tar & 0x0FFF) | (mgn_set << 12), phy_rate);
2312 	if (ret) {
2313 		rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_SET_PPR_V1);
2314 		goto end;
2315 	}
2316 
2317 	/* Enable function */
2318 	ret = rtw89_write16_mdio_set(rtwdev, RAC_CTRL_PPR_V1, B_AX_CALIB_EN, phy_rate);
2319 	if (ret) {
2320 		rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
2321 		goto end;
2322 	}
2323 
2324 	/* CLK delay = 0 */
2325 	ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL,
2326 					  PCIE_CLKDLY_HW_0);
2327 
2328 end:
2329 	/* Restore the original L1BD setting */
2330 	if (l1_flag) {
2331 		ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL,
2332 						  bdr_ori);
2333 		if (ret) {
2334 			rtw89_err(rtwdev, "[ERR]pci config write %X\n",
2335 				  RTW89_PCIE_L1_CTRL);
2336 			return ret;
2337 		}
2338 	}
2339 
2340 	return ret;
2341 }
2342 
2343 static int rtw89_pci_deglitch_setting(struct rtw89_dev *rtwdev)
2344 {
2345 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2346 	int ret;
2347 
2348 	if (chip_id == RTL8852A) {
2349 		ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH,
2350 					     PCIE_PHY_GEN1);
2351 		if (ret)
2352 			return ret;
2353 		ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH,
2354 					     PCIE_PHY_GEN2);
2355 		if (ret)
2356 			return ret;
2357 	} else if (chip_id == RTL8852C) {
2358 		rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA24 * 2,
2359 				  B_AX_DEGLITCH);
2360 		rtw89_write16_clr(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA24 * 2,
2361 				  B_AX_DEGLITCH);
2362 	}
2363 
2364 	return 0;
2365 }
2366 
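/* 8852C only: if either PCIe generation still has its OOBS selector
 * disabled, sample the calibrated OOBS level from the currently linked
 * PHY and program it into both the Gen1 and Gen2 banks, then latch the
 * measured offset calibration as a manual level. ASPM is held off for
 * the whole sequence.
 */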
2367 static void rtw89_pci_disable_eq_ax(struct rtw89_dev *rtwdev)
2368 {
2369 	u16 g1_oobs, g2_oobs;
2370 	u32 backup_aspm;
2371 	u32 phy_offset;
2372 	u16 offset_cal;
2373 	u16 oobs_val;
2374 	int ret;
2375 	u8 gen;
2376 
2377 	if (rtwdev->chip->chip_id != RTL8852C)
2378 		return;
2379 
2380 	g1_oobs = rtw89_read16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G1 +
2381 					    RAC_ANA09 * RAC_MULT, BAC_OOBS_SEL);
2382 	g2_oobs = rtw89_read16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G2 +
2383 					    RAC_ANA09 * RAC_MULT, BAC_OOBS_SEL);
2384 	if (g1_oobs && g2_oobs)
2385 		return;
2386 
2387 	backup_aspm = rtw89_read32(rtwdev, R_AX_PCIE_MIX_CFG_V1);
2388 	rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK);
2389 
2390 	ret = rtw89_pci_get_phy_offset_by_link_speed(rtwdev, &phy_offset);
2391 	if (ret)
2392 		goto out;
2393 
2394 	rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0D * RAC_MULT, BAC_RX_TEST_EN);
2395 	rtw89_write16(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT, ADDR_SEL_PINOUT_DIS_VAL);
2396 	rtw89_write16_set(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT, B_PCIE_BIT_RD_SEL);
2397 
2398 	oobs_val = rtw89_read16_mask(rtwdev, phy_offset + RAC_ANA1F * RAC_MULT,
2399 				     OOBS_LEVEL_MASK);
2400 
2401 	rtw89_write16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA03 * RAC_MULT,
2402 			   OOBS_SEN_MASK, oobs_val);
2403 	rtw89_write16_set(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA09 * RAC_MULT,
2404 			  BAC_OOBS_SEL);
2405 
2406 	rtw89_write16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA03 * RAC_MULT,
2407 			   OOBS_SEN_MASK, oobs_val);
2408 	rtw89_write16_set(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA09 * RAC_MULT,
2409 			  BAC_OOBS_SEL);
2410 
2411 	/* offset calibration (K) */
2412 	for (gen = 1; gen <= 2; gen++) {
2413 		phy_offset = gen == 1 ? R_RAC_DIRECT_OFFSET_G1 :
2414 					R_RAC_DIRECT_OFFSET_G2;
2415 
2416 		rtw89_write16_clr(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT,
2417 				  B_PCIE_BIT_RD_SEL);
2418 	}
2419 
2420 	offset_cal = rtw89_read16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G1 +
2421 					       RAC_ANA1F * RAC_MULT, OFFSET_CAL_MASK);
2422 
2423 	for (gen = 1; gen <= 2; gen++) {
2424 		phy_offset = gen == 1 ? R_RAC_DIRECT_OFFSET_G1 :
2425 					R_RAC_DIRECT_OFFSET_G2;
2426 
2427 		rtw89_write16_mask(rtwdev, phy_offset + RAC_ANA0B * RAC_MULT,
2428 				   MANUAL_LVL_MASK, offset_cal);
2429 		rtw89_write16_clr(rtwdev, phy_offset + RAC_ANA0D * RAC_MULT,
2430 				  OFFSET_CAL_MODE);
2431 	}
2432 
2433 out:
2434 	rtw89_write32(rtwdev, R_AX_PCIE_MIX_CFG_V1, backup_aspm);
2435 }
2436 
2437 static void rtw89_pci_ber(struct rtw89_dev *rtwdev)
2438 {
2439 	u32 phy_offset;
2440 
2441 	if (!test_bit(RTW89_QUIRK_PCI_BER, rtwdev->quirks))
2442 		return;
2443 
2444 	phy_offset = R_RAC_DIRECT_OFFSET_G1;
2445 	rtw89_write16(rtwdev, phy_offset + RAC_ANA1E * RAC_MULT, RAC_ANA1E_G1_VAL);
2446 	rtw89_write16(rtwdev, phy_offset + RAC_ANA2E * RAC_MULT, RAC_ANA2E_VAL);
2447 
2448 	phy_offset = R_RAC_DIRECT_OFFSET_G2;
2449 	rtw89_write16(rtwdev, phy_offset + RAC_ANA1E * RAC_MULT, RAC_ANA1E_G2_VAL);
2450 	rtw89_write16(rtwdev, phy_offset + RAC_ANA2E * RAC_MULT, RAC_ANA2E_VAL);
2451 }
2452 
2453 static void rtw89_pci_rxdma_prefth(struct rtw89_dev *rtwdev)
2454 {
2455 	if (rtwdev->chip->chip_id != RTL8852A)
2456 		return;
2457 
2458 	rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_DIS_RXDMA_PRE);
2459 }
2460 
2461 static void rtw89_pci_l1off_pwroff(struct rtw89_dev *rtwdev)
2462 {
2463 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2464 
2465 	if (chip_id != RTL8852A && !rtw89_is_rtl885xb(rtwdev))
2466 		return;
2467 
2468 	rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL, B_AX_L1OFF_PWR_OFF_EN);
2469 }
2470 
2471 static int rtw89_pci_l2_rxen_lat(struct rtw89_dev *rtwdev)
2472 {
2473 	int ret;
2474 
2475 	if (rtwdev->chip->chip_id != RTL8852A)
2476 		return 0;
2477 
2478 	ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN,
2479 				     PCIE_PHY_GEN1);
2480 	if (ret)
2481 		return ret;
2482 
2483 	ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN,
2484 				     PCIE_PHY_GEN2);
2485 	if (ret)
2486 		return ret;
2487 
2488 	return 0;
2489 }
2490 
2491 static void rtw89_pci_aphy_pwrcut(struct rtw89_dev *rtwdev)
2492 {
2493 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2494 
2495 	if (chip_id != RTL8852A && !rtw89_is_rtl885xb(rtwdev))
2496 		return;
2497 
2498 	rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_PSUS_OFF_CAPC_EN);
2499 }
2500 
2501 static void rtw89_pci_hci_ldo(struct rtw89_dev *rtwdev)
2502 {
2503 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2504 
2505 	if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
2506 		rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL,
2507 				  B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
2508 		rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
2509 				  B_AX_PCIE_DIS_WLSUS_AFT_PDN);
2510 	} else if (rtwdev->chip->chip_id == RTL8852C) {
2511 		rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
2512 				  B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
2513 	}
2514 }
2515 
2516 static int rtw89_pci_dphy_delay(struct rtw89_dev *rtwdev)
2517 {
2518 	if (!rtw89_is_rtl885xb(rtwdev))
2519 		return 0;
2520 
2521 	return rtw89_write16_mdio_mask(rtwdev, RAC_REG_REV2, BAC_CMU_EN_DLY_MASK,
2522 				       PCIE_DPHY_DLY_25US, PCIE_PHY_GEN1);
2523 }
2524 
2525 static void rtw89_pci_power_wake_ax(struct rtw89_dev *rtwdev, bool pwr_up)
2526 {
2527 	if (pwr_up)
2528 		rtw89_write32_set(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL);
2529 	else
2530 		rtw89_write32_clr(rtwdev, R_AX_HCI_OPT_CTRL, BIT_WAKE_CTRL);
2531 }
2532 
2533 static void rtw89_pci_autoload_hang(struct rtw89_dev *rtwdev)
2534 {
2535 	if (rtwdev->chip->chip_id != RTL8852C)
2536 		return;
2537 
2538 	rtw89_write32_set(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3);
2539 	rtw89_write32_clr(rtwdev, R_AX_PCIE_BG_CLR, B_AX_BG_CLR_ASYNC_M3);
2540 }
2541 
2542 static void rtw89_pci_l12_vmain(struct rtw89_dev *rtwdev)
2543 {
2544 	if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV))
2545 		return;
2546 
2547 	rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_FORCE_PWR_NGAT);
2548 }
2549 
2550 static void rtw89_pci_gen2_force_ib(struct rtw89_dev *rtwdev)
2551 {
2552 	if (!(rtwdev->chip->chip_id == RTL8852C && rtwdev->hal.cv == CHIP_CAV))
2553 		return;
2554 
2555 	rtw89_write32_set(rtwdev, R_AX_PMC_DBG_CTRL2,
2556 			  B_AX_SYSON_DIS_PMCR_AX_WRMSK);
2557 	rtw89_write32_set(rtwdev, R_AX_HCI_BG_CTRL, B_AX_BG_CLR_ASYNC_M3);
2558 	rtw89_write32_clr(rtwdev, R_AX_PMC_DBG_CTRL2,
2559 			  B_AX_SYSON_DIS_PMCR_AX_WRMSK);
2560 }
2561 
2562 static void rtw89_pci_l1_ent_lat(struct rtw89_dev *rtwdev)
2563 {
2564 	if (rtwdev->chip->chip_id != RTL8852C)
2565 		return;
2566 
2567 	rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_SEL_REQ_ENTR_L1);
2568 }
2569 
2570 static void rtw89_pci_wd_exit_l1(struct rtw89_dev *rtwdev)
2571 {
2572 	if (rtwdev->chip->chip_id != RTL8852C)
2573 		return;
2574 
2575 	rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1, B_AX_DMAC0_EXIT_L1_EN);
2576 }
2577 
2578 static void rtw89_pci_set_sic(struct rtw89_dev *rtwdev)
2579 {
2580 	if (rtwdev->chip->chip_id == RTL8852C)
2581 		return;
2582 
2583 	rtw89_write32_clr(rtwdev, R_AX_PCIE_EXP_CTRL,
2584 			  B_AX_SIC_EN_FORCE_CLKREQ);
2585 }
2586 
2587 static void rtw89_pci_set_lbc(struct rtw89_dev *rtwdev)
2588 {
2589 	const struct rtw89_pci_info *info = rtwdev->pci_info;
2590 	u32 lbc;
2591 
2592 	if (rtwdev->chip->chip_id == RTL8852C)
2593 		return;
2594 
2595 	lbc = rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG);
2596 	if (info->lbc_en == MAC_AX_PCIE_ENABLE) {
2597 		lbc = u32_replace_bits(lbc, info->lbc_tmr, B_AX_LBC_TIMER);
2598 		lbc |= B_AX_LBC_FLAG | B_AX_LBC_EN;
2599 		rtw89_write32(rtwdev, R_AX_LBC_WATCHDOG, lbc);
2600 	} else {
2601 		lbc &= ~B_AX_LBC_EN;
2602 	}
2603 	rtw89_write32(rtwdev, R_AX_LBC_WATCHDOG, lbc);
2604 }
2605 
2606 static void rtw89_pci_set_io_rcy(struct rtw89_dev *rtwdev)
2607 {
2608 	const struct rtw89_pci_info *info = rtwdev->pci_info;
2609 	u32 val32;
2610 
2611 	if (rtwdev->chip->chip_id != RTL8852C)
2612 		return;
2613 
2614 	if (info->io_rcy_en == MAC_AX_PCIE_ENABLE) {
2615 		val32 = FIELD_PREP(B_AX_PCIE_WDT_TIMER_M1_MASK,
2616 				   info->io_rcy_tmr);
2617 		rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M1, val32);
2618 		rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_M2, val32);
2619 		rtw89_write32(rtwdev, R_AX_PCIE_WDT_TIMER_E0, val32);
2620 
2621 		rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1);
2622 		rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2);
2623 		rtw89_write32_set(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0);
2624 	} else {
2625 		rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M1, B_AX_PCIE_IO_RCY_WDT_MODE_M1);
2626 		rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_M2, B_AX_PCIE_IO_RCY_WDT_MODE_M2);
2627 		rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_E0, B_AX_PCIE_IO_RCY_WDT_MODE_E0);
2628 	}
2629 
2630 	rtw89_write32_clr(rtwdev, R_AX_PCIE_IO_RCY_S1, B_AX_PCIE_IO_RCY_WDT_MODE_S1);
2631 }
2632 
2633 static void rtw89_pci_set_dbg(struct rtw89_dev *rtwdev)
2634 {
2635 	if (rtwdev->chip->chip_id == RTL8852C)
2636 		return;
2637 
2638 	rtw89_write32_set(rtwdev, R_AX_PCIE_DBG_CTRL,
2639 			  B_AX_ASFF_FULL_NO_STK | B_AX_EN_STUCK_DBG);
2640 
2641 	rtw89_write32_mask(rtwdev, R_AX_PCIE_EXP_CTRL,
2642 			   B_AX_EN_STUCK_DBG | B_AX_ASFF_FULL_NO_STK,
2643 			   B_AX_EN_STUCK_DBG);
2644 
2645 	if (rtwdev->chip->chip_id == RTL8852A)
2646 		rtw89_write32_set(rtwdev, R_AX_PCIE_EXP_CTRL,
2647 				  B_AX_EN_CHKDSC_NO_RX_STUCK);
2648 }
2649 
2650 static void rtw89_pci_set_keep_reg(struct rtw89_dev *rtwdev)
2651 {
2652 	if (rtwdev->chip->chip_id == RTL8852C)
2653 		return;
2654 
2655 	rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
2656 			  B_AX_PCIE_TXRST_KEEP_REG | B_AX_PCIE_RXRST_KEEP_REG);
2657 }
2658 
2659 static void rtw89_pci_clr_idx_all_ax(struct rtw89_dev *rtwdev)
2660 {
2661 	const struct rtw89_pci_info *info = rtwdev->pci_info;
2662 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2663 	u32 val = B_AX_CLR_ACH0_IDX | B_AX_CLR_ACH1_IDX | B_AX_CLR_ACH2_IDX |
2664 		  B_AX_CLR_ACH3_IDX | B_AX_CLR_CH8_IDX | B_AX_CLR_CH9_IDX |
2665 		  B_AX_CLR_CH12_IDX;
2666 	u32 rxbd_rwptr_clr = info->rxbd_rwptr_clr_reg;
2667 	u32 txbd_rwptr_clr2 = info->txbd_rwptr_clr2_reg;
2668 
2669 	if (chip_id == RTL8852A || chip_id == RTL8852C)
2670 		val |= B_AX_CLR_ACH4_IDX | B_AX_CLR_ACH5_IDX |
2671 		       B_AX_CLR_ACH6_IDX | B_AX_CLR_ACH7_IDX;
2672 	/* clear DMA indexes */
2673 	rtw89_write32_set(rtwdev, R_AX_TXBD_RWPTR_CLR1, val);
2674 	if (chip_id == RTL8852A || chip_id == RTL8852C)
2675 		rtw89_write32_set(rtwdev, txbd_rwptr_clr2,
2676 				  B_AX_CLR_CH10_IDX | B_AX_CLR_CH11_IDX);
2677 	rtw89_write32_set(rtwdev, rxbd_rwptr_clr,
2678 			  B_AX_CLR_RXQ_IDX | B_AX_CLR_RPQ_IDX);
2679 }
2680 
2681 static int rtw89_pci_poll_txdma_ch_idle_ax(struct rtw89_dev *rtwdev)
2682 {
2683 	const struct rtw89_pci_info *info = rtwdev->pci_info;
2684 	u32 dma_busy1 = info->dma_busy1.addr;
2685 	u32 dma_busy2 = info->dma_busy2_reg;
2686 	u32 check, dma_busy;
2687 	int ret;
2688 
2689 	check = info->dma_busy1.mask;
2690 
2691 	ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0,
2692 				10, 100, false, rtwdev, dma_busy1);
2693 	if (ret)
2694 		return ret;
2695 
2696 	if (!dma_busy2)
2697 		return 0;
2698 
2699 	check = B_AX_CH10_BUSY | B_AX_CH11_BUSY;
2700 
2701 	ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0,
2702 				10, 100, false, rtwdev, dma_busy2);
2703 	if (ret)
2704 		return ret;
2705 
2706 	return 0;
2707 }
2708 
2709 static int rtw89_pci_poll_rxdma_ch_idle_ax(struct rtw89_dev *rtwdev)
2710 {
2711 	const struct rtw89_pci_info *info = rtwdev->pci_info;
2712 	u32 dma_busy3 = info->dma_busy3_reg;
2713 	u32 check, dma_busy;
2714 	int ret;
2715 
2716 	check = B_AX_RXQ_BUSY | B_AX_RPQ_BUSY;
2717 
2718 	ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0,
2719 				10, 100, false, rtwdev, dma_busy3);
2720 	if (ret)
2721 		return ret;
2722 
2723 	return 0;
2724 }
2725 
2726 static int rtw89_pci_poll_dma_all_idle(struct rtw89_dev *rtwdev)
2727 {
2728 	int ret;
2729 
2730 	ret = rtw89_pci_poll_txdma_ch_idle_ax(rtwdev);
2731 	if (ret) {
2732 		rtw89_err(rtwdev, "txdma ch busy\n");
2733 		return ret;
2734 	}
2735 
2736 	ret = rtw89_pci_poll_rxdma_ch_idle_ax(rtwdev);
2737 	if (ret) {
2738 		rtw89_err(rtwdev, "rxdma ch busy\n");
2739 		return ret;
2740 	}
2741 
2742 	return 0;
2743 }
2744 
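/* Apply the per-chip DMA operating mode: BD truncation for TX/RX, the
 * RX BD layout (packet vs. separated), DMA burst sizes, tag mode,
 * WD DMA intervals, the multi-tag limit, and the address-info width
 * matching the TX BD truncation mode.
 */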
2745 static int rtw89_pci_mode_op(struct rtw89_dev *rtwdev)
2746 {
2747 	const struct rtw89_pci_info *info = rtwdev->pci_info;
2748 	enum mac_ax_bd_trunc_mode txbd_trunc_mode = info->txbd_trunc_mode;
2749 	enum mac_ax_bd_trunc_mode rxbd_trunc_mode = info->rxbd_trunc_mode;
2750 	enum mac_ax_rxbd_mode rxbd_mode = info->rxbd_mode;
2751 	enum mac_ax_tag_mode tag_mode = info->tag_mode;
2752 	enum mac_ax_wd_dma_intvl wd_dma_idle_intvl = info->wd_dma_idle_intvl;
2753 	enum mac_ax_wd_dma_intvl wd_dma_act_intvl = info->wd_dma_act_intvl;
2754 	enum mac_ax_tx_burst tx_burst = info->tx_burst;
2755 	enum mac_ax_rx_burst rx_burst = info->rx_burst;
2756 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
2757 	u8 cv = rtwdev->hal.cv;
2758 	u32 val32;
2759 
2760 	if (txbd_trunc_mode == MAC_AX_BD_TRUNC) {
2761 		if (chip_id == RTL8852A && cv == CHIP_CBV)
2762 			rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE);
2763 	} else if (txbd_trunc_mode == MAC_AX_BD_NORM) {
2764 		if (chip_id == RTL8852A || chip_id == RTL8852B)
2765 			rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE);
2766 	}
2767 
2768 	if (rxbd_trunc_mode == MAC_AX_BD_TRUNC) {
2769 		if (chip_id == RTL8852A && cv == CHIP_CBV)
2770 			rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE);
2771 	} else if (rxbd_trunc_mode == MAC_AX_BD_NORM) {
2772 		if (chip_id == RTL8852A || chip_id == RTL8852B)
2773 			rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RX_TRUNC_MODE);
2774 	}
2775 
2776 	if (rxbd_mode == MAC_AX_RXBD_PKT) {
2777 		rtw89_write32_clr(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit);
2778 	} else if (rxbd_mode == MAC_AX_RXBD_SEP) {
2779 		rtw89_write32_set(rtwdev, info->init_cfg_reg, info->rxbd_mode_bit);
2780 
2781 		if (chip_id == RTL8852A || chip_id == RTL8852B)
2782 			rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2,
2783 					   B_AX_PCIE_RX_APPLEN_MASK, 0);
2784 	}
2785 
2786 	if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
2787 		rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_TXDMA_MASK, tx_burst);
2788 		rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_RXDMA_MASK, rx_burst);
2789 	} else if (chip_id == RTL8852C) {
2790 		rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_TXDMA_MASK, tx_burst);
2791 		rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_RXDMA_MASK, rx_burst);
2792 	}
2793 
2794 	if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
2795 		if (tag_mode == MAC_AX_TAG_SGL) {
2796 			val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) &
2797 					    ~B_AX_LATENCY_CONTROL;
2798 			rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32);
2799 		} else if (tag_mode == MAC_AX_TAG_MULTI) {
2800 			val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) |
2801 					    B_AX_LATENCY_CONTROL;
2802 			rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1, val32);
2803 		}
2804 	}
2805 
2806 	rtw89_write32_mask(rtwdev, info->exp_ctrl_reg, info->max_tag_num_mask,
2807 			   info->multi_tag_num);
2808 
2809 	if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
2810 		rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_IDLE,
2811 				   wd_dma_idle_intvl);
2812 		rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_ACT,
2813 				   wd_dma_act_intvl);
2814 	} else if (chip_id == RTL8852C) {
2815 		rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_IDLE_V1_MASK,
2816 				   wd_dma_idle_intvl);
2817 		rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_WD_ITVL_ACT_V1_MASK,
2818 				   wd_dma_act_intvl);
2819 	}
2820 
2821 	if (txbd_trunc_mode == MAC_AX_BD_TRUNC) {
2822 		rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING,
2823 				  B_AX_HOST_ADDR_INFO_8B_SEL);
2824 		rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH);
2825 	} else if (txbd_trunc_mode == MAC_AX_BD_NORM) {
2826 		rtw89_write32_clr(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING,
2827 				  B_AX_HOST_ADDR_INFO_8B_SEL);
2828 		rtw89_write32_set(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH);
2829 	}
2830 
2831 	return 0;
2832 }
2833 
2834 static int rtw89_pci_ops_deinit(struct rtw89_dev *rtwdev)
2835 {
2836 	const struct rtw89_pci_info *info = rtwdev->pci_info;
2837 
2838 	rtw89_pci_power_wake(rtwdev, false);
2839 
2840 	if (rtwdev->chip->chip_id == RTL8852A) {
2841 		/* ltr sw trigger */
2842 		rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_IDLE);
2843 	}
2844 	info->ltr_set(rtwdev, false);
2845 	rtw89_pci_ctrl_dma_all(rtwdev, false);
2846 	rtw89_pci_clr_idx_all(rtwdev);
2847 
2848 	return 0;
2849 }
2850 
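/* Pre-MAC-init bring-up: run the PHY and clock workarounds, stop and
 * drain all DMA, clear the ring indexes, set the operating mode, reset
 * BDRAM, then restart DMA with only the FW CMD channel enabled so that
 * firmware download can proceed.
 */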
2851 static int rtw89_pci_ops_mac_pre_init_ax(struct rtw89_dev *rtwdev)
2852 {
2853 	const struct rtw89_pci_info *info = rtwdev->pci_info;
2854 	int ret;
2855 
2856 	rtw89_pci_ber(rtwdev);
2857 	rtw89_pci_rxdma_prefth(rtwdev);
2858 	rtw89_pci_l1off_pwroff(rtwdev);
2859 	rtw89_pci_deglitch_setting(rtwdev);
2860 	ret = rtw89_pci_l2_rxen_lat(rtwdev);
2861 	if (ret) {
2862 		rtw89_err(rtwdev, "[ERR] pcie l2 rxen lat %d\n", ret);
2863 		return ret;
2864 	}
2865 
2866 	rtw89_pci_aphy_pwrcut(rtwdev);
2867 	rtw89_pci_hci_ldo(rtwdev);
2868 	rtw89_pci_dphy_delay(rtwdev);
2869 
2870 	ret = rtw89_pci_autok_x(rtwdev);
2871 	if (ret) {
2872 		rtw89_err(rtwdev, "[ERR] pcie autok_x fail %d\n", ret);
2873 		return ret;
2874 	}
2875 
2876 	ret = rtw89_pci_auto_refclk_cal(rtwdev, false);
2877 	if (ret) {
2878 		rtw89_err(rtwdev, "[ERR] pcie autok fail %d\n", ret);
2879 		return ret;
2880 	}
2881 
2882 	rtw89_pci_power_wake_ax(rtwdev, true);
2883 	rtw89_pci_autoload_hang(rtwdev);
2884 	rtw89_pci_l12_vmain(rtwdev);
2885 	rtw89_pci_gen2_force_ib(rtwdev);
2886 	rtw89_pci_l1_ent_lat(rtwdev);
2887 	rtw89_pci_wd_exit_l1(rtwdev);
2888 	rtw89_pci_set_sic(rtwdev);
2889 	rtw89_pci_set_lbc(rtwdev);
2890 	rtw89_pci_set_io_rcy(rtwdev);
2891 	rtw89_pci_set_dbg(rtwdev);
2892 	rtw89_pci_set_keep_reg(rtwdev);
2893 
2894 	rtw89_write32_set(rtwdev, info->dma_stop1.addr, B_AX_STOP_WPDMA);
2895 
2896 	/* stop DMA activities */
2897 	rtw89_pci_ctrl_dma_all(rtwdev, false);
2898 
2899 	ret = rtw89_pci_poll_dma_all_idle(rtwdev);
2900 	if (ret) {
2901 		rtw89_err(rtwdev, "[ERR] poll pcie dma all idle\n");
2902 		return ret;
2903 	}
2904 
2905 	rtw89_pci_clr_idx_all(rtwdev);
2906 	rtw89_pci_mode_op(rtwdev);
2907 
2908 	/* fill TRX BD indexes */
2909 	rtw89_pci_ops_reset(rtwdev);
2910 
2911 	ret = rtw89_pci_rst_bdram_ax(rtwdev);
2912 	if (ret) {
2913 		rtw89_warn(rtwdev, "reset bdram busy\n");
2914 		return ret;
2915 	}
2916 
2917 	/* disable all channels except the FW CMD channel, to download firmware */
2918 	rtw89_pci_ctrl_txdma_ch_ax(rtwdev, false);
2919 	rtw89_pci_ctrl_txdma_fw_ch_ax(rtwdev, true);
2920 
2921 	/* start DMA activities */
2922 	rtw89_pci_ctrl_dma_all(rtwdev, true);
2923 
2924 	return 0;
2925 }
2926 
2927 static int rtw89_pci_ops_mac_pre_deinit_ax(struct rtw89_dev *rtwdev)
2928 {
2929 	rtw89_pci_power_wake_ax(rtwdev, false);
2930 
2931 	return 0;
2932 }
2933 
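/* AX LTR setup: verify the LTR registers read back sanely, then turn on
 * hardware-managed LTR with a 500 us space index, a 3.2 ms idle timer,
 * RX thresholds of 0x28 and fixed idle/active latency pairs.
 */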
2934 int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev, bool en)
2935 {
2936 	u32 val;
2937 
2938 	if (!en)
2939 		return 0;
2940 
2941 	val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0);
2942 	if (rtw89_pci_ltr_is_err_reg_val(val))
2943 		return -EINVAL;
2944 	val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1);
2945 	if (rtw89_pci_ltr_is_err_reg_val(val))
2946 		return -EINVAL;
2947 	val = rtw89_read32(rtwdev, R_AX_LTR_IDLE_LATENCY);
2948 	if (rtw89_pci_ltr_is_err_reg_val(val))
2949 		return -EINVAL;
2950 	val = rtw89_read32(rtwdev, R_AX_LTR_ACTIVE_LATENCY);
2951 	if (rtw89_pci_ltr_is_err_reg_val(val))
2952 		return -EINVAL;
2953 
2954 	rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_HW_EN | B_AX_LTR_EN |
2955 						   B_AX_LTR_WD_NOEMP_CHK);
2956 	rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_SPACE_IDX_MASK,
2957 			   PCI_LTR_SPC_500US);
2958 	rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK,
2959 			   PCI_LTR_IDLE_TIMER_3_2MS);
2960 	rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28);
2961 	rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28);
2962 	rtw89_write32(rtwdev, R_AX_LTR_IDLE_LATENCY, 0x90039003);
2963 	rtw89_write32(rtwdev, R_AX_LTR_ACTIVE_LATENCY, 0x880b880b);
2964 
2965 	return 0;
2966 }
2967 EXPORT_SYMBOL(rtw89_pci_ltr_set);
2968 
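/* V1 LTR flow: disabling hands control to the driver by forcing the
 * idle latency index through R_AX_LTR_DEC_CTRL; enabling lets the
 * hardware decide via B_AX_LTR_HW_DEC_EN. The timer, threshold and
 * latency programming mirrors the AX variant above.
 */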
2969 int rtw89_pci_ltr_set_v1(struct rtw89_dev *rtwdev, bool en)
2970 {
2971 	u32 dec_ctrl;
2972 	u32 val32;
2973 
2974 	val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0);
2975 	if (rtw89_pci_ltr_is_err_reg_val(val32))
2976 		return -EINVAL;
2977 	val32 = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1);
2978 	if (rtw89_pci_ltr_is_err_reg_val(val32))
2979 		return -EINVAL;
2980 	dec_ctrl = rtw89_read32(rtwdev, R_AX_LTR_DEC_CTRL);
2981 	if (rtw89_pci_ltr_is_err_reg_val(dec_ctrl))
2982 		return -EINVAL;
2983 	val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX3);
2984 	if (rtw89_pci_ltr_is_err_reg_val(val32))
2985 		return -EINVAL;
2986 	val32 = rtw89_read32(rtwdev, R_AX_LTR_LATENCY_IDX0);
2987 	if (rtw89_pci_ltr_is_err_reg_val(val32))
2988 		return -EINVAL;
2989 
2990 	if (!en) {
2991 		dec_ctrl &= ~(LTR_EN_BITS | B_AX_LTR_IDX_DRV_MASK | B_AX_LTR_HW_DEC_EN);
2992 		dec_ctrl |= FIELD_PREP(B_AX_LTR_IDX_DRV_MASK, PCIE_LTR_IDX_IDLE) |
2993 			    B_AX_LTR_REQ_DRV;
2994 	} else {
2995 		dec_ctrl |= B_AX_LTR_HW_DEC_EN;
2996 	}
2997 
2998 	dec_ctrl &= ~B_AX_LTR_SPACE_IDX_V1_MASK;
2999 	dec_ctrl |= FIELD_PREP(B_AX_LTR_SPACE_IDX_V1_MASK, PCI_LTR_SPC_500US);
3000 
3001 	if (en)
3002 		rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0,
3003 				  B_AX_LTR_WD_NOEMP_CHK_V1 | B_AX_LTR_HW_EN);
3004 	rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK,
3005 			   PCI_LTR_IDLE_TIMER_3_2MS);
3006 	rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28);
3007 	rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28);
3008 	rtw89_write32(rtwdev, R_AX_LTR_DEC_CTRL, dec_ctrl);
3009 	rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX3, 0x90039003);
3010 	rtw89_write32(rtwdev, R_AX_LTR_LATENCY_IDX0, 0x880b880b);
3011 
3012 	return 0;
3013 }
3014 EXPORT_SYMBOL(rtw89_pci_ltr_set_v1);
3015 
3016 static int rtw89_pci_ops_mac_post_init_ax(struct rtw89_dev *rtwdev)
3017 {
3018 	const struct rtw89_pci_info *info = rtwdev->pci_info;
3019 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
3020 	int ret;
3021 
3022 	ret = info->ltr_set(rtwdev, true);
3023 	if (ret) {
3024 		rtw89_err(rtwdev, "pci ltr set fail\n");
3025 		return ret;
3026 	}
3027 	if (chip_id == RTL8852A) {
3028 		/* ltr sw trigger */
3029 		rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_ACT);
3030 	}
3031 	if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
3032 		/* ADDR info 8-byte mode */
3033 		rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING,
3034 				  B_AX_HOST_ADDR_INFO_8B_SEL);
3035 		rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH);
3036 	}
3037 
3038 	/* enable DMA for all queues */
3039 	rtw89_pci_ctrl_txdma_ch_ax(rtwdev, true);
3040 
3041 	/* Release PCI IO */
3042 	rtw89_write32_clr(rtwdev, info->dma_stop1.addr,
3043 			  B_AX_STOP_WPDMA | B_AX_STOP_PCIEIO);
3044 
3045 	return 0;
3046 }
3047 
3048 static int rtw89_pci_claim_device(struct rtw89_dev *rtwdev,
3049 				  struct pci_dev *pdev)
3050 {
3051 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3052 	int ret;
3053 
3054 	ret = pci_enable_device(pdev);
3055 	if (ret) {
3056 		rtw89_err(rtwdev, "failed to enable pci device\n");
3057 		return ret;
3058 	}
3059 
3060 	pci_set_master(pdev);
3061 	pci_set_drvdata(pdev, rtwdev->hw);
3062 
3063 	rtwpci->pdev = pdev;
3064 
3065 	return 0;
3066 }
3067 
3068 static void rtw89_pci_declaim_device(struct rtw89_dev *rtwdev,
3069 				     struct pci_dev *pdev)
3070 {
3071 	pci_disable_device(pdev);
3072 }
3073 
3074 static bool rtw89_pci_chip_is_manual_dac(struct rtw89_dev *rtwdev)
3075 {
3076 	const struct rtw89_chip_info *chip = rtwdev->chip;
3077 
3078 	switch (chip->chip_id) {
3079 	case RTL8852A:
3080 	case RTL8852B:
3081 	case RTL8851B:
3082 	case RTL8852BT:
3083 		return true;
3084 	default:
3085 		return false;
3086 	}
3087 }
3088 
3089 static bool rtw89_pci_is_dac_compatible_bridge(struct rtw89_dev *rtwdev)
3090 {
3091 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3092 	struct pci_dev *bridge = pci_upstream_bridge(rtwpci->pdev);
3093 
3094 	if (!rtw89_pci_chip_is_manual_dac(rtwdev))
3095 		return true;
3096 
3097 	if (!bridge)
3098 		return false;
3099 
3100 	switch (bridge->vendor) {
3101 	case PCI_VENDOR_ID_INTEL:
3102 		return true;
3103 	case PCI_VENDOR_ID_ASMEDIA:
3104 		if (bridge->device == 0x2806)
3105 			return true;
3106 		break;
3107 	}
3108 
3109 	return false;
3110 }
3111 
3112 static int rtw89_pci_cfg_dac(struct rtw89_dev *rtwdev, bool force)
3113 {
3114 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3115 	struct pci_dev *pdev = rtwpci->pdev;
3116 	int ret;
3117 	u8 val;
3118 
3119 	if (!rtwpci->enable_dac && !force)
3120 		return 0;
3121 
3122 	if (!rtw89_pci_chip_is_manual_dac(rtwdev))
3123 		return 0;
3124 
3125 	/* Configure DAC only via PCI config API, not DBI interfaces */
3126 	ret = pci_read_config_byte(pdev, RTW89_PCIE_L1_CTRL, &val);
3127 	if (ret)
3128 		return ret;
3129 
3130 	val |= RTW89_PCIE_BIT_EN_64BITS;
3131 	return pci_write_config_byte(pdev, RTW89_PCIE_L1_CTRL, val);
3132 }
3133 
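/* Map BAR 2 and negotiate the DMA mask: try 36 bits first (the chips
 * listed above need DAC switched on by hand), drop back to 32 bits if
 * the DAC config write fails, and keep the default 32-bit mask when
 * the 36-bit mask itself is rejected.
 */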
3134 static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev,
3135 				   struct pci_dev *pdev)
3136 {
3137 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3138 	unsigned long resource_len;
3139 	u8 bar_id = 2;
3140 	int ret;
3141 
3142 	ret = pci_request_regions(pdev, KBUILD_MODNAME);
3143 	if (ret) {
3144 		rtw89_err(rtwdev, "failed to request pci regions\n");
3145 		goto err;
3146 	}
3147 
3148 	if (!rtw89_pci_is_dac_compatible_bridge(rtwdev))
3149 		goto try_dac_done;
3150 
3151 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36));
3152 	if (!ret) {
3153 		ret = rtw89_pci_cfg_dac(rtwdev, true);
3154 		if (!ret) {
3155 			rtwpci->enable_dac = true;
3156 			goto try_dac_done;
3157 		}
3158 
3159 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3160 		if (ret) {
3161 			rtw89_err(rtwdev,
3162 				  "failed to set dma and consistent mask to 32/36-bit\n");
3163 			goto err_release_regions;
3164 		}
3165 	}
3166 try_dac_done:
3167 
3168 	resource_len = pci_resource_len(pdev, bar_id);
3169 	rtwpci->mmap = pci_iomap(pdev, bar_id, resource_len);
3170 	if (!rtwpci->mmap) {
3171 		rtw89_err(rtwdev, "failed to map pci io\n");
3172 		ret = -EIO;
3173 		goto err_release_regions;
3174 	}
3175 
3176 	return 0;
3177 
3178 err_release_regions:
3179 	pci_release_regions(pdev);
3180 err:
3181 	return ret;
3182 }
3183 
3184 static void rtw89_pci_clear_mapping(struct rtw89_dev *rtwdev,
3185 				    struct pci_dev *pdev)
3186 {
3187 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3188 
3189 	if (rtwpci->mmap) {
3190 		pci_iounmap(pdev, rtwpci->mmap);
3191 		pci_release_regions(pdev);
3192 	}
3193 }
3194 
3195 static void rtw89_pci_free_tx_wd_ring(struct rtw89_dev *rtwdev,
3196 				      struct pci_dev *pdev,
3197 				      struct rtw89_pci_tx_ring *tx_ring)
3198 {
3199 	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
3200 	u8 *head = wd_ring->head;
3201 	dma_addr_t dma = wd_ring->dma;
3202 	u32 page_size = wd_ring->page_size;
3203 	u32 page_num = wd_ring->page_num;
3204 	u32 ring_sz = page_size * page_num;
3205 
3206 	dma_free_coherent(&pdev->dev, ring_sz, head, dma);
3207 	wd_ring->head = NULL;
3208 }
3209 
3210 static void rtw89_pci_free_tx_ring(struct rtw89_dev *rtwdev,
3211 				   struct pci_dev *pdev,
3212 				   struct rtw89_pci_tx_ring *tx_ring)
3213 {
3214 	int ring_sz;
3215 	u8 *head;
3216 	dma_addr_t dma;
3217 
3218 	head = tx_ring->bd_ring.head;
3219 	dma = tx_ring->bd_ring.dma;
3220 	ring_sz = tx_ring->bd_ring.desc_size * tx_ring->bd_ring.len;
3221 	dma_free_coherent(&pdev->dev, ring_sz, head, dma);
3222 
3223 	tx_ring->bd_ring.head = NULL;
3224 }
3225 
3226 static void rtw89_pci_free_tx_rings(struct rtw89_dev *rtwdev,
3227 				    struct pci_dev *pdev)
3228 {
3229 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3230 	const struct rtw89_pci_info *info = rtwdev->pci_info;
3231 	struct rtw89_pci_tx_ring *tx_ring;
3232 	int i;
3233 
3234 	for (i = 0; i < RTW89_TXCH_NUM; i++) {
3235 		if (info->tx_dma_ch_mask & BIT(i))
3236 			continue;
3237 		tx_ring = &rtwpci->tx_rings[i];
3238 		rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring);
3239 		rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring);
3240 	}
3241 }
3242 
3243 static void rtw89_pci_free_rx_ring(struct rtw89_dev *rtwdev,
3244 				   struct pci_dev *pdev,
3245 				   struct rtw89_pci_rx_ring *rx_ring)
3246 {
3247 	struct rtw89_pci_rx_info *rx_info;
3248 	struct sk_buff *skb;
3249 	dma_addr_t dma;
3250 	u32 buf_sz;
3251 	u8 *head;
3252 	int ring_sz = rx_ring->bd_ring.desc_size * rx_ring->bd_ring.len;
3253 	int i;
3254 
3255 	buf_sz = rx_ring->buf_sz;
3256 	for (i = 0; i < rx_ring->bd_ring.len; i++) {
3257 		skb = rx_ring->buf[i];
3258 		if (!skb)
3259 			continue;
3260 
3261 		rx_info = RTW89_PCI_RX_SKB_CB(skb);
3262 		dma = rx_info->dma;
3263 		dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
3264 		dev_kfree_skb(skb);
3265 		rx_ring->buf[i] = NULL;
3266 	}
3267 
3268 	head = rx_ring->bd_ring.head;
3269 	dma = rx_ring->bd_ring.dma;
3270 	dma_free_coherent(&pdev->dev, ring_sz, head, dma);
3271 
3272 	rx_ring->bd_ring.head = NULL;
3273 }
3274 
3275 static void rtw89_pci_free_rx_rings(struct rtw89_dev *rtwdev,
3276 				    struct pci_dev *pdev)
3277 {
3278 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3279 	struct rtw89_pci_rx_ring *rx_ring;
3280 	int i;
3281 
3282 	for (i = 0; i < RTW89_RXCH_NUM; i++) {
3283 		rx_ring = &rtwpci->rx_rings[i];
3284 		rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring);
3285 	}
3286 }
3287 
3288 static void rtw89_pci_free_trx_rings(struct rtw89_dev *rtwdev,
3289 				     struct pci_dev *pdev)
3290 {
3291 	rtw89_pci_free_rx_rings(rtwdev, pdev);
3292 	rtw89_pci_free_tx_rings(rtwdev, pdev);
3293 }
3294 
3295 static int rtw89_pci_init_rx_bd(struct rtw89_dev *rtwdev, struct pci_dev *pdev,
3296 				struct rtw89_pci_rx_ring *rx_ring,
3297 				struct sk_buff *skb, int buf_sz, u32 idx)
3298 {
3299 	struct rtw89_pci_rx_info *rx_info;
3300 	struct rtw89_pci_rx_bd_32 *rx_bd;
3301 	dma_addr_t dma;
3302 
3303 	if (!skb)
3304 		return -EINVAL;
3305 
3306 	dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE);
3307 	if (dma_mapping_error(&pdev->dev, dma))
3308 		return -EBUSY;
3309 
3310 	rx_info = RTW89_PCI_RX_SKB_CB(skb);
3311 	rx_bd = RTW89_PCI_RX_BD(rx_ring, idx);
3312 
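	/* Program the RX buffer descriptor: buffer size, the low 32 bits of
	 * the DMA address, and the upper address bits (used with DAC) in the
	 * opt word.
	 */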
3313 	memset(rx_bd, 0, sizeof(*rx_bd));
3314 	rx_bd->buf_size = cpu_to_le16(buf_sz);
3315 	rx_bd->dma = cpu_to_le32(dma);
3316 	rx_bd->opt = le16_encode_bits(upper_32_bits(dma), RTW89_PCI_RXBD_OPT_DMA_HI);
3317 	rx_info->dma = dma;
3318 
3319 	return 0;
3320 }
3321 
3322 static int rtw89_pci_alloc_tx_wd_ring(struct rtw89_dev *rtwdev,
3323 				      struct pci_dev *pdev,
3324 				      struct rtw89_pci_tx_ring *tx_ring,
3325 				      enum rtw89_tx_channel txch)
3326 {
3327 	struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
3328 	struct rtw89_pci_tx_wd *txwd;
3329 	dma_addr_t dma;
3330 	dma_addr_t cur_paddr;
3331 	u8 *head;
3332 	u8 *cur_vaddr;
3333 	u32 page_size = RTW89_PCI_TXWD_PAGE_SIZE;
3334 	u32 page_num = RTW89_PCI_TXWD_NUM_MAX;
3335 	u32 ring_sz = page_size * page_num;
3336 	u32 page_offset;
3337 	int i;
3338 
3339 	/* FWCMD queue doesn't use txwd as pages */
3340 	if (txch == RTW89_TXCH_CH12)
3341 		return 0;
3342 
3343 	head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
3344 	if (!head)
3345 		return -ENOMEM;
3346 
3347 	INIT_LIST_HEAD(&wd_ring->free_pages);
3348 	wd_ring->head = head;
3349 	wd_ring->dma = dma;
3350 	wd_ring->page_size = page_size;
3351 	wd_ring->page_num = page_num;
3352 
3353 	page_offset = 0;
3354 	for (i = 0; i < page_num; i++) {
3355 		txwd = &wd_ring->pages[i];
3356 		cur_paddr = dma + page_offset;
3357 		cur_vaddr = head + page_offset;
3358 
3359 		skb_queue_head_init(&txwd->queue);
3360 		INIT_LIST_HEAD(&txwd->list);
3361 		txwd->paddr = cur_paddr;
3362 		txwd->vaddr = cur_vaddr;
3363 		txwd->len = page_size;
3364 		txwd->seq = i;
3365 		rtw89_pci_enqueue_txwd(tx_ring, txwd);
3366 
3367 		page_offset += page_size;
3368 	}
3369 
3370 	return 0;
3371 }
3372 
3373 static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev,
3374 				   struct pci_dev *pdev,
3375 				   struct rtw89_pci_tx_ring *tx_ring,
3376 				   u32 desc_size, u32 len,
3377 				   enum rtw89_tx_channel txch)
3378 {
3379 	const struct rtw89_pci_ch_dma_addr *txch_addr;
3380 	int ring_sz = desc_size * len;
3381 	u8 *head;
3382 	dma_addr_t dma;
3383 	int ret;
3384 
3385 	ret = rtw89_pci_alloc_tx_wd_ring(rtwdev, pdev, tx_ring, txch);
3386 	if (ret) {
3387 		rtw89_err(rtwdev, "failed to alloc txwd ring of txch %d\n", txch);
3388 		goto err;
3389 	}
3390 
3391 	ret = rtw89_pci_get_txch_addrs(rtwdev, txch, &txch_addr);
3392 	if (ret) {
3393 		rtw89_err(rtwdev, "failed to get address of txch %d\n", txch);
3394 		goto err_free_wd_ring;
3395 	}
3396 
3397 	head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
3398 	if (!head) {
3399 		ret = -ENOMEM;
3400 		goto err_free_wd_ring;
3401 	}
3402 
3403 	INIT_LIST_HEAD(&tx_ring->busy_pages);
3404 	tx_ring->bd_ring.head = head;
3405 	tx_ring->bd_ring.dma = dma;
3406 	tx_ring->bd_ring.len = len;
3407 	tx_ring->bd_ring.desc_size = desc_size;
3408 	tx_ring->bd_ring.addr = *txch_addr;
3409 	tx_ring->bd_ring.wp = 0;
3410 	tx_ring->bd_ring.rp = 0;
3411 	tx_ring->txch = txch;
3412 
3413 	return 0;
3414 
3415 err_free_wd_ring:
3416 	rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring);
3417 err:
3418 	return ret;
3419 }
3420 
3421 static int rtw89_pci_alloc_tx_rings(struct rtw89_dev *rtwdev,
3422 				    struct pci_dev *pdev)
3423 {
3424 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3425 	const struct rtw89_pci_info *info = rtwdev->pci_info;
3426 	struct rtw89_pci_tx_ring *tx_ring;
3427 	u32 desc_size;
3428 	u32 len;
3429 	u32 i, tx_allocated;
3430 	int ret;
3431 
3432 	for (i = 0; i < RTW89_TXCH_NUM; i++) {
3433 		if (info->tx_dma_ch_mask & BIT(i))
3434 			continue;
3435 		tx_ring = &rtwpci->tx_rings[i];
3436 		desc_size = sizeof(struct rtw89_pci_tx_bd_32);
3437 		len = RTW89_PCI_TXBD_NUM_MAX;
3438 		ret = rtw89_pci_alloc_tx_ring(rtwdev, pdev, tx_ring,
3439 					      desc_size, len, i);
3440 		if (ret) {
3441 			rtw89_err(rtwdev, "failed to alloc tx ring %d\n", i);
3442 			goto err_free;
3443 		}
3444 	}
3445 
3446 	return 0;
3447 
3448 err_free:
3449 	tx_allocated = i;
3450 	for (i = 0; i < tx_allocated; i++) {
3451 		tx_ring = &rtwpci->tx_rings[i];
3452 		rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring);
3453 	}
3454 
3455 	return ret;
3456 }
3457 
3458 static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev,
3459 				   struct pci_dev *pdev,
3460 				   struct rtw89_pci_rx_ring *rx_ring,
3461 				   u32 desc_size, u32 len, u32 rxch)
3462 {
3463 	const struct rtw89_pci_info *info = rtwdev->pci_info;
3464 	const struct rtw89_pci_ch_dma_addr *rxch_addr;
3465 	struct sk_buff *skb;
3466 	u8 *head;
3467 	dma_addr_t dma;
3468 	int ring_sz = desc_size * len;
3469 	int buf_sz = RTW89_PCI_RX_BUF_SIZE;
3470 	int i, allocated;
3471 	int ret;
3472 
3473 	ret = rtw89_pci_get_rxch_addrs(rtwdev, rxch, &rxch_addr);
3474 	if (ret) {
3475 		rtw89_err(rtwdev, "failed to get address of rxch %d\n", rxch);
3476 		return ret;
3477 	}
3478 
3479 	head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
3480 	if (!head) {
3481 		ret = -ENOMEM;
3482 		goto err;
3483 	}
3484 
3485 	rx_ring->bd_ring.head = head;
3486 	rx_ring->bd_ring.dma = dma;
3487 	rx_ring->bd_ring.len = len;
3488 	rx_ring->bd_ring.desc_size = desc_size;
3489 	rx_ring->bd_ring.addr = *rxch_addr;
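	/* Chips where rp == wp means "full" (rx_ring_eq_is_full) park the
	 * write pointer one slot behind the read pointer, so an empty ring
	 * is never mistaken for a full one.
	 */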
3490 	if (info->rx_ring_eq_is_full)
3491 		rx_ring->bd_ring.wp = len - 1;
3492 	else
3493 		rx_ring->bd_ring.wp = 0;
3494 	rx_ring->bd_ring.rp = 0;
3495 	rx_ring->buf_sz = buf_sz;
3496 	rx_ring->diliver_skb = NULL;
3497 	rx_ring->diliver_desc.ready = false;
3498 	rx_ring->target_rx_tag = 0;
3499 
3500 	for (i = 0; i < len; i++) {
3501 		skb = dev_alloc_skb(buf_sz);
3502 		if (!skb) {
3503 			ret = -ENOMEM;
3504 			goto err_free;
3505 		}
3506 
3507 		memset(skb->data, 0, buf_sz);
3508 		rx_ring->buf[i] = skb;
3509 		ret = rtw89_pci_init_rx_bd(rtwdev, pdev, rx_ring, skb,
3510 					   buf_sz, i);
3511 		if (ret) {
3512 			rtw89_err(rtwdev, "failed to init rx buf %d\n", i);
3513 			dev_kfree_skb_any(skb);
3514 			rx_ring->buf[i] = NULL;
3515 			goto err_free;
3516 		}
3517 	}
3518 
3519 	return 0;
3520 
3521 err_free:
3522 	allocated = i;
3523 	for (i = 0; i < allocated; i++) {
3524 		skb = rx_ring->buf[i];
3525 		if (!skb)
3526 			continue;
3527 		dma = *((dma_addr_t *)skb->cb);
3528 		dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
3529 		dev_kfree_skb(skb);
3530 		rx_ring->buf[i] = NULL;
3531 	}
3532 
3533 	head = rx_ring->bd_ring.head;
3534 	dma = rx_ring->bd_ring.dma;
3535 	dma_free_coherent(&pdev->dev, ring_sz, head, dma);
3536 
3537 	rx_ring->bd_ring.head = NULL;
3538 err:
3539 	return ret;
3540 }
3541 
3542 static int rtw89_pci_alloc_rx_rings(struct rtw89_dev *rtwdev,
3543 				    struct pci_dev *pdev)
3544 {
3545 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3546 	struct rtw89_pci_rx_ring *rx_ring;
3547 	u32 desc_size;
3548 	u32 len;
3549 	int i, rx_allocated;
3550 	int ret;
3551 
3552 	for (i = 0; i < RTW89_RXCH_NUM; i++) {
3553 		rx_ring = &rtwpci->rx_rings[i];
3554 		desc_size = sizeof(struct rtw89_pci_rx_bd_32);
3555 		len = RTW89_PCI_RXBD_NUM_MAX;
3556 		ret = rtw89_pci_alloc_rx_ring(rtwdev, pdev, rx_ring,
3557 					      desc_size, len, i);
3558 		if (ret) {
3559 			rtw89_err(rtwdev, "failed to alloc rx ring %d\n", i);
3560 			goto err_free;
3561 		}
3562 	}
3563 
3564 	return 0;
3565 
3566 err_free:
3567 	rx_allocated = i;
3568 	for (i = 0; i < rx_allocated; i++) {
3569 		rx_ring = &rtwpci->rx_rings[i];
3570 		rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring);
3571 	}
3572 
3573 	return ret;
3574 }
3575 
3576 static int rtw89_pci_alloc_trx_rings(struct rtw89_dev *rtwdev,
3577 				     struct pci_dev *pdev)
3578 {
3579 	int ret;
3580 
3581 	ret = rtw89_pci_alloc_tx_rings(rtwdev, pdev);
3582 	if (ret) {
3583 		rtw89_err(rtwdev, "failed to alloc dma tx rings\n");
3584 		goto err;
3585 	}
3586 
3587 	ret = rtw89_pci_alloc_rx_rings(rtwdev, pdev);
3588 	if (ret) {
3589 		rtw89_err(rtwdev, "failed to alloc dma rx rings\n");
3590 		goto err_free_tx_rings;
3591 	}
3592 
3593 	return 0;
3594 
3595 err_free_tx_rings:
3596 	rtw89_pci_free_tx_rings(rtwdev, pdev);
3597 err:
3598 	return ret;
3599 }
3600 
3601 static void rtw89_pci_h2c_init(struct rtw89_dev *rtwdev,
3602 			       struct rtw89_pci *rtwpci)
3603 {
3604 	skb_queue_head_init(&rtwpci->h2c_queue);
3605 	skb_queue_head_init(&rtwpci->h2c_release_queue);
3606 }
3607 
3608 static int rtw89_pci_setup_resource(struct rtw89_dev *rtwdev,
3609 				    struct pci_dev *pdev)
3610 {
3611 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3612 	int ret;
3613 
3614 	ret = rtw89_pci_setup_mapping(rtwdev, pdev);
3615 	if (ret) {
3616 		rtw89_err(rtwdev, "failed to setup pci mapping\n");
3617 		goto err;
3618 	}
3619 
3620 	ret = rtw89_pci_alloc_trx_rings(rtwdev, pdev);
3621 	if (ret) {
3622 		rtw89_err(rtwdev, "failed to alloc pci trx rings\n");
3623 		goto err_pci_unmap;
3624 	}
3625 
3626 	rtw89_pci_h2c_init(rtwdev, rtwpci);
3627 
3628 	spin_lock_init(&rtwpci->irq_lock);
3629 	spin_lock_init(&rtwpci->trx_lock);
3630 
3631 	return 0;
3632 
3633 err_pci_unmap:
3634 	rtw89_pci_clear_mapping(rtwdev, pdev);
3635 err:
3636 	return ret;
3637 }
3638 
3639 static void rtw89_pci_clear_resource(struct rtw89_dev *rtwdev,
3640 				     struct pci_dev *pdev)
3641 {
3642 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3643 
3644 	rtw89_pci_free_trx_rings(rtwdev, pdev);
3645 	rtw89_pci_clear_mapping(rtwdev, pdev);
3646 	rtw89_pci_release_fwcmd(rtwdev, rtwpci,
3647 				skb_queue_len(&rtwpci->h2c_queue), true);
3648 }
3649 
3650 void rtw89_pci_config_intr_mask(struct rtw89_dev *rtwdev)
3651 {
3652 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3653 	const struct rtw89_chip_info *chip = rtwdev->chip;
3654 	u32 hs0isr_ind_int_en = B_AX_HS0ISR_IND_INT_EN;
3655 
3656 	if (chip->chip_id == RTL8851B)
3657 		hs0isr_ind_int_en = B_AX_HS0ISR_IND_INT_EN_WKARND;
3658 
3659 	rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN;
3660 
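	/* Under SER recovery only the HS0 indirect interrupt stays armed;
	 * normal operation enables the full TX/RX DMA interrupt set.
	 */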
3661 	if (rtwpci->under_recovery) {
3662 		rtwpci->intrs[0] = hs0isr_ind_int_en;
3663 		rtwpci->intrs[1] = 0;
3664 	} else {
3665 		rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN |
3666 				   B_AX_RXDMA_INT_EN |
3667 				   B_AX_RXP1DMA_INT_EN |
3668 				   B_AX_RPQDMA_INT_EN |
3669 				   B_AX_RXDMA_STUCK_INT_EN |
3670 				   B_AX_RDU_INT_EN |
3671 				   B_AX_RPQBD_FULL_INT_EN |
3672 				   hs0isr_ind_int_en;
3673 
3674 		rtwpci->intrs[1] = B_AX_HC10ISR_IND_INT_EN;
3675 	}
3676 }
3677 EXPORT_SYMBOL(rtw89_pci_config_intr_mask);
3678 
3679 static void rtw89_pci_recovery_intr_mask_v1(struct rtw89_dev *rtwdev)
3680 {
3681 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3682 
3683 	rtwpci->ind_intrs = B_AX_HS0ISR_IND_INT_EN;
3684 	rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN;
3685 	rtwpci->intrs[0] = 0;
3686 	rtwpci->intrs[1] = 0;
3687 }
3688 
3689 static void rtw89_pci_default_intr_mask_v1(struct rtw89_dev *rtwdev)
3690 {
3691 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3692 
3693 	rtwpci->ind_intrs = B_AX_HCI_AXIDMA_INT_EN |
3694 			    B_AX_HS1ISR_IND_INT_EN |
3695 			    B_AX_HS0ISR_IND_INT_EN;
3696 	rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN;
3697 	rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN |
3698 			   B_AX_RXDMA_INT_EN |
3699 			   B_AX_RXP1DMA_INT_EN |
3700 			   B_AX_RPQDMA_INT_EN |
3701 			   B_AX_RXDMA_STUCK_INT_EN |
3702 			   B_AX_RDU_INT_EN |
3703 			   B_AX_RPQBD_FULL_INT_EN;
3704 	rtwpci->intrs[1] = B_AX_GPIO18_INT_EN;
3705 }
3706 
3707 static void rtw89_pci_low_power_intr_mask_v1(struct rtw89_dev *rtwdev)
3708 {
3709 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3710 
3711 	rtwpci->ind_intrs = B_AX_HS1ISR_IND_INT_EN |
3712 			    B_AX_HS0ISR_IND_INT_EN;
3713 	rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN | B_AX_WDT_TIMEOUT_INT_EN;
3714 	rtwpci->intrs[0] = 0;
3715 	rtwpci->intrs[1] = B_AX_GPIO18_INT_EN;
3716 }
3717 
3718 void rtw89_pci_config_intr_mask_v1(struct rtw89_dev *rtwdev)
3719 {
3720 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3721 
3722 	if (rtwpci->under_recovery)
3723 		rtw89_pci_recovery_intr_mask_v1(rtwdev);
3724 	else if (rtwpci->low_power)
3725 		rtw89_pci_low_power_intr_mask_v1(rtwdev);
3726 	else
3727 		rtw89_pci_default_intr_mask_v1(rtwdev);
3728 }
3729 EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v1);
3730 
3731 static void rtw89_pci_recovery_intr_mask_v2(struct rtw89_dev *rtwdev)
3732 {
3733 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3734 
3735 	rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0;
3736 	rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN;
3737 	rtwpci->intrs[0] = 0;
3738 	rtwpci->intrs[1] = 0;
3739 }
3740 
3741 static void rtw89_pci_default_intr_mask_v2(struct rtw89_dev *rtwdev)
3742 {
3743 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3744 
3745 	rtwpci->ind_intrs = B_BE_HCI_AXIDMA_INT_EN0 |
3746 			    B_BE_HS0_IND_INT_EN0;
3747 	rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN;
3748 	rtwpci->intrs[0] = B_BE_RDU_CH1_INT_IMR_V1 |
3749 			   B_BE_RDU_CH0_INT_IMR_V1;
3750 	rtwpci->intrs[1] = B_BE_PCIE_RX_RX0P2_IMR0_V1 |
3751 			   B_BE_PCIE_RX_RPQ0_IMR0_V1;
3752 }
3753 
3754 static void rtw89_pci_low_power_intr_mask_v2(struct rtw89_dev *rtwdev)
3755 {
3756 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3757 
3758 	rtwpci->ind_intrs = B_BE_HS0_IND_INT_EN0 |
3759 			    B_BE_HS1_IND_INT_EN0;
3760 	rtwpci->halt_c2h_intrs = B_BE_HALT_C2H_INT_EN | B_BE_WDT_TIMEOUT_INT_EN;
3761 	rtwpci->intrs[0] = 0;
3762 	rtwpci->intrs[1] = B_BE_PCIE_RX_RX0P2_IMR0_V1 |
3763 			   B_BE_PCIE_RX_RPQ0_IMR0_V1;
3764 }
3765 
3766 void rtw89_pci_config_intr_mask_v2(struct rtw89_dev *rtwdev)
3767 {
3768 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3769 
3770 	if (rtwpci->under_recovery)
3771 		rtw89_pci_recovery_intr_mask_v2(rtwdev);
3772 	else if (rtwpci->low_power)
3773 		rtw89_pci_low_power_intr_mask_v2(rtwdev);
3774 	else
3775 		rtw89_pci_default_intr_mask_v2(rtwdev);
3776 }
3777 EXPORT_SYMBOL(rtw89_pci_config_intr_mask_v2);
3778 
3779 static int rtw89_pci_request_irq(struct rtw89_dev *rtwdev,
3780 				 struct pci_dev *pdev)
3781 {
3782 	unsigned long flags = 0;
3783 	int ret;
3784 
3785 	flags |= PCI_IRQ_INTX | PCI_IRQ_MSI;
3786 	ret = pci_alloc_irq_vectors(pdev, 1, 1, flags);
3787 	if (ret < 0) {
3788 		rtw89_err(rtwdev, "failed to alloc irq vectors, ret %d\n", ret);
3789 		goto err;
3790 	}
3791 
3792 	ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq,
3793 					rtw89_pci_interrupt_handler,
3794 					rtw89_pci_interrupt_threadfn,
3795 					IRQF_SHARED, KBUILD_MODNAME, rtwdev);
3796 	if (ret) {
3797 		rtw89_err(rtwdev, "failed to request threaded irq\n");
3798 		goto err_free_vector;
3799 	}
3800 
3801 	rtw89_chip_config_intr_mask(rtwdev, RTW89_PCI_INTR_MASK_RESET);
3802 
3803 	return 0;
3804 
3805 err_free_vector:
3806 	pci_free_irq_vectors(pdev);
3807 err:
3808 	return ret;
3809 }
3810 
3811 static void rtw89_pci_free_irq(struct rtw89_dev *rtwdev,
3812 			       struct pci_dev *pdev)
3813 {
3814 	devm_free_irq(rtwdev->dev, pdev->irq, rtwdev);
3815 	pci_free_irq_vectors(pdev);
3816 }
3817 
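/* Decode reflected-binary Gray code by XOR-folding the shifted value:
 * each binary bit is the XOR of all Gray bits at or above it,
 * e.g. Gray 0b110 decodes to binary 0b100 (4).
 */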
3818 static u16 gray_code_to_bin(u16 gray_code)
3819 {
3820 	u16 binary = gray_code;
3821 
3822 	while (gray_code) {
3823 		gray_code >>= 1;
3824 		binary ^= gray_code;
3825 	}
3826 
3827 	return binary;
3828 }
3829 
3830 static int rtw89_pci_filter_out(struct rtw89_dev *rtwdev)
3831 {
3832 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
3833 	struct pci_dev *pdev = rtwpci->pdev;
3834 	u16 val16, filter_out_val;
3835 	u32 val, phy_offset;
3836 	int ret;
3837 
3838 	if (rtwdev->chip->chip_id != RTL8852C)
3839 		return 0;
3840 
3841 	val = rtw89_read32_mask(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK);
3842 	if (val == B_AX_ASPM_CTRL_L1)
3843 		return 0;
3844 
3845 	ret = pci_read_config_dword(pdev, RTW89_PCIE_L1_STS_V1, &val);
3846 	if (ret)
3847 		return ret;
3848 
3849 	val = FIELD_GET(RTW89_BCFG_LINK_SPEED_MASK, val);
3850 	if (val == RTW89_PCIE_GEN1_SPEED) {
3851 		phy_offset = R_RAC_DIRECT_OFFSET_G1;
3852 	} else if (val == RTW89_PCIE_GEN2_SPEED) {
3853 		phy_offset = R_RAC_DIRECT_OFFSET_G2;
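		/* Gen2 links only: read back the gray-coded equalizer setting
		 * from the PHY, decode it to binary, and program it as the
		 * filter-out value before enabling power saving.
		 */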
3854 		val16 = rtw89_read16(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT);
3855 		rtw89_write16_set(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT,
3856 				  val16 | B_PCIE_BIT_PINOUT_DIS);
3857 		rtw89_write16_set(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT,
3858 				  val16 & ~B_PCIE_BIT_RD_SEL);
3859 
3860 		val16 = rtw89_read16_mask(rtwdev,
3861 					  phy_offset + RAC_ANA1F * RAC_MULT,
3862 					  FILTER_OUT_EQ_MASK);
3863 		val16 = gray_code_to_bin(val16);
3864 		filter_out_val = rtw89_read16(rtwdev, phy_offset + RAC_ANA24 *
3865 					      RAC_MULT);
3866 		filter_out_val &= ~REG_FILTER_OUT_MASK;
3867 		filter_out_val |= FIELD_PREP(REG_FILTER_OUT_MASK, val16);
3868 
3869 		rtw89_write16(rtwdev, phy_offset + RAC_ANA24 * RAC_MULT,
3870 			      filter_out_val);
3871 		rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0A * RAC_MULT,
3872 				  B_BAC_EQ_SEL);
3873 		rtw89_write16_set(rtwdev,
3874 				  R_RAC_DIRECT_OFFSET_G1 + RAC_ANA0C * RAC_MULT,
3875 				  B_PCIE_BIT_PSAVE);
3876 	} else {
3877 		return -EOPNOTSUPP;
3878 	}
3879 	rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0C * RAC_MULT,
3880 			  B_PCIE_BIT_PSAVE);
3881 
3882 	return 0;
3883 }
3884 
3885 static void rtw89_pci_clkreq_set(struct rtw89_dev *rtwdev, bool enable)
3886 {
3887 	const struct rtw89_pci_info *info = rtwdev->pci_info;
3888 	const struct rtw89_pci_gen_def *gen_def = info->gen_def;
3889 
3890 	if (rtw89_pci_disable_clkreq)
3891 		return;
3892 
3893 	gen_def->clkreq_set(rtwdev, enable);
3894 }
3895 
3896 static void rtw89_pci_clkreq_set_ax(struct rtw89_dev *rtwdev, bool enable)
3897 {
3898 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
3899 	int ret;
3900 
3901 	ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL,
3902 					  PCIE_CLKDLY_HW_30US);
3903 	if (ret)
3904 		rtw89_err(rtwdev, "failed to set CLKREQ Delay\n");
3905 
3906 	if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
3907 		if (enable)
3908 			ret = rtw89_pci_config_byte_set(rtwdev,
3909 							RTW89_PCIE_L1_CTRL,
3910 							RTW89_PCIE_BIT_CLK);
3911 		else
3912 			ret = rtw89_pci_config_byte_clr(rtwdev,
3913 							RTW89_PCIE_L1_CTRL,
3914 							RTW89_PCIE_BIT_CLK);
3915 		if (ret)
3916 			rtw89_err(rtwdev, "failed to %s CLKREQ_L1, ret=%d\n",
3917 				  enable ? "set" : "unset", ret);
3918 	} else if (chip_id == RTL8852C) {
3919 		rtw89_write32_set(rtwdev, R_AX_PCIE_LAT_CTRL,
3920 				  B_AX_CLK_REQ_SEL_OPT | B_AX_CLK_REQ_SEL);
3921 		if (enable)
3922 			rtw89_write32_set(rtwdev, R_AX_L1_CLK_CTRL,
3923 					  B_AX_CLK_REQ_N);
3924 		else
3925 			rtw89_write32_clr(rtwdev, R_AX_L1_CLK_CTRL,
3926 					  B_AX_CLK_REQ_N);
3927 	}
3928 }
3929 
3930 static void rtw89_pci_aspm_set(struct rtw89_dev *rtwdev, bool enable)
3931 {
3932 	const struct rtw89_pci_info *info = rtwdev->pci_info;
3933 	const struct rtw89_pci_gen_def *gen_def = info->gen_def;
3934 
3935 	if (rtw89_pci_disable_aspm_l1)
3936 		return;
3937 
3938 	gen_def->aspm_set(rtwdev, enable);
3939 }
3940 
3941 static void rtw89_pci_aspm_set_ax(struct rtw89_dev *rtwdev, bool enable)
3942 {
3943 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
3944 	u8 value = 0;
3945 	int ret;
3946 
3947 	ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, &value);
3948 	if (ret)
3949 		rtw89_warn(rtwdev, "failed to read ASPM Delay\n");
3950 
3951 	u8p_replace_bits(&value, PCIE_L1DLY_16US, RTW89_L1DLY_MASK);
3952 	u8p_replace_bits(&value, PCIE_L0SDLY_4US, RTW89_L0DLY_MASK);
3953 
3954 	ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, value);
3955 	if (ret)
3956 		rtw89_warn(rtwdev, "failed to write ASPM Delay\n");
3957 
3958 	if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
3959 		if (enable)
3960 			ret = rtw89_pci_config_byte_set(rtwdev,
3961 							RTW89_PCIE_L1_CTRL,
3962 							RTW89_PCIE_BIT_L1);
3963 		else
3964 			ret = rtw89_pci_config_byte_clr(rtwdev,
3965 							RTW89_PCIE_L1_CTRL,
3966 							RTW89_PCIE_BIT_L1);
3967 	} else if (chip_id == RTL8852C) {
3968 		if (enable)
3969 			rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1,
3970 					  B_AX_ASPM_CTRL_L1);
3971 		else
3972 			rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1,
3973 					  B_AX_ASPM_CTRL_L1);
3974 	}
3975 	if (ret)
3976 		rtw89_err(rtwdev, "failed to %s ASPM L1, ret=%d\n",
3977 			  enable ? "set" : "unset", ret);
3978 }
3979 
3980 static void rtw89_pci_recalc_int_mit(struct rtw89_dev *rtwdev)
3981 {
3982 	enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
3983 	const struct rtw89_pci_info *info = rtwdev->pci_info;
3984 	struct rtw89_traffic_stats *stats = &rtwdev->stats;
3985 	enum rtw89_tfc_lv tx_tfc_lv = stats->tx_tfc_lv;
3986 	enum rtw89_tfc_lv rx_tfc_lv = stats->rx_tfc_lv;
3987 	u32 val = 0;
3988 
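	/* Interrupt mitigation only pays off under high traffic; while
	 * scanning or idle it stays off so events are delivered promptly.
	 * AX chips coalesce up to half a ring of RXBDs or 2048us (32 units
	 * of 64us), presumably whichever threshold is hit first.
	 */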
3989 	if (rtwdev->scanning ||
3990 	    (tx_tfc_lv < RTW89_TFC_HIGH && rx_tfc_lv < RTW89_TFC_HIGH))
3991 		goto out;
3992 
3993 	if (chip_gen == RTW89_CHIP_BE)
3994 		val = B_BE_PCIE_MIT_RX0P2_EN | B_BE_PCIE_MIT_RX0P1_EN;
3995 	else
3996 		val = B_AX_RXMIT_RXP2_SEL | B_AX_RXMIT_RXP1_SEL |
3997 		      FIELD_PREP(B_AX_RXCOUNTER_MATCH_MASK, RTW89_PCI_RXBD_NUM_MAX / 2) |
3998 		      FIELD_PREP(B_AX_RXTIMER_UNIT_MASK, AX_RXTIMER_UNIT_64US) |
3999 		      FIELD_PREP(B_AX_RXTIMER_MATCH_MASK, 2048 / 64);
4000 
4001 out:
4002 	rtw89_write32(rtwdev, info->mit_addr, val);
4003 }
4004 
4005 static void rtw89_pci_link_cfg(struct rtw89_dev *rtwdev)
4006 {
4007 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
4008 	struct pci_dev *pdev = rtwpci->pdev;
4009 	u16 link_ctrl;
4010 	int ret;
4011 
4012 	/* Although the standard PCIe configuration space has a link
4013 	 * control register, by Realtek's design the driver must first
4014 	 * check whether the host supports CLKREQ/ASPM before enabling
4015 	 * the HW module.
4016 	 *
4017 	 * Two associated HW modules implement this: one accesses the
4018 	 * PCIe configuration space to follow the host settings, while
4019 	 * the other performs the actual CLKREQ/ASPM mechanisms and is
4020 	 * disabled by default, because a host may not support them and
4021 	 * wrong settings (e.g. CLKREQ# not bi-directional) could cause
4022 	 * the device to drop off the bus if the HW misbehaves.
4023 	 *
4024 	 * Hence the driver first checks that the PCIe configuration
4025 	 * space is synced and enabled, and only then turns on the
4026 	 * module that actually implements the mechanism.
4027 	 */
4028 	ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl);
4029 	if (ret) {
4030 		rtw89_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret);
4031 		return;
4032 	}
4033 
4034 	if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN)
4035 		rtw89_pci_clkreq_set(rtwdev, true);
4036 
4037 	if (link_ctrl & PCI_EXP_LNKCTL_ASPM_L1)
4038 		rtw89_pci_aspm_set(rtwdev, true);
4039 }
4040 
4041 static void rtw89_pci_l1ss_set(struct rtw89_dev *rtwdev, bool enable)
4042 {
4043 	const struct rtw89_pci_info *info = rtwdev->pci_info;
4044 	const struct rtw89_pci_gen_def *gen_def = info->gen_def;
4045 
4046 	if (rtw89_pci_disable_l1ss)
4047 		return;
4048 
4049 	gen_def->l1ss_set(rtwdev, enable);
4050 }
4051 
4052 static void rtw89_pci_l1ss_set_ax(struct rtw89_dev *rtwdev, bool enable)
4053 {
4054 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
4055 	int ret;
4056 
4057 	if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
4058 		if (enable)
4059 			ret = rtw89_pci_config_byte_set(rtwdev,
4060 							RTW89_PCIE_TIMER_CTRL,
4061 							RTW89_PCIE_BIT_L1SUB);
4062 		else
4063 			ret = rtw89_pci_config_byte_clr(rtwdev,
4064 							RTW89_PCIE_TIMER_CTRL,
4065 							RTW89_PCIE_BIT_L1SUB);
4066 		if (ret)
4067 			rtw89_err(rtwdev, "failed to %s L1SS, ret=%d\n",
4068 				  enable ? "set" : "unset", ret);
4069 	} else if (chip_id == RTL8852C) {
4070 		ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_L1SS_STS_V1,
4071 						RTW89_PCIE_BIT_ASPM_L11 |
4072 						RTW89_PCIE_BIT_PCI_L11);
4073 		if (ret)
4074 			rtw89_warn(rtwdev, "failed to unset ASPM L1.1, ret=%d\n", ret);
4075 		if (enable)
4076 			rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1,
4077 					  B_AX_L1SUB_DISABLE);
4078 		else
4079 			rtw89_write32_set(rtwdev, R_AX_PCIE_MIX_CFG_V1,
4080 					  B_AX_L1SUB_DISABLE);
4081 	}
4082 }
4083 
4084 static void rtw89_pci_l1ss_cfg(struct rtw89_dev *rtwdev)
4085 {
4086 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
4087 	struct pci_dev *pdev = rtwpci->pdev;
4088 	u32 l1ss_cap_ptr, l1ss_ctrl;
4089 
4090 	if (rtw89_pci_disable_l1ss)
4091 		return;
4092 
4093 	l1ss_cap_ptr = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
4094 	if (!l1ss_cap_ptr)
4095 		return;
4096 
4097 	pci_read_config_dword(pdev, l1ss_cap_ptr + PCI_L1SS_CTL1, &l1ss_ctrl);
4098 
4099 	if (l1ss_ctrl & PCI_L1SS_CTL1_L1SS_MASK)
4100 		rtw89_pci_l1ss_set(rtwdev, true);
4101 }
4102 
4103 static void rtw89_pci_cpl_timeout_cfg(struct rtw89_dev *rtwdev)
4104 {
4105 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
4106 	struct pci_dev *pdev = rtwpci->pdev;
4107 
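	/* Disable the PCIe completion timeout; presumably the device can
	 * stall completions (e.g. while exiting low-power states) for
	 * longer than the default timeout tolerates.
	 */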
4108 	pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
4109 				 PCI_EXP_DEVCTL2_COMP_TMOUT_DIS);
4110 }
4111 
4112 static int rtw89_pci_poll_io_idle_ax(struct rtw89_dev *rtwdev)
4113 {
4114 	int ret = 0;
4115 	u32 sts;
4116 	u32 busy = B_AX_PCIEIO_BUSY | B_AX_PCIEIO_TX_BUSY | B_AX_PCIEIO_RX_BUSY;
4117 
4118 	ret = read_poll_timeout_atomic(rtw89_read32, sts, (sts & busy) == 0x0,
4119 				       10, 1000, false, rtwdev,
4120 				       R_AX_PCIE_DMA_BUSY1);
4121 	if (ret) {
4122 		rtw89_err(rtwdev, "pci dmach busy1 0x%X\n",
4123 			  rtw89_read32(rtwdev, R_AX_PCIE_DMA_BUSY1));
4124 		return -EINVAL;
4125 	}
4126 	return ret;
4127 }
4128 
4129 static int rtw89_pci_lv1rst_stop_dma_ax(struct rtw89_dev *rtwdev)
4130 {
4131 	u32 val;
4132 	int ret;
4133 
4134 	if (rtwdev->chip->chip_id == RTL8852C)
4135 		return 0;
4136 
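	/* Stop all PCIe DMA and wait for the IO paths to go idle; if TX/RX
	 * is reported stuck, bounce the corresponding HCI DMA engine and
	 * poll one more time before giving up.
	 */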
4137 	rtw89_pci_ctrl_dma_all(rtwdev, false);
4138 	ret = rtw89_pci_poll_io_idle_ax(rtwdev);
4139 	if (ret) {
4140 		val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG);
4141 		rtw89_debug(rtwdev, RTW89_DBG_HCI,
4142 			    "[PCIe] poll_io_idle fail, before 0x%08x: 0x%08x\n",
4143 			    R_AX_DBG_ERR_FLAG, val);
4144 		if (val & B_AX_TX_STUCK || val & B_AX_PCIE_TXBD_LEN0)
4145 			rtw89_mac_ctrl_hci_dma_tx(rtwdev, false);
4146 		if (val & B_AX_RX_STUCK)
4147 			rtw89_mac_ctrl_hci_dma_rx(rtwdev, false);
4148 		rtw89_mac_ctrl_hci_dma_trx(rtwdev, true);
4149 		ret = rtw89_pci_poll_io_idle_ax(rtwdev);
4150 		val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG);
4151 		rtw89_debug(rtwdev, RTW89_DBG_HCI,
4152 			    "[PCIe] poll_io_idle fail, after 0x%08x: 0x%08x\n",
4153 			    R_AX_DBG_ERR_FLAG, val);
4154 	}
4155 
4156 	return ret;
4157 }
4158 
4159 static int rtw89_pci_lv1rst_start_dma_ax(struct rtw89_dev *rtwdev)
4160 {
4161 	int ret;
4162 
4163 	if (rtwdev->chip->chip_id == RTL8852C)
4164 		return 0;
4165 
4166 	rtw89_mac_ctrl_hci_dma_trx(rtwdev, false);
4167 	rtw89_mac_ctrl_hci_dma_trx(rtwdev, true);
4168 	rtw89_pci_clr_idx_all(rtwdev);
4169 
4170 	ret = rtw89_pci_rst_bdram_ax(rtwdev);
4171 	if (ret)
4172 		return ret;
4173 
4174 	rtw89_pci_ctrl_dma_all(rtwdev, true);
4175 	return ret;
4176 }
4177 
4178 static int rtw89_pci_ops_mac_lv1_recovery(struct rtw89_dev *rtwdev,
4179 					  enum rtw89_lv1_rcvy_step step)
4180 {
4181 	const struct rtw89_pci_info *info = rtwdev->pci_info;
4182 	const struct rtw89_pci_gen_def *gen_def = info->gen_def;
4183 	int ret;
4184 
4185 	switch (step) {
4186 	case RTW89_LV1_RCVY_STEP_1:
4187 		ret = gen_def->lv1rst_stop_dma(rtwdev);
4188 		if (ret)
4189 			rtw89_err(rtwdev, "lv1 rcvy pci stop dma fail\n");
4190 
4191 		break;
4192 
4193 	case RTW89_LV1_RCVY_STEP_2:
4194 		ret = gen_def->lv1rst_start_dma(rtwdev);
4195 		if (ret)
4196 			rtw89_err(rtwdev, "lv1 rcvy pci start dma fail\n");
4197 		break;
4198 
4199 	default:
4200 		return -EINVAL;
4201 	}
4202 
4203 	return ret;
4204 }
4205 
4206 static void rtw89_pci_ops_dump_err_status(struct rtw89_dev *rtwdev)
4207 {
4208 	if (rtwdev->chip->chip_gen == RTW89_CHIP_BE)
4209 		return;
4210 
4211 	if (rtwdev->chip->chip_id == RTL8852C) {
4212 		rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n",
4213 			   rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG_V1));
4214 		rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n",
4215 			   rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG_V1));
4216 	} else {
4217 		rtw89_info(rtwdev, "R_AX_RPQ_RXBD_IDX=0x%08x\n",
4218 			   rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX));
4219 		rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n",
4220 			   rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG));
4221 		rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n",
4222 			   rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG));
4223 	}
4224 }
4225 
4226 static int rtw89_pci_napi_poll(struct napi_struct *napi, int budget)
4227 {
4228 	struct rtw89_dev *rtwdev = container_of(napi, struct rtw89_dev, napi);
4229 	struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
4230 	const struct rtw89_pci_info *info = rtwdev->pci_info;
4231 	const struct rtw89_pci_gen_def *gen_def = info->gen_def;
4232 	unsigned long flags;
4233 	int work_done;
4234 
4235 	rtwdev->napi_budget_countdown = budget;
4236 
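	/* Drain the release-point queue first, then the RX queue; the
	 * matching ISR bits are cleared just before each poll so that
	 * events arriving mid-poll are not lost.
	 */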
4237 	rtw89_write32(rtwdev, gen_def->isr_clear_rpq.addr, gen_def->isr_clear_rpq.data);
4238 	work_done = rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown);
4239 	if (work_done == budget)
4240 		return budget;
4241 
4242 	rtw89_write32(rtwdev, gen_def->isr_clear_rxq.addr, gen_def->isr_clear_rxq.data);
4243 	work_done += rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown);
4244 	if (work_done < budget && napi_complete_done(napi, work_done)) {
4245 		spin_lock_irqsave(&rtwpci->irq_lock, flags);
4246 		if (likely(rtwpci->running))
4247 			rtw89_chip_enable_intr(rtwdev, rtwpci);
4248 		spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
4249 	}
4250 
4251 	return work_done;
4252 }
4253 
4254 static
4255 void rtw89_check_pci_ssid_quirks(struct rtw89_dev *rtwdev,
4256 				 struct pci_dev *pdev,
4257 				 const struct rtw89_pci_ssid_quirk *ssid_quirks)
4258 {
4259 	int i;
4260 
4261 	if (!ssid_quirks)
4262 		return;
4263 
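	/* The quirk table ends with an all-zero vendor/device entry; the
	 * bound of 200 iterations is only a safety cap on the walk.
	 */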
4264 	for (i = 0; i < 200; i++, ssid_quirks++) {
4265 		if (ssid_quirks->vendor == 0 && ssid_quirks->device == 0)
4266 			break;
4267 
4268 		if (ssid_quirks->vendor != pdev->vendor ||
4269 		    ssid_quirks->device != pdev->device ||
4270 		    ssid_quirks->subsystem_vendor != pdev->subsystem_vendor ||
4271 		    ssid_quirks->subsystem_device != pdev->subsystem_device)
4272 			continue;
4273 
4274 		bitmap_or(rtwdev->quirks, rtwdev->quirks, &ssid_quirks->bitmap,
4275 			  NUM_OF_RTW89_QUIRKS);
4276 		rtwdev->custid = ssid_quirks->custid;
4277 		break;
4278 	}
4279 
4280 	rtw89_debug(rtwdev, RTW89_DBG_HCI, "quirks=%*ph custid=%d\n",
4281 		    (int)sizeof(rtwdev->quirks), rtwdev->quirks, rtwdev->custid);
4282 }
4283 
4284 static int __maybe_unused rtw89_pci_suspend(struct device *dev)
4285 {
4286 	struct ieee80211_hw *hw = dev_get_drvdata(dev);
4287 	struct rtw89_dev *rtwdev = hw->priv;
4288 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
4289 
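	/* B_AX_R_DIS_PRST lives in an apparently write-locked register; the
	 * B_AX_WLOCK_1C_BIT6 set/clear pair brackets the update to unlock
	 * and re-lock it.
	 */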
4290 	rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
4291 	rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST);
4292 	rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
4293 	if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
4294 		rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
4295 				  B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
4296 		rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
4297 				  B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG);
4298 	} else {
4299 		rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1,
4300 				  B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN);
4301 	}
4302 
4303 	return 0;
4304 }
4305 
4306 static void rtw89_pci_l2_hci_ldo(struct rtw89_dev *rtwdev)
4307 {
4308 	if (rtwdev->chip->chip_id == RTL8852C)
4309 		return;
4310 
4311 	/* The hardware needs this register written twice for the setting to take effect */
4312 	rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE,
4313 				    RTW89_PCIE_BIT_CFG_RST_MSTATE);
4314 	rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE,
4315 				    RTW89_PCIE_BIT_CFG_RST_MSTATE);
4316 }
4317 
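/* Common PCIe setup shared by probe and resume: EQ workaround, 8852C
 * filter-out calibration, completion-timeout, CLKREQ/ASPM and L1SS
 * configuration. On resume the DAC enable bit is re-applied in case the
 * config-space write was lost across suspend.
 */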
4318 void rtw89_pci_basic_cfg(struct rtw89_dev *rtwdev, bool resume)
4319 {
4320 	if (resume)
4321 		rtw89_pci_cfg_dac(rtwdev, false);
4322 
4323 	rtw89_pci_disable_eq(rtwdev);
4324 	rtw89_pci_filter_out(rtwdev);
4325 	rtw89_pci_cpl_timeout_cfg(rtwdev);
4326 	rtw89_pci_link_cfg(rtwdev);
4327 	rtw89_pci_l1ss_cfg(rtwdev);
4328 }
4329 
4330 static int __maybe_unused rtw89_pci_resume(struct device *dev)
4331 {
4332 	struct ieee80211_hw *hw = dev_get_drvdata(dev);
4333 	struct rtw89_dev *rtwdev = hw->priv;
4334 	enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
4335 
4336 	rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
4337 	rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST);
4338 	rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
4339 	if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
4340 		rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL,
4341 				  B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
4342 		rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1,
4343 				  B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG);
4344 	} else {
4345 		rtw89_write32_set(rtwdev, R_AX_PCIE_PS_CTRL_V1,
4346 				  B_AX_CMAC_EXIT_L1_EN | B_AX_DMAC0_EXIT_L1_EN);
4347 		rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL_V1,
4348 				  B_AX_SEL_REQ_ENTR_L1);
4349 	}
4350 	rtw89_pci_l2_hci_ldo(rtwdev);
4351 
4352 	rtw89_pci_basic_cfg(rtwdev, true);
4353 
4354 	return 0;
4355 }
4356 
4357 SIMPLE_DEV_PM_OPS(rtw89_pm_ops, rtw89_pci_suspend, rtw89_pci_resume);
4358 EXPORT_SYMBOL(rtw89_pm_ops);
4359 
4360 static pci_ers_result_t rtw89_pci_io_error_detected(struct pci_dev *pdev,
4361 						    pci_channel_state_t state)
4362 {
4363 	struct net_device *netdev = pci_get_drvdata(pdev);
4364 
4365 	netif_device_detach(netdev);
4366 
4367 	return PCI_ERS_RESULT_NEED_RESET;
4368 }
4369 
4370 static pci_ers_result_t rtw89_pci_io_slot_reset(struct pci_dev *pdev)
4371 {
4372 	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
4373 	struct rtw89_dev *rtwdev = hw->priv;
4374 
4375 	rtw89_ser_notify(rtwdev, MAC_AX_ERR_ASSERTION);
4376 
4377 	return PCI_ERS_RESULT_RECOVERED;
4378 }
4379 
4380 static void rtw89_pci_io_resume(struct pci_dev *pdev)
4381 {
4382 	struct net_device *netdev = pci_get_drvdata(pdev);
4383 
4384 	/* ack any pending wake events, disable PME */
4385 	pci_enable_wake(pdev, PCI_D0, 0);
4386 
4387 	netif_device_attach(netdev);
4388 }
4389 
4390 const struct pci_error_handlers rtw89_pci_err_handler = {
4391 	.error_detected = rtw89_pci_io_error_detected,
4392 	.slot_reset = rtw89_pci_io_slot_reset,
4393 	.resume = rtw89_pci_io_resume,
4394 };
4395 EXPORT_SYMBOL(rtw89_pci_err_handler);
4396 
4397 const struct rtw89_pci_gen_def rtw89_pci_gen_ax = {
4398 	.isr_rdu = B_AX_RDU_INT,
4399 	.isr_halt_c2h = B_AX_HALT_C2H_INT_EN,
4400 	.isr_wdt_timeout = B_AX_WDT_TIMEOUT_INT_EN,
4401 	.isr_clear_rpq = {R_AX_PCIE_HISR00, B_AX_RPQDMA_INT | B_AX_RPQBD_FULL_INT},
4402 	.isr_clear_rxq = {R_AX_PCIE_HISR00, B_AX_RXP1DMA_INT | B_AX_RXDMA_INT |
4403 					    B_AX_RDU_INT},
4404 
4405 	.mac_pre_init = rtw89_pci_ops_mac_pre_init_ax,
4406 	.mac_pre_deinit = rtw89_pci_ops_mac_pre_deinit_ax,
4407 	.mac_post_init = rtw89_pci_ops_mac_post_init_ax,
4408 
4409 	.clr_idx_all = rtw89_pci_clr_idx_all_ax,
4410 	.rst_bdram = rtw89_pci_rst_bdram_ax,
4411 
4412 	.lv1rst_stop_dma = rtw89_pci_lv1rst_stop_dma_ax,
4413 	.lv1rst_start_dma = rtw89_pci_lv1rst_start_dma_ax,
4414 
4415 	.ctrl_txdma_ch = rtw89_pci_ctrl_txdma_ch_ax,
4416 	.ctrl_txdma_fw_ch = rtw89_pci_ctrl_txdma_fw_ch_ax,
4417 	.poll_txdma_ch_idle = rtw89_pci_poll_txdma_ch_idle_ax,
4418 
4419 	.aspm_set = rtw89_pci_aspm_set_ax,
4420 	.clkreq_set = rtw89_pci_clkreq_set_ax,
4421 	.l1ss_set = rtw89_pci_l1ss_set_ax,
4422 
4423 	.disable_eq = rtw89_pci_disable_eq_ax,
4424 	.power_wake = rtw89_pci_power_wake_ax,
4425 };
4426 EXPORT_SYMBOL(rtw89_pci_gen_ax);
4427 
4428 static const struct rtw89_hci_ops rtw89_pci_ops = {
4429 	.tx_write	= rtw89_pci_ops_tx_write,
4430 	.tx_kick_off	= rtw89_pci_ops_tx_kick_off,
4431 	.flush_queues	= rtw89_pci_ops_flush_queues,
4432 	.reset		= rtw89_pci_ops_reset,
4433 	.start		= rtw89_pci_ops_start,
4434 	.stop		= rtw89_pci_ops_stop,
4435 	.pause		= rtw89_pci_ops_pause,
4436 	.switch_mode	= rtw89_pci_ops_switch_mode,
4437 	.recalc_int_mit = rtw89_pci_recalc_int_mit,
4438 
4439 	.read8		= rtw89_pci_ops_read8,
4440 	.read16		= rtw89_pci_ops_read16,
4441 	.read32		= rtw89_pci_ops_read32,
4442 	.write8		= rtw89_pci_ops_write8,
4443 	.write16	= rtw89_pci_ops_write16,
4444 	.write32	= rtw89_pci_ops_write32,
4445 
4446 	.mac_pre_init	= rtw89_pci_ops_mac_pre_init,
4447 	.mac_pre_deinit	= rtw89_pci_ops_mac_pre_deinit,
4448 	.mac_post_init	= rtw89_pci_ops_mac_post_init,
4449 	.deinit		= rtw89_pci_ops_deinit,
4450 
4451 	.check_and_reclaim_tx_resource = rtw89_pci_check_and_reclaim_tx_resource,
4452 	.mac_lv1_rcvy	= rtw89_pci_ops_mac_lv1_recovery,
4453 	.dump_err_status = rtw89_pci_ops_dump_err_status,
4454 	.napi_poll	= rtw89_pci_napi_poll,
4455 
4456 	.recovery_start = rtw89_pci_ops_recovery_start,
4457 	.recovery_complete = rtw89_pci_ops_recovery_complete,
4458 
4459 	.ctrl_txdma_ch	= rtw89_pci_ctrl_txdma_ch,
4460 	.ctrl_txdma_fw_ch = rtw89_pci_ctrl_txdma_fw_ch,
4461 	.ctrl_trxhci	= rtw89_pci_ctrl_dma_trx,
4462 	.poll_txdma_ch_idle = rtw89_pci_poll_txdma_ch_idle,
4463 
4464 	.clr_idx_all	= rtw89_pci_clr_idx_all,
4465 	.clear		= rtw89_pci_clear_resource,
4466 	.disable_intr	= rtw89_pci_disable_intr_lock,
4467 	.enable_intr	= rtw89_pci_enable_intr_lock,
4468 	.rst_bdram	= rtw89_pci_reset_bdram,
4469 };
4470 
4471 int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
4472 {
4473 	struct rtw89_dev *rtwdev;
4474 	const struct rtw89_driver_info *info;
4475 	const struct rtw89_pci_info *pci_info;
4476 	int ret;
4477 
4478 	info = (const struct rtw89_driver_info *)id->driver_data;
4479 
4480 	rtwdev = rtw89_alloc_ieee80211_hw(&pdev->dev,
4481 					  sizeof(struct rtw89_pci),
4482 					  info->chip, info->variant);
4483 	if (!rtwdev) {
4484 		dev_err(&pdev->dev, "failed to allocate hw\n");
4485 		return -ENOMEM;
4486 	}
4487 
4488 	pci_info = info->bus.pci;
4489 
4490 	rtwdev->pci_info = pci_info;
4491 	rtwdev->hci.ops = &rtw89_pci_ops;
4492 	rtwdev->hci.type = RTW89_HCI_TYPE_PCIE;
4493 	rtwdev->hci.dle_type = RTW89_HCI_DLE_TYPE_PCIE;
4494 	rtwdev->hci.rpwm_addr = pci_info->rpwm_addr;
4495 	rtwdev->hci.cpwm_addr = pci_info->cpwm_addr;
4496 
4497 	rtw89_check_quirks(rtwdev, info->quirks);
4498 	rtw89_check_pci_ssid_quirks(rtwdev, pdev, pci_info->ssid_quirks);
4499 
4500 	SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);
4501 
4502 	ret = rtw89_core_init(rtwdev);
4503 	if (ret) {
4504 		rtw89_err(rtwdev, "failed to initialise core\n");
4505 		goto err_release_hw;
4506 	}
4507 
4508 	ret = rtw89_pci_claim_device(rtwdev, pdev);
4509 	if (ret) {
4510 		rtw89_err(rtwdev, "failed to claim pci device\n");
4511 		goto err_core_deinit;
4512 	}
4513 
4514 	ret = rtw89_pci_setup_resource(rtwdev, pdev);
4515 	if (ret) {
4516 		rtw89_err(rtwdev, "failed to setup pci resource\n");
4517 		goto err_declaim_pci;
4518 	}
4519 
4520 	ret = rtw89_chip_info_setup(rtwdev);
4521 	if (ret) {
4522 		rtw89_err(rtwdev, "failed to setup chip information\n");
4523 		goto err_clear_resource;
4524 	}
4525 
4526 	rtw89_pci_basic_cfg(rtwdev, false);
4527 
4528 	ret = rtw89_core_napi_init(rtwdev);
4529 	if (ret) {
4530 		rtw89_err(rtwdev, "failed to init napi\n");
4531 		goto err_clear_resource;
4532 	}
4533 
4534 	ret = rtw89_pci_request_irq(rtwdev, pdev);
4535 	if (ret) {
4536 		rtw89_err(rtwdev, "failed to request pci irq\n");
4537 		goto err_deinit_napi;
4538 	}
4539 
4540 	ret = rtw89_core_register(rtwdev);
4541 	if (ret) {
4542 		rtw89_err(rtwdev, "failed to register core\n");
4543 		goto err_free_irq;
4544 	}
4545 
4546 	set_bit(RTW89_FLAG_PROBE_DONE, rtwdev->flags);
4547 
4548 	return 0;
4549 
4550 err_free_irq:
4551 	rtw89_pci_free_irq(rtwdev, pdev);
4552 err_deinit_napi:
4553 	rtw89_core_napi_deinit(rtwdev);
4554 err_clear_resource:
4555 	rtw89_pci_clear_resource(rtwdev, pdev);
4556 err_declaim_pci:
4557 	rtw89_pci_declaim_device(rtwdev, pdev);
4558 err_core_deinit:
4559 	rtw89_core_deinit(rtwdev);
4560 err_release_hw:
4561 	rtw89_free_ieee80211_hw(rtwdev);
4562 
4563 	return ret;
4564 }
4565 EXPORT_SYMBOL(rtw89_pci_probe);
4566 
4567 void rtw89_pci_remove(struct pci_dev *pdev)
4568 {
4569 	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
4570 	struct rtw89_dev *rtwdev;
4571 
4572 	rtwdev = hw->priv;
4573 
4574 	rtw89_pci_free_irq(rtwdev, pdev);
4575 	rtw89_core_napi_deinit(rtwdev);
4576 	rtw89_core_unregister(rtwdev);
4577 	rtw89_pci_clear_resource(rtwdev, pdev);
4578 	rtw89_pci_declaim_device(rtwdev, pdev);
4579 	rtw89_core_deinit(rtwdev);
4580 	rtw89_free_ieee80211_hw(rtwdev);
4581 }
4582 EXPORT_SYMBOL(rtw89_pci_remove);
4583 
4584 MODULE_AUTHOR("Realtek Corporation");
4585 MODULE_DESCRIPTION("Realtek PCI 802.11ax wireless driver");
4586 MODULE_LICENSE("Dual BSD/GPL");
4587