// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019  Realtek Corporation
 */

#include "main.h"
#include "mac.h"
#include "reg.h"
#include "fw.h"
#include "debug.h"
#include "sdio.h"

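/*
 * Program the MAC-side channel settings: the 20/40 MHz TX subcarrier
 * indexes, the RFMOD bandwidth field and, on 11ac-generation parts, the
 * MAC clock selection and the CCK check bit used on 5 GHz channels.
 *
 * Illustrative call only (real callers live in the chip/PHY code), e.g.
 * switching to a 5 GHz primary channel with 80 MHz bandwidth:
 *
 *	rtw_set_channel_mac(rtwdev, 36, RTW_CHANNEL_WIDTH_80, RTW_SC_20_UPPER);
 */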
void rtw_set_channel_mac(struct rtw_dev *rtwdev, u8 channel, u8 bw,
			 u8 primary_ch_idx)
{
	u8 txsc40 = 0, txsc20 = 0;
	u32 value32;
	u8 value8;

	txsc20 = primary_ch_idx;
	if (bw == RTW_CHANNEL_WIDTH_80) {
		if (txsc20 == RTW_SC_20_UPPER || txsc20 == RTW_SC_20_UPMOST)
			txsc40 = RTW_SC_40_UPPER;
		else
			txsc40 = RTW_SC_40_LOWER;
	}
	rtw_write8(rtwdev, REG_DATA_SC,
		   BIT_TXSC_20M(txsc20) | BIT_TXSC_40M(txsc40));

	value32 = rtw_read32(rtwdev, REG_WMAC_TRXPTCL_CTL);
	value32 &= ~BIT_RFMOD;
	switch (bw) {
	case RTW_CHANNEL_WIDTH_80:
		value32 |= BIT_RFMOD_80M;
		break;
	case RTW_CHANNEL_WIDTH_40:
		value32 |= BIT_RFMOD_40M;
		break;
	case RTW_CHANNEL_WIDTH_20:
	default:
		break;
	}
	rtw_write32(rtwdev, REG_WMAC_TRXPTCL_CTL, value32);

	if (rtw_chip_wcpu_11n(rtwdev))
		return;

	value32 = rtw_read32(rtwdev, REG_AFE_CTRL1) & ~(BIT_MAC_CLK_SEL);
	value32 |= (MAC_CLK_HW_DEF_80M << BIT_SHIFT_MAC_CLK_SEL);
	rtw_write32(rtwdev, REG_AFE_CTRL1, value32);

	rtw_write8(rtwdev, REG_USTIME_TSF, MAC_CLK_SPEED);
	rtw_write8(rtwdev, REG_USTIME_EDCA, MAC_CLK_SPEED);

	value8 = rtw_read8(rtwdev, REG_CCK_CHECK);
	value8 = value8 & ~BIT_CHECK_CCK_EN;
	if (IS_CH_5G_BAND(channel))
		value8 |= BIT_CHECK_CCK_EN;
	rtw_write8(rtwdev, REG_CCK_CHECK, value8);
}
EXPORT_SYMBOL(rtw_set_channel_mac);

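/*
 * Pre-power-on configuration: bus-specific suspend handling for
 * PCIe/SDIO, PAPE/LNAON pin-mux selection, and holding BB/RF in reset
 * before the power-on sequence runs.  11n-generation parts only need
 * the LDO/SPS regulator selection.
 */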
static int rtw_mac_pre_system_cfg(struct rtw_dev *rtwdev)
{
	unsigned int retry;
	u32 value32;
	u8 value8;

	rtw_write8(rtwdev, REG_RSV_CTRL, 0);

	if (rtw_chip_wcpu_11n(rtwdev)) {
		if (rtw_read32(rtwdev, REG_SYS_CFG1) & BIT_LDO)
			rtw_write8(rtwdev, REG_LDO_SWR_CTRL, LDO_SEL);
		else
			rtw_write8(rtwdev, REG_LDO_SWR_CTRL, SPS_SEL);
		return 0;
	}

	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_PCIE:
		rtw_write32_set(rtwdev, REG_HCI_OPT_CTRL, BIT_USB_SUS_DIS);
		break;
	case RTW_HCI_TYPE_SDIO:
		rtw_write8_clr(rtwdev, REG_SDIO_HSUS_CTRL, BIT_HCI_SUS_REQ);

		for (retry = 0; retry < RTW_PWR_POLLING_CNT; retry++) {
			if (rtw_read8(rtwdev, REG_SDIO_HSUS_CTRL) & BIT_HCI_RESUME_RDY)
				break;

			usleep_range(10, 50);
		}

		if (retry == RTW_PWR_POLLING_CNT) {
			rtw_err(rtwdev, "failed to poll REG_SDIO_HSUS_CTRL[1]");
			return -ETIMEDOUT;
		}

		if (rtw_sdio_is_sdio30_supported(rtwdev))
			rtw_write8_set(rtwdev, REG_HCI_OPT_CTRL + 2,
				       BIT_SDIO_PAD_E5 >> 16);
		else
			rtw_write8_clr(rtwdev, REG_HCI_OPT_CTRL + 2,
				       BIT_SDIO_PAD_E5 >> 16);
		break;
	case RTW_HCI_TYPE_USB:
		break;
	default:
		return -EINVAL;
	}

	/* config PIN Mux */
	value32 = rtw_read32(rtwdev, REG_PAD_CTRL1);
	value32 |= BIT_PAPE_WLBT_SEL | BIT_LNAON_WLBT_SEL;
	rtw_write32(rtwdev, REG_PAD_CTRL1, value32);

	value32 = rtw_read32(rtwdev, REG_LED_CFG);
	value32 &= ~(BIT_PAPE_SEL_EN | BIT_LNAON_SEL_EN);
	rtw_write32(rtwdev, REG_LED_CFG, value32);

	value32 = rtw_read32(rtwdev, REG_GPIO_MUXCFG);
	value32 |= BIT_WLRFE_4_5_EN;
	rtw_write32(rtwdev, REG_GPIO_MUXCFG, value32);

	/* disable BB/RF */
	value8 = rtw_read8(rtwdev, REG_SYS_FUNC_EN);
	value8 &= ~(BIT_FEN_BB_RSTB | BIT_FEN_BB_GLB_RST);
	rtw_write8(rtwdev, REG_SYS_FUNC_EN, value8);

	value8 = rtw_read8(rtwdev, REG_RF_CTRL);
	value8 &= ~(BIT_RF_SDM_RSTB | BIT_RF_RSTB | BIT_RF_EN);
	rtw_write8(rtwdev, REG_RF_CTRL, value8);

	value32 = rtw_read32(rtwdev, REG_WLRF1);
	value32 &= ~BIT_WLRF1_BBRF_EN;
	rtw_write32(rtwdev, REG_WLRF1, value32);

	return 0;
}

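/*
 * Power-sequence polling helper: re-read an 8-bit register until the
 * masked value matches the target, giving up after
 * 50 * RTW_PWR_POLLING_CNT microseconds.  Returns true on success.
 */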
static bool do_pwr_poll_cmd(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 target)
{
	u32 val;

	target &= mask;

	return read_poll_timeout_atomic(rtw_read8, val, (val & mask) == target,
					50, 50 * RTW_PWR_POLLING_CNT, false,
					rtwdev, addr) == 0;
}

static int rtw_pwr_cmd_polling(struct rtw_dev *rtwdev,
			       const struct rtw_pwr_seq_cmd *cmd)
{
	u8 value;
	u32 offset;

	if (cmd->base == RTW_PWR_ADDR_SDIO)
		offset = cmd->offset | SDIO_LOCAL_OFFSET;
	else
		offset = cmd->offset;

	if (do_pwr_poll_cmd(rtwdev, offset, cmd->mask, cmd->value))
		return 0;

	if (rtw_hci_type(rtwdev) != RTW_HCI_TYPE_PCIE)
		goto err;

	/* if PCIE, toggle BIT_PFM_WOWL and try again */
	value = rtw_read8(rtwdev, REG_SYS_PW_CTRL);
	if (rtwdev->chip->id == RTW_CHIP_TYPE_8723D)
		rtw_write8(rtwdev, REG_SYS_PW_CTRL, value & ~BIT_PFM_WOWL);
	rtw_write8(rtwdev, REG_SYS_PW_CTRL, value | BIT_PFM_WOWL);
	rtw_write8(rtwdev, REG_SYS_PW_CTRL, value & ~BIT_PFM_WOWL);
	if (rtwdev->chip->id == RTW_CHIP_TYPE_8723D)
		rtw_write8(rtwdev, REG_SYS_PW_CTRL, value | BIT_PFM_WOWL);

	if (do_pwr_poll_cmd(rtwdev, offset, cmd->mask, cmd->value))
		return 0;

err:
	rtw_err(rtwdev, "failed to poll offset=0x%x mask=0x%x value=0x%x\n",
		offset, cmd->mask, cmd->value);
	return -EBUSY;
}

static int rtw_sub_pwr_seq_parser(struct rtw_dev *rtwdev, u8 intf_mask,
				  u8 cut_mask,
				  const struct rtw_pwr_seq_cmd *cmd)
{
	const struct rtw_pwr_seq_cmd *cur_cmd;
	u32 offset;
	u8 value;

	for (cur_cmd = cmd; cur_cmd->cmd != RTW_PWR_CMD_END; cur_cmd++) {
		if (!(cur_cmd->intf_mask & intf_mask) ||
		    !(cur_cmd->cut_mask & cut_mask))
			continue;

		switch (cur_cmd->cmd) {
		case RTW_PWR_CMD_WRITE:
			offset = cur_cmd->offset;

			if (cur_cmd->base == RTW_PWR_ADDR_SDIO)
				offset |= SDIO_LOCAL_OFFSET;

			value = rtw_read8(rtwdev, offset);
			value &= ~cur_cmd->mask;
			value |= (cur_cmd->value & cur_cmd->mask);
			rtw_write8(rtwdev, offset, value);
			break;
		case RTW_PWR_CMD_POLLING:
			if (rtw_pwr_cmd_polling(rtwdev, cur_cmd))
				return -EBUSY;
			break;
		case RTW_PWR_CMD_DELAY:
			if (cur_cmd->value == RTW_PWR_DELAY_US)
				udelay(cur_cmd->offset);
			else
				mdelay(cur_cmd->offset);
			break;
		case RTW_PWR_CMD_READ:
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

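/*
 * Walk a NULL-terminated array of power sub-sequences and run each one
 * through rtw_sub_pwr_seq_parser() with the interface and cut masks
 * that match this device.
 */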
static int rtw_pwr_seq_parser(struct rtw_dev *rtwdev,
			      const struct rtw_pwr_seq_cmd **cmd_seq)
{
	u8 cut_mask;
	u8 intf_mask;
	u8 cut;
	u32 idx = 0;
	const struct rtw_pwr_seq_cmd *cmd;
	int ret;

	cut = rtwdev->hal.cut_version;
	cut_mask = cut_version_to_mask(cut);
	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_PCIE:
		intf_mask = RTW_PWR_INTF_PCI_MSK;
		break;
	case RTW_HCI_TYPE_USB:
		intf_mask = RTW_PWR_INTF_USB_MSK;
		break;
	case RTW_HCI_TYPE_SDIO:
		intf_mask = RTW_PWR_INTF_SDIO_MSK;
		break;
	default:
		return -EINVAL;
	}

	do {
		cmd = cmd_seq[idx];
		if (!cmd)
			break;

		ret = rtw_sub_pwr_seq_parser(rtwdev, intf_mask, cut_mask, cmd);
		if (ret)
			return ret;

		idx++;
	} while (1);

	return 0;
}

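/*
 * Run the chip's power-on or power-off sequence.  The current power
 * state is inferred from REG_CR (0xea reads as powered off); if the
 * requested state already matches, -EALREADY is returned so the caller
 * can decide whether to power-cycle.  SDIO interrupts are masked while
 * the sequence runs.
 */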
static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	const struct rtw_pwr_seq_cmd **pwr_seq;
	u32 imr = 0;
	u8 rpwm;
	bool cur_pwr;
	int ret;

	if (rtw_chip_wcpu_11ac(rtwdev)) {
		rpwm = rtw_read8(rtwdev, rtwdev->hci.rpwm_addr);

		/* Check whether the FW still exists */
		if (rtw_read16(rtwdev, REG_MCUFW_CTRL) == 0xC078) {
			rpwm = (rpwm ^ BIT_RPWM_TOGGLE) & BIT_RPWM_TOGGLE;
			rtw_write8(rtwdev, rtwdev->hci.rpwm_addr, rpwm);
		}
	}

	if (rtw_read8(rtwdev, REG_CR) == 0xea)
		cur_pwr = false;
	else if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB &&
		 (rtw_read8(rtwdev, REG_SYS_STATUS1 + 1) & BIT(0)))
		cur_pwr = false;
	else
		cur_pwr = true;

	if (pwr_on == cur_pwr)
		return -EALREADY;

	if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_SDIO) {
		imr = rtw_read32(rtwdev, REG_SDIO_HIMR);
		rtw_write32(rtwdev, REG_SDIO_HIMR, 0);
	}

	if (!pwr_on)
		clear_bit(RTW_FLAG_POWERON, rtwdev->flags);

	pwr_seq = pwr_on ? chip->pwr_on_seq : chip->pwr_off_seq;
	ret = rtw_pwr_seq_parser(rtwdev, pwr_seq);

	if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_SDIO)
		rtw_write32(rtwdev, REG_SDIO_HIMR, imr);

	if (!ret && pwr_on)
		set_bit(RTW_FLAG_POWERON, rtwdev->flags);

	return ret;
}

static int __rtw_mac_init_system_cfg(struct rtw_dev *rtwdev)
{
	u8 sys_func_en = rtwdev->chip->sys_func_en;
	u8 value8;
	u32 value, tmp;

	value = rtw_read32(rtwdev, REG_CPU_DMEM_CON);
	value |= BIT_WL_PLATFORM_RST | BIT_DDMA_EN;
	rtw_write32(rtwdev, REG_CPU_DMEM_CON, value);

	rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, sys_func_en);
	value8 = (rtw_read8(rtwdev, REG_CR_EXT + 3) & 0xF0) | 0x0C;
	rtw_write8(rtwdev, REG_CR_EXT + 3, value8);

	/* disable boot-from-flash for driver's DL FW */
	tmp = rtw_read32(rtwdev, REG_MCUFW_CTRL);
	if (tmp & BIT_BOOT_FSPI_EN) {
		rtw_write32(rtwdev, REG_MCUFW_CTRL, tmp & (~BIT_BOOT_FSPI_EN));
		value = rtw_read32(rtwdev, REG_GPIO_MUXCFG) & (~BIT_FSPI_EN);
		rtw_write32(rtwdev, REG_GPIO_MUXCFG, value);
	}

	return 0;
}

static int __rtw_mac_init_system_cfg_legacy(struct rtw_dev *rtwdev)
{
	rtw_write8(rtwdev, REG_CR, 0xff);
	mdelay(2);
	rtw_write8(rtwdev, REG_HWSEQ_CTRL, 0x7f);
	mdelay(2);

	rtw_write8_set(rtwdev, REG_SYS_CLKR, BIT_WAKEPAD_EN);
	rtw_write16_clr(rtwdev, REG_GPIO_MUXCFG, BIT_EN_SIC);

	rtw_write16(rtwdev, REG_CR, 0x2ff);

	return 0;
}

static int rtw_mac_init_system_cfg(struct rtw_dev *rtwdev)
{
	if (rtw_chip_wcpu_11n(rtwdev))
		return __rtw_mac_init_system_cfg_legacy(rtwdev);

	return __rtw_mac_init_system_cfg(rtwdev);
}

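/*
 * Power the MAC on.  If the power-on sequence reports -EALREADY (the
 * hardware is already powered, presumably left on by an earlier run),
 * power it off and retry once so the chip starts from a known state.
 */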
int rtw_mac_power_on(struct rtw_dev *rtwdev)
{
	int ret = 0;

	ret = rtw_mac_pre_system_cfg(rtwdev);
	if (ret)
		goto err;

	ret = rtw_mac_power_switch(rtwdev, true);
	if (ret == -EALREADY) {
		rtw_mac_power_switch(rtwdev, false);

		ret = rtw_mac_pre_system_cfg(rtwdev);
		if (ret)
			goto err;

		ret = rtw_mac_power_switch(rtwdev, true);
		if (ret)
			goto err;
	} else if (ret) {
		goto err;
	}

	ret = rtw_mac_init_system_cfg(rtwdev);
	if (ret)
		goto err;

	return 0;

err:
	rtw_err(rtwdev, "mac power on failed");
	return ret;
}

void rtw_mac_power_off(struct rtw_dev *rtwdev)
{
	rtw_mac_power_switch(rtwdev, false);
}

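/*
 * Sanity-check the firmware image: the sizes recorded in the header
 * (DMEM + IMEM + optional EMEM, each followed by a checksum) must add
 * up to the size of the firmware blob we were given.
 */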
static bool check_firmware_size(const u8 *data, u32 size)
{
	const struct rtw_fw_hdr *fw_hdr = (const struct rtw_fw_hdr *)data;
	u32 dmem_size;
	u32 imem_size;
	u32 emem_size;
	u32 real_size;

	dmem_size = le32_to_cpu(fw_hdr->dmem_size);
	imem_size = le32_to_cpu(fw_hdr->imem_size);
	emem_size = (fw_hdr->mem_usage & BIT(4)) ?
		    le32_to_cpu(fw_hdr->emem_size) : 0;

	dmem_size += FW_HDR_CHKSUM_SIZE;
	imem_size += FW_HDR_CHKSUM_SIZE;
	emem_size += emem_size ? FW_HDR_CHKSUM_SIZE : 0;
	real_size = FW_HDR_SIZE + dmem_size + imem_size + emem_size;
	if (real_size != size)
		return false;

	return true;
}

static void wlan_cpu_enable(struct rtw_dev *rtwdev, bool enable)
{
	if (enable) {
		/* cpu io interface enable */
		rtw_write8_set(rtwdev, REG_RSV_CTRL + 1, BIT_WLMCU_IOIF);

		/* cpu enable */
		rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);
	} else {
		/* cpu disable */
		rtw_write8_clr(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);

		/* cpu io interface disable */
		rtw_write8_clr(rtwdev, REG_RSV_CTRL + 1, BIT_WLMCU_IOIF);
	}
}

#define DLFW_RESTORE_REG_NUM 6

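/*
 * Save the registers the download path is about to change (queue
 * mapping, REG_CR, H2C queue status, page counts and beacon control)
 * into @bckp so download_firmware_reg_restore() can put them back, then
 * reconfigure them so the firmware image can be pushed through the high
 * priority queue.
 */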
static void download_firmware_reg_backup(struct rtw_dev *rtwdev,
					 struct rtw_backup_info *bckp)
{
	u8 tmp;
	u8 bckp_idx = 0;

	/* set HIQ to hi priority */
	bckp[bckp_idx].len = 1;
	bckp[bckp_idx].reg = REG_TXDMA_PQ_MAP + 1;
	bckp[bckp_idx].val = rtw_read8(rtwdev, REG_TXDMA_PQ_MAP + 1);
	bckp_idx++;
	tmp = RTW_DMA_MAPPING_HIGH << 6;
	rtw_write8(rtwdev, REG_TXDMA_PQ_MAP + 1, tmp);

	/* DLFW only uses HIQ, map HIQ to hi priority */
	bckp[bckp_idx].len = 1;
	bckp[bckp_idx].reg = REG_CR;
	bckp[bckp_idx].val = rtw_read8(rtwdev, REG_CR);
	bckp_idx++;
	bckp[bckp_idx].len = 4;
	bckp[bckp_idx].reg = REG_H2CQ_CSR;
	bckp[bckp_idx].val = BIT_H2CQ_FULL;
	bckp_idx++;
	tmp = BIT_HCI_TXDMA_EN | BIT_TXDMA_EN;
	rtw_write8(rtwdev, REG_CR, tmp);
	rtw_write32(rtwdev, REG_H2CQ_CSR, BIT_H2CQ_FULL);

	/* Config hi priority queue and public priority queue page number */
	bckp[bckp_idx].len = 2;
	bckp[bckp_idx].reg = REG_FIFOPAGE_INFO_1;
	bckp[bckp_idx].val = rtw_read16(rtwdev, REG_FIFOPAGE_INFO_1);
	bckp_idx++;
	bckp[bckp_idx].len = 4;
	bckp[bckp_idx].reg = REG_RQPN_CTRL_2;
	bckp[bckp_idx].val = rtw_read32(rtwdev, REG_RQPN_CTRL_2) | BIT_LD_RQPN;
	bckp_idx++;
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_1, 0x200);
	rtw_write32(rtwdev, REG_RQPN_CTRL_2, bckp[bckp_idx - 1].val);

	if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_SDIO)
		rtw_read32(rtwdev, REG_SDIO_FREE_TXPG);

	/* Disable beacon-related functions */
	tmp = rtw_read8(rtwdev, REG_BCN_CTRL);
	bckp[bckp_idx].len = 1;
	bckp[bckp_idx].reg = REG_BCN_CTRL;
	bckp[bckp_idx].val = tmp;
	bckp_idx++;
	tmp = (u8)((tmp & (~BIT_EN_BCN_FUNCTION)) | BIT_DIS_TSF_UDT);
	rtw_write8(rtwdev, REG_BCN_CTRL, tmp);

	WARN(bckp_idx != DLFW_RESTORE_REG_NUM, "wrong backup number\n");
}

static void download_firmware_reset_platform(struct rtw_dev *rtwdev)
{
	rtw_write8_clr(rtwdev, REG_CPU_DMEM_CON + 2, BIT_WL_PLATFORM_RST >> 16);
	rtw_write8_clr(rtwdev, REG_SYS_CLK_CTRL + 1, BIT_CPU_CLK_EN >> 8);
	rtw_write8_set(rtwdev, REG_CPU_DMEM_CON + 2, BIT_WL_PLATFORM_RST >> 16);
	rtw_write8_set(rtwdev, REG_SYS_CLK_CTRL + 1, BIT_CPU_CLK_EN >> 8);
}

static void download_firmware_reg_restore(struct rtw_dev *rtwdev,
					  struct rtw_backup_info *bckp,
					  u8 bckp_num)
{
	rtw_restore_reg(rtwdev, bckp, bckp_num);
}

#define TX_DESC_SIZE 48

static int send_firmware_pkt_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
				       const u8 *data, u32 size)
{
	u8 *buf;
	int ret;

	buf = kmemdup(data, size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = rtw_fw_write_data_rsvd_page(rtwdev, pg_addr, buf, size);
	kfree(buf);
	return ret;
}

static int
send_firmware_pkt(struct rtw_dev *rtwdev, u16 pg_addr, const u8 *data, u32 size)
{
	int ret;

	if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB &&
	    !((size + TX_DESC_SIZE) & (512 - 1)))
		size += 1;

	ret = send_firmware_pkt_rsvd_page(rtwdev, pg_addr, data, size);
	if (ret)
		rtw_err(rtwdev, "failed to download rsvd page\n");

	return ret;
}

static int
iddma_enable(struct rtw_dev *rtwdev, u32 src, u32 dst, u32 ctrl)
{
	rtw_write32(rtwdev, REG_DDMA_CH0SA, src);
	rtw_write32(rtwdev, REG_DDMA_CH0DA, dst);
	rtw_write32(rtwdev, REG_DDMA_CH0CTRL, ctrl);

	if (!check_hw_ready(rtwdev, REG_DDMA_CH0CTRL, BIT_DDMACH0_OWN, 0))
		return -EBUSY;

	return 0;
}

static int iddma_download_firmware(struct rtw_dev *rtwdev, u32 src, u32 dst,
				   u32 len, u8 first)
{
	u32 ch0_ctrl = BIT_DDMACH0_CHKSUM_EN | BIT_DDMACH0_OWN;

	if (!check_hw_ready(rtwdev, REG_DDMA_CH0CTRL, BIT_DDMACH0_OWN, 0))
		return -EBUSY;

	ch0_ctrl |= len & BIT_MASK_DDMACH0_DLEN;
	if (!first)
		ch0_ctrl |= BIT_DDMACH0_CHKSUM_CONT;

	if (iddma_enable(rtwdev, src, dst, ch0_ctrl))
		return -EBUSY;

	return 0;
}

int rtw_ddma_to_fw_fifo(struct rtw_dev *rtwdev, u32 ocp_src, u32 size)
{
	u32 ch0_ctrl = BIT_DDMACH0_OWN | BIT_DDMACH0_DDMA_MODE;

	if (!check_hw_ready(rtwdev, REG_DDMA_CH0CTRL, BIT_DDMACH0_OWN, 0)) {
		rtw_dbg(rtwdev, RTW_DBG_FW, "busy to start ddma\n");
		return -EBUSY;
	}

	ch0_ctrl |= size & BIT_MASK_DDMACH0_DLEN;

	if (iddma_enable(rtwdev, ocp_src, OCPBASE_RXBUF_FW_88XX, ch0_ctrl)) {
		rtw_dbg(rtwdev, RTW_DBG_FW, "busy to complete ddma\n");
		return -EBUSY;
	}

	return 0;
}

static bool
check_fw_checksum(struct rtw_dev *rtwdev, u32 addr)
{
	u8 fw_ctrl;

	fw_ctrl = rtw_read8(rtwdev, REG_MCUFW_CTRL);

	if (rtw_read32(rtwdev, REG_DDMA_CH0CTRL) & BIT_DDMACH0_CHKSUM_STS) {
		if (addr < OCPBASE_DMEM_88XX) {
			fw_ctrl |= BIT_IMEM_DW_OK;
			fw_ctrl &= ~BIT_IMEM_CHKSUM_OK;
			rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
		} else {
			fw_ctrl |= BIT_DMEM_DW_OK;
			fw_ctrl &= ~BIT_DMEM_CHKSUM_OK;
			rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
		}

		rtw_err(rtwdev, "invalid fw checksum\n");

		return false;
	}

	if (addr < OCPBASE_DMEM_88XX) {
		fw_ctrl |= (BIT_IMEM_DW_OK | BIT_IMEM_CHKSUM_OK);
		rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
	} else {
		fw_ctrl |= (BIT_DMEM_DW_OK | BIT_DMEM_CHKSUM_OK);
		rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
	}

	return true;
}

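/*
 * Push one firmware memory region to the chip: upload it to the TX
 * buffer in 4 KB chunks via reserved pages, let the DDMA engine copy
 * each chunk to its OCP destination, and finally verify the hardware
 * checksum for the region.
 */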
static int
download_firmware_to_mem(struct rtw_dev *rtwdev, const u8 *data,
			 u32 src, u32 dst, u32 size)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	u32 desc_size = chip->tx_pkt_desc_sz;
	u8 first_part;
	u32 mem_offset;
	u32 residue_size;
	u32 pkt_size;
	u32 max_size = 0x1000;
	u32 val;
	int ret;

	mem_offset = 0;
	first_part = 1;
	residue_size = size;

	val = rtw_read32(rtwdev, REG_DDMA_CH0CTRL);
	val |= BIT_DDMACH0_RESET_CHKSUM_STS;
	rtw_write32(rtwdev, REG_DDMA_CH0CTRL, val);

	while (residue_size) {
		if (residue_size >= max_size)
			pkt_size = max_size;
		else
			pkt_size = residue_size;

		ret = send_firmware_pkt(rtwdev, (u16)(src >> 7),
					data + mem_offset, pkt_size);
		if (ret)
			return ret;

		ret = iddma_download_firmware(rtwdev, OCPBASE_TXBUF_88XX +
					      src + desc_size,
					      dst + mem_offset, pkt_size,
					      first_part);
		if (ret)
			return ret;

		first_part = 0;
		mem_offset += pkt_size;
		residue_size -= pkt_size;
	}

	if (!check_fw_checksum(rtwdev, dst))
		return -EINVAL;

	return 0;
}

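/*
 * Download the DMEM, IMEM and (if the header advertises it) EMEM
 * sections of the firmware, each to the address taken from the firmware
 * header with BIT(31) stripped.
 */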
static int
start_download_firmware(struct rtw_dev *rtwdev, const u8 *data, u32 size)
{
	const struct rtw_fw_hdr *fw_hdr = (const struct rtw_fw_hdr *)data;
	const u8 *cur_fw;
	u16 val;
	u32 imem_size;
	u32 dmem_size;
	u32 emem_size;
	u32 addr;
	int ret;

	dmem_size = le32_to_cpu(fw_hdr->dmem_size);
	imem_size = le32_to_cpu(fw_hdr->imem_size);
	emem_size = (fw_hdr->mem_usage & BIT(4)) ?
		    le32_to_cpu(fw_hdr->emem_size) : 0;
	dmem_size += FW_HDR_CHKSUM_SIZE;
	imem_size += FW_HDR_CHKSUM_SIZE;
	emem_size += emem_size ? FW_HDR_CHKSUM_SIZE : 0;

	val = (u16)(rtw_read16(rtwdev, REG_MCUFW_CTRL) & 0x3800);
	val |= BIT_MCUFWDL_EN;
	rtw_write16(rtwdev, REG_MCUFW_CTRL, val);

	cur_fw = data + FW_HDR_SIZE;
	addr = le32_to_cpu(fw_hdr->dmem_addr);
	addr &= ~BIT(31);
	ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr, dmem_size);
	if (ret)
		return ret;

	cur_fw = data + FW_HDR_SIZE + dmem_size;
	addr = le32_to_cpu(fw_hdr->imem_addr);
	addr &= ~BIT(31);
	ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr, imem_size);
	if (ret)
		return ret;

	if (emem_size) {
		cur_fw = data + FW_HDR_SIZE + dmem_size + imem_size;
		addr = le32_to_cpu(fw_hdr->emem_addr);
		addr &= ~BIT(31);
		ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr,
					       emem_size);
		if (ret)
			return ret;
	}

	return 0;
}

static int download_firmware_validate(struct rtw_dev *rtwdev)
{
	u32 fw_key;

	if (!check_hw_ready(rtwdev, REG_MCUFW_CTRL, FW_READY_MASK, FW_READY)) {
		fw_key = rtw_read32(rtwdev, REG_FW_DBG7) & FW_KEY_MASK;
		if (fw_key == ILLEGAL_KEY_GROUP)
			rtw_err(rtwdev, "invalid fw key\n");
		return -EINVAL;
	}

	return 0;
}

static void download_firmware_end_flow(struct rtw_dev *rtwdev)
{
	u16 fw_ctrl;

	rtw_write32(rtwdev, REG_TXDMA_STATUS, BTI_PAGE_OVF);

	/* Check whether the IMEM & DMEM checksums are OK */
	fw_ctrl = rtw_read16(rtwdev, REG_MCUFW_CTRL);
	if ((fw_ctrl & BIT_CHECK_SUM_OK) != BIT_CHECK_SUM_OK)
		return;

	fw_ctrl = (fw_ctrl | BIT_FW_DW_RDY) & ~BIT_MCUFWDL_EN;
	rtw_write16(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
}

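/*
 * Firmware download for 11ac-generation parts: back up and reconfigure
 * the TX DMA registers, halt the WLAN CPU, DDMA the image into the chip,
 * restore the registers, restart the CPU and wait for the firmware ready
 * flag.  The LTECOEX indirect register 0x38 is preserved across the
 * download.
 */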
static int __rtw_download_firmware(struct rtw_dev *rtwdev,
				   struct rtw_fw_state *fw)
{
	struct rtw_backup_info bckp[DLFW_RESTORE_REG_NUM];
	const u8 *data = fw->firmware->data;
	u32 size = fw->firmware->size;
	u32 ltecoex_bckp;
	int ret;

	if (!check_firmware_size(data, size))
		return -EINVAL;

	if (!ltecoex_read_reg(rtwdev, 0x38, &ltecoex_bckp))
		return -EBUSY;

	wlan_cpu_enable(rtwdev, false);

	download_firmware_reg_backup(rtwdev, bckp);
	download_firmware_reset_platform(rtwdev);

	ret = start_download_firmware(rtwdev, data, size);
	if (ret)
		goto dlfw_fail;

	download_firmware_reg_restore(rtwdev, bckp, DLFW_RESTORE_REG_NUM);

	download_firmware_end_flow(rtwdev);

	wlan_cpu_enable(rtwdev, true);

	if (!ltecoex_reg_write(rtwdev, 0x38, ltecoex_bckp)) {
		ret = -EBUSY;
		goto dlfw_fail;
	}

	ret = download_firmware_validate(rtwdev);
	if (ret)
		goto dlfw_fail;

	/* reset desc and index */
	rtw_hci_setup(rtwdev);

	rtwdev->h2c.last_box_num = 0;
	rtwdev->h2c.seq = 0;

	set_bit(RTW_FLAG_FW_RUNNING, rtwdev->flags);

	return 0;

dlfw_fail:
	/* Disable FWDL_EN */
	rtw_write8_clr(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);
	rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);

	return ret;
}

static void en_download_firmware_legacy(struct rtw_dev *rtwdev, bool en)
{
	int try;

	if (en) {
		wlan_cpu_enable(rtwdev, false);
		wlan_cpu_enable(rtwdev, true);

		rtw_write8_set(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);

		for (try = 0; try < 10; try++) {
			if (rtw_read8(rtwdev, REG_MCUFW_CTRL) & BIT_MCUFWDL_EN)
				goto fwdl_ready;
			rtw_write8_set(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);
			msleep(20);
		}
		rtw_err(rtwdev, "failed to check fw download ready\n");
fwdl_ready:
		rtw_write32_clr(rtwdev, REG_MCUFW_CTRL, BIT_ROM_DLEN);
	} else {
		rtw_write8_clr(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);
	}
}

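/*
 * Legacy (11n) download path: each DLFW_BLK_SIZE_LEGACY block of the
 * page is written as a 32-bit word through the window at
 * FW_START_ADDR_LEGACY, with the page selected via the ROM_PGE field
 * of REG_MCUFW_CTRL.
 */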
static void
write_firmware_page(struct rtw_dev *rtwdev, u32 page, const u8 *data, u32 size)
{
	u32 val32;
	u32 block_nr;
	u32 remain_size;
	u32 write_addr = FW_START_ADDR_LEGACY;
	const __le32 *ptr = (const __le32 *)data;
	u32 block;
	__le32 remain_data = 0;

	block_nr = size >> DLFW_BLK_SIZE_SHIFT_LEGACY;
	remain_size = size & (DLFW_BLK_SIZE_LEGACY - 1);

	val32 = rtw_read32(rtwdev, REG_MCUFW_CTRL);
	val32 &= ~BIT_ROM_PGE;
	val32 |= (page << BIT_SHIFT_ROM_PGE) & BIT_ROM_PGE;
	rtw_write32(rtwdev, REG_MCUFW_CTRL, val32);

	for (block = 0; block < block_nr; block++) {
		rtw_write32(rtwdev, write_addr, le32_to_cpu(*ptr));

		write_addr += DLFW_BLK_SIZE_LEGACY;
		ptr++;
	}

	if (remain_size) {
		memcpy(&remain_data, ptr, remain_size);
		rtw_write32(rtwdev, write_addr, le32_to_cpu(remain_data));
	}
}

static int
download_firmware_legacy(struct rtw_dev *rtwdev, const u8 *data, u32 size)
{
	u32 page;
	u32 total_page;
	u32 last_page_size;

	data += sizeof(struct rtw_fw_hdr_legacy);
	size -= sizeof(struct rtw_fw_hdr_legacy);

	total_page = size >> DLFW_PAGE_SIZE_SHIFT_LEGACY;
	last_page_size = size & (DLFW_PAGE_SIZE_LEGACY - 1);

	rtw_write8_set(rtwdev, REG_MCUFW_CTRL, BIT_FWDL_CHK_RPT);

	for (page = 0; page < total_page; page++) {
		write_firmware_page(rtwdev, page, data, DLFW_PAGE_SIZE_LEGACY);
		data += DLFW_PAGE_SIZE_LEGACY;
	}
	if (last_page_size)
		write_firmware_page(rtwdev, page, data, last_page_size);

	if (!check_hw_ready(rtwdev, REG_MCUFW_CTRL, BIT_FWDL_CHK_RPT, 1)) {
		rtw_err(rtwdev, "failed to check download firmware report\n");
		return -EINVAL;
	}

	return 0;
}

static int download_firmware_validate_legacy(struct rtw_dev *rtwdev)
{
	u32 val32;
	int try;

	val32 = rtw_read32(rtwdev, REG_MCUFW_CTRL);
	val32 |= BIT_MCUFWDL_RDY;
	val32 &= ~BIT_WINTINI_RDY;
	rtw_write32(rtwdev, REG_MCUFW_CTRL, val32);

	wlan_cpu_enable(rtwdev, false);
	wlan_cpu_enable(rtwdev, true);

	for (try = 0; try < 10; try++) {
		val32 = rtw_read32(rtwdev, REG_MCUFW_CTRL);
		if ((val32 & FW_READY_LEGACY) == FW_READY_LEGACY)
			return 0;
		msleep(20);
	}

	rtw_err(rtwdev, "failed to validate firmware\n");
	return -EINVAL;
}

static int __rtw_download_firmware_legacy(struct rtw_dev *rtwdev,
					  struct rtw_fw_state *fw)
{
	int ret = 0;

	en_download_firmware_legacy(rtwdev, true);
	ret = download_firmware_legacy(rtwdev, fw->firmware->data, fw->firmware->size);
	en_download_firmware_legacy(rtwdev, false);
	if (ret)
		goto out;

	ret = download_firmware_validate_legacy(rtwdev);
	if (ret)
		goto out;

	/* reset desc and index */
	rtw_hci_setup(rtwdev);

	rtwdev->h2c.last_box_num = 0;
	rtwdev->h2c.seq = 0;

	set_bit(RTW_FLAG_FW_RUNNING, rtwdev->flags);

out:
	return ret;
}

static
int _rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw)
{
	if (rtw_chip_wcpu_11n(rtwdev))
		return __rtw_download_firmware_legacy(rtwdev, fw);

	return __rtw_download_firmware(rtwdev, fw);
}

int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw)
{
	int ret;

	ret = _rtw_download_firmware(rtwdev, fw);
	if (ret)
		return ret;

	if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE &&
	    rtwdev->chip->id == RTW_CHIP_TYPE_8821C)
		rtw_fw_set_recover_bt_device(rtwdev);

	return 0;
}

static u32 get_priority_queues(struct rtw_dev *rtwdev, u32 queues)
{
	const struct rtw_rqpn *rqpn = rtwdev->fifo.rqpn;
	u32 prio_queues = 0;

	if (queues & BIT(IEEE80211_AC_VO))
		prio_queues |= BIT(rqpn->dma_map_vo);
	if (queues & BIT(IEEE80211_AC_VI))
		prio_queues |= BIT(rqpn->dma_map_vi);
	if (queues & BIT(IEEE80211_AC_BE))
		prio_queues |= BIT(rqpn->dma_map_be);
	if (queues & BIT(IEEE80211_AC_BK))
		prio_queues |= BIT(rqpn->dma_map_bk);

	return prio_queues;
}

static void __rtw_mac_flush_prio_queue(struct rtw_dev *rtwdev,
				       u32 prio_queue, bool drop)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	const struct rtw_prioq_addr *addr;
	bool wsize;
	u16 avail_page, rsvd_page;
	int i;

	if (prio_queue >= RTW_DMA_MAPPING_MAX)
		return;

	addr = &chip->prioq_addrs->prio[prio_queue];
	wsize = chip->prioq_addrs->wsize;

	/* check if all of the reserved pages are available for 100 msecs */
	for (i = 0; i < 5; i++) {
		rsvd_page = wsize ? rtw_read16(rtwdev, addr->rsvd) :
				     rtw_read8(rtwdev, addr->rsvd);
		avail_page = wsize ? rtw_read16(rtwdev, addr->avail) :
				      rtw_read8(rtwdev, addr->avail);
		if (rsvd_page == avail_page)
			return;

		msleep(20);
	}

	/* The priority queue is still not empty; throw a warning.
	 *
	 * Note that if we want to flush the tx queue when there is a lot of
	 * traffic (e.g., 100Mbps up), some of the packets could be dropped,
	 * and it takes roughly ~2 secs to flush the full priority queue.
	 */
	if (!drop)
		rtw_warn(rtwdev, "timed out to flush queue %d\n", prio_queue);
}

static void rtw_mac_flush_prio_queues(struct rtw_dev *rtwdev,
				      u32 prio_queues, bool drop)
{
	u32 q;

	for (q = 0; q < RTW_DMA_MAPPING_MAX; q++)
		if (prio_queues & BIT(q))
			__rtw_mac_flush_prio_queue(rtwdev, q, drop);
}

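/*
 * Flush the priority queues that back the given mac80211 queues.  If
 * every hardware queue is being flushed, or the RQPN mapping has not
 * been set up yet, flush all priority queues.
 */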
void rtw_mac_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop)
{
	u32 prio_queues = 0;

	/* If all of the hardware queues are requested to flush,
	 * or the priority queues are not mapped yet,
	 * flush all of the priority queues
	 */
	if (queues == BIT(rtwdev->hw->queues) - 1 || !rtwdev->fifo.rqpn)
		prio_queues = BIT(RTW_DMA_MAPPING_MAX) - 1;
	else
		prio_queues = get_priority_queues(rtwdev, queues);

	rtw_mac_flush_prio_queues(rtwdev, prio_queues, drop);
}

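/*
 * Map the TX DMA queues (HI/MG/BK/BE/VI/VO) onto DMA channels using the
 * RQPN table entry that matches the host interface (and, for USB, the
 * number of bulk-out endpoints), then re-enable MAC TRX.
 */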
static int txdma_queue_mapping(struct rtw_dev *rtwdev)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	const struct rtw_rqpn *rqpn = NULL;
	u16 txdma_pq_map = 0;

	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_PCIE:
		rqpn = &chip->rqpn_table[1];
		break;
	case RTW_HCI_TYPE_USB:
		if (rtwdev->hci.bulkout_num == 2)
			rqpn = &chip->rqpn_table[2];
		else if (rtwdev->hci.bulkout_num == 3)
			rqpn = &chip->rqpn_table[3];
		else if (rtwdev->hci.bulkout_num == 4)
			rqpn = &chip->rqpn_table[4];
		else
			return -EINVAL;
		break;
	case RTW_HCI_TYPE_SDIO:
		rqpn = &chip->rqpn_table[0];
		break;
	default:
		return -EINVAL;
	}

	rtwdev->fifo.rqpn = rqpn;
	txdma_pq_map |= BIT_TXDMA_HIQ_MAP(rqpn->dma_map_hi);
	txdma_pq_map |= BIT_TXDMA_MGQ_MAP(rqpn->dma_map_mg);
	txdma_pq_map |= BIT_TXDMA_BKQ_MAP(rqpn->dma_map_bk);
	txdma_pq_map |= BIT_TXDMA_BEQ_MAP(rqpn->dma_map_be);
	txdma_pq_map |= BIT_TXDMA_VIQ_MAP(rqpn->dma_map_vi);
	txdma_pq_map |= BIT_TXDMA_VOQ_MAP(rqpn->dma_map_vo);
	rtw_write16(rtwdev, REG_TXDMA_PQ_MAP, txdma_pq_map);

	rtw_write8(rtwdev, REG_CR, 0);
	rtw_write8(rtwdev, REG_CR, MAC_TRX_ENABLE);
	if (rtw_chip_wcpu_11ac(rtwdev))
		rtw_write32(rtwdev, REG_H2CQ_CSR, BIT_H2CQ_FULL);

	if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_SDIO) {
		rtw_read32(rtwdev, REG_SDIO_FREE_TXPG);
		rtw_write32(rtwdev, REG_SDIO_TX_CTRL, 0);
	} else if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB) {
		rtw_write8_set(rtwdev, REG_TXDMA_PQ_MAP, BIT_RXDMA_ARBBW_EN);
	}

	return 0;
}

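/*
 * Carve up the TX FIFO pages: reserve pages for the driver and (on 11ac
 * parts) for H2C, CPU instructions, FW TX buffer and CSI buffers at the
 * top of the FIFO, and leave the rest below the reserved boundary for
 * the AC queues.
 */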
static int set_trx_fifo_info(struct rtw_dev *rtwdev)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	u16 cur_pg_addr;
	u8 csi_buf_pg_num = chip->csi_buf_pg_num;

	/* config rsvd page num */
	fifo->rsvd_drv_pg_num = chip->rsvd_drv_pg_num;
	fifo->txff_pg_num = chip->txff_size >> 7;
	if (rtw_chip_wcpu_11n(rtwdev))
		fifo->rsvd_pg_num = fifo->rsvd_drv_pg_num;
	else
		fifo->rsvd_pg_num = fifo->rsvd_drv_pg_num +
				   RSVD_PG_H2C_EXTRAINFO_NUM +
				   RSVD_PG_H2C_STATICINFO_NUM +
				   RSVD_PG_H2CQ_NUM +
				   RSVD_PG_CPU_INSTRUCTION_NUM +
				   RSVD_PG_FW_TXBUF_NUM +
				   csi_buf_pg_num;

	if (fifo->rsvd_pg_num > fifo->txff_pg_num)
		return -ENOMEM;

	fifo->acq_pg_num = fifo->txff_pg_num - fifo->rsvd_pg_num;
	fifo->rsvd_boundary = fifo->txff_pg_num - fifo->rsvd_pg_num;

	cur_pg_addr = fifo->txff_pg_num;
	if (rtw_chip_wcpu_11ac(rtwdev)) {
		cur_pg_addr -= csi_buf_pg_num;
		fifo->rsvd_csibuf_addr = cur_pg_addr;
		cur_pg_addr -= RSVD_PG_FW_TXBUF_NUM;
		fifo->rsvd_fw_txbuf_addr = cur_pg_addr;
		cur_pg_addr -= RSVD_PG_CPU_INSTRUCTION_NUM;
		fifo->rsvd_cpu_instr_addr = cur_pg_addr;
		cur_pg_addr -= RSVD_PG_H2CQ_NUM;
		fifo->rsvd_h2cq_addr = cur_pg_addr;
		cur_pg_addr -= RSVD_PG_H2C_STATICINFO_NUM;
		fifo->rsvd_h2c_sta_info_addr = cur_pg_addr;
		cur_pg_addr -= RSVD_PG_H2C_EXTRAINFO_NUM;
		fifo->rsvd_h2c_info_addr = cur_pg_addr;
	}
	cur_pg_addr -= fifo->rsvd_drv_pg_num;
	fifo->rsvd_drv_addr = cur_pg_addr;

	if (fifo->rsvd_boundary != fifo->rsvd_drv_addr) {
		rtw_err(rtwdev, "wrong rsvd driver address\n");
		return -EINVAL;
	}

	return 0;
}

static int __priority_queue_cfg(struct rtw_dev *rtwdev,
				const struct rtw_page_table *pg_tbl,
				u16 pubq_num)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;

	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_1, pg_tbl->hq_num);
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_2, pg_tbl->lq_num);
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_3, pg_tbl->nq_num);
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_4, pg_tbl->exq_num);
	rtw_write16(rtwdev, REG_FIFOPAGE_INFO_5, pubq_num);
	rtw_write32_set(rtwdev, REG_RQPN_CTRL_2, BIT_LD_RQPN);

	rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2, fifo->rsvd_boundary);
	rtw_write8_set(rtwdev, REG_FWHW_TXQ_CTRL + 2, BIT_EN_WR_FREE_TAIL >> 16);

	rtw_write16(rtwdev, REG_BCNQ_BDNY_V1, fifo->rsvd_boundary);
	rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2 + 2, fifo->rsvd_boundary);
	rtw_write16(rtwdev, REG_BCNQ1_BDNY_V1, fifo->rsvd_boundary);
	rtw_write32(rtwdev, REG_RXFF_BNDY, chip->rxff_size - C2H_PKT_BUF - 1);
	rtw_write8_set(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1);

	if (!check_hw_ready(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1, 0))
		return -EBUSY;

	rtw_write8(rtwdev, REG_CR + 3, 0);

	return 0;
}

static int __priority_queue_cfg_legacy(struct rtw_dev *rtwdev,
				       const struct rtw_page_table *pg_tbl,
				       u16 pubq_num)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	u32 val32;

	val32 = BIT_RQPN_NE(pg_tbl->nq_num, pg_tbl->exq_num);
	rtw_write32(rtwdev, REG_RQPN_NPQ, val32);
	val32 = BIT_RQPN_HLP(pg_tbl->hq_num, pg_tbl->lq_num, pubq_num);
	rtw_write32(rtwdev, REG_RQPN, val32);

	rtw_write8(rtwdev, REG_TRXFF_BNDY, fifo->rsvd_boundary);
	rtw_write16(rtwdev, REG_TRXFF_BNDY + 2, chip->rxff_size - REPORT_BUF - 1);
	rtw_write8(rtwdev, REG_DWBCN0_CTRL + 1, fifo->rsvd_boundary);
	rtw_write8(rtwdev, REG_BCNQ_BDNY, fifo->rsvd_boundary);
	rtw_write8(rtwdev, REG_MGQ_BDNY, fifo->rsvd_boundary);
	rtw_write8(rtwdev, REG_WMAC_LBK_BF_HD, fifo->rsvd_boundary);

	rtw_write32_set(rtwdev, REG_AUTO_LLT, BIT_AUTO_INIT_LLT);

	if (!check_hw_ready(rtwdev, REG_AUTO_LLT, BIT_AUTO_INIT_LLT, 0))
		return -EBUSY;

	return 0;
}

static int priority_queue_cfg(struct rtw_dev *rtwdev)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	const struct rtw_page_table *pg_tbl = NULL;
	u16 pubq_num;
	int ret;

	ret = set_trx_fifo_info(rtwdev);
	if (ret)
		return ret;

	switch (rtw_hci_type(rtwdev)) {
	case RTW_HCI_TYPE_PCIE:
		pg_tbl = &chip->page_table[1];
		break;
	case RTW_HCI_TYPE_USB:
		if (rtwdev->hci.bulkout_num == 2)
			pg_tbl = &chip->page_table[2];
		else if (rtwdev->hci.bulkout_num == 3)
			pg_tbl = &chip->page_table[3];
		else if (rtwdev->hci.bulkout_num == 4)
			pg_tbl = &chip->page_table[4];
		else
			return -EINVAL;
		break;
	case RTW_HCI_TYPE_SDIO:
		pg_tbl = &chip->page_table[0];
		break;
	default:
		return -EINVAL;
	}

	pubq_num = fifo->acq_pg_num - pg_tbl->hq_num - pg_tbl->lq_num -
		   pg_tbl->nq_num - pg_tbl->exq_num - pg_tbl->gapq_num;
	if (rtw_chip_wcpu_11n(rtwdev))
		return __priority_queue_cfg_legacy(rtwdev, pg_tbl, pubq_num);
	else
		return __priority_queue_cfg(rtwdev, pg_tbl, pubq_num);
}

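/*
 * Point the firmware's H2C ring (head/read/tail pointers) at the
 * reserved H2CQ pages and verify that the ring starts out empty.
 */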
static int init_h2c(struct rtw_dev *rtwdev)
{
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	u8 value8;
	u32 value32;
	u32 h2cq_addr;
	u32 h2cq_size;
	u32 h2cq_free;
	u32 wp, rp;

	if (rtw_chip_wcpu_11n(rtwdev))
		return 0;

	h2cq_addr = fifo->rsvd_h2cq_addr << TX_PAGE_SIZE_SHIFT;
	h2cq_size = RSVD_PG_H2CQ_NUM << TX_PAGE_SIZE_SHIFT;

	value32 = rtw_read32(rtwdev, REG_H2C_HEAD);
	value32 = (value32 & 0xFFFC0000) | h2cq_addr;
	rtw_write32(rtwdev, REG_H2C_HEAD, value32);

	value32 = rtw_read32(rtwdev, REG_H2C_READ_ADDR);
	value32 = (value32 & 0xFFFC0000) | h2cq_addr;
	rtw_write32(rtwdev, REG_H2C_READ_ADDR, value32);

	value32 = rtw_read32(rtwdev, REG_H2C_TAIL);
	value32 &= 0xFFFC0000;
	value32 |= (h2cq_addr + h2cq_size);
	rtw_write32(rtwdev, REG_H2C_TAIL, value32);

	value8 = rtw_read8(rtwdev, REG_H2C_INFO);
	value8 = (u8)((value8 & 0xFC) | 0x01);
	rtw_write8(rtwdev, REG_H2C_INFO, value8);

	value8 = rtw_read8(rtwdev, REG_H2C_INFO);
	value8 = (u8)((value8 & 0xFB) | 0x04);
	rtw_write8(rtwdev, REG_H2C_INFO, value8);

	value8 = rtw_read8(rtwdev, REG_TXDMA_OFFSET_CHK + 1);
	value8 = (u8)((value8 & 0x7f) | 0x80);
	rtw_write8(rtwdev, REG_TXDMA_OFFSET_CHK + 1, value8);

	wp = rtw_read32(rtwdev, REG_H2C_PKT_WRITEADDR) & 0x3FFFF;
	rp = rtw_read32(rtwdev, REG_H2C_PKT_READADDR) & 0x3FFFF;
	h2cq_free = wp >= rp ? h2cq_size - (wp - rp) : rp - wp;

	if (h2cq_size != h2cq_free) {
		rtw_err(rtwdev, "H2C queue mismatch\n");
		return -EINVAL;
	}

	return 0;
}

static int rtw_init_trx_cfg(struct rtw_dev *rtwdev)
{
	int ret;

	ret = txdma_queue_mapping(rtwdev);
	if (ret)
		return ret;

	ret = priority_queue_cfg(rtwdev);
	if (ret)
		return ret;

	ret = init_h2c(rtwdev);
	if (ret)
		return ret;

	return 0;
}

static int rtw_drv_info_cfg(struct rtw_dev *rtwdev)
{
	u8 value8;

	rtw_write8(rtwdev, REG_RX_DRVINFO_SZ, PHY_STATUS_SIZE);
	if (rtw_chip_wcpu_11ac(rtwdev)) {
		value8 = rtw_read8(rtwdev, REG_TRXFF_BNDY + 1);
		value8 &= 0xF0;
		/* For rxdesc len = 0 issue */
		value8 |= 0xF;
		rtw_write8(rtwdev, REG_TRXFF_BNDY + 1, value8);
	}
	rtw_write32_set(rtwdev, REG_RCR, BIT_APP_PHYSTS);
	rtw_write32_clr(rtwdev, REG_WMAC_OPTION_FUNCTION + 4, BIT(8) | BIT(9));

	return 0;
}

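/*
 * Top-level MAC initialization: TRX/queue configuration, the chip's
 * mac_init ops, PHY status (driver info) setup and HCI-specific
 * configuration.  Roughly, and for illustration only, the core code
 * brings a device up along the lines of:
 *
 *	rtw_mac_power_on(rtwdev);
 *	rtw_download_firmware(rtwdev, fw);
 *	rtw_mac_init(rtwdev);
 */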
int rtw_mac_init(struct rtw_dev *rtwdev)
{
	const struct rtw_chip_info *chip = rtwdev->chip;
	int ret;

	ret = rtw_init_trx_cfg(rtwdev);
	if (ret)
		return ret;

	ret = chip->ops->mac_init(rtwdev);
	if (ret)
		return ret;

	ret = rtw_drv_info_cfg(rtwdev);
	if (ret)
		return ret;

	rtw_hci_interface_cfg(rtwdev);

	return 0;
}
1380