xref: /linux/drivers/net/wireless/realtek/rtw89/fw.c (revision c9d23f9657cabfd2836a096bf6eddf8df2cf1434)
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright(c) 2019-2020  Realtek Corporation
3  */
4 
5 #include "cam.h"
6 #include "chan.h"
7 #include "coex.h"
8 #include "debug.h"
9 #include "fw.h"
10 #include "mac.h"
11 #include "phy.h"
12 #include "reg.h"
13 #include "util.h"
14 
15 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
16 				    struct sk_buff *skb);
17 
18 static struct sk_buff *rtw89_fw_h2c_alloc_skb(struct rtw89_dev *rtwdev, u32 len,
19 					      bool header)
20 {
21 	struct sk_buff *skb;
22 	u32 header_len = 0;
23 	u32 h2c_desc_size = rtwdev->chip->h2c_desc_size;
24 
25 	if (header)
26 		header_len = H2C_HEADER_LEN;
27 
28 	skb = dev_alloc_skb(len + header_len + h2c_desc_size);
29 	if (!skb)
30 		return NULL;
31 	skb_reserve(skb, header_len + h2c_desc_size);
32 	memset(skb->data, 0, len);
33 
34 	return skb;
35 }
36 
37 struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len)
38 {
39 	return rtw89_fw_h2c_alloc_skb(rtwdev, len, true);
40 }
41 
42 struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len)
43 {
44 	return rtw89_fw_h2c_alloc_skb(rtwdev, len, false);
45 }
46 
47 static u8 _fw_get_rdy(struct rtw89_dev *rtwdev)
48 {
49 	u8 val = rtw89_read8(rtwdev, R_AX_WCPU_FW_CTRL);
50 
51 	return FIELD_GET(B_AX_WCPU_FWDL_STS_MASK, val);
52 }
53 
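/*
 * Poll the WCPU firmware-download status until the firmware reports
 * INIT_RDY, or translate the failure code (checksum, security, or cut
 * version mismatch) into an errno otherwise.  With the 1 us poll period
 * used below, FWDL_WAIT_CNT bounds the wait at roughly 400 ms (editor's
 * note: the timing is inferred from the read_poll_timeout_atomic()
 * arguments, not from vendor documentation).
 */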
54 #define FWDL_WAIT_CNT 400000
55 int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev)
56 {
57 	u8 val;
58 	int ret;
59 
60 	ret = read_poll_timeout_atomic(_fw_get_rdy, val,
61 				       val == RTW89_FWDL_WCPU_FW_INIT_RDY,
62 				       1, FWDL_WAIT_CNT, false, rtwdev);
63 	if (ret) {
64 		switch (val) {
65 		case RTW89_FWDL_CHECKSUM_FAIL:
66 			rtw89_err(rtwdev, "fw checksum fail\n");
67 			return -EINVAL;
68 
69 		case RTW89_FWDL_SECURITY_FAIL:
70 			rtw89_err(rtwdev, "fw security fail\n");
71 			return -EINVAL;
72 
73 		case RTW89_FWDL_CV_NOT_MATCH:
74 			rtw89_err(rtwdev, "fw cv not match\n");
75 			return -EINVAL;
76 
77 		default:
78 			return -EBUSY;
79 		}
80 	}
81 
82 	set_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);
83 
84 	return 0;
85 }
86 
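/*
 * Parse the firmware image layout: a fixed header, one section header per
 * section, an optional dynamic header, and then the section payloads packed
 * back to back.  Sections of FWDL_SECURITY_SECTION_TYPE may carry extra MSS
 * signatures (mssc * FWDL_SECURITY_SIGLEN bytes), which the final size check
 * accounts for by comparing fw_end against bin + mssc_len.
 */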
87 static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
88 			       struct rtw89_fw_bin_info *info)
89 {
90 	struct rtw89_fw_hdr_section_info *section_info;
91 	const u8 *fw_end = fw + len;
92 	const u8 *fwdynhdr;
93 	const u8 *bin;
94 	u32 base_hdr_len;
95 	u32 mssc_len = 0;
96 	u32 i;
97 
98 	if (!info)
99 		return -EINVAL;
100 
101 	info->section_num = GET_FW_HDR_SEC_NUM(fw);
102 	base_hdr_len = RTW89_FW_HDR_SIZE +
103 		       info->section_num * RTW89_FW_SECTION_HDR_SIZE;
104 	info->dynamic_hdr_en = GET_FW_HDR_DYN_HDR(fw);
105 
106 	if (info->dynamic_hdr_en) {
107 		info->hdr_len = GET_FW_HDR_LEN(fw);
108 		info->dynamic_hdr_len = info->hdr_len - base_hdr_len;
109 		fwdynhdr = fw + base_hdr_len;
110 		if (GET_FW_DYNHDR_LEN(fwdynhdr) != info->dynamic_hdr_len) {
111 			rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n");
112 			return -EINVAL;
113 		}
114 	} else {
115 		info->hdr_len = base_hdr_len;
116 		info->dynamic_hdr_len = 0;
117 	}
118 
119 	bin = fw + info->hdr_len;
120 
121 	/* jump to section header */
122 	fw += RTW89_FW_HDR_SIZE;
123 	section_info = info->section_info;
124 	for (i = 0; i < info->section_num; i++) {
125 		section_info->type = GET_FWSECTION_HDR_SECTIONTYPE(fw);
126 		if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
127 			section_info->mssc = GET_FWSECTION_HDR_MSSC(fw);
128 			mssc_len += section_info->mssc * FWDL_SECURITY_SIGLEN;
129 		} else {
130 			section_info->mssc = 0;
131 		}
132 
133 		section_info->len = GET_FWSECTION_HDR_SEC_SIZE(fw);
134 		if (GET_FWSECTION_HDR_CHECKSUM(fw))
135 			section_info->len += FWDL_SECTION_CHKSUM_LEN;
136 		section_info->redl = GET_FWSECTION_HDR_REDL(fw);
137 		section_info->dladdr =
138 				GET_FWSECTION_HDR_DL_ADDR(fw) & 0x1fffffff;
139 		section_info->addr = bin;
140 		bin += section_info->len;
141 		fw += RTW89_FW_SECTION_HDR_SIZE;
142 		section_info++;
143 	}
144 
145 	if (fw_end != bin + mssc_len) {
146 		rtw89_err(rtwdev, "[ERR]fw bin size\n");
147 		return -EINVAL;
148 	}
149 
150 	return 0;
151 }
152 
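/*
 * A multi-firmware (MFW) container begins with RTW89_MFW_SIG and holds an
 * array of rtw89_mfw_info descriptors; pick the entry whose cut version (cv)
 * and firmware type match this device and which is not flagged as an MP
 * image (assumed here to be the mass-production/test variant).  A blob
 * without the signature is treated as a legacy single firmware that can
 * only serve RTW89_FW_NORMAL.
 */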
153 static
154 int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
155 			struct rtw89_fw_suit *fw_suit, bool nowarn)
156 {
157 	struct rtw89_fw_info *fw_info = &rtwdev->fw;
158 	const u8 *mfw = fw_info->firmware->data;
159 	u32 mfw_len = fw_info->firmware->size;
160 	const struct rtw89_mfw_hdr *mfw_hdr = (const struct rtw89_mfw_hdr *)mfw;
161 	const struct rtw89_mfw_info *mfw_info;
162 	int i;
163 
164 	if (mfw_hdr->sig != RTW89_MFW_SIG) {
165 		rtw89_debug(rtwdev, RTW89_DBG_FW, "use legacy firmware\n");
166 		/* legacy firmware supports the normal type only */
167 		if (type != RTW89_FW_NORMAL)
168 			return -EINVAL;
169 		fw_suit->data = mfw;
170 		fw_suit->size = mfw_len;
171 		return 0;
172 	}
173 
174 	for (i = 0; i < mfw_hdr->fw_nr; i++) {
175 		mfw_info = &mfw_hdr->info[i];
176 		if (mfw_info->cv != rtwdev->hal.cv ||
177 		    mfw_info->type != type ||
178 		    mfw_info->mp)
179 			continue;
180 
181 		fw_suit->data = mfw + le32_to_cpu(mfw_info->shift);
182 		fw_suit->size = le32_to_cpu(mfw_info->size);
183 		return 0;
184 	}
185 
186 	if (!nowarn)
187 		rtw89_err(rtwdev, "no suitable firmware found\n");
188 	return -ENOENT;
189 }
190 
191 static void rtw89_fw_update_ver(struct rtw89_dev *rtwdev,
192 				enum rtw89_fw_type type,
193 				struct rtw89_fw_suit *fw_suit)
194 {
195 	const u8 *hdr = fw_suit->data;
196 
197 	fw_suit->major_ver = GET_FW_HDR_MAJOR_VERSION(hdr);
198 	fw_suit->minor_ver = GET_FW_HDR_MINOR_VERSION(hdr);
199 	fw_suit->sub_ver = GET_FW_HDR_SUBVERSION(hdr);
200 	fw_suit->sub_idex = GET_FW_HDR_SUBINDEX(hdr);
201 	fw_suit->build_year = GET_FW_HDR_YEAR(hdr);
202 	fw_suit->build_mon = GET_FW_HDR_MONTH(hdr);
203 	fw_suit->build_date = GET_FW_HDR_DATE(hdr);
204 	fw_suit->build_hour = GET_FW_HDR_HOUR(hdr);
205 	fw_suit->build_min = GET_FW_HDR_MIN(hdr);
206 	fw_suit->cmd_ver = GET_FW_HDR_CMD_VERSERION(hdr);
207 
208 	rtw89_info(rtwdev,
209 		   "Firmware version %u.%u.%u.%u, cmd version %u, type %u\n",
210 		   fw_suit->major_ver, fw_suit->minor_ver, fw_suit->sub_ver,
211 		   fw_suit->sub_idex, fw_suit->cmd_ver, type);
212 }
213 
214 static
215 int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
216 			 bool nowarn)
217 {
218 	struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
219 	int ret;
220 
221 	ret = rtw89_mfw_recognize(rtwdev, type, fw_suit, nowarn);
222 	if (ret)
223 		return ret;
224 
225 	rtw89_fw_update_ver(rtwdev, type, fw_suit);
226 
227 	return 0;
228 }
229 
230 #define __DEF_FW_FEAT_COND(__cond, __op) \
231 static bool __fw_feat_cond_ ## __cond(u32 suit_ver_code, u32 comp_ver_code) \
232 { \
233 	return suit_ver_code __op comp_ver_code; \
234 }
235 
236 __DEF_FW_FEAT_COND(ge, >=); /* greater or equal */
237 __DEF_FW_FEAT_COND(le, <=); /* less or equal */
238 __DEF_FW_FEAT_COND(lt, <); /* less than */
239 
240 struct __fw_feat_cfg {
241 	enum rtw89_core_chip_id chip_id;
242 	enum rtw89_fw_feature feature;
243 	u32 ver_code;
244 	bool (*cond)(u32 suit_ver_code, u32 comp_ver_code);
245 };
246 
247 #define __CFG_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat) \
248 	{ \
249 		.chip_id = _chip, \
250 		.feature = RTW89_FW_FEATURE_ ## _feat, \
251 		.ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \
252 		.cond = __fw_feat_cond_ ## _cond, \
253 	}
254 
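/*
 * Firmware features are gated on the version of the NORMAL firmware suit.
 * For example, per this table an RTL8852A firmware >= 0.13.35.0 gains
 * SCAN_OFFLOAD and TX_WAKE, while one <= 0.13.29.0 is marked with
 * OLD_HT_RA_FORMAT.
 */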
255 static const struct __fw_feat_cfg fw_feat_tbl[] = {
256 	__CFG_FW_FEAT(RTL8852A, le, 0, 13, 29, 0, OLD_HT_RA_FORMAT),
257 	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD),
258 	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE),
259 	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER),
260 	__CFG_FW_FEAT(RTL8852A, lt, 0, 13, 38, 0, NO_PACKET_DROP),
261 	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, NO_LPS_PG),
262 	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, TX_WAKE),
263 	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, CRASH_TRIGGER),
264 	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, SCAN_OFFLOAD),
265 	__CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS),
266 	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE),
267 	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD),
268 	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER),
269 };
270 
271 static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev)
272 {
273 	const struct rtw89_chip_info *chip = rtwdev->chip;
274 	const struct __fw_feat_cfg *ent;
275 	const struct rtw89_fw_suit *fw_suit;
276 	u32 suit_ver_code;
277 	int i;
278 
279 	fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL);
280 	suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit);
281 
282 	for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) {
283 		ent = &fw_feat_tbl[i];
284 		if (chip->chip_id != ent->chip_id)
285 			continue;
286 
287 		if (ent->cond(suit_ver_code, ent->ver_code))
288 			RTW89_SET_FW_FEATURE(ent->feature, &rtwdev->fw);
289 	}
290 }
291 
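/*
 * Early feature recognition runs before the full asynchronous firmware load:
 * normally only the leading bytes of the file (enough for a
 * union rtw89_compat_fw_hdr) are read via request_partial_firmware_into_buf()
 * and early_feat_map is filled from the same fw_feat_tbl rules.  The struct
 * firmware pointer is returned only when a full request had to be made, so
 * the caller may reuse it instead of requesting the file a second time.
 */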
292 const struct firmware *
293 rtw89_early_fw_feature_recognize(struct device *device,
294 				 const struct rtw89_chip_info *chip,
295 				 u32 *early_feat_map)
296 {
297 	union rtw89_compat_fw_hdr buf = {};
298 	const struct firmware *firmware;
299 	bool full_req = false;
300 	u32 ver_code;
301 	int ret;
302 	int i;
303 
304 	/* If SECURITY_LOADPIN_ENFORCE is enabled, reading partial files is
305 	 * denied (-EPERM), so we cannot read just the firmware header as
306 	 * expected. In that case, we have to request the full firmware here.
307 	 */
308 	if (IS_ENABLED(CONFIG_SECURITY_LOADPIN_ENFORCE))
309 		full_req = true;
310 
311 	if (full_req)
312 		ret = request_firmware(&firmware, chip->fw_name, device);
313 	else
314 		ret = request_partial_firmware_into_buf(&firmware, chip->fw_name,
315 							device, &buf, sizeof(buf),
316 							0);
317 
318 	if (ret) {
319 		dev_err(device, "failed to early request firmware: %d\n", ret);
320 		return NULL;
321 	}
322 
323 	if (full_req)
324 		ver_code = rtw89_compat_fw_hdr_ver_code(firmware->data);
325 	else
326 		ver_code = rtw89_compat_fw_hdr_ver_code(&buf);
327 
328 	if (!ver_code)
329 		goto out;
330 
331 	for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) {
332 		const struct __fw_feat_cfg *ent = &fw_feat_tbl[i];
333 
334 		if (chip->chip_id != ent->chip_id)
335 			continue;
336 
337 		if (ent->cond(ver_code, ent->ver_code))
338 			*early_feat_map |= BIT(ent->feature);
339 	}
340 
341 out:
342 	if (full_req)
343 		return firmware;
344 
345 	release_firmware(firmware);
346 	return NULL;
347 }
348 
349 int rtw89_fw_recognize(struct rtw89_dev *rtwdev)
350 {
351 	const struct rtw89_chip_info *chip = rtwdev->chip;
352 	int ret;
353 
354 	if (chip->try_ce_fw) {
355 		ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL_CE, true);
356 		if (!ret)
357 			goto normal_done;
358 	}
359 
360 	ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL, false);
361 	if (ret)
362 		return ret;
363 
364 normal_done:
365 	/* It still works even if the wowlan firmware doesn't exist. */
366 	__rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN, false);
367 
368 	rtw89_fw_recognize_features(rtwdev);
369 
370 	rtw89_coex_recognize_ver(rtwdev);
371 
372 	return 0;
373 }
374 
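/*
 * Prepend the 8-byte fwcmd (H2C) header: hdr0 packs the delivery type,
 * category, class, function and a rolling sequence number; hdr1 carries the
 * total length (payload plus header) and the REC_ACK/DONE_ACK flags.  Every
 * fourth sequence number forces a receive ack, presumably so the driver
 * periodically gets confirmation that H2C commands are still consumed.
 */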
375 void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb,
376 			   u8 type, u8 cat, u8 class, u8 func,
377 			   bool rack, bool dack, u32 len)
378 {
379 	struct fwcmd_hdr *hdr;
380 
381 	hdr = (struct fwcmd_hdr *)skb_push(skb, 8);
382 
383 	if (!(rtwdev->fw.h2c_seq % 4))
384 		rack = true;
385 	hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
386 				FIELD_PREP(H2C_HDR_CAT, cat) |
387 				FIELD_PREP(H2C_HDR_CLASS, class) |
388 				FIELD_PREP(H2C_HDR_FUNC, func) |
389 				FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));
390 
391 	hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
392 					   len + H2C_HEADER_LEN) |
393 				(rack ? H2C_HDR_REC_ACK : 0) |
394 				(dack ? H2C_HDR_DONE_ACK : 0));
395 
396 	rtwdev->fw.h2c_seq++;
397 }
398 
399 static void rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev *rtwdev,
400 				       struct sk_buff *skb,
401 				       u8 type, u8 cat, u8 class, u8 func,
402 				       u32 len)
403 {
404 	struct fwcmd_hdr *hdr;
405 
406 	hdr = (struct fwcmd_hdr *)skb_push(skb, 8);
407 
408 	hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
409 				FIELD_PREP(H2C_HDR_CAT, cat) |
410 				FIELD_PREP(H2C_HDR_CLASS, class) |
411 				FIELD_PREP(H2C_HDR_FUNC, func) |
412 				FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));
413 
414 	hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
415 					   len + H2C_HEADER_LEN));
416 }
417 
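/*
 * Send the firmware header (including its section headers) as a single FWDL
 * H2C.  SET_FW_HDR_PART_SIZE patches the copied header with
 * FWDL_SECTION_PER_PKT_LEN, which appears to tell the firmware the chunk
 * size used for the section payloads that follow.
 */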
418 static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len)
419 {
420 	struct sk_buff *skb;
421 	int ret;
422 
423 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
424 	if (!skb) {
425 		rtw89_err(rtwdev, "failed to alloc skb for fw hdr dl\n");
426 		return -ENOMEM;
427 	}
428 
429 	skb_put_data(skb, fw, len);
430 	SET_FW_HDR_PART_SIZE(skb->data, FWDL_SECTION_PER_PKT_LEN);
431 	rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C,
432 				   H2C_CAT_MAC, H2C_CL_MAC_FWDL,
433 				   H2C_FUNC_MAC_FWHDR_DL, len);
434 
435 	ret = rtw89_h2c_tx(rtwdev, skb, false);
436 	if (ret) {
437 		rtw89_err(rtwdev, "failed to send h2c\n");
438 		ret = -1;
439 		goto fail;
440 	}
441 
442 	return 0;
443 fail:
444 	dev_kfree_skb_any(skb);
445 
446 	return ret;
447 }
448 
449 static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len)
450 {
451 	u8 val;
452 	int ret;
453 
454 	ret = __rtw89_fw_download_hdr(rtwdev, fw, len);
455 	if (ret) {
456 		rtw89_err(rtwdev, "[ERR]FW header download\n");
457 		return ret;
458 	}
459 
460 	ret = read_poll_timeout_atomic(rtw89_read8, val, val & B_AX_FWDL_PATH_RDY,
461 				       1, FWDL_WAIT_CNT, false,
462 				       rtwdev, R_AX_WCPU_FW_CTRL);
463 	if (ret) {
464 		rtw89_err(rtwdev, "[ERR]FWDL path ready\n");
465 		return ret;
466 	}
467 
468 	rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0);
469 	rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0);
470 
471 	return 0;
472 }
473 
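/*
 * Stream one firmware section to the WCPU, splitting it into raw
 * (headerless) H2C packets of at most FWDL_SECTION_PER_PKT_LEN bytes.  Note
 * that rtw89_h2c_tx() is called with true as its final argument here, unlike
 * the header download above, which presumably marks this as
 * firmware-download traffic.
 */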
474 static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev,
475 				    struct rtw89_fw_hdr_section_info *info)
476 {
477 	struct sk_buff *skb;
478 	const u8 *section = info->addr;
479 	u32 residue_len = info->len;
480 	u32 pkt_len;
481 	int ret;
482 
483 	while (residue_len) {
484 		if (residue_len >= FWDL_SECTION_PER_PKT_LEN)
485 			pkt_len = FWDL_SECTION_PER_PKT_LEN;
486 		else
487 			pkt_len = residue_len;
488 
489 		skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, pkt_len);
490 		if (!skb) {
491 			rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
492 			return -ENOMEM;
493 		}
494 		skb_put_data(skb, section, pkt_len);
495 
496 		ret = rtw89_h2c_tx(rtwdev, skb, true);
497 		if (ret) {
498 			rtw89_err(rtwdev, "failed to send h2c\n");
499 			ret = -1;
500 			goto fail;
501 		}
502 
503 		section += pkt_len;
504 		residue_len -= pkt_len;
505 	}
506 
507 	return 0;
508 fail:
509 	dev_kfree_skb_any(skb);
510 
511 	return ret;
512 }
513 
514 static int rtw89_fw_download_main(struct rtw89_dev *rtwdev, const u8 *fw,
515 				  struct rtw89_fw_bin_info *info)
516 {
517 	struct rtw89_fw_hdr_section_info *section_info = info->section_info;
518 	u8 section_num = info->section_num;
519 	int ret;
520 
521 	while (section_num--) {
522 		ret = __rtw89_fw_download_main(rtwdev, section_info);
523 		if (ret)
524 			return ret;
525 		section_info++;
526 	}
527 
528 	mdelay(5);
529 
530 	ret = rtw89_fw_check_rdy(rtwdev);
531 	if (ret) {
532 		rtw89_warn(rtwdev, "download firmware fail\n");
533 		return ret;
534 	}
535 
536 	return 0;
537 }
538 
539 static void rtw89_fw_prog_cnt_dump(struct rtw89_dev *rtwdev)
540 {
541 	u32 val32;
542 	u16 index;
543 
544 	rtw89_write32(rtwdev, R_AX_DBG_CTRL,
545 		      FIELD_PREP(B_AX_DBG_SEL0, FW_PROG_CNTR_DBG_SEL) |
546 		      FIELD_PREP(B_AX_DBG_SEL1, FW_PROG_CNTR_DBG_SEL));
547 	rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_SEL_0XC0_MASK, MAC_DBG_SEL);
548 
549 	for (index = 0; index < 15; index++) {
550 		val32 = rtw89_read32(rtwdev, R_AX_DBG_PORT_SEL);
551 		rtw89_err(rtwdev, "[ERR]fw PC = 0x%x\n", val32);
552 		fsleep(10);
553 	}
554 }
555 
556 static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev)
557 {
558 	u32 val32;
559 	u16 val16;
560 
561 	val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL);
562 	rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32);
563 
564 	val16 = rtw89_read16(rtwdev, R_AX_BOOT_DBG + 2);
565 	rtw89_err(rtwdev, "[ERR]fwdl 0x83F2 = 0x%x\n", val16);
566 
567 	rtw89_fw_prog_cnt_dump(rtwdev);
568 }
569 
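/*
 * Overall download sequence: disable the WCPU, re-enable it in
 * firmware-download mode, parse the image layout, wait for the H2C path to
 * become ready, push the static part of the header (dynamic header
 * excluded), then push every section and poll for the firmware-ready status.
 * Any failure dumps the FWDL status registers and samples of the firmware
 * program counter for debugging.
 */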
570 int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type)
571 {
572 	struct rtw89_fw_info *fw_info = &rtwdev->fw;
573 	struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
574 	struct rtw89_fw_bin_info info;
575 	const u8 *fw = fw_suit->data;
576 	u32 len = fw_suit->size;
577 	u8 val;
578 	int ret;
579 
580 	rtw89_mac_disable_cpu(rtwdev);
581 	ret = rtw89_mac_enable_cpu(rtwdev, 0, true);
582 	if (ret)
583 		return ret;
584 
585 	if (!fw || !len) {
586 		rtw89_err(rtwdev, "fw type %d isn't recognized\n", type);
587 		return -ENOENT;
588 	}
589 
590 	ret = rtw89_fw_hdr_parser(rtwdev, fw, len, &info);
591 	if (ret) {
592 		rtw89_err(rtwdev, "parse fw header fail\n");
593 		goto fwdl_err;
594 	}
595 
596 	ret = read_poll_timeout_atomic(rtw89_read8, val, val & B_AX_H2C_PATH_RDY,
597 				       1, FWDL_WAIT_CNT, false,
598 				       rtwdev, R_AX_WCPU_FW_CTRL);
599 	if (ret) {
600 		rtw89_err(rtwdev, "[ERR]H2C path ready\n");
601 		goto fwdl_err;
602 	}
603 
604 	ret = rtw89_fw_download_hdr(rtwdev, fw, info.hdr_len - info.dynamic_hdr_len);
605 	if (ret) {
606 		ret = -EBUSY;
607 		goto fwdl_err;
608 	}
609 
610 	ret = rtw89_fw_download_main(rtwdev, fw, &info);
611 	if (ret) {
612 		ret = -EBUSY;
613 		goto fwdl_err;
614 	}
615 
616 	fw_info->h2c_seq = 0;
617 	fw_info->rec_seq = 0;
618 	rtwdev->mac.rpwm_seq_num = RPWM_SEQ_NUM_MAX;
619 	rtwdev->mac.cpwm_seq_num = CPWM_SEQ_NUM_MAX;
620 
621 	return ret;
622 
623 fwdl_err:
624 	rtw89_fw_dl_fail_dump(rtwdev);
625 	return ret;
626 }
627 
628 int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev)
629 {
630 	struct rtw89_fw_info *fw = &rtwdev->fw;
631 
632 	wait_for_completion(&fw->completion);
633 	if (!fw->firmware)
634 		return -EINVAL;
635 
636 	return 0;
637 }
638 
639 static void rtw89_load_firmware_cb(const struct firmware *firmware, void *context)
640 {
641 	struct rtw89_fw_info *fw = context;
642 	struct rtw89_dev *rtwdev = fw->rtwdev;
643 
644 	if (!firmware || !firmware->data) {
645 		rtw89_err(rtwdev, "failed to request firmware\n");
646 		complete_all(&fw->completion);
647 		return;
648 	}
649 
650 	fw->firmware = firmware;
651 	complete_all(&fw->completion);
652 }
653 
654 int rtw89_load_firmware(struct rtw89_dev *rtwdev)
655 {
656 	struct rtw89_fw_info *fw = &rtwdev->fw;
657 	const char *fw_name = rtwdev->chip->fw_name;
658 	int ret;
659 
660 	fw->rtwdev = rtwdev;
661 	init_completion(&fw->completion);
662 
663 	if (fw->firmware) {
664 		rtw89_debug(rtwdev, RTW89_DBG_FW,
665 			    "full firmware has been early requested\n");
666 		complete_all(&fw->completion);
667 		return 0;
668 	}
669 
670 	ret = request_firmware_nowait(THIS_MODULE, true, fw_name, rtwdev->dev,
671 				      GFP_KERNEL, fw, rtw89_load_firmware_cb);
672 	if (ret) {
673 		rtw89_err(rtwdev, "failed to async firmware request\n");
674 		return ret;
675 	}
676 
677 	return 0;
678 }
679 
680 void rtw89_unload_firmware(struct rtw89_dev *rtwdev)
681 {
682 	struct rtw89_fw_info *fw = &rtwdev->fw;
683 
684 	rtw89_wait_firmware_completion(rtwdev);
685 
686 	if (fw->firmware) {
687 		release_firmware(fw->firmware);
688 
689 		/* assign NULL back in case rtw89_free_ieee80211_hw()
690 		 * tries to release the same one again.
691 		 */
692 		fw->firmware = NULL;
693 	}
694 }
695 
696 #define H2C_CAM_LEN 60
697 int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
698 		     struct rtw89_sta *rtwsta, const u8 *scan_mac_addr)
699 {
700 	struct sk_buff *skb;
701 	int ret;
702 
703 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CAM_LEN);
704 	if (!skb) {
705 		rtw89_err(rtwdev, "failed to alloc skb for h2c cam\n");
706 		return -ENOMEM;
707 	}
708 	skb_put(skb, H2C_CAM_LEN);
709 	rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif, rtwsta, scan_mac_addr, skb->data);
710 	rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif, rtwsta, skb->data);
711 
712 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
713 			      H2C_CAT_MAC,
714 			      H2C_CL_MAC_ADDR_CAM_UPDATE,
715 			      H2C_FUNC_MAC_ADDR_CAM_UPD, 0, 1,
716 			      H2C_CAM_LEN);
717 
718 	ret = rtw89_h2c_tx(rtwdev, skb, false);
719 	if (ret) {
720 		rtw89_err(rtwdev, "failed to send h2c\n");
721 		goto fail;
722 	}
723 
724 	return 0;
725 fail:
726 	dev_kfree_skb_any(skb);
727 
728 	return ret;
729 }
730 
731 #define H2C_DCTL_SEC_CAM_LEN 68
732 int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev,
733 				 struct rtw89_vif *rtwvif,
734 				 struct rtw89_sta *rtwsta)
735 {
736 	struct sk_buff *skb;
737 	int ret;
738 
739 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DCTL_SEC_CAM_LEN);
740 	if (!skb) {
741 		rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n");
742 		return -ENOMEM;
743 	}
744 	skb_put(skb, H2C_DCTL_SEC_CAM_LEN);
745 
746 	rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif, rtwsta, skb->data);
747 
748 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
749 			      H2C_CAT_MAC,
750 			      H2C_CL_MAC_FR_EXCHG,
751 			      H2C_FUNC_MAC_DCTLINFO_UD_V1, 0, 0,
752 			      H2C_DCTL_SEC_CAM_LEN);
753 
754 	ret = rtw89_h2c_tx(rtwdev, skb, false);
755 	if (ret) {
756 		rtw89_err(rtwdev, "failed to send h2c\n");
757 		goto fail;
758 	}
759 
760 	return 0;
761 fail:
762 	dev_kfree_skb_any(skb);
763 
764 	return ret;
765 }
766 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1);
767 
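/*
 * BA CAM H2C: first try to claim (or release) a static BA CAM entry for the
 * TID; if none is available the command is skipped, since the hardware can
 * still create a dynamic BA CAM entry by itself.  For a valid entry, a
 * bitmap size code of 4 is programmed when the negotiated buffer size
 * exceeds 64 frames, and INIT_REQ lets the hardware seed the SSN.
 */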
768 #define H2C_BA_CAM_LEN 8
769 int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
770 			bool valid, struct ieee80211_ampdu_params *params)
771 {
772 	const struct rtw89_chip_info *chip = rtwdev->chip;
773 	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
774 	u8 macid = rtwsta->mac_id;
775 	struct sk_buff *skb;
776 	u8 entry_idx;
777 	int ret;
778 
779 	ret = valid ?
780 	      rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx) :
781 	      rtw89_core_release_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx);
782 	if (ret) {
783 		/* it still works even if we don't have a static BA CAM entry, because
784 		 * the hardware can create a dynamic BA CAM entry automatically.
785 		 */
786 		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
787 			    "failed to %s entry tid=%d for h2c ba cam\n",
788 			    valid ? "alloc" : "free", params->tid);
789 		return 0;
790 	}
791 
792 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN);
793 	if (!skb) {
794 		rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n");
795 		return -ENOMEM;
796 	}
797 	skb_put(skb, H2C_BA_CAM_LEN);
798 	SET_BA_CAM_MACID(skb->data, macid);
799 	if (chip->bacam_v1)
800 		SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx);
801 	else
802 		SET_BA_CAM_ENTRY_IDX(skb->data, entry_idx);
803 	if (!valid)
804 		goto end;
805 	SET_BA_CAM_VALID(skb->data, valid);
806 	SET_BA_CAM_TID(skb->data, params->tid);
807 	if (params->buf_size > 64)
808 		SET_BA_CAM_BMAP_SIZE(skb->data, 4);
809 	else
810 		SET_BA_CAM_BMAP_SIZE(skb->data, 0);
811 	/* If init req is set, hw will set the ssn */
812 	SET_BA_CAM_INIT_REQ(skb->data, 1);
813 	SET_BA_CAM_SSN(skb->data, params->ssn);
814 
815 	if (chip->bacam_v1) {
816 		SET_BA_CAM_STD_EN(skb->data, 1);
817 		SET_BA_CAM_BAND(skb->data, rtwvif->mac_idx);
818 	}
819 
820 end:
821 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
822 			      H2C_CAT_MAC,
823 			      H2C_CL_BA_CAM,
824 			      H2C_FUNC_MAC_BA_CAM, 0, 1,
825 			      H2C_BA_CAM_LEN);
826 
827 	ret = rtw89_h2c_tx(rtwdev, skb, false);
828 	if (ret) {
829 		rtw89_err(rtwdev, "failed to send h2c\n");
830 		goto fail;
831 	}
832 
833 	return 0;
834 fail:
835 	dev_kfree_skb_any(skb);
836 
837 	return ret;
838 }
839 
840 static int rtw89_fw_h2c_init_dynamic_ba_cam_v1(struct rtw89_dev *rtwdev,
841 					       u8 entry_idx, u8 uid)
842 {
843 	struct sk_buff *skb;
844 	int ret;
845 
846 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN);
847 	if (!skb) {
848 		rtw89_err(rtwdev, "failed to alloc skb for dynamic h2c ba cam\n");
849 		return -ENOMEM;
850 	}
851 	skb_put(skb, H2C_BA_CAM_LEN);
852 
853 	SET_BA_CAM_VALID(skb->data, 1);
854 	SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx);
855 	SET_BA_CAM_UID(skb->data, uid);
856 	SET_BA_CAM_BAND(skb->data, 0);
857 	SET_BA_CAM_STD_EN(skb->data, 0);
858 
859 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
860 			      H2C_CAT_MAC,
861 			      H2C_CL_BA_CAM,
862 			      H2C_FUNC_MAC_BA_CAM, 0, 1,
863 			      H2C_BA_CAM_LEN);
864 
865 	ret = rtw89_h2c_tx(rtwdev, skb, false);
866 	if (ret) {
867 		rtw89_err(rtwdev, "failed to send h2c\n");
868 		goto fail;
869 	}
870 
871 	return 0;
872 fail:
873 	dev_kfree_skb_any(skb);
874 
875 	return ret;
876 }
877 
878 void rtw89_fw_h2c_init_ba_cam_v1(struct rtw89_dev *rtwdev)
879 {
880 	const struct rtw89_chip_info *chip = rtwdev->chip;
881 	u8 entry_idx = chip->bacam_num;
882 	u8 uid = 0;
883 	int i;
884 
885 	for (i = 0; i < chip->bacam_dynamic_num; i++) {
886 		rtw89_fw_h2c_init_dynamic_ba_cam_v1(rtwdev, entry_idx, uid);
887 		entry_idx++;
888 		uid++;
889 	}
890 }
891 
892 #define H2C_LOG_CFG_LEN 12
893 int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable)
894 {
895 	struct sk_buff *skb;
896 	u32 comp = enable ? BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) |
897 			    BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) : 0;
898 	int ret;
899 
900 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN);
901 	if (!skb) {
902 		rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n");
903 		return -ENOMEM;
904 	}
905 
906 	skb_put(skb, H2C_LOG_CFG_LEN);
907 	SET_LOG_CFG_LEVEL(skb->data, RTW89_FW_LOG_LEVEL_SER);
908 	SET_LOG_CFG_PATH(skb->data, BIT(RTW89_FW_LOG_LEVEL_C2H));
909 	SET_LOG_CFG_COMP(skb->data, comp);
910 	SET_LOG_CFG_COMP_EXT(skb->data, 0);
911 
912 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
913 			      H2C_CAT_MAC,
914 			      H2C_CL_FW_INFO,
915 			      H2C_FUNC_LOG_CFG, 0, 0,
916 			      H2C_LOG_CFG_LEN);
917 
918 	ret = rtw89_h2c_tx(rtwdev, skb, false);
919 	if (ret) {
920 		rtw89_err(rtwdev, "failed to send h2c\n");
921 		goto fail;
922 	}
923 
924 	return 0;
925 fail:
926 	dev_kfree_skb_any(skb);
927 
928 	return ret;
929 }
930 
931 static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev,
932 					struct rtw89_vif *rtwvif,
933 					enum rtw89_fw_pkt_ofld_type type,
934 					u8 *id)
935 {
936 	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
937 	struct rtw89_pktofld_info *info;
938 	struct sk_buff *skb;
939 	int ret;
940 
941 	info = kzalloc(sizeof(*info), GFP_KERNEL);
942 	if (!info)
943 		return -ENOMEM;
944 
945 	switch (type) {
946 	case RTW89_PKT_OFLD_TYPE_PS_POLL:
947 		skb = ieee80211_pspoll_get(rtwdev->hw, vif);
948 		break;
949 	case RTW89_PKT_OFLD_TYPE_PROBE_RSP:
950 		skb = ieee80211_proberesp_get(rtwdev->hw, vif);
951 		break;
952 	case RTW89_PKT_OFLD_TYPE_NULL_DATA:
953 		skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, false);
954 		break;
955 	case RTW89_PKT_OFLD_TYPE_QOS_NULL:
956 		skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, true);
957 		break;
958 	default:
959 		goto err;
960 	}
961 
962 	if (!skb)
963 		goto err;
964 
965 	ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb);
966 	kfree_skb(skb);
967 
968 	if (ret)
969 		goto err;
970 
971 	list_add_tail(&info->list, &rtwvif->general_pkt_list);
972 	*id = info->id;
973 	return 0;
974 
975 err:
976 	kfree(info);
977 	return -ENOMEM;
978 }
979 
980 void rtw89_fw_release_general_pkt_list_vif(struct rtw89_dev *rtwdev,
981 					   struct rtw89_vif *rtwvif, bool notify_fw)
982 {
983 	struct list_head *pkt_list = &rtwvif->general_pkt_list;
984 	struct rtw89_pktofld_info *info, *tmp;
985 
986 	list_for_each_entry_safe(info, tmp, pkt_list, list) {
987 		if (notify_fw)
988 			rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
989 		rtw89_core_release_bit_map(rtwdev->pkt_offload,
990 					   info->id);
991 		list_del(&info->list);
992 		kfree(info);
993 	}
994 }
995 
996 void rtw89_fw_release_general_pkt_list(struct rtw89_dev *rtwdev, bool notify_fw)
997 {
998 	struct rtw89_vif *rtwvif;
999 
1000 	rtw89_for_each_rtwvif(rtwdev, rtwvif)
1001 		rtw89_fw_release_general_pkt_list_vif(rtwdev, rtwvif, notify_fw);
1002 }
1003 
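/*
 * Register the power-save frame templates built by mac80211 (PS-Poll, Null,
 * QoS Null) as firmware packet offloads, then pass their offload IDs to the
 * firmware through the GENERAL_PKT H2C.  The probe-response and CTS-to-self
 * slots are left at H2C_GENERAL_PKT_ID_UND (0xff), i.e. unused here.
 */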
1004 #define H2C_GENERAL_PKT_LEN 6
1005 #define H2C_GENERAL_PKT_ID_UND 0xff
1006 int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev,
1007 			     struct rtw89_vif *rtwvif, u8 macid)
1008 {
1009 	u8 pkt_id_ps_poll = H2C_GENERAL_PKT_ID_UND;
1010 	u8 pkt_id_null = H2C_GENERAL_PKT_ID_UND;
1011 	u8 pkt_id_qos_null = H2C_GENERAL_PKT_ID_UND;
1012 	struct sk_buff *skb;
1013 	int ret;
1014 
1015 	rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
1016 				     RTW89_PKT_OFLD_TYPE_PS_POLL, &pkt_id_ps_poll);
1017 	rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
1018 				     RTW89_PKT_OFLD_TYPE_NULL_DATA, &pkt_id_null);
1019 	rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
1020 				     RTW89_PKT_OFLD_TYPE_QOS_NULL, &pkt_id_qos_null);
1021 
1022 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_GENERAL_PKT_LEN);
1023 	if (!skb) {
1024 		rtw89_err(rtwdev, "failed to alloc skb for h2c general packet\n");
1025 		return -ENOMEM;
1026 	}
1027 	skb_put(skb, H2C_GENERAL_PKT_LEN);
1028 	SET_GENERAL_PKT_MACID(skb->data, macid);
1029 	SET_GENERAL_PKT_PROBRSP_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
1030 	SET_GENERAL_PKT_PSPOLL_ID(skb->data, pkt_id_ps_poll);
1031 	SET_GENERAL_PKT_NULL_ID(skb->data, pkt_id_null);
1032 	SET_GENERAL_PKT_QOS_NULL_ID(skb->data, pkt_id_qos_null);
1033 	SET_GENERAL_PKT_CTS2SELF_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
1034 
1035 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1036 			      H2C_CAT_MAC,
1037 			      H2C_CL_FW_INFO,
1038 			      H2C_FUNC_MAC_GENERAL_PKT, 0, 1,
1039 			      H2C_GENERAL_PKT_LEN);
1040 
1041 	ret = rtw89_h2c_tx(rtwdev, skb, false);
1042 	if (ret) {
1043 		rtw89_err(rtwdev, "failed to send h2c\n");
1044 		goto fail;
1045 	}
1046 
1047 	return 0;
1048 fail:
1049 	dev_kfree_skb_any(skb);
1050 
1051 	return ret;
1052 }
1053 
1054 #define H2C_LPS_PARM_LEN 8
1055 int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
1056 			  struct rtw89_lps_parm *lps_param)
1057 {
1058 	struct sk_buff *skb;
1059 	int ret;
1060 
1061 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LPS_PARM_LEN);
1062 	if (!skb) {
1063 		rtw89_err(rtwdev, "failed to alloc skb for h2c lps parm\n");
1064 		return -ENOMEM;
1065 	}
1066 	skb_put(skb, H2C_LPS_PARM_LEN);
1067 
1068 	SET_LPS_PARM_MACID(skb->data, lps_param->macid);
1069 	SET_LPS_PARM_PSMODE(skb->data, lps_param->psmode);
1070 	SET_LPS_PARM_LASTRPWM(skb->data, lps_param->lastrpwm);
1071 	SET_LPS_PARM_RLBM(skb->data, 1);
1072 	SET_LPS_PARM_SMARTPS(skb->data, 1);
1073 	SET_LPS_PARM_AWAKEINTERVAL(skb->data, 1);
1074 	SET_LPS_PARM_VOUAPSD(skb->data, 0);
1075 	SET_LPS_PARM_VIUAPSD(skb->data, 0);
1076 	SET_LPS_PARM_BEUAPSD(skb->data, 0);
1077 	SET_LPS_PARM_BKUAPSD(skb->data, 0);
1078 
1079 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1080 			      H2C_CAT_MAC,
1081 			      H2C_CL_MAC_PS,
1082 			      H2C_FUNC_MAC_LPS_PARM, 0, 1,
1083 			      H2C_LPS_PARM_LEN);
1084 
1085 	ret = rtw89_h2c_tx(rtwdev, skb, false);
1086 	if (ret) {
1087 		rtw89_err(rtwdev, "failed to send h2c\n");
1088 		goto fail;
1089 	}
1090 
1091 	return 0;
1092 fail:
1093 	dev_kfree_skb_any(skb);
1094 
1095 	return ret;
1096 }
1097 
1098 #define H2C_P2P_ACT_LEN 20
1099 int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
1100 			 struct ieee80211_p2p_noa_desc *desc,
1101 			 u8 act, u8 noa_id)
1102 {
1103 	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
1104 	bool p2p_type_gc = rtwvif->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT;
1105 	u8 ctwindow_oppps = vif->bss_conf.p2p_noa_attr.oppps_ctwindow;
1106 	struct sk_buff *skb;
1107 	u8 *cmd;
1108 	int ret;
1109 
1110 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_P2P_ACT_LEN);
1111 	if (!skb) {
1112 		rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n");
1113 		return -ENOMEM;
1114 	}
1115 	skb_put(skb, H2C_P2P_ACT_LEN);
1116 	cmd = skb->data;
1117 
1118 	RTW89_SET_FWCMD_P2P_MACID(cmd, rtwvif->mac_id);
1119 	RTW89_SET_FWCMD_P2P_P2PID(cmd, 0);
1120 	RTW89_SET_FWCMD_P2P_NOAID(cmd, noa_id);
1121 	RTW89_SET_FWCMD_P2P_ACT(cmd, act);
1122 	RTW89_SET_FWCMD_P2P_TYPE(cmd, p2p_type_gc);
1123 	RTW89_SET_FWCMD_P2P_ALL_SLEP(cmd, 0);
1124 	if (desc) {
1125 		RTW89_SET_FWCMD_NOA_START_TIME(cmd, desc->start_time);
1126 		RTW89_SET_FWCMD_NOA_INTERVAL(cmd, desc->interval);
1127 		RTW89_SET_FWCMD_NOA_DURATION(cmd, desc->duration);
1128 		RTW89_SET_FWCMD_NOA_COUNT(cmd, desc->count);
1129 		RTW89_SET_FWCMD_NOA_CTWINDOW(cmd, ctwindow_oppps);
1130 	}
1131 
1132 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1133 			      H2C_CAT_MAC, H2C_CL_MAC_PS,
1134 			      H2C_FUNC_P2P_ACT, 0, 0,
1135 			      H2C_P2P_ACT_LEN);
1136 
1137 	ret = rtw89_h2c_tx(rtwdev, skb, false);
1138 	if (ret) {
1139 		rtw89_err(rtwdev, "failed to send h2c\n");
1140 		goto fail;
1141 	}
1142 
1143 	return 0;
1144 fail:
1145 	dev_kfree_skb_any(skb);
1146 
1147 	return ret;
1148 }
1149 
1150 static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev,
1151 				       struct sk_buff *skb)
1152 {
1153 	struct rtw89_hal *hal = &rtwdev->hal;
1154 	u8 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B;
1155 	u8 map_b = hal->antenna_tx == RF_AB ? 1 : 0;
1156 
1157 	SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path);
1158 	SET_CMC_TBL_PATH_MAP_A(skb->data, 0);
1159 	SET_CMC_TBL_PATH_MAP_B(skb->data, map_b);
1160 	SET_CMC_TBL_PATH_MAP_C(skb->data, 0);
1161 	SET_CMC_TBL_PATH_MAP_D(skb->data, 0);
1162 }
1163 
1164 #define H2C_CMC_TBL_LEN 68
1165 int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
1166 				  struct rtw89_vif *rtwvif)
1167 {
1168 	const struct rtw89_chip_info *chip = rtwdev->chip;
1169 	struct sk_buff *skb;
1170 	u8 macid = rtwvif->mac_id;
1171 	int ret;
1172 
1173 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
1174 	if (!skb) {
1175 		rtw89_err(rtwdev, "failed to alloc skb for h2c cmac tbl\n");
1176 		return -ENOMEM;
1177 	}
1178 	skb_put(skb, H2C_CMC_TBL_LEN);
1179 	SET_CTRL_INFO_MACID(skb->data, macid);
1180 	SET_CTRL_INFO_OPERATION(skb->data, 1);
1181 	if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) {
1182 		SET_CMC_TBL_TXPWR_MODE(skb->data, 0);
1183 		__rtw89_fw_h2c_set_tx_path(rtwdev, skb);
1184 		SET_CMC_TBL_ANTSEL_A(skb->data, 0);
1185 		SET_CMC_TBL_ANTSEL_B(skb->data, 0);
1186 		SET_CMC_TBL_ANTSEL_C(skb->data, 0);
1187 		SET_CMC_TBL_ANTSEL_D(skb->data, 0);
1188 	}
1189 	SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0);
1190 	SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0);
1191 	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
1192 		SET_CMC_TBL_DATA_DCM(skb->data, 0);
1193 
1194 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1195 			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
1196 			      chip->h2c_cctl_func_id, 0, 1,
1197 			      H2C_CMC_TBL_LEN);
1198 
1199 	ret = rtw89_h2c_tx(rtwdev, skb, false);
1200 	if (ret) {
1201 		rtw89_err(rtwdev, "failed to send h2c\n");
1202 		goto fail;
1203 	}
1204 
1205 	return 0;
1206 fail:
1207 	dev_kfree_skb_any(skb);
1208 
1209 	return ret;
1210 }
1211 
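/*
 * Derive the per-bandwidth nominal packet padding from the peer's HE
 * capabilities.  Without a PPE Thresholds field, the nominal padding from
 * PHY capability byte 9 applies to every bandwidth.  With PPE thresholds,
 * the PPET16/PPET8 pair for the highest usable NSS is extracted per RU
 * allocation index and mapped to pads[] codes 0/1/2 (editor's note: assumed
 * to correspond to 0/8/16 us padding as consumed by the CMAC table fields).
 */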
1212 static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev,
1213 				     struct ieee80211_sta *sta, u8 *pads)
1214 {
1215 	bool ppe_th;
1216 	u8 ppe16, ppe8;
1217 	u8 nss = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1;
1218 	u8 ppe_thres_hdr = sta->deflink.he_cap.ppe_thres[0];
1219 	u8 ru_bitmap;
1220 	u8 n, idx, sh;
1221 	u16 ppe;
1222 	int i;
1223 
1224 	if (!sta->deflink.he_cap.has_he)
1225 		return;
1226 
1227 	ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
1228 			   sta->deflink.he_cap.he_cap_elem.phy_cap_info[6]);
1229 	if (!ppe_th) {
1230 		u8 pad;
1231 
1232 		pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK,
1233 				sta->deflink.he_cap.he_cap_elem.phy_cap_info[9]);
1234 
1235 		for (i = 0; i < RTW89_PPE_BW_NUM; i++)
1236 			pads[i] = pad;
1237 
1238 		return;
1239 	}
1240 
1241 	ru_bitmap = FIELD_GET(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, ppe_thres_hdr);
1242 	n = hweight8(ru_bitmap);
1243 	n = 7 + (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) * nss;
1244 
1245 	for (i = 0; i < RTW89_PPE_BW_NUM; i++) {
1246 		if (!(ru_bitmap & BIT(i))) {
1247 			pads[i] = 1;
1248 			continue;
1249 		}
1250 
1251 		idx = n >> 3;
1252 		sh = n & 7;
1253 		n += IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2;
1254 
1255 		ppe = le16_to_cpu(*((__le16 *)&sta->deflink.he_cap.ppe_thres[idx]));
1256 		ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
1257 		sh += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
1258 		ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
1259 
1260 		if (ppe16 != 7 && ppe8 == 7)
1261 			pads[i] = 2;
1262 		else if (ppe8 != 7)
1263 			pads[i] = 1;
1264 		else
1265 			pads[i] = 0;
1266 	}
1267 }
1268 
1269 int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
1270 				struct ieee80211_vif *vif,
1271 				struct ieee80211_sta *sta)
1272 {
1273 	const struct rtw89_chip_info *chip = rtwdev->chip;
1274 	struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
1275 	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
1276 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
1277 	struct sk_buff *skb;
1278 	u8 pads[RTW89_PPE_BW_NUM];
1279 	u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
1280 	u16 lowest_rate;
1281 	int ret;
1282 
1283 	memset(pads, 0, sizeof(pads));
1284 	if (sta)
1285 		__get_sta_he_pkt_padding(rtwdev, sta, pads);
1286 
1287 	if (vif->p2p)
1288 		lowest_rate = RTW89_HW_RATE_OFDM6;
1289 	else if (chan->band_type == RTW89_BAND_2G)
1290 		lowest_rate = RTW89_HW_RATE_CCK1;
1291 	else
1292 		lowest_rate = RTW89_HW_RATE_OFDM6;
1293 
1294 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
1295 	if (!skb) {
1296 		rtw89_err(rtwdev, "failed to alloc skb for h2c cmac tbl\n");
1297 		return -ENOMEM;
1298 	}
1299 	skb_put(skb, H2C_CMC_TBL_LEN);
1300 	SET_CTRL_INFO_MACID(skb->data, mac_id);
1301 	SET_CTRL_INFO_OPERATION(skb->data, 1);
1302 	SET_CMC_TBL_DISRTSFB(skb->data, 1);
1303 	SET_CMC_TBL_DISDATAFB(skb->data, 1);
1304 	SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, lowest_rate);
1305 	SET_CMC_TBL_RTS_TXCNT_LMT_SEL(skb->data, 0);
1306 	SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 0);
1307 	if (vif->type == NL80211_IFTYPE_STATION)
1308 		SET_CMC_TBL_ULDL(skb->data, 1);
1309 	else
1310 		SET_CMC_TBL_ULDL(skb->data, 0);
1311 	SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif->port);
1312 	if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD_V1) {
1313 		SET_CMC_TBL_NOMINAL_PKT_PADDING_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
1314 		SET_CMC_TBL_NOMINAL_PKT_PADDING40_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
1315 		SET_CMC_TBL_NOMINAL_PKT_PADDING80_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
1316 		SET_CMC_TBL_NOMINAL_PKT_PADDING160_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_160]);
1317 	} else if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) {
1318 		SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
1319 		SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
1320 		SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
1321 		SET_CMC_TBL_NOMINAL_PKT_PADDING160(skb->data, pads[RTW89_CHANNEL_WIDTH_160]);
1322 	}
1323 	if (sta)
1324 		SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data,
1325 						  sta->deflink.he_cap.has_he);
1326 	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
1327 		SET_CMC_TBL_DATA_DCM(skb->data, 0);
1328 
1329 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1330 			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
1331 			      chip->h2c_cctl_func_id, 0, 1,
1332 			      H2C_CMC_TBL_LEN);
1333 
1334 	ret = rtw89_h2c_tx(rtwdev, skb, false);
1335 	if (ret) {
1336 		rtw89_err(rtwdev, "failed to send h2c\n");
1337 		goto fail;
1338 	}
1339 
1340 	return 0;
1341 fail:
1342 	dev_kfree_skb_any(skb);
1343 
1344 	return ret;
1345 }
1346 
1347 int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
1348 				 struct rtw89_sta *rtwsta)
1349 {
1350 	const struct rtw89_chip_info *chip = rtwdev->chip;
1351 	struct sk_buff *skb;
1352 	int ret;
1353 
1354 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
1355 	if (!skb) {
1356 		rtw89_err(rtwdev, "failed to alloc skb for h2c cmac tbl\n");
1357 		return -ENOMEM;
1358 	}
1359 	skb_put(skb, H2C_CMC_TBL_LEN);
1360 	SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id);
1361 	SET_CTRL_INFO_OPERATION(skb->data, 1);
1362 	if (rtwsta->cctl_tx_time) {
1363 		SET_CMC_TBL_AMPDU_TIME_SEL(skb->data, 1);
1364 		SET_CMC_TBL_AMPDU_MAX_TIME(skb->data, rtwsta->ampdu_max_time);
1365 	}
1366 	if (rtwsta->cctl_tx_retry_limit) {
1367 		SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 1);
1368 		SET_CMC_TBL_DATA_TX_CNT_LMT(skb->data, rtwsta->data_tx_cnt_lmt);
1369 	}
1370 
1371 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1372 			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
1373 			      chip->h2c_cctl_func_id, 0, 1,
1374 			      H2C_CMC_TBL_LEN);
1375 
1376 	ret = rtw89_h2c_tx(rtwdev, skb, false);
1377 	if (ret) {
1378 		rtw89_err(rtwdev, "failed to send h2c\n");
1379 		goto fail;
1380 	}
1381 
1382 	return 0;
1383 fail:
1384 	dev_kfree_skb_any(skb);
1385 
1386 	return ret;
1387 }
1388 
1389 int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev,
1390 				 struct rtw89_sta *rtwsta)
1391 {
1392 	const struct rtw89_chip_info *chip = rtwdev->chip;
1393 	struct sk_buff *skb;
1394 	int ret;
1395 
1396 	if (chip->h2c_cctl_func_id != H2C_FUNC_MAC_CCTLINFO_UD)
1397 		return 0;
1398 
1399 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
1400 	if (!skb) {
1401 		rtw89_err(rtwdev, "failed to alloc skb for h2c cmac tbl\n");
1402 		return -ENOMEM;
1403 	}
1404 	skb_put(skb, H2C_CMC_TBL_LEN);
1405 	SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id);
1406 	SET_CTRL_INFO_OPERATION(skb->data, 1);
1407 
1408 	__rtw89_fw_h2c_set_tx_path(rtwdev, skb);
1409 
1410 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1411 			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
1412 			      H2C_FUNC_MAC_CCTLINFO_UD, 0, 1,
1413 			      H2C_CMC_TBL_LEN);
1414 
1415 	ret = rtw89_h2c_tx(rtwdev, skb, false);
1416 	if (ret) {
1417 		rtw89_err(rtwdev, "failed to send h2c\n");
1418 		goto fail;
1419 	}
1420 
1421 	return 0;
1422 fail:
1423 	dev_kfree_skb_any(skb);
1424 
1425 	return ret;
1426 }
1427 
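/*
 * Beacon update H2C: fetch the current beacon from mac80211, prepend a
 * 12-byte descriptor (port, band, TIM offset, MAC ID, hardware sequence
 * numbering mode and beacon rate) and hand the whole frame to the firmware,
 * presumably so it can transmit beacons without further driver involvement.
 */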
1428 #define H2C_BCN_BASE_LEN 12
1429 int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
1430 			       struct rtw89_vif *rtwvif)
1431 {
1432 	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
1433 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
1434 	struct sk_buff *skb;
1435 	struct sk_buff *skb_beacon;
1436 	u16 tim_offset;
1437 	int bcn_total_len;
1438 	u16 beacon_rate;
1439 	int ret;
1440 
1441 	if (vif->p2p)
1442 		beacon_rate = RTW89_HW_RATE_OFDM6;
1443 	else if (chan->band_type == RTW89_BAND_2G)
1444 		beacon_rate = RTW89_HW_RATE_CCK1;
1445 	else
1446 		beacon_rate = RTW89_HW_RATE_OFDM6;
1447 
1448 	skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset,
1449 					      NULL, 0);
1450 	if (!skb_beacon) {
1451 		rtw89_err(rtwdev, "failed to get beacon skb\n");
1452 		return -ENOMEM;
1453 	}
1454 
1455 	bcn_total_len = H2C_BCN_BASE_LEN + skb_beacon->len;
1456 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len);
1457 	if (!skb) {
1458 		rtw89_err(rtwdev, "failed to alloc skb for h2c beacon update\n");
1459 		dev_kfree_skb_any(skb_beacon);
1460 		return -ENOMEM;
1461 	}
1462 	skb_put(skb, H2C_BCN_BASE_LEN);
1463 
1464 	SET_BCN_UPD_PORT(skb->data, rtwvif->port);
1465 	SET_BCN_UPD_MBSSID(skb->data, 0);
1466 	SET_BCN_UPD_BAND(skb->data, rtwvif->mac_idx);
1467 	SET_BCN_UPD_GRP_IE_OFST(skb->data, tim_offset);
1468 	SET_BCN_UPD_MACID(skb->data, rtwvif->mac_id);
1469 	SET_BCN_UPD_SSN_SEL(skb->data, RTW89_MGMT_HW_SSN_SEL);
1470 	SET_BCN_UPD_SSN_MODE(skb->data, RTW89_MGMT_HW_SEQ_MODE);
1471 	SET_BCN_UPD_RATE(skb->data, beacon_rate);
1472 
1473 	skb_put_data(skb, skb_beacon->data, skb_beacon->len);
1474 	dev_kfree_skb_any(skb_beacon);
1475 
1476 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1477 			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
1478 			      H2C_FUNC_MAC_BCN_UPD, 0, 1,
1479 			      bcn_total_len);
1480 
1481 	ret = rtw89_h2c_tx(rtwdev, skb, false);
1482 	if (ret) {
1483 		rtw89_err(rtwdev, "failed to send h2c\n");
1484 		dev_kfree_skb_any(skb);
1485 		return ret;
1486 	}
1487 
1488 	return 0;
1489 }
1490 
1491 #define H2C_ROLE_MAINTAIN_LEN 4
1492 int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
1493 			       struct rtw89_vif *rtwvif,
1494 			       struct rtw89_sta *rtwsta,
1495 			       enum rtw89_upd_mode upd_mode)
1496 {
1497 	struct sk_buff *skb;
1498 	u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
1499 	u8 self_role;
1500 	int ret;
1501 
1502 	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) {
1503 		if (rtwsta)
1504 			self_role = RTW89_SELF_ROLE_AP_CLIENT;
1505 		else
1506 			self_role = rtwvif->self_role;
1507 	} else {
1508 		self_role = rtwvif->self_role;
1509 	}
1510 
1511 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ROLE_MAINTAIN_LEN);
1512 	if (!skb) {
1513 		rtw89_err(rtwdev, "failed to alloc skb for h2c role maintain\n");
1514 		return -ENOMEM;
1515 	}
1516 	skb_put(skb, H2C_ROLE_MAINTAIN_LEN);
1517 	SET_FWROLE_MAINTAIN_MACID(skb->data, mac_id);
1518 	SET_FWROLE_MAINTAIN_SELF_ROLE(skb->data, self_role);
1519 	SET_FWROLE_MAINTAIN_UPD_MODE(skb->data, upd_mode);
1520 	SET_FWROLE_MAINTAIN_WIFI_ROLE(skb->data, rtwvif->wifi_role);
1521 
1522 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1523 			      H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
1524 			      H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1,
1525 			      H2C_ROLE_MAINTAIN_LEN);
1526 
1527 	ret = rtw89_h2c_tx(rtwdev, skb, false);
1528 	if (ret) {
1529 		rtw89_err(rtwdev, "failed to send h2c\n");
1530 		goto fail;
1531 	}
1532 
1533 	return 0;
1534 fail:
1535 	dev_kfree_skb_any(skb);
1536 
1537 	return ret;
1538 }
1539 
1540 #define H2C_JOIN_INFO_LEN 4
1541 int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
1542 			   struct rtw89_sta *rtwsta, bool dis_conn)
1543 {
1544 	struct sk_buff *skb;
1545 	u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
1546 	u8 self_role = rtwvif->self_role;
1547 	u8 net_type = rtwvif->net_type;
1548 	int ret;
1549 
1550 	if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta) {
1551 		self_role = RTW89_SELF_ROLE_AP_CLIENT;
1552 		net_type = dis_conn ? RTW89_NET_TYPE_NO_LINK : net_type;
1553 	}
1554 
1555 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN);
1556 	if (!skb) {
1557 		rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
1558 		return -ENOMEM;
1559 	}
1560 	skb_put(skb, H2C_JOIN_INFO_LEN);
1561 	SET_JOININFO_MACID(skb->data, mac_id);
1562 	SET_JOININFO_OP(skb->data, dis_conn);
1563 	SET_JOININFO_BAND(skb->data, rtwvif->mac_idx);
1564 	SET_JOININFO_WMM(skb->data, rtwvif->wmm);
1565 	SET_JOININFO_TGR(skb->data, rtwvif->trigger);
1566 	SET_JOININFO_ISHESTA(skb->data, 0);
1567 	SET_JOININFO_DLBW(skb->data, 0);
1568 	SET_JOININFO_TF_MAC_PAD(skb->data, 0);
1569 	SET_JOININFO_DL_T_PE(skb->data, 0);
1570 	SET_JOININFO_PORT_ID(skb->data, rtwvif->port);
1571 	SET_JOININFO_NET_TYPE(skb->data, net_type);
1572 	SET_JOININFO_WIFI_ROLE(skb->data, rtwvif->wifi_role);
1573 	SET_JOININFO_SELF_ROLE(skb->data, self_role);
1574 
1575 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1576 			      H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
1577 			      H2C_FUNC_MAC_JOININFO, 0, 1,
1578 			      H2C_JOIN_INFO_LEN);
1579 
1580 	ret = rtw89_h2c_tx(rtwdev, skb, false);
1581 	if (ret) {
1582 		rtw89_err(rtwdev, "failed to send h2c\n");
1583 		goto fail;
1584 	}
1585 
1586 	return 0;
1587 fail:
1588 	dev_kfree_skb_any(skb);
1589 
1590 	return ret;
1591 }
1592 
1593 int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
1594 			     bool pause)
1595 {
1596 	struct rtw89_fw_macid_pause_grp h2c = {{0}};
1597 	u8 len = sizeof(struct rtw89_fw_macid_pause_grp);
1598 	struct sk_buff *skb;
1599 	int ret;
1600 
1601 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN);
1602 	if (!skb) {
1603 		rtw89_err(rtwdev, "failed to alloc skb for h2c macid pause\n");
1604 		return -ENOMEM;
1605 	}
1606 	h2c.mask_grp[grp] = cpu_to_le32(BIT(sh));
1607 	if (pause)
1608 		h2c.pause_grp[grp] = cpu_to_le32(BIT(sh));
1609 	skb_put_data(skb, &h2c, len);
1610 
1611 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1612 			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
1613 			      H2C_FUNC_MAC_MACID_PAUSE, 1, 0,
1614 			      len);
1615 
1616 	ret = rtw89_h2c_tx(rtwdev, skb, false);
1617 	if (ret) {
1618 		rtw89_err(rtwdev, "failed to send h2c\n");
1619 		goto fail;
1620 	}
1621 
1622 	return 0;
1623 fail:
1624 	dev_kfree_skb_any(skb);
1625 
1626 	return ret;
1627 }
1628 
1629 #define H2C_EDCA_LEN 12
1630 int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
1631 			  u8 ac, u32 val)
1632 {
1633 	struct sk_buff *skb;
1634 	int ret;
1635 
1636 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_EDCA_LEN);
1637 	if (!skb) {
1638 		rtw89_err(rtwdev, "failed to alloc skb for h2c edca\n");
1639 		return -ENOMEM;
1640 	}
1641 	skb_put(skb, H2C_EDCA_LEN);
1642 	RTW89_SET_EDCA_SEL(skb->data, 0);
1643 	RTW89_SET_EDCA_BAND(skb->data, rtwvif->mac_idx);
1644 	RTW89_SET_EDCA_WMM(skb->data, 0);
1645 	RTW89_SET_EDCA_AC(skb->data, ac);
1646 	RTW89_SET_EDCA_PARAM(skb->data, val);
1647 
1648 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1649 			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
1650 			      H2C_FUNC_USR_EDCA, 0, 1,
1651 			      H2C_EDCA_LEN);
1652 
1653 	ret = rtw89_h2c_tx(rtwdev, skb, false);
1654 	if (ret) {
1655 		rtw89_err(rtwdev, "failed to send h2c\n");
1656 		goto fail;
1657 	}
1658 
1659 	return 0;
1660 fail:
1661 	dev_kfree_skb_any(skb);
1662 
1663 	return ret;
1664 }
1665 
1666 #define H2C_TSF32_TOGL_LEN 4
1667 int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
1668 			      bool en)
1669 {
1670 	struct sk_buff *skb;
1671 	u16 early_us = en ? 2000 : 0;
1672 	u8 *cmd;
1673 	int ret;
1674 
1675 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_TSF32_TOGL_LEN);
1676 	if (!skb) {
1677 		rtw89_err(rtwdev, "failed to alloc skb for h2c tsf32 toggle\n");
1678 		return -ENOMEM;
1679 	}
1680 	skb_put(skb, H2C_TSF32_TOGL_LEN);
1681 	cmd = skb->data;
1682 
1683 	RTW89_SET_FWCMD_TSF32_TOGL_BAND(cmd, rtwvif->mac_idx);
1684 	RTW89_SET_FWCMD_TSF32_TOGL_EN(cmd, en);
1685 	RTW89_SET_FWCMD_TSF32_TOGL_PORT(cmd, rtwvif->port);
1686 	RTW89_SET_FWCMD_TSF32_TOGL_EARLY(cmd, early_us);
1687 
1688 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1689 			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
1690 			      H2C_FUNC_TSF32_TOGL, 0, 0,
1691 			      H2C_TSF32_TOGL_LEN);
1692 
1693 	ret = rtw89_h2c_tx(rtwdev, skb, false);
1694 	if (ret) {
1695 		rtw89_err(rtwdev, "failed to send h2c\n");
1696 		goto fail;
1697 	}
1698 
1699 	return 0;
1700 fail:
1701 	dev_kfree_skb_any(skb);
1702 
1703 	return ret;
1704 }
1705 
1706 #define H2C_OFLD_CFG_LEN 8
1707 int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev)
1708 {
1709 	static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00};
1710 	struct sk_buff *skb;
1711 	int ret;
1712 
1713 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_OFLD_CFG_LEN);
1714 	if (!skb) {
1715 		rtw89_err(rtwdev, "failed to alloc skb for h2c ofld\n");
1716 		return -ENOMEM;
1717 	}
1718 	skb_put_data(skb, cfg, H2C_OFLD_CFG_LEN);
1719 
1720 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1721 			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
1722 			      H2C_FUNC_OFLD_CFG, 0, 1,
1723 			      H2C_OFLD_CFG_LEN);
1724 
1725 	ret = rtw89_h2c_tx(rtwdev, skb, false);
1726 	if (ret) {
1727 		rtw89_err(rtwdev, "failed to send h2c\n");
1728 		goto fail;
1729 	}
1730 
1731 	return 0;
1732 fail:
1733 	dev_kfree_skb_any(skb);
1734 
1735 	return ret;
1736 }
1737 
1738 #define H2C_RA_LEN 16
1739 int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi)
1740 {
1741 	struct sk_buff *skb;
1742 	u8 *cmd;
1743 	int ret;
1744 
1745 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RA_LEN);
1746 	if (!skb) {
1747 		rtw89_err(rtwdev, "failed to alloc skb for h2c ra\n");
1748 		return -ENOMEM;
1749 	}
1750 	skb_put(skb, H2C_RA_LEN);
1751 	cmd = skb->data;
1752 	rtw89_debug(rtwdev, RTW89_DBG_RA,
1753 		    "ra cmd msk: %llx ", ra->ra_mask);
1754 
1755 	RTW89_SET_FWCMD_RA_MODE(cmd, ra->mode_ctrl);
1756 	RTW89_SET_FWCMD_RA_BW_CAP(cmd, ra->bw_cap);
1757 	RTW89_SET_FWCMD_RA_MACID(cmd, ra->macid);
1758 	RTW89_SET_FWCMD_RA_DCM(cmd, ra->dcm_cap);
1759 	RTW89_SET_FWCMD_RA_ER(cmd, ra->er_cap);
1760 	RTW89_SET_FWCMD_RA_INIT_RATE_LV(cmd, ra->init_rate_lv);
1761 	RTW89_SET_FWCMD_RA_UPD_ALL(cmd, ra->upd_all);
1762 	RTW89_SET_FWCMD_RA_SGI(cmd, ra->en_sgi);
1763 	RTW89_SET_FWCMD_RA_LDPC(cmd, ra->ldpc_cap);
1764 	RTW89_SET_FWCMD_RA_STBC(cmd, ra->stbc_cap);
1765 	RTW89_SET_FWCMD_RA_SS_NUM(cmd, ra->ss_num);
1766 	RTW89_SET_FWCMD_RA_GILTF(cmd, ra->giltf);
1767 	RTW89_SET_FWCMD_RA_UPD_BW_NSS_MASK(cmd, ra->upd_bw_nss_mask);
1768 	RTW89_SET_FWCMD_RA_UPD_MASK(cmd, ra->upd_mask);
1769 	RTW89_SET_FWCMD_RA_MASK_0(cmd, FIELD_GET(MASKBYTE0, ra->ra_mask));
1770 	RTW89_SET_FWCMD_RA_MASK_1(cmd, FIELD_GET(MASKBYTE1, ra->ra_mask));
1771 	RTW89_SET_FWCMD_RA_MASK_2(cmd, FIELD_GET(MASKBYTE2, ra->ra_mask));
1772 	RTW89_SET_FWCMD_RA_MASK_3(cmd, FIELD_GET(MASKBYTE3, ra->ra_mask));
1773 	RTW89_SET_FWCMD_RA_MASK_4(cmd, FIELD_GET(MASKBYTE4, ra->ra_mask));
1774 	RTW89_SET_FWCMD_RA_FIX_GILTF_EN(cmd, ra->fix_giltf_en);
1775 	RTW89_SET_FWCMD_RA_FIX_GILTF(cmd, ra->fix_giltf);
1776 
1777 	if (csi) {
1778 		RTW89_SET_FWCMD_RA_BFEE_CSI_CTL(cmd, 1);
1779 		RTW89_SET_FWCMD_RA_BAND_NUM(cmd, ra->band_num);
1780 		RTW89_SET_FWCMD_RA_CR_TBL_SEL(cmd, ra->cr_tbl_sel);
1781 		RTW89_SET_FWCMD_RA_FIXED_CSI_RATE_EN(cmd, ra->fixed_csi_rate_en);
1782 		RTW89_SET_FWCMD_RA_RA_CSI_RATE_EN(cmd, ra->ra_csi_rate_en);
1783 		RTW89_SET_FWCMD_RA_FIXED_CSI_MCS_SS_IDX(cmd, ra->csi_mcs_ss_idx);
1784 		RTW89_SET_FWCMD_RA_FIXED_CSI_MODE(cmd, ra->csi_mode);
1785 		RTW89_SET_FWCMD_RA_FIXED_CSI_GI_LTF(cmd, ra->csi_gi_ltf);
1786 		RTW89_SET_FWCMD_RA_FIXED_CSI_BW(cmd, ra->csi_bw);
1787 	}
1788 
1789 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1790 			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RA,
1791 			      H2C_FUNC_OUTSRC_RA_MACIDCFG, 0, 0,
1792 			      H2C_RA_LEN);
1793 
1794 	ret = rtw89_h2c_tx(rtwdev, skb, false);
1795 	if (ret) {
1796 		rtw89_err(rtwdev, "failed to send h2c\n");
1797 		goto fail;
1798 	}
1799 
1800 	return 0;
1801 fail:
1802 	dev_kfree_skb_any(skb);
1803 
1804 	return ret;
1805 }
1806 
1807 #define H2C_LEN_CXDRVHDR 2
1808 #define H2C_LEN_CXDRVINFO_INIT (12 + H2C_LEN_CXDRVHDR)
1809 int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev)
1810 {
1811 	struct rtw89_btc *btc = &rtwdev->btc;
1812 	struct rtw89_btc_dm *dm = &btc->dm;
1813 	struct rtw89_btc_init_info *init_info = &dm->init_info;
1814 	struct rtw89_btc_module *module = &init_info->module;
1815 	struct rtw89_btc_ant_info *ant = &module->ant;
1816 	struct sk_buff *skb;
1817 	u8 *cmd;
1818 	int ret;
1819 
1820 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_INIT);
1821 	if (!skb) {
1822 		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init\n");
1823 		return -ENOMEM;
1824 	}
1825 	skb_put(skb, H2C_LEN_CXDRVINFO_INIT);
1826 	cmd = skb->data;
1827 
1828 	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_INIT);
1829 	RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_INIT - H2C_LEN_CXDRVHDR);
1830 
1831 	RTW89_SET_FWCMD_CXINIT_ANT_TYPE(cmd, ant->type);
1832 	RTW89_SET_FWCMD_CXINIT_ANT_NUM(cmd, ant->num);
1833 	RTW89_SET_FWCMD_CXINIT_ANT_ISO(cmd, ant->isolation);
1834 	RTW89_SET_FWCMD_CXINIT_ANT_POS(cmd, ant->single_pos);
1835 	RTW89_SET_FWCMD_CXINIT_ANT_DIVERSITY(cmd, ant->diversity);
1836 
1837 	RTW89_SET_FWCMD_CXINIT_MOD_RFE(cmd, module->rfe_type);
1838 	RTW89_SET_FWCMD_CXINIT_MOD_CV(cmd, module->cv);
1839 	RTW89_SET_FWCMD_CXINIT_MOD_BT_SOLO(cmd, module->bt_solo);
1840 	RTW89_SET_FWCMD_CXINIT_MOD_BT_POS(cmd, module->bt_pos);
1841 	RTW89_SET_FWCMD_CXINIT_MOD_SW_TYPE(cmd, module->switch_type);
1842 
1843 	RTW89_SET_FWCMD_CXINIT_WL_GCH(cmd, init_info->wl_guard_ch);
1844 	RTW89_SET_FWCMD_CXINIT_WL_ONLY(cmd, init_info->wl_only);
1845 	RTW89_SET_FWCMD_CXINIT_WL_INITOK(cmd, init_info->wl_init_ok);
1846 	RTW89_SET_FWCMD_CXINIT_DBCC_EN(cmd, init_info->dbcc_en);
1847 	RTW89_SET_FWCMD_CXINIT_CX_OTHER(cmd, init_info->cx_other);
1848 	RTW89_SET_FWCMD_CXINIT_BT_ONLY(cmd, init_info->bt_only);
1849 
1850 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1851 			      H2C_CAT_OUTSRC, BTFC_SET,
1852 			      SET_DRV_INFO, 0, 0,
1853 			      H2C_LEN_CXDRVINFO_INIT);
1854 
1855 	ret = rtw89_h2c_tx(rtwdev, skb, false);
1856 	if (ret) {
1857 		rtw89_err(rtwdev, "failed to send h2c\n");
1858 		goto fail;
1859 	}
1860 
1861 	return 0;
1862 fail:
1863 	dev_kfree_skb_any(skb);
1864 
1865 	return ret;
1866 }
1867 
1868 #define PORT_DATA_OFFSET 4
1869 #define H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN 12
1870 #define H2C_LEN_CXDRVINFO_ROLE_SIZE(max_role_num) \
1871 	(4 + 12 * (max_role_num) + H2C_LEN_CXDRVHDR)
1872 
1873 int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev)
1874 {
1875 	struct rtw89_btc *btc = &rtwdev->btc;
1876 	const struct rtw89_btc_ver *ver = btc->ver;
1877 	struct rtw89_btc_wl_info *wl = &btc->cx.wl;
1878 	struct rtw89_btc_wl_role_info *role_info = &wl->role_info;
1879 	struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
1880 	struct rtw89_btc_wl_active_role *active = role_info->active_role;
1881 	struct sk_buff *skb;
1882 	u32 len;
1883 	u8 offset = 0;
1884 	u8 *cmd;
1885 	int ret;
1886 	int i;
1887 
1888 	len = H2C_LEN_CXDRVINFO_ROLE_SIZE(ver->max_role_num);
1889 
1890 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
1891 	if (!skb) {
1892 		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
1893 		return -ENOMEM;
1894 	}
1895 	skb_put(skb, len);
1896 	cmd = skb->data;
1897 
1898 	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE);
1899 	RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);
1900 
1901 	RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
1902 	RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);
1903 
1904 	RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none);
1905 	RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station);
1906 	RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap);
1907 	RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap);
1908 	RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc);
1909 	RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master);
1910 	RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh);
1911 	RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter);
1912 	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device);
1913 	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc);
1914 	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
1915 	RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);
1916 
1917 	for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
1918 		RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset);
1919 		RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset);
1920 		RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset);
1921 		RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset);
1922 		RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset);
1923 		RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset);
1924 		RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset);
1925 		RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset);
1926 		RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset);
1927 		RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset);
1928 		RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset);
1929 		RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset);
1930 		RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset);
1931 	}
1932 
1933 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1934 			      H2C_CAT_OUTSRC, BTFC_SET,
1935 			      SET_DRV_INFO, 0, 0,
1936 			      len);
1937 
1938 	ret = rtw89_h2c_tx(rtwdev, skb, false);
1939 	if (ret) {
1940 		rtw89_err(rtwdev, "failed to send h2c\n");
1941 		goto fail;
1942 	}
1943 
1944 	return 0;
1945 fail:
1946 	dev_kfree_skb_any(skb);
1947 
1948 	return ret;
1949 }
1950 
1951 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(max_role_num) \
1952 	(4 + 16 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR)
1953 
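/* The v1 role format extends the base one above: each per-port record
 * grows to 16 bytes (adding the NOA duration field) and a 12-byte
 * multi-role/DBCC block is appended at the end of the payload.
 */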
1954 int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev)
1955 {
1956 	struct rtw89_btc *btc = &rtwdev->btc;
1957 	const struct rtw89_btc_ver *ver = btc->ver;
1958 	struct rtw89_btc_wl_info *wl = &btc->cx.wl;
1959 	struct rtw89_btc_wl_role_info_v1 *role_info = &wl->role_info_v1;
1960 	struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
1961 	struct rtw89_btc_wl_active_role_v1 *active = role_info->active_role_v1;
1962 	struct sk_buff *skb;
1963 	u32 len;
1964 	u8 *cmd, offset;
1965 	int ret;
1966 	int i;
1967 
1968 	len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(ver->max_role_num);
1969 
1970 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
1971 	if (!skb) {
1972 		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
1973 		return -ENOMEM;
1974 	}
1975 	skb_put(skb, len);
1976 	cmd = skb->data;
1977 
1978 	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE);
1979 	RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);
1980 
1981 	RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
1982 	RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);
1983 
1984 	RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none);
1985 	RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station);
1986 	RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap);
1987 	RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap);
1988 	RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc);
1989 	RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master);
1990 	RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh);
1991 	RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter);
1992 	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device);
1993 	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc);
1994 	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
1995 	RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);
1996 
1997 	offset = PORT_DATA_OFFSET;
1998 	for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
1999 		RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset);
2000 		RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset);
2001 		RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset);
2002 		RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset);
2003 		RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset);
2004 		RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset);
2005 		RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset);
2006 		RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset);
2007 		RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset);
2008 		RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset);
2009 		RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset);
2010 		RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset);
2011 		RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset);
2012 		RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR(cmd, active->noa_duration, i, offset);
2013 	}
2014 
2015 	offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN;
2016 	RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset);
2017 	RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset);
2018 	RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset);
2019 	RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset);
2020 	RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset);
2021 	RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset);
2022 
2023 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2024 			      H2C_CAT_OUTSRC, BTFC_SET,
2025 			      SET_DRV_INFO, 0, 0,
2026 			      len);
2027 
2028 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2029 	if (ret) {
2030 		rtw89_err(rtwdev, "failed to send h2c\n");
2031 		goto fail;
2032 	}
2033 
2034 	return 0;
2035 fail:
2036 	dev_kfree_skb_any(skb);
2037 
2038 	return ret;
2039 }
2040 
2041 #define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR)
2042 int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev)
2043 {
2044 	struct rtw89_btc *btc = &rtwdev->btc;
2045 	const struct rtw89_btc_ver *ver = btc->ver;
2046 	struct rtw89_btc_ctrl *ctrl = &btc->ctrl;
2047 	struct sk_buff *skb;
2048 	u8 *cmd;
2049 	int ret;
2050 
2051 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_CTRL);
2052 	if (!skb) {
2053 		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n");
2054 		return -ENOMEM;
2055 	}
2056 	skb_put(skb, H2C_LEN_CXDRVINFO_CTRL);
2057 	cmd = skb->data;
2058 
2059 	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_CTRL);
2060 	RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_CTRL - H2C_LEN_CXDRVHDR);
2061 
2062 	RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual);
2063 	RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, ctrl->igno_bt);
2064 	RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, ctrl->always_freerun);
2065 	if (ver->fcxctrl == 0)
2066 		RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step);
2067 
2068 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2069 			      H2C_CAT_OUTSRC, BTFC_SET,
2070 			      SET_DRV_INFO, 0, 0,
2071 			      H2C_LEN_CXDRVINFO_CTRL);
2072 
2073 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2074 	if (ret) {
2075 		rtw89_err(rtwdev, "failed to send h2c\n");
2076 		goto fail;
2077 	}
2078 
2079 	return 0;
2080 fail:
2081 	dev_kfree_skb_any(skb);
2082 
2083 	return ret;
2084 }
2085 
2086 #define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR)
2087 int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev)
2088 {
2089 	struct rtw89_btc *btc = &rtwdev->btc;
2090 	struct rtw89_btc_wl_info *wl = &btc->cx.wl;
2091 	struct rtw89_btc_wl_rfk_info *rfk_info = &wl->rfk_info;
2092 	struct sk_buff *skb;
2093 	u8 *cmd;
2094 	int ret;
2095 
2096 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_RFK);
2097 	if (!skb) {
2098 		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_rfk\n");
2099 		return -ENOMEM;
2100 	}
2101 	skb_put(skb, H2C_LEN_CXDRVINFO_RFK);
2102 	cmd = skb->data;
2103 
2104 	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_RFK);
2105 	RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_RFK - H2C_LEN_CXDRVHDR);
2106 
2107 	RTW89_SET_FWCMD_CXRFK_STATE(cmd, rfk_info->state);
2108 	RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, rfk_info->path_map);
2109 	RTW89_SET_FWCMD_CXRFK_PHY_MAP(cmd, rfk_info->phy_map);
2110 	RTW89_SET_FWCMD_CXRFK_BAND(cmd, rfk_info->band);
2111 	RTW89_SET_FWCMD_CXRFK_TYPE(cmd, rfk_info->type);
2112 
2113 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2114 			      H2C_CAT_OUTSRC, BTFC_SET,
2115 			      SET_DRV_INFO, 0, 0,
2116 			      H2C_LEN_CXDRVINFO_RFK);
2117 
2118 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2119 	if (ret) {
2120 		rtw89_err(rtwdev, "failed to send h2c\n");
2121 		goto fail;
2122 	}
2123 
2124 	return 0;
2125 fail:
2126 	dev_kfree_skb_any(skb);
2127 
2128 	return ret;
2129 }
2130 
2131 #define H2C_LEN_PKT_OFLD 4
2132 int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id)
2133 {
2134 	struct sk_buff *skb;
2135 	u8 *cmd;
2136 	int ret;
2137 
2138 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD);
2139 	if (!skb) {
2140 		rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n");
2141 		return -ENOMEM;
2142 	}
2143 	skb_put(skb, H2C_LEN_PKT_OFLD);
2144 	cmd = skb->data;
2145 
2146 	RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, id);
2147 	RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_DEL);
2148 
2149 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2150 			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
2151 			      H2C_FUNC_PACKET_OFLD, 1, 1,
2152 			      H2C_LEN_PKT_OFLD);
2153 
2154 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2155 	if (ret) {
2156 		rtw89_err(rtwdev, "failed to send h2c\n");
2157 		goto fail;
2158 	}
2159 
2160 	return 0;
2161 fail:
2162 	dev_kfree_skb_any(skb);
2163 
2164 	return ret;
2165 }
2166 
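/* Acquire a packet-offload slot from the bitmap and download the packet
 * to firmware. The slot is released again on any failure, so the caller
 * owns *id only when 0 is returned.
 */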
2167 int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
2168 				 struct sk_buff *skb_ofld)
2169 {
2170 	struct sk_buff *skb;
2171 	u8 *cmd;
2172 	u8 alloc_id;
2173 	int ret;
2174 
2175 	alloc_id = rtw89_core_acquire_bit_map(rtwdev->pkt_offload,
2176 					      RTW89_MAX_PKT_OFLD_NUM);
2177 	if (alloc_id == RTW89_MAX_PKT_OFLD_NUM)
2178 		return -ENOSPC;
2179 
2180 	*id = alloc_id;
2181 
2182 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD + skb_ofld->len);
2183 	if (!skb) {
2184 		rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n");
2185 		rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id);
2186 		return -ENOMEM;
2187 	}
2188 	skb_put(skb, H2C_LEN_PKT_OFLD);
2189 	cmd = skb->data;
2190 
2191 	RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, alloc_id);
2192 	RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_ADD);
2193 	RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(cmd, skb_ofld->len);
2194 	skb_put_data(skb, skb_ofld->data, skb_ofld->len);
2195 
2196 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2197 			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
2198 			      H2C_FUNC_PACKET_OFLD, 1, 1,
2199 			      H2C_LEN_PKT_OFLD + skb_ofld->len);
2200 
2201 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2202 	if (ret) {
2203 		rtw89_err(rtwdev, "failed to send h2c\n");
2204 		rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id);
2205 		goto fail;
2206 	}
2207 
2208 	return 0;
2209 fail:
2210 	dev_kfree_skb_any(skb);
2211 
2212 	return ret;
2213 }
2214 
2215 #define H2C_LEN_SCAN_LIST_OFFLOAD 4
2216 int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int len,
2217 				   struct list_head *chan_list)
2218 {
2219 	struct rtw89_mac_chinfo *ch_info;
2220 	struct sk_buff *skb;
2221 	int skb_len = H2C_LEN_SCAN_LIST_OFFLOAD + len * RTW89_MAC_CHINFO_SIZE;
2222 	u8 *cmd;
2223 	int ret;
2224 
2225 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len);
2226 	if (!skb) {
2227 		rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n");
2228 		return -ENOMEM;
2229 	}
2230 	skb_put(skb, H2C_LEN_SCAN_LIST_OFFLOAD);
2231 	cmd = skb->data;
2232 
2233 	RTW89_SET_FWCMD_SCANOFLD_CH_NUM(cmd, len);
2234 	/* in unit of 4 bytes */
2235 	RTW89_SET_FWCMD_SCANOFLD_CH_SIZE(cmd, RTW89_MAC_CHINFO_SIZE / 4);
2236 
2237 	list_for_each_entry(ch_info, chan_list, list) {
2238 		cmd = skb_put(skb, RTW89_MAC_CHINFO_SIZE);
2239 
2240 		RTW89_SET_FWCMD_CHINFO_PERIOD(cmd, ch_info->period);
2241 		RTW89_SET_FWCMD_CHINFO_DWELL(cmd, ch_info->dwell_time);
2242 		RTW89_SET_FWCMD_CHINFO_CENTER_CH(cmd, ch_info->central_ch);
2243 		RTW89_SET_FWCMD_CHINFO_PRI_CH(cmd, ch_info->pri_ch);
2244 		RTW89_SET_FWCMD_CHINFO_BW(cmd, ch_info->bw);
2245 		RTW89_SET_FWCMD_CHINFO_ACTION(cmd, ch_info->notify_action);
2246 		RTW89_SET_FWCMD_CHINFO_NUM_PKT(cmd, ch_info->num_pkt);
2247 		RTW89_SET_FWCMD_CHINFO_TX(cmd, ch_info->tx_pkt);
2248 		RTW89_SET_FWCMD_CHINFO_PAUSE_DATA(cmd, ch_info->pause_data);
2249 		RTW89_SET_FWCMD_CHINFO_BAND(cmd, ch_info->ch_band);
2250 		RTW89_SET_FWCMD_CHINFO_PKT_ID(cmd, ch_info->probe_id);
2251 		RTW89_SET_FWCMD_CHINFO_DFS(cmd, ch_info->dfs_ch);
2252 		RTW89_SET_FWCMD_CHINFO_TX_NULL(cmd, ch_info->tx_null);
2253 		RTW89_SET_FWCMD_CHINFO_RANDOM(cmd, ch_info->rand_seq_num);
2254 		RTW89_SET_FWCMD_CHINFO_PKT0(cmd, ch_info->pkt_id[0]);
2255 		RTW89_SET_FWCMD_CHINFO_PKT1(cmd, ch_info->pkt_id[1]);
2256 		RTW89_SET_FWCMD_CHINFO_PKT2(cmd, ch_info->pkt_id[2]);
2257 		RTW89_SET_FWCMD_CHINFO_PKT3(cmd, ch_info->pkt_id[3]);
2258 		RTW89_SET_FWCMD_CHINFO_PKT4(cmd, ch_info->pkt_id[4]);
2259 		RTW89_SET_FWCMD_CHINFO_PKT5(cmd, ch_info->pkt_id[5]);
2260 		RTW89_SET_FWCMD_CHINFO_PKT6(cmd, ch_info->pkt_id[6]);
2261 		RTW89_SET_FWCMD_CHINFO_PKT7(cmd, ch_info->pkt_id[7]);
2262 	}
2263 
2264 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2265 			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
2266 			      H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len);
2267 
2268 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2269 	if (ret) {
2270 		rtw89_err(rtwdev, "failed to send h2c\n");
2271 		goto fail;
2272 	}
2273 
2274 	return 0;
2275 fail:
2276 	dev_kfree_skb_any(skb);
2277 
2278 	return ret;
2279 }
2280 
2281 #define H2C_LEN_SCAN_OFFLOAD 28
2282 int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
2283 			      struct rtw89_scan_option *option,
2284 			      struct rtw89_vif *rtwvif)
2285 {
2286 	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
2287 	struct sk_buff *skb;
2288 	u8 *cmd;
2289 	int ret;
2290 
2291 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_SCAN_OFFLOAD);
2292 	if (!skb) {
2293 		rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n");
2294 		return -ENOMEM;
2295 	}
2296 	skb_put(skb, H2C_LEN_SCAN_OFFLOAD);
2297 	cmd = skb->data;
2298 
2299 	RTW89_SET_FWCMD_SCANOFLD_MACID(cmd, rtwvif->mac_id);
2300 	RTW89_SET_FWCMD_SCANOFLD_PORT_ID(cmd, rtwvif->port);
2301 	RTW89_SET_FWCMD_SCANOFLD_BAND(cmd, RTW89_PHY_0);
2302 	RTW89_SET_FWCMD_SCANOFLD_OPERATION(cmd, option->enable);
2303 	RTW89_SET_FWCMD_SCANOFLD_NOTIFY_END(cmd, true);
2304 	RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_MODE(cmd, option->target_ch_mode);
2305 	RTW89_SET_FWCMD_SCANOFLD_START_MODE(cmd, RTW89_SCAN_IMMEDIATE);
2306 	RTW89_SET_FWCMD_SCANOFLD_SCAN_TYPE(cmd, RTW89_SCAN_ONCE);
2307 	if (option->target_ch_mode) {
2308 		RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_BW(cmd, scan_info->op_bw);
2309 		RTW89_SET_FWCMD_SCANOFLD_TARGET_PRI_CH(cmd,
2310 						       scan_info->op_pri_ch);
2311 		RTW89_SET_FWCMD_SCANOFLD_TARGET_CENTRAL_CH(cmd,
2312 							   scan_info->op_chan);
2313 		RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_BAND(cmd,
2314 							scan_info->op_band);
2315 	}
2316 
2317 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2318 			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
2319 			      H2C_FUNC_SCANOFLD, 1, 1,
2320 			      H2C_LEN_SCAN_OFFLOAD);
2321 
2322 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2323 	if (ret) {
2324 		rtw89_err(rtwdev, "failed to send h2c\n");
2325 		goto fail;
2326 	}
2327 
2328 	return 0;
2329 fail:
2330 	dev_kfree_skb_any(skb);
2331 
2332 	return ret;
2333 }
2334 
2335 int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
2336 			struct rtw89_fw_h2c_rf_reg_info *info,
2337 			u16 len, u8 page)
2338 {
2339 	struct sk_buff *skb;
2340 	u8 class = info->rf_path == RF_PATH_A ?
2341 		   H2C_CL_OUTSRC_RF_REG_A : H2C_CL_OUTSRC_RF_REG_B;
2342 	int ret;
2343 
2344 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2345 	if (!skb) {
2346 		rtw89_err(rtwdev, "failed to alloc skb for h2c rf reg\n");
2347 		return -ENOMEM;
2348 	}
2349 	skb_put_data(skb, info->rtw89_phy_config_rf_h2c[page], len);
2350 
2351 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2352 			      H2C_CAT_OUTSRC, class, page, 0, 0,
2353 			      len);
2354 
2355 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2356 	if (ret) {
2357 		rtw89_err(rtwdev, "failed to send h2c\n");
2358 		goto fail;
2359 	}
2360 
2361 	return 0;
2362 fail:
2363 	dev_kfree_skb_any(skb);
2364 
2365 	return ret;
2366 }
2367 
2368 int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev)
2369 {
2370 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
2371 	struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
2372 	struct rtw89_fw_h2c_rf_get_mccch *mccch;
2373 	struct sk_buff *skb;
2374 	int ret;
2375 
2376 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, sizeof(*mccch));
2377 	if (!skb) {
2378 		rtw89_err(rtwdev, "failed to alloc skb for h2c rf mcc notify\n");
2379 		return -ENOMEM;
2380 	}
2381 	skb_put(skb, sizeof(*mccch));
2382 	mccch = (struct rtw89_fw_h2c_rf_get_mccch *)skb->data;
2383 
2384 	mccch->ch_0 = cpu_to_le32(rfk_mcc->ch[0]);
2385 	mccch->ch_1 = cpu_to_le32(rfk_mcc->ch[1]);
2386 	mccch->band_0 = cpu_to_le32(rfk_mcc->band[0]);
2387 	mccch->band_1 = cpu_to_le32(rfk_mcc->band[1]);
2388 	mccch->current_channel = cpu_to_le32(chan->channel);
2389 	mccch->current_band_type = cpu_to_le32(chan->band_type);
2390 
2391 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2392 			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY,
2393 			      H2C_FUNC_OUTSRC_RF_GET_MCCCH, 0, 0,
2394 			      sizeof(*mccch));
2395 
2396 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2397 	if (ret) {
2398 		rtw89_err(rtwdev, "failed to send h2c\n");
2399 		goto fail;
2400 	}
2401 
2402 	return 0;
2403 fail:
2404 	dev_kfree_skb_any(skb);
2405 
2406 	return ret;
2407 }
2408 EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc);
2409 
2410 int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
2411 			      u8 h2c_class, u8 h2c_func, u8 *buf, u16 len,
2412 			      bool rack, bool dack)
2413 {
2414 	struct sk_buff *skb;
2415 	int ret;
2416 
2417 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2418 	if (!skb) {
2419 		rtw89_err(rtwdev, "failed to alloc skb for raw with hdr\n");
2420 		return -ENOMEM;
2421 	}
2422 	skb_put_data(skb, buf, len);
2423 
2424 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2425 			      H2C_CAT_OUTSRC, h2c_class, h2c_func, rack, dack,
2426 			      len);
2427 
2428 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2429 	if (ret) {
2430 		rtw89_err(rtwdev, "failed to send h2c\n");
2431 		goto fail;
2432 	}
2433 
2434 	return 0;
2435 fail:
2436 	dev_kfree_skb_any(skb);
2437 
2438 	return ret;
2439 }
2440 
2441 int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len)
2442 {
2443 	struct sk_buff *skb;
2444 	int ret;
2445 
2446 	skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, len);
2447 	if (!skb) {
2448 		rtw89_err(rtwdev, "failed to alloc skb for h2c raw\n");
2449 		return -ENOMEM;
2450 	}
2451 	skb_put_data(skb, buf, len);
2452 
2453 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2454 	if (ret) {
2455 		rtw89_err(rtwdev, "failed to send h2c\n");
2456 		goto fail;
2457 	}
2458 
2459 	return 0;
2460 fail:
2461 	dev_kfree_skb_any(skb);
2462 
2463 	return ret;
2464 }
2465 
2466 void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev)
2467 {
2468 	struct rtw89_early_h2c *early_h2c;
2469 
2470 	lockdep_assert_held(&rtwdev->mutex);
2471 
2472 	list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) {
2473 		rtw89_fw_h2c_raw(rtwdev, early_h2c->h2c, early_h2c->h2c_len);
2474 	}
2475 }
2476 
2477 void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev)
2478 {
2479 	struct rtw89_early_h2c *early_h2c, *tmp;
2480 
2481 	mutex_lock(&rtwdev->mutex);
2482 	list_for_each_entry_safe(early_h2c, tmp, &rtwdev->early_h2c_list, list) {
2483 		list_del(&early_h2c->list);
2484 		kfree(early_h2c->h2c);
2485 		kfree(early_h2c);
2486 	}
2487 	mutex_unlock(&rtwdev->mutex);
2488 }
2489 
2490 static void rtw89_fw_c2h_parse_attr(struct sk_buff *c2h)
2491 {
2492 	struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h);
2493 
2494 	attr->category = RTW89_GET_C2H_CATEGORY(c2h->data);
2495 	attr->class = RTW89_GET_C2H_CLASS(c2h->data);
2496 	attr->func = RTW89_GET_C2H_FUNC(c2h->data);
2497 	attr->len = RTW89_GET_C2H_LEN(c2h->data);
2498 }
2499 
2500 static bool rtw89_fw_c2h_chk_atomic(struct rtw89_dev *rtwdev,
2501 				    struct sk_buff *c2h)
2502 {
2503 	struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h);
2504 	u8 category = attr->category;
2505 	u8 class = attr->class;
2506 	u8 func = attr->func;
2507 
2508 	switch (category) {
2509 	default:
2510 		return false;
2511 	case RTW89_C2H_CAT_MAC:
2512 		return rtw89_mac_c2h_chk_atomic(rtwdev, class, func);
2513 	}
2514 }
2515 
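/* Entry point from the RX path: C2H events that are safe to handle in
 * atomic context are dispatched and freed immediately; all others are
 * queued and handled later in rtw89_fw_c2h_work() under rtwdev->mutex.
 */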
2516 void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h)
2517 {
2518 	rtw89_fw_c2h_parse_attr(c2h);
2519 	if (!rtw89_fw_c2h_chk_atomic(rtwdev, c2h))
2520 		goto enqueue;
2521 
2522 	rtw89_fw_c2h_cmd_handle(rtwdev, c2h);
2523 	dev_kfree_skb_any(c2h);
2524 	return;
2525 
2526 enqueue:
2527 	skb_queue_tail(&rtwdev->c2h_queue, c2h);
2528 	ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work);
2529 }
2530 
2531 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
2532 				    struct sk_buff *skb)
2533 {
2534 	struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(skb);
2535 	u8 category = attr->category;
2536 	u8 class = attr->class;
2537 	u8 func = attr->func;
2538 	u16 len = attr->len;
2539 	bool dump = true;
2540 
2541 	if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags))
2542 		return;
2543 
2544 	switch (category) {
2545 	case RTW89_C2H_CAT_TEST:
2546 		break;
2547 	case RTW89_C2H_CAT_MAC:
2548 		rtw89_mac_c2h_handle(rtwdev, skb, len, class, func);
2549 		if (class == RTW89_MAC_C2H_CLASS_INFO &&
2550 		    func == RTW89_MAC_C2H_FUNC_C2H_LOG)
2551 			dump = false;
2552 		break;
2553 	case RTW89_C2H_CAT_OUTSRC:
2554 		if (class >= RTW89_PHY_C2H_CLASS_BTC_MIN &&
2555 		    class <= RTW89_PHY_C2H_CLASS_BTC_MAX)
2556 			rtw89_btc_c2h_handle(rtwdev, skb, len, class, func);
2557 		else
2558 			rtw89_phy_c2h_handle(rtwdev, skb, len, class, func);
2559 		break;
2560 	}
2561 
2562 	if (dump)
2563 		rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "C2H: ", skb->data, skb->len);
2564 }
2565 
2566 void rtw89_fw_c2h_work(struct work_struct *work)
2567 {
2568 	struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
2569 						c2h_work);
2570 	struct sk_buff *skb, *tmp;
2571 
2572 	skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) {
2573 		skb_unlink(skb, &rtwdev->c2h_queue);
2574 		mutex_lock(&rtwdev->mutex);
2575 		rtw89_fw_c2h_cmd_handle(rtwdev, skb);
2576 		mutex_unlock(&rtwdev->mutex);
2577 		dev_kfree_skb_any(skb);
2578 	}
2579 }
2580 
2581 static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev,
2582 				  struct rtw89_mac_h2c_info *info)
2583 {
2584 	const struct rtw89_chip_info *chip = rtwdev->chip;
2585 	const u32 *h2c_reg = chip->h2c_regs;
2586 	u8 i, val, len;
2587 	int ret;
2588 
2589 	ret = read_poll_timeout(rtw89_read8, val, val == 0, 1000, 5000, false,
2590 				rtwdev, chip->h2c_ctrl_reg);
2591 	if (ret) {
2592 		rtw89_warn(rtwdev, "FW does not process h2c registers\n");
2593 		return ret;
2594 	}
2595 
2596 	len = DIV_ROUND_UP(info->content_len + RTW89_H2CREG_HDR_LEN,
2597 			   sizeof(info->h2creg[0]));
2598 
2599 	RTW89_SET_H2CREG_HDR_FUNC(&info->h2creg[0], info->id);
2600 	RTW89_SET_H2CREG_HDR_LEN(&info->h2creg[0], len);
2601 	for (i = 0; i < RTW89_H2CREG_MAX; i++)
2602 		rtw89_write32(rtwdev, h2c_reg[i], info->h2creg[i]);
2603 
2604 	rtw89_write8(rtwdev, chip->h2c_ctrl_reg, B_AX_H2CREG_TRIGGER);
2605 
2606 	return 0;
2607 }
2608 
2609 static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev,
2610 				 struct rtw89_mac_c2h_info *info)
2611 {
2612 	const struct rtw89_chip_info *chip = rtwdev->chip;
2613 	const u32 *c2h_reg = chip->c2h_regs;
2614 	int ret;
2615 	u8 i, val;
2616 
2617 	info->id = RTW89_FWCMD_C2HREG_FUNC_NULL;
2618 
2619 	ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1,
2620 				       RTW89_C2H_TIMEOUT, false, rtwdev,
2621 				       chip->c2h_ctrl_reg);
2622 	if (ret) {
2623 		rtw89_warn(rtwdev, "c2h reg timeout\n");
2624 		return ret;
2625 	}
2626 
2627 	for (i = 0; i < RTW89_C2HREG_MAX; i++)
2628 		info->c2hreg[i] = rtw89_read32(rtwdev, c2h_reg[i]);
2629 
2630 	rtw89_write8(rtwdev, chip->c2h_ctrl_reg, 0);
2631 
2632 	info->id = RTW89_GET_C2H_HDR_FUNC(*info->c2hreg);
2633 	info->content_len = (RTW89_GET_C2H_HDR_LEN(*info->c2hreg) << 2) -
2634 				RTW89_C2HREG_HDR_LEN;
2635 
2636 	return 0;
2637 }
2638 
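/* Exchange a message over the H2C/C2H register interface: write the H2C
 * registers when @h2c_info is given, then poll the C2H registers when
 * @c2h_info is given. At least one of the two must be non-NULL.
 */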
2639 int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev,
2640 		     struct rtw89_mac_h2c_info *h2c_info,
2641 		     struct rtw89_mac_c2h_info *c2h_info)
2642 {
2643 	int ret;
2644 
2645 	if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE)
2646 		lockdep_assert_held(&rtwdev->mutex);
2647 
2648 	if (!h2c_info && !c2h_info)
2649 		return -EINVAL;
2650 
2651 	if (!h2c_info)
2652 		goto recv_c2h;
2653 
2654 	ret = rtw89_fw_write_h2c_reg(rtwdev, h2c_info);
2655 	if (ret)
2656 		return ret;
2657 
2658 recv_c2h:
2659 	if (!c2h_info)
2660 		return 0;
2661 
2662 	ret = rtw89_fw_read_c2h_reg(rtwdev, c2h_info);
2663 	if (ret)
2664 		return ret;
2665 
2666 	return 0;
2667 }
2668 
2669 void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev)
2670 {
2671 	if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) {
2672 		rtw89_err(rtwdev, "[ERR]pwr is off\n");
2673 		return;
2674 	}
2675 
2676 	rtw89_info(rtwdev, "FW status = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM0));
2677 	rtw89_info(rtwdev, "FW BADADDR = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM1));
2678 	rtw89_info(rtwdev, "FW EPC/RA = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM2));
2679 	rtw89_info(rtwdev, "FW MISC = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM3));
2680 	rtw89_info(rtwdev, "R_AX_HALT_C2H = 0x%x\n",
2681 		   rtw89_read32(rtwdev, R_AX_HALT_C2H));
2682 	rtw89_info(rtwdev, "R_AX_SER_DBG_INFO = 0x%x\n",
2683 		   rtw89_read32(rtwdev, R_AX_SER_DBG_INFO));
2684 
2685 	rtw89_fw_prog_cnt_dump(rtwdev);
2686 }
2687 
2688 static void rtw89_release_pkt_list(struct rtw89_dev *rtwdev)
2689 {
2690 	struct list_head *pkt_list = rtwdev->scan_info.pkt_list;
2691 	struct rtw89_pktofld_info *info, *tmp;
2692 	u8 idx;
2693 
2694 	for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) {
2695 		if (!(rtwdev->chip->support_bands & BIT(idx)))
2696 			continue;
2697 
2698 		list_for_each_entry_safe(info, tmp, &pkt_list[idx], list) {
2699 			rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
2700 			rtw89_core_release_bit_map(rtwdev->pkt_offload,
2701 						   info->id);
2702 			list_del(&info->list);
2703 			kfree(info);
2704 		}
2705 	}
2706 }
2707 
2708 static bool rtw89_is_6ghz_wildcard_probe_req(struct rtw89_dev *rtwdev,
2709 					     struct rtw89_vif *rtwvif,
2710 					     struct rtw89_pktofld_info *info,
2711 					     enum nl80211_band band, u8 ssid_idx)
2712 {
2713 	struct cfg80211_scan_request *req = rtwvif->scan_req;
2714 
2715 	if (band != NL80211_BAND_6GHZ)
2716 		return false;
2717 
2718 	if (req->ssids[ssid_idx].ssid_len) {
2719 		memcpy(info->ssid, req->ssids[ssid_idx].ssid,
2720 		       req->ssids[ssid_idx].ssid_len);
2721 		info->ssid_len = req->ssids[ssid_idx].ssid_len;
2722 		return false;
2723 	} else {
2724 		return true;
2725 	}
2726 }
2727 
2728 static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev,
2729 				     struct rtw89_vif *rtwvif,
2730 				     struct sk_buff *skb, u8 ssid_idx)
2731 {
2732 	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
2733 	struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
2734 	struct rtw89_pktofld_info *info;
2735 	struct sk_buff *new;
2736 	int ret = 0;
2737 	u8 band;
2738 
2739 	for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
2740 		if (!(rtwdev->chip->support_bands & BIT(band)))
2741 			continue;
2742 
2743 		new = skb_copy(skb, GFP_KERNEL);
2744 		if (!new) {
2745 			ret = -ENOMEM;
2746 			goto out;
2747 		}
2748 		skb_put_data(new, ies->ies[band], ies->len[band]);
2749 		skb_put_data(new, ies->common_ies, ies->common_ie_len);
2750 
2751 		info = kzalloc(sizeof(*info), GFP_KERNEL);
2752 		if (!info) {
2753 			ret = -ENOMEM;
2754 			kfree_skb(new);
2755 			goto out;
2756 		}
2757 
2758 		if (rtw89_is_6ghz_wildcard_probe_req(rtwdev, rtwvif, info, band,
2759 						     ssid_idx)) {
2760 			kfree_skb(new);
2761 			kfree(info);
2762 			goto out;
2763 		}
2764 
2765 		ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new);
2766 		if (ret) {
2767 			kfree_skb(new);
2768 			kfree(info);
2769 			goto out;
2770 		}
2771 
2772 		list_add_tail(&info->list, &scan_info->pkt_list[band]);
2773 		kfree_skb(new);
2774 	}
2775 out:
2776 	return ret;
2777 }
2778 
2779 static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev,
2780 					  struct rtw89_vif *rtwvif)
2781 {
2782 	struct cfg80211_scan_request *req = rtwvif->scan_req;
2783 	struct sk_buff *skb;
2784 	u8 num = req->n_ssids, i;
2785 	int ret;
2786 
2787 	for (i = 0; i < num; i++) {
2788 		skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr,
2789 					     req->ssids[i].ssid,
2790 					     req->ssids[i].ssid_len,
2791 					     req->ie_len);
2792 		if (!skb)
2793 			return -ENOMEM;
2794 
2795 		ret = rtw89_append_probe_req_ie(rtwdev, rtwvif, skb, i);
2796 		kfree_skb(skb);
2797 
2798 		if (ret)
2799 			return ret;
2800 	}
2801 
2802 	return 0;
2803 }
2804 
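/* For 6 GHz channels found via RNR, build directed probe requests toward
 * the reported BSSIDs and register them as packet offloads, skipping
 * BSSIDs that already have an entry in the 6 GHz packet list.
 */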
2805 static int rtw89_update_6ghz_rnr_chan(struct rtw89_dev *rtwdev,
2806 				      struct cfg80211_scan_request *req,
2807 				      struct rtw89_mac_chinfo *ch_info)
2808 {
2809 	struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif;
2810 	struct list_head *pkt_list = rtwdev->scan_info.pkt_list;
2811 	struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif);
2812 	struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
2813 	struct cfg80211_scan_6ghz_params *params;
2814 	struct rtw89_pktofld_info *info, *tmp;
2815 	struct ieee80211_hdr *hdr;
2816 	struct sk_buff *skb;
2817 	bool found;
2818 	int ret = 0;
2819 	u8 i;
2820 
2821 	if (!req->n_6ghz_params)
2822 		return 0;
2823 
2824 	for (i = 0; i < req->n_6ghz_params; i++) {
2825 		params = &req->scan_6ghz_params[i];
2826 
2827 		if (req->channels[params->channel_idx]->hw_value !=
2828 		    ch_info->pri_ch)
2829 			continue;
2830 
2831 		found = false;
2832 		list_for_each_entry(tmp, &pkt_list[NL80211_BAND_6GHZ], list) {
2833 			if (ether_addr_equal(tmp->bssid, params->bssid)) {
2834 				found = true;
2835 				break;
2836 			}
2837 		}
2838 		if (found)
2839 			continue;
2840 
2841 		skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr,
2842 					     NULL, 0, req->ie_len);
		if (!skb) {
			ret = -ENOMEM;
			goto out;
		}
2843 		skb_put_data(skb, ies->ies[NL80211_BAND_6GHZ], ies->len[NL80211_BAND_6GHZ]);
2844 		skb_put_data(skb, ies->common_ies, ies->common_ie_len);
2845 		hdr = (struct ieee80211_hdr *)skb->data;
2846 		ether_addr_copy(hdr->addr3, params->bssid);
2847 
2848 		info = kzalloc(sizeof(*info), GFP_KERNEL);
2849 		if (!info) {
2850 			ret = -ENOMEM;
2851 			kfree_skb(skb);
2852 			goto out;
2853 		}
2854 
2855 		ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb);
2856 		if (ret) {
2857 			kfree_skb(skb);
2858 			kfree(info);
2859 			goto out;
2860 		}
2861 
2862 		ether_addr_copy(info->bssid, params->bssid);
2863 		info->channel_6ghz = req->channels[params->channel_idx]->hw_value;
2864 		list_add_tail(&info->list, &rtwdev->scan_info.pkt_list[NL80211_BAND_6GHZ]);
2865 
2866 		ch_info->tx_pkt = true;
2867 		ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G;
2868 
2869 		kfree_skb(skb);
2870 	}
2871 
2872 out:
2873 	return ret;
2874 }
2875 
2876 static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type,
2877 				   int ssid_num,
2878 				   struct rtw89_mac_chinfo *ch_info)
2879 {
2880 	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
2881 	struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif;
2882 	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
2883 	struct cfg80211_scan_request *req = rtwvif->scan_req;
2884 	struct rtw89_pktofld_info *info;
2885 	u8 band, probe_count = 0;
2886 	int ret;
2887 
2888 	ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
2889 	ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
2890 	ch_info->bw = RTW89_SCAN_WIDTH;
2891 	ch_info->tx_pkt = true;
2892 	ch_info->cfg_tx_pwr = false;
2893 	ch_info->tx_pwr_idx = 0;
2894 	ch_info->tx_null = false;
2895 	ch_info->pause_data = false;
2896 	ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
2897 
2898 	if (ch_info->ch_band == RTW89_BAND_6G) {
2899 		if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) ||
2900 		    !ch_info->is_psc) {
2901 			ch_info->tx_pkt = false;
2902 			if (!req->duration_mandatory)
2903 				ch_info->period -= RTW89_DWELL_TIME_6G;
2904 		}
2905 	}
2906 
2907 	ret = rtw89_update_6ghz_rnr_chan(rtwdev, req, ch_info);
2908 	if (ret)
2909 		rtw89_warn(rtwdev, "RNR update failed: %d\n", ret);
2910 
2911 	if (ssid_num) {
2912 		band = rtw89_hw_to_nl80211_band(ch_info->ch_band);
2913 
2914 		list_for_each_entry(info, &scan_info->pkt_list[band], list) {
2915 			if (info->channel_6ghz &&
2916 			    ch_info->pri_ch != info->channel_6ghz)
2917 				continue;
2918 			ch_info->pkt_id[probe_count++] = info->id;
2919 			if (probe_count >= RTW89_SCANOFLD_MAX_SSID)
2920 				break;
2921 		}
2922 		ch_info->num_pkt = probe_count;
2923 	}
2924 
2925 	switch (chan_type) {
2926 	case RTW89_CHAN_OPERATE:
2927 		ch_info->central_ch = scan_info->op_chan;
2928 		ch_info->pri_ch = scan_info->op_pri_ch;
2929 		ch_info->ch_band = scan_info->op_band;
2930 		ch_info->bw = scan_info->op_bw;
2931 		ch_info->tx_null = true;
2932 		ch_info->num_pkt = 0;
2933 		break;
2934 	case RTW89_CHAN_DFS:
2935 		if (ch_info->ch_band != RTW89_BAND_6G)
2936 			ch_info->period = max_t(u8, ch_info->period,
2937 						RTW89_DFS_CHAN_TIME);
2938 		ch_info->dwell_time = RTW89_DWELL_TIME;
2939 		break;
2940 	case RTW89_CHAN_ACTIVE:
2941 		break;
2942 	default:
2943 		rtw89_err(rtwdev, "Channel type out of bounds\n");
2944 	}
2945 }
2946 
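/* Convert the cfg80211 request into firmware chinfo entries, at most
 * RTW89_SCAN_LIST_LIMIT per call (last_chan_idx tracks progress). While
 * associated, an operating-channel entry is interleaved whenever the
 * accumulated off-channel time would exceed RTW89_OFF_CHAN_TIME.
 */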
2947 static int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev,
2948 				       struct rtw89_vif *rtwvif)
2949 {
2950 	struct cfg80211_scan_request *req = rtwvif->scan_req;
2951 	struct rtw89_mac_chinfo	*ch_info, *tmp;
2952 	struct ieee80211_channel *channel;
2953 	struct list_head chan_list;
2954 	bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN;
2955 	int list_len, off_chan_time = 0;
2956 	enum rtw89_chan_type type;
2957 	int ret = 0;
2958 	u32 idx;
2959 
2960 	INIT_LIST_HEAD(&chan_list);
2961 	for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0;
2962 	     idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT;
2963 	     idx++, list_len++) {
2964 		channel = req->channels[idx];
2965 		ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
2966 		if (!ch_info) {
2967 			ret = -ENOMEM;
2968 			goto out;
2969 		}
2970 
2971 		if (req->duration_mandatory)
2972 			ch_info->period = req->duration;
2973 		else if (channel->band == NL80211_BAND_6GHZ)
2974 			ch_info->period = RTW89_CHANNEL_TIME_6G +
2975 					  RTW89_DWELL_TIME_6G;
2976 		else
2977 			ch_info->period = RTW89_CHANNEL_TIME;
2978 
2979 		ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band);
2980 		ch_info->central_ch = channel->hw_value;
2981 		ch_info->pri_ch = channel->hw_value;
2982 		ch_info->rand_seq_num = random_seq;
2983 		ch_info->is_psc = cfg80211_channel_is_psc(channel);
2984 
2985 		if (channel->flags &
2986 		    (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
2987 			type = RTW89_CHAN_DFS;
2988 		else
2989 			type = RTW89_CHAN_ACTIVE;
2990 		rtw89_hw_scan_add_chan(rtwdev, type, req->n_ssids, ch_info);
2991 
2992 		if (rtwvif->net_type != RTW89_NET_TYPE_NO_LINK &&
2993 		    off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME) {
2994 			tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
2995 			if (!tmp) {
2996 				ret = -ENOMEM;
2997 				kfree(ch_info);
2998 				goto out;
2999 			}
3000 
3001 			type = RTW89_CHAN_OPERATE;
3002 			tmp->period = req->duration_mandatory ?
3003 				      req->duration : RTW89_CHANNEL_TIME;
3004 			rtw89_hw_scan_add_chan(rtwdev, type, 0, tmp);
3005 			list_add_tail(&tmp->list, &chan_list);
3006 			off_chan_time = 0;
3007 			list_len++;
3008 		}
3009 		list_add_tail(&ch_info->list, &chan_list);
3010 		off_chan_time += ch_info->period;
3011 	}
3012 	rtwdev->scan_info.last_chan_idx = idx;
3013 	ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list);
3014 
3015 out:
3016 	list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
3017 		list_del(&ch_info->list);
3018 		kfree(ch_info);
3019 	}
3020 
3021 	return ret;
3022 }
3023 
3024 static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev,
3025 				   struct rtw89_vif *rtwvif)
3026 {
3027 	int ret;
3028 
3029 	ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif);
3030 	if (ret) {
3031 		rtw89_err(rtwdev, "Update probe request failed\n");
3032 		goto out;
3033 	}
3034 	ret = rtw89_hw_scan_add_chan_list(rtwdev, rtwvif);
3035 out:
3036 	return ret;
3037 }
3038 
3039 void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
3040 			 struct ieee80211_scan_request *scan_req)
3041 {
3042 	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
3043 	struct cfg80211_scan_request *req = &scan_req->req;
3044 	u32 rx_fltr = rtwdev->hal.rx_fltr;
3045 	u8 mac_addr[ETH_ALEN];
3046 
3047 	rtwdev->scan_info.scanning_vif = vif;
3048 	rtwdev->scan_info.last_chan_idx = 0;
3049 	rtwvif->scan_ies = &scan_req->ies;
3050 	rtwvif->scan_req = req;
3051 	ieee80211_stop_queues(rtwdev->hw);
3052 
3053 	if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
3054 		get_random_mask_addr(mac_addr, req->mac_addr,
3055 				     req->mac_addr_mask);
3056 	else
3057 		ether_addr_copy(mac_addr, vif->addr);
3058 	rtw89_core_scan_start(rtwdev, rtwvif, mac_addr, true);
3059 
3060 	rx_fltr &= ~B_AX_A_BCN_CHK_EN;
3061 	rx_fltr &= ~B_AX_A_BC;
3062 	rx_fltr &= ~B_AX_A_A1_MATCH;
3063 	rtw89_write32_mask(rtwdev,
3064 			   rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0),
3065 			   B_AX_RX_FLTR_CFG_MASK,
3066 			   rx_fltr);
3067 }
3068 
3069 void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
3070 			    bool aborted)
3071 {
3072 	struct cfg80211_scan_info info = {
3073 		.aborted = aborted,
3074 	};
3075 	struct rtw89_vif *rtwvif;
3076 
3077 	if (!vif)
3078 		return;
3079 
3080 	rtw89_write32_mask(rtwdev,
3081 			   rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0),
3082 			   B_AX_RX_FLTR_CFG_MASK,
3083 			   rtwdev->hal.rx_fltr);
3084 
3085 	rtw89_core_scan_complete(rtwdev, vif, true);
3086 	ieee80211_scan_completed(rtwdev->hw, &info);
3087 	ieee80211_wake_queues(rtwdev->hw);
3088 
3089 	rtw89_release_pkt_list(rtwdev);
3090 	rtwvif = (struct rtw89_vif *)vif->drv_priv;
3091 	rtwvif->scan_req = NULL;
3092 	rtwvif->scan_ies = NULL;
3093 	rtwdev->scan_info.last_chan_idx = 0;
3094 	rtwdev->scan_info.scanning_vif = NULL;
3095 
3096 	if (rtwvif->net_type != RTW89_NET_TYPE_NO_LINK)
3097 		rtw89_store_op_chan(rtwdev, false);
3098 	rtw89_set_channel(rtwdev);
3099 }
3100 
3101 void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
3102 {
3103 	rtw89_hw_scan_offload(rtwdev, vif, false);
3104 	rtw89_hw_scan_complete(rtwdev, vif, true);
3105 }
3106 
3107 int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
3108 			  bool enable)
3109 {
3110 	struct rtw89_scan_option opt = {0};
3111 	struct rtw89_vif *rtwvif;
3112 	int ret = 0;
3113 
3114 	rtwvif = vif ? (struct rtw89_vif *)vif->drv_priv : NULL;
3115 	if (!rtwvif)
3116 		return -EINVAL;
3117 
3118 	opt.enable = enable;
3119 	opt.target_ch_mode = rtwvif->net_type != RTW89_NET_TYPE_NO_LINK;
3120 	if (enable) {
3121 		ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif);
3122 		if (ret)
3123 			goto out;
3124 	}
3125 	ret = rtw89_fw_h2c_scan_offload(rtwdev, &opt, rtwvif);
3126 out:
3127 	return ret;
3128 }
3129 
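/* backup == true saves the current operating channel parameters before a
 * scan; backup == false rebuilds that channel and restores it as the
 * active entity channel afterwards.
 */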
3130 void rtw89_store_op_chan(struct rtw89_dev *rtwdev, bool backup)
3131 {
3132 	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
3133 	const struct rtw89_chan *cur = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
3134 	struct rtw89_chan new;
3135 
3136 	if (backup) {
3137 		scan_info->op_pri_ch = cur->primary_channel;
3138 		scan_info->op_chan = cur->channel;
3139 		scan_info->op_bw = cur->band_width;
3140 		scan_info->op_band = cur->band_type;
3141 	} else {
3142 		rtw89_chan_create(&new, scan_info->op_chan, scan_info->op_pri_ch,
3143 				  scan_info->op_band, scan_info->op_bw);
3144 		rtw89_assign_entity_chan(rtwdev, RTW89_SUB_ENTITY_0, &new);
3145 	}
3146 }
3147 
3148 #define H2C_FW_CPU_EXCEPTION_LEN 4
3149 #define H2C_FW_CPU_EXCEPTION_TYPE_DEF 0x5566
3150 int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev)
3151 {
3152 	struct sk_buff *skb;
3153 	int ret;
3154 
3155 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_FW_CPU_EXCEPTION_LEN);
3156 	if (!skb) {
3157 		rtw89_err(rtwdev,
3158 			  "failed to alloc skb for fw cpu exception\n");
3159 		return -ENOMEM;
3160 	}
3161 
3162 	skb_put(skb, H2C_FW_CPU_EXCEPTION_LEN);
3163 	RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(skb->data,
3164 					   H2C_FW_CPU_EXCEPTION_TYPE_DEF);
3165 
3166 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3167 			      H2C_CAT_TEST,
3168 			      H2C_CL_FW_STATUS_TEST,
3169 			      H2C_FUNC_CPU_EXCEPTION, 0, 0,
3170 			      H2C_FW_CPU_EXCEPTION_LEN);
3171 
3172 	ret = rtw89_h2c_tx(rtwdev, skb, false);
3173 	if (ret) {
3174 		rtw89_err(rtwdev, "failed to send h2c\n");
3175 		goto fail;
3176 	}
3177 
3178 	return 0;
3179 
3180 fail:
3181 	dev_kfree_skb_any(skb);
3182 	return ret;
3183 }
3184 
3185 #define H2C_PKT_DROP_LEN 24
3186 int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev,
3187 			  const struct rtw89_pkt_drop_params *params)
3188 {
3189 	struct sk_buff *skb;
3190 	int ret;
3191 
3192 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_PKT_DROP_LEN);
3193 	if (!skb) {
3194 		rtw89_err(rtwdev,
3195 			  "failed to alloc skb for packet drop\n");
3196 		return -ENOMEM;
3197 	}
3198 
3199 	switch (params->sel) {
3200 	case RTW89_PKT_DROP_SEL_MACID_BE_ONCE:
3201 	case RTW89_PKT_DROP_SEL_MACID_BK_ONCE:
3202 	case RTW89_PKT_DROP_SEL_MACID_VI_ONCE:
3203 	case RTW89_PKT_DROP_SEL_MACID_VO_ONCE:
3204 	case RTW89_PKT_DROP_SEL_BAND_ONCE:
3205 		break;
3206 	default:
3207 		rtw89_debug(rtwdev, RTW89_DBG_FW,
3208 			    "H2C of pkt drop might not fully support sel: %d yet\n",
3209 			    params->sel);
3210 		break;
3211 	}
3212 
3213 	skb_put(skb, H2C_PKT_DROP_LEN);
3214 	RTW89_SET_FWCMD_PKT_DROP_SEL(skb->data, params->sel);
3215 	RTW89_SET_FWCMD_PKT_DROP_MACID(skb->data, params->macid);
3216 	RTW89_SET_FWCMD_PKT_DROP_BAND(skb->data, params->mac_band);
3217 	RTW89_SET_FWCMD_PKT_DROP_PORT(skb->data, params->port);
3218 	RTW89_SET_FWCMD_PKT_DROP_MBSSID(skb->data, params->mbssid);
3219 	RTW89_SET_FWCMD_PKT_DROP_ROLE_A_INFO_TF_TRS(skb->data, params->tf_trs);
3220 	RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_0(skb->data,
3221 						  params->macid_band_sel[0]);
3222 	RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_1(skb->data,
3223 						  params->macid_band_sel[1]);
3224 	RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_2(skb->data,
3225 						  params->macid_band_sel[2]);
3226 	RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_3(skb->data,
3227 						  params->macid_band_sel[3]);
3228 
3229 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3230 			      H2C_CAT_MAC,
3231 			      H2C_CL_MAC_FW_OFLD,
3232 			      H2C_FUNC_PKT_DROP, 0, 0,
3233 			      H2C_PKT_DROP_LEN);
3234 
3235 	ret = rtw89_h2c_tx(rtwdev, skb, false);
3236 	if (ret) {
3237 		rtw89_err(rtwdev, "failed to send h2c\n");
3238 		goto fail;
3239 	}
3240 
3241 	return 0;
3242 
3243 fail:
3244 	dev_kfree_skb_any(skb);
3245 	return ret;
3246 }
3247 
3248 #define H2C_KEEP_ALIVE_LEN 4
3249 int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
3250 			    bool enable)
3251 {
3252 	struct sk_buff *skb;
3253 	u8 pkt_id = 0;
3254 	int ret;
3255 
3256 	if (enable) {
3257 		ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
3258 						   RTW89_PKT_OFLD_TYPE_NULL_DATA,
3259 						   &pkt_id);
3260 		if (ret)
3261 			return -EPERM;
3262 	}
3263 
3264 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_KEEP_ALIVE_LEN);
3265 	if (!skb) {
3266 		rtw89_err(rtwdev, "failed to alloc skb for keep alive\n");
3267 		return -ENOMEM;
3268 	}
3269 
3270 	skb_put(skb, H2C_KEEP_ALIVE_LEN);
3271 
3272 	RTW89_SET_KEEP_ALIVE_ENABLE(skb->data, enable);
3273 	RTW89_SET_KEEP_ALIVE_PKT_NULL_ID(skb->data, pkt_id);
3274 	RTW89_SET_KEEP_ALIVE_PERIOD(skb->data, 5);
3275 	RTW89_SET_KEEP_ALIVE_MACID(skb->data, rtwvif->mac_id);
3276 
3277 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3278 			      H2C_CAT_MAC,
3279 			      H2C_CL_MAC_WOW,
3280 			      H2C_FUNC_KEEP_ALIVE, 0, 1,
3281 			      H2C_KEEP_ALIVE_LEN);
3282 
3283 	ret = rtw89_h2c_tx(rtwdev, skb, false);
3284 	if (ret) {
3285 		rtw89_err(rtwdev, "failed to send h2c\n");
3286 		goto fail;
3287 	}
3288 
3289 	return 0;
3290 
3291 fail:
3292 	dev_kfree_skb_any(skb);
3293 
3294 	return ret;
3295 }
3296 
3297 #define H2C_DISCONNECT_DETECT_LEN 8
3298 int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev,
3299 				   struct rtw89_vif *rtwvif, bool enable)
3300 {
3301 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
3302 	struct sk_buff *skb;
3303 	u8 macid = rtwvif->mac_id;
3304 	int ret;
3305 
3306 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DISCONNECT_DETECT_LEN);
3307 	if (!skb) {
3308 		rtw89_err(rtwdev, "failed to alloc skb for disconnect detect\n");
3309 		return -ENOMEM;
3310 	}
3311 
3312 	skb_put(skb, H2C_DISCONNECT_DETECT_LEN);
3313 
3314 	if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) {
3315 		RTW89_SET_DISCONNECT_DETECT_ENABLE(skb->data, enable);
3316 		RTW89_SET_DISCONNECT_DETECT_DISCONNECT(skb->data, !enable);
3317 		RTW89_SET_DISCONNECT_DETECT_MAC_ID(skb->data, macid);
3318 		RTW89_SET_DISCONNECT_DETECT_CHECK_PERIOD(skb->data, 100);
3319 		RTW89_SET_DISCONNECT_DETECT_TRY_PKT_COUNT(skb->data, 5);
3320 	}
3321 
3322 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3323 			      H2C_CAT_MAC,
3324 			      H2C_CL_MAC_WOW,
3325 			      H2C_FUNC_DISCONNECT_DETECT, 0, 1,
3326 			      H2C_DISCONNECT_DETECT_LEN);
3327 
3328 	ret = rtw89_h2c_tx(rtwdev, skb, false);
3329 	if (ret) {
3330 		rtw89_err(rtwdev, "failed to send h2c\n");
3331 		goto fail;
3332 	}
3333 
3334 	return 0;
3335 
3336 fail:
3337 	dev_kfree_skb_any(skb);
3338 
3339 	return ret;
3340 }
3341 
3342 #define H2C_WOW_GLOBAL_LEN 8
3343 int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
3344 			    bool enable)
3345 {
3346 	struct sk_buff *skb;
3347 	u8 macid = rtwvif->mac_id;
3348 	int ret;
3349 
3350 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_GLOBAL_LEN);
3351 	if (!skb) {
3352 		rtw89_err(rtwdev, "failed to alloc skb for wow global\n");
3353 		return -ENOMEM;
3354 	}
3355 
3356 	skb_put(skb, H2C_WOW_GLOBAL_LEN);
3357 
3358 	RTW89_SET_WOW_GLOBAL_ENABLE(skb->data, enable);
3359 	RTW89_SET_WOW_GLOBAL_MAC_ID(skb->data, macid);
3360 
3361 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3362 			      H2C_CAT_MAC,
3363 			      H2C_CL_MAC_WOW,
3364 			      H2C_FUNC_WOW_GLOBAL, 0, 1,
3365 			      H2C_WOW_GLOBAL_LEN);
3366 
3367 	ret = rtw89_h2c_tx(rtwdev, skb, false);
3368 	if (ret) {
3369 		rtw89_err(rtwdev, "failed to send h2c\n");
3370 		goto fail;
3371 	}
3372 
3373 	return 0;
3374 
3375 fail:
3376 	dev_kfree_skb_any(skb);
3377 
3378 	return ret;
3379 }
3380 
3381 #define H2C_WAKEUP_CTRL_LEN 4
3382 int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev,
3383 				 struct rtw89_vif *rtwvif,
3384 				 bool enable)
3385 {
3386 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
3387 	struct sk_buff *skb;
3388 	u8 macid = rtwvif->mac_id;
3389 	int ret;
3390 
3391 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WAKEUP_CTRL_LEN);
3392 	if (!skb) {
3393 		rtw89_err(rtwdev, "failed to alloc skb for wakeup ctrl\n");
3394 		return -ENOMEM;
3395 	}
3396 
3397 	skb_put(skb, H2C_WAKEUP_CTRL_LEN);
3398 
3399 	if (rtw_wow->pattern_cnt)
3400 		RTW89_SET_WOW_WAKEUP_CTRL_PATTERN_MATCH_ENABLE(skb->data, enable);
3401 	if (test_bit(RTW89_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags))
3402 		RTW89_SET_WOW_WAKEUP_CTRL_MAGIC_ENABLE(skb->data, enable);
3403 	if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags))
3404 		RTW89_SET_WOW_WAKEUP_CTRL_DEAUTH_ENABLE(skb->data, enable);
3405 
3406 	RTW89_SET_WOW_WAKEUP_CTRL_MAC_ID(skb->data, macid);
3407 
3408 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3409 			      H2C_CAT_MAC,
3410 			      H2C_CL_MAC_WOW,
3411 			      H2C_FUNC_WAKEUP_CTRL, 0, 1,
3412 			      H2C_WAKEUP_CTRL_LEN);
3413 
3414 	ret = rtw89_h2c_tx(rtwdev, skb, false);
3415 	if (ret) {
3416 		rtw89_err(rtwdev, "failed to send h2c\n");
3417 		goto fail;
3418 	}
3419 
3420 	return 0;
3421 
3422 fail:
3423 	dev_kfree_skb_any(skb);
3424 
3425 	return ret;
3426 }
3427 
3428 #define H2C_WOW_CAM_UPD_LEN 24
3429 int rtw89_fw_wow_cam_update(struct rtw89_dev *rtwdev,
3430 			    struct rtw89_wow_cam_info *cam_info)
3431 {
3432 	struct sk_buff *skb;
3433 	int ret;
3434 
3435 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_CAM_UPD_LEN);
3436 	if (!skb) {
3437 		rtw89_err(rtwdev, "failed to alloc skb for wow cam update\n");
3438 		return -ENOMEM;
3439 	}
3440 
3441 	skb_put(skb, H2C_WOW_CAM_UPD_LEN);
3442 
3443 	RTW89_SET_WOW_CAM_UPD_R_W(skb->data, cam_info->r_w);
3444 	RTW89_SET_WOW_CAM_UPD_IDX(skb->data, cam_info->idx);
3445 	if (cam_info->valid) {
3446 		RTW89_SET_WOW_CAM_UPD_WKFM1(skb->data, cam_info->mask[0]);
3447 		RTW89_SET_WOW_CAM_UPD_WKFM2(skb->data, cam_info->mask[1]);
3448 		RTW89_SET_WOW_CAM_UPD_WKFM3(skb->data, cam_info->mask[2]);
3449 		RTW89_SET_WOW_CAM_UPD_WKFM4(skb->data, cam_info->mask[3]);
3450 		RTW89_SET_WOW_CAM_UPD_CRC(skb->data, cam_info->crc);
3451 		RTW89_SET_WOW_CAM_UPD_NEGATIVE_PATTERN_MATCH(skb->data,
3452 							     cam_info->negative_pattern_match);
3453 		RTW89_SET_WOW_CAM_UPD_SKIP_MAC_HDR(skb->data,
3454 						   cam_info->skip_mac_hdr);
3455 		RTW89_SET_WOW_CAM_UPD_UC(skb->data, cam_info->uc);
3456 		RTW89_SET_WOW_CAM_UPD_MC(skb->data, cam_info->mc);
3457 		RTW89_SET_WOW_CAM_UPD_BC(skb->data, cam_info->bc);
3458 	}
3459 	RTW89_SET_WOW_CAM_UPD_VALID(skb->data, cam_info->valid);
3460 
3461 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3462 			      H2C_CAT_MAC,
3463 			      H2C_CL_MAC_WOW,
3464 			      H2C_FUNC_WOW_CAM_UPD, 0, 1,
3465 			      H2C_WOW_CAM_UPD_LEN);
3466 
3467 	ret = rtw89_h2c_tx(rtwdev, skb, false);
3468 	if (ret) {
3469 		rtw89_err(rtwdev, "failed to send h2c\n");
3470 		goto fail;
3471 	}
3472 
3473 	return 0;
3474 fail:
3475 	dev_kfree_skb_any(skb);
3476 
3477 	return ret;
3478 }
3479 
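/* Send an H2C that expects a firmware completion report. The skb is
 * always consumed (freed on TX failure), and the caller blocks in
 * rtw89_wait_for_cond() until the report keyed by @cond arrives.
 */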
3480 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
3481 				 struct rtw89_wait_info *wait, unsigned int cond)
3482 {
3483 	int ret;
3484 
3485 	ret = rtw89_h2c_tx(rtwdev, skb, false);
3486 	if (ret) {
3487 		rtw89_err(rtwdev, "failed to send h2c\n");
3488 		dev_kfree_skb_any(skb);
3489 		return -EBUSY;
3490 	}
3491 
3492 	return rtw89_wait_for_cond(wait, cond);
3493 }
3494 
3495 #define H2C_ADD_MCC_LEN 16
3496 int rtw89_fw_h2c_add_mcc(struct rtw89_dev *rtwdev,
3497 			 const struct rtw89_fw_mcc_add_req *p)
3498 {
3499 	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
3500 	struct sk_buff *skb;
3501 	unsigned int cond;
3502 
3503 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ADD_MCC_LEN);
3504 	if (!skb) {
3505 		rtw89_err(rtwdev,
3506 			  "failed to alloc skb for add mcc\n");
3507 		return -ENOMEM;
3508 	}
3509 
3510 	skb_put(skb, H2C_ADD_MCC_LEN);
3511 	RTW89_SET_FWCMD_ADD_MCC_MACID(skb->data, p->macid);
3512 	RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG0(skb->data, p->central_ch_seg0);
3513 	RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG1(skb->data, p->central_ch_seg1);
3514 	RTW89_SET_FWCMD_ADD_MCC_PRIMARY_CH(skb->data, p->primary_ch);
3515 	RTW89_SET_FWCMD_ADD_MCC_BANDWIDTH(skb->data, p->bandwidth);
3516 	RTW89_SET_FWCMD_ADD_MCC_GROUP(skb->data, p->group);
3517 	RTW89_SET_FWCMD_ADD_MCC_C2H_RPT(skb->data, p->c2h_rpt);
3518 	RTW89_SET_FWCMD_ADD_MCC_DIS_TX_NULL(skb->data, p->dis_tx_null);
3519 	RTW89_SET_FWCMD_ADD_MCC_DIS_SW_RETRY(skb->data, p->dis_sw_retry);
3520 	RTW89_SET_FWCMD_ADD_MCC_IN_CURR_CH(skb->data, p->in_curr_ch);
3521 	RTW89_SET_FWCMD_ADD_MCC_SW_RETRY_COUNT(skb->data, p->sw_retry_count);
3522 	RTW89_SET_FWCMD_ADD_MCC_TX_NULL_EARLY(skb->data, p->tx_null_early);
3523 	RTW89_SET_FWCMD_ADD_MCC_BTC_IN_2G(skb->data, p->btc_in_2g);
3524 	RTW89_SET_FWCMD_ADD_MCC_PTA_EN(skb->data, p->pta_en);
3525 	RTW89_SET_FWCMD_ADD_MCC_RFK_BY_PASS(skb->data, p->rfk_by_pass);
3526 	RTW89_SET_FWCMD_ADD_MCC_CH_BAND_TYPE(skb->data, p->ch_band_type);
3527 	RTW89_SET_FWCMD_ADD_MCC_DURATION(skb->data, p->duration);
3528 	RTW89_SET_FWCMD_ADD_MCC_COURTESY_EN(skb->data, p->courtesy_en);
3529 	RTW89_SET_FWCMD_ADD_MCC_COURTESY_NUM(skb->data, p->courtesy_num);
3530 	RTW89_SET_FWCMD_ADD_MCC_COURTESY_TARGET(skb->data, p->courtesy_target);
3531 
3532 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3533 			      H2C_CAT_MAC,
3534 			      H2C_CL_MCC,
3535 			      H2C_FUNC_ADD_MCC, 0, 0,
3536 			      H2C_ADD_MCC_LEN);
3537 
3538 	cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_ADD_MCC);
3539 	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
3540 }
3541 
3542 #define H2C_START_MCC_LEN 12
3543 int rtw89_fw_h2c_start_mcc(struct rtw89_dev *rtwdev,
3544 			   const struct rtw89_fw_mcc_start_req *p)
3545 {
3546 	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
3547 	struct sk_buff *skb;
3548 	unsigned int cond;
3549 
3550 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_START_MCC_LEN);
3551 	if (!skb) {
3552 		rtw89_err(rtwdev,
3553 			  "failed to alloc skb for start mcc\n");
3554 		return -ENOMEM;
3555 	}
3556 
3557 	skb_put(skb, H2C_START_MCC_LEN);
3558 	RTW89_SET_FWCMD_START_MCC_GROUP(skb->data, p->group);
3559 	RTW89_SET_FWCMD_START_MCC_BTC_IN_GROUP(skb->data, p->btc_in_group);
3560 	RTW89_SET_FWCMD_START_MCC_OLD_GROUP_ACTION(skb->data, p->old_group_action);
3561 	RTW89_SET_FWCMD_START_MCC_OLD_GROUP(skb->data, p->old_group);
3562 	RTW89_SET_FWCMD_START_MCC_NOTIFY_CNT(skb->data, p->notify_cnt);
3563 	RTW89_SET_FWCMD_START_MCC_NOTIFY_RXDBG_EN(skb->data, p->notify_rxdbg_en);
3564 	RTW89_SET_FWCMD_START_MCC_MACID(skb->data, p->macid);
3565 	RTW89_SET_FWCMD_START_MCC_TSF_LOW(skb->data, p->tsf_low);
3566 	RTW89_SET_FWCMD_START_MCC_TSF_HIGH(skb->data, p->tsf_high);
3567 
3568 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3569 			      H2C_CAT_MAC,
3570 			      H2C_CL_MCC,
3571 			      H2C_FUNC_START_MCC, 0, 0,
3572 			      H2C_START_MCC_LEN);
3573 
3574 	cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_START_MCC);
3575 	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
3576 }
3577 
3578 #define H2C_STOP_MCC_LEN 4
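/* Stop MCC operation of @group for @macid; @prev_groups extends the request
 * to previously configured groups.
 */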
3579 int rtw89_fw_h2c_stop_mcc(struct rtw89_dev *rtwdev, u8 group, u8 macid,
3580 			  bool prev_groups)
3581 {
3582 	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
3583 	struct sk_buff *skb;
3584 	unsigned int cond;
3585 
3586 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_STOP_MCC_LEN);
3587 	if (!skb) {
3588 		rtw89_err(rtwdev,
3589 			  "failed to alloc skb for stop mcc\n");
3590 		return -ENOMEM;
3591 	}
3592 
3593 	skb_put(skb, H2C_STOP_MCC_LEN);
3594 	RTW89_SET_FWCMD_STOP_MCC_MACID(skb->data, macid);
3595 	RTW89_SET_FWCMD_STOP_MCC_GROUP(skb->data, group);
3596 	RTW89_SET_FWCMD_STOP_MCC_PREV_GROUPS(skb->data, prev_groups);
3597 
3598 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3599 			      H2C_CAT_MAC,
3600 			      H2C_CL_MCC,
3601 			      H2C_FUNC_STOP_MCC, 0, 0,
3602 			      H2C_STOP_MCC_LEN);
3603 
3604 	cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_STOP_MCC);
3605 	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
3606 }
3607 
3608 #define H2C_DEL_MCC_GROUP_LEN 4
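/* Delete MCC group @group from firmware; @prev_groups as in
 * rtw89_fw_h2c_stop_mcc().
 */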
3609 int rtw89_fw_h2c_del_mcc_group(struct rtw89_dev *rtwdev, u8 group,
3610 			       bool prev_groups)
3611 {
3612 	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
3613 	struct sk_buff *skb;
3614 	unsigned int cond;
3615 
3616 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DEL_MCC_GROUP_LEN);
3617 	if (!skb) {
3618 		rtw89_err(rtwdev,
3619 			  "failed to alloc skb for del mcc group\n");
3620 		return -ENOMEM;
3621 	}
3622 
3623 	skb_put(skb, H2C_DEL_MCC_GROUP_LEN);
3624 	RTW89_SET_FWCMD_DEL_MCC_GROUP_GROUP(skb->data, group);
3625 	RTW89_SET_FWCMD_DEL_MCC_GROUP_PREV_GROUPS(skb->data, prev_groups);
3626 
3627 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3628 			      H2C_CAT_MAC,
3629 			      H2C_CL_MCC,
3630 			      H2C_FUNC_DEL_MCC_GROUP, 0, 0,
3631 			      H2C_DEL_MCC_GROUP_LEN);
3632 
3633 	cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_DEL_MCC_GROUP);
3634 	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
3635 }
3636 
3637 #define H2C_RESET_MCC_GROUP_LEN 4
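/* Reset the firmware state of MCC group @group. */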
3638 int rtw89_fw_h2c_reset_mcc_group(struct rtw89_dev *rtwdev, u8 group)
3639 {
3640 	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
3641 	struct sk_buff *skb;
3642 	unsigned int cond;
3643 
3644 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RESET_MCC_GROUP_LEN);
3645 	if (!skb) {
3646 		rtw89_err(rtwdev,
3647 			  "failed to alloc skb for reset mcc group\n");
3648 		return -ENOMEM;
3649 	}
3650 
3651 	skb_put(skb, H2C_RESET_MCC_GROUP_LEN);
3652 	RTW89_SET_FWCMD_RESET_MCC_GROUP_GROUP(skb->data, group);
3653 
3654 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3655 			      H2C_CAT_MAC,
3656 			      H2C_CL_MCC,
3657 			      H2C_FUNC_RESET_MCC_GROUP, 0, 0,
3658 			      H2C_RESET_MCC_GROUP_LEN);
3659 
3660 	cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_RESET_MCC_GROUP);
3661 	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
3662 }
3663 
3664 #define H2C_MCC_REQ_TSF_LEN 4
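/* Request the TSF report for @req->macid_x/@req->macid_y in @req->group.
 * The report delivered through the wait data buffer is copied to @rpt.
 */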
3665 int rtw89_fw_h2c_mcc_req_tsf(struct rtw89_dev *rtwdev,
3666 			     const struct rtw89_fw_mcc_tsf_req *req,
3667 			     struct rtw89_mac_mcc_tsf_rpt *rpt)
3668 {
3669 	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
3670 	struct rtw89_mac_mcc_tsf_rpt *tmp;
3671 	struct sk_buff *skb;
3672 	unsigned int cond;
3673 	int ret;
3674 
3675 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_REQ_TSF_LEN);
3676 	if (!skb) {
3677 		rtw89_err(rtwdev,
3678 			  "failed to alloc skb for mcc req tsf\n");
3679 		return -ENOMEM;
3680 	}
3681 
3682 	skb_put(skb, H2C_MCC_REQ_TSF_LEN);
3683 	RTW89_SET_FWCMD_MCC_REQ_TSF_GROUP(skb->data, req->group);
3684 	RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_X(skb->data, req->macid_x);
3685 	RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_Y(skb->data, req->macid_y);
3686 
3687 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3688 			      H2C_CAT_MAC,
3689 			      H2C_CL_MCC,
3690 			      H2C_FUNC_MCC_REQ_TSF, 0, 0,
3691 			      H2C_MCC_REQ_TSF_LEN);
3692 
3693 	cond = RTW89_MCC_WAIT_COND(req->group, H2C_FUNC_MCC_REQ_TSF);
3694 	ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
3695 	if (ret)
3696 		return ret;
3697 
3698 	tmp = (struct rtw89_mac_mcc_tsf_rpt *)wait->data.buf;
3699 	*rpt = *tmp;
3700 
3701 	return 0;
3702 }
3703 
3704 #define H2C_MCC_MACID_BITMAP_DSC_LEN 4
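/* Send the macid bitmap of @group to firmware. @bitmap must hold
 * RTW89_MAX_MAC_ID_NUM / 8 bytes, which are appended after the fixed
 * 4-byte descriptor.
 */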
3705 int rtw89_fw_h2c_mcc_macid_bitamp(struct rtw89_dev *rtwdev, u8 group, u8 macid,
3706 				  u8 *bitmap)
3707 {
3708 	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
3709 	struct sk_buff *skb;
3710 	unsigned int cond;
3711 	u8 map_len;
3712 	u8 h2c_len;
3713 
3714 	BUILD_BUG_ON(RTW89_MAX_MAC_ID_NUM % 8);
3715 	map_len = RTW89_MAX_MAC_ID_NUM / 8;
3716 	h2c_len = H2C_MCC_MACID_BITMAP_DSC_LEN + map_len;
3717 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, h2c_len);
3718 	if (!skb) {
3719 		rtw89_err(rtwdev,
3720 			  "failed to alloc skb for mcc macid bitmap\n");
3721 		return -ENOMEM;
3722 	}
3723 
3724 	skb_put(skb, h2c_len);
3725 	RTW89_SET_FWCMD_MCC_MACID_BITMAP_GROUP(skb->data, group);
3726 	RTW89_SET_FWCMD_MCC_MACID_BITMAP_MACID(skb->data, macid);
3727 	RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP_LENGTH(skb->data, map_len);
3728 	RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP(skb->data, bitmap, map_len);
3729 
3730 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3731 			      H2C_CAT_MAC,
3732 			      H2C_CL_MCC,
3733 			      H2C_FUNC_MCC_MACID_BITMAP, 0, 0,
3734 			      h2c_len);
3735 
3736 	cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_MACID_BITMAP);
3737 	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
3738 }
3739 
3740 #define H2C_MCC_SYNC_LEN 4
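/* Request firmware TSF synchronization between @source and @target within
 * @group, using @offset as the sync offset.
 */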
3741 int rtw89_fw_h2c_mcc_sync(struct rtw89_dev *rtwdev, u8 group, u8 source,
3742 			  u8 target, u8 offset)
3743 {
3744 	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
3745 	struct sk_buff *skb;
3746 	unsigned int cond;
3747 
3748 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SYNC_LEN);
3749 	if (!skb) {
3750 		rtw89_err(rtwdev,
3751 			  "failed to alloc skb for mcc sync\n");
3752 		return -ENOMEM;
3753 	}
3754 
3755 	skb_put(skb, H2C_MCC_SYNC_LEN);
3756 	RTW89_SET_FWCMD_MCC_SYNC_GROUP(skb->data, group);
3757 	RTW89_SET_FWCMD_MCC_SYNC_MACID_SOURCE(skb->data, source);
3758 	RTW89_SET_FWCMD_MCC_SYNC_MACID_TARGET(skb->data, target);
3759 	RTW89_SET_FWCMD_MCC_SYNC_SYNC_OFFSET(skb->data, offset);
3760 
3761 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3762 			      H2C_CAT_MAC,
3763 			      H2C_CL_MCC,
3764 			      H2C_FUNC_MCC_SYNC, 0, 0,
3765 			      H2C_MCC_SYNC_LEN);
3766 
3767 	cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_SYNC);
3768 	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
3769 }
3770 
3771 #define H2C_MCC_SET_DURATION_LEN 20
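/* Update the start TSF and the per-slot durations (@p->duration_x,
 * @p->duration_y) of MCC group @p->group.
 */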
3772 int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev,
3773 				  const struct rtw89_fw_mcc_duration *p)
3774 {
3775 	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
3776 	struct sk_buff *skb;
3777 	unsigned int cond;
3778 
3779 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SET_DURATION_LEN);
3780 	if (!skb) {
3781 		rtw89_err(rtwdev,
3782 			  "failed to alloc skb for mcc set duration\n");
3783 		return -ENOMEM;
3784 	}
3785 
3786 	skb_put(skb, H2C_MCC_SET_DURATION_LEN);
3787 	RTW89_SET_FWCMD_MCC_SET_DURATION_GROUP(skb->data, p->group);
3788 	RTW89_SET_FWCMD_MCC_SET_DURATION_BTC_IN_GROUP(skb->data, p->btc_in_group);
3789 	RTW89_SET_FWCMD_MCC_SET_DURATION_START_MACID(skb->data, p->start_macid);
3790 	RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_X(skb->data, p->macid_x);
3791 	RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_Y(skb->data, p->macid_y);
3792 	RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_LOW(skb->data,
3793 						       p->start_tsf_low);
3794 	RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_HIGH(skb->data,
3795 							p->start_tsf_high);
3796 	RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_X(skb->data, p->duration_x);
3797 	RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_Y(skb->data, p->duration_y);
3798 
3799 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3800 			      H2C_CAT_MAC,
3801 			      H2C_CL_MCC,
3802 			      H2C_FUNC_MCC_SET_DURATION, 0, 0,
3803 			      H2C_MCC_SET_DURATION_LEN);
3804 
3805 	cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_MCC_SET_DURATION);
3806 	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
3807 }
3808