xref: /linux/drivers/net/wireless/realtek/rtw89/fw.c (revision bdce82e960d1205d118662f575cec39379984e34)
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright(c) 2019-2020  Realtek Corporation
3  */
4 
5 #include "cam.h"
6 #include "chan.h"
7 #include "coex.h"
8 #include "debug.h"
9 #include "fw.h"
10 #include "mac.h"
11 #include "phy.h"
12 #include "ps.h"
13 #include "reg.h"
14 #include "util.h"
15 
16 union rtw89_fw_element_arg {
17 	size_t offset;
18 	enum rtw89_rf_path rf_path;
19 	enum rtw89_fw_type fw_type;
20 };
21 
22 struct rtw89_fw_element_handler {
23 	int (*fn)(struct rtw89_dev *rtwdev,
24 		  const struct rtw89_fw_element_hdr *elm,
25 		  const union rtw89_fw_element_arg arg);
26 	const union rtw89_fw_element_arg arg;
27 	const char *name;
28 };
29 
30 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
31 				    struct sk_buff *skb);
32 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
33 				 struct rtw89_wait_info *wait, unsigned int cond);
34 
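/* Allocate an skb for an H2C (host to card) command. Headroom is reserved
 * for the chip's H2C TX descriptor, plus the H2C command header when
 * @header is true, so they can later be pushed in front of the payload,
 * and the payload area itself is zeroed.
 */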
35 static struct sk_buff *rtw89_fw_h2c_alloc_skb(struct rtw89_dev *rtwdev, u32 len,
36 					      bool header)
37 {
38 	struct sk_buff *skb;
39 	u32 header_len = 0;
40 	u32 h2c_desc_size = rtwdev->chip->h2c_desc_size;
41 
42 	if (header)
43 		header_len = H2C_HEADER_LEN;
44 
45 	skb = dev_alloc_skb(len + header_len + h2c_desc_size);
46 	if (!skb)
47 		return NULL;
48 	skb_reserve(skb, header_len + h2c_desc_size);
49 	memset(skb->data, 0, len);
50 
51 	return skb;
52 }
53 
54 struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len)
55 {
56 	return rtw89_fw_h2c_alloc_skb(rtwdev, len, true);
57 }
58 
59 struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len)
60 {
61 	return rtw89_fw_h2c_alloc_skb(rtwdev, len, false);
62 }
63 
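/* Poll the firmware download status through the MAC-generation specific
 * fwdl_get_status() until the WCPU reports FW_INIT_RDY. Known failure
 * codes (checksum, security, cut version mismatch) are translated to
 * -EINVAL; on success RTW89_FLAG_FW_RDY is set.
 */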
64 int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev, enum rtw89_fwdl_check_type type)
65 {
66 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
67 	u8 val;
68 	int ret;
69 
70 	ret = read_poll_timeout_atomic(mac->fwdl_get_status, val,
71 				       val == RTW89_FWDL_WCPU_FW_INIT_RDY,
72 				       1, FWDL_WAIT_CNT, false, rtwdev, type);
73 	if (ret) {
74 		switch (val) {
75 		case RTW89_FWDL_CHECKSUM_FAIL:
76 			rtw89_err(rtwdev, "fw checksum fail\n");
77 			return -EINVAL;
78 
79 		case RTW89_FWDL_SECURITY_FAIL:
80 			rtw89_err(rtwdev, "fw security fail\n");
81 			return -EINVAL;
82 
83 		case RTW89_FWDL_CV_NOT_MATCH:
84 			rtw89_err(rtwdev, "fw cv not match\n");
85 			return -EINVAL;
86 
87 		default:
88 			rtw89_err(rtwdev, "fw unexpected status %d\n", val);
89 			return -EBUSY;
90 		}
91 	}
92 
93 	set_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);
94 
95 	return 0;
96 }
97 
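/* Parse a version-0 firmware header: read the section count, account for an
 * optional dynamic header, record each section's download address, length
 * and flags, and verify that the summed section lengths plus any security
 * signature data (mssc) match the size of the firmware blob exactly.
 */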
98 static int rtw89_fw_hdr_parser_v0(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
99 				  struct rtw89_fw_bin_info *info)
100 {
101 	const struct rtw89_fw_hdr *fw_hdr = (const struct rtw89_fw_hdr *)fw;
102 	struct rtw89_fw_hdr_section_info *section_info;
103 	const struct rtw89_fw_dynhdr_hdr *fwdynhdr;
104 	const struct rtw89_fw_hdr_section *section;
105 	const u8 *fw_end = fw + len;
106 	const u8 *bin;
107 	u32 base_hdr_len;
108 	u32 mssc_len = 0;
109 	u32 i;
110 
111 	if (!info)
112 		return -EINVAL;
113 
114 	info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_W6_SEC_NUM);
115 	base_hdr_len = struct_size(fw_hdr, sections, info->section_num);
116 	info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_W7_DYN_HDR);
117 
118 	if (info->dynamic_hdr_en) {
119 		info->hdr_len = le32_get_bits(fw_hdr->w3, FW_HDR_W3_LEN);
120 		info->dynamic_hdr_len = info->hdr_len - base_hdr_len;
121 		fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len);
122 		if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) {
123 			rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n");
124 			return -EINVAL;
125 		}
126 	} else {
127 		info->hdr_len = base_hdr_len;
128 		info->dynamic_hdr_len = 0;
129 	}
130 
131 	bin = fw + info->hdr_len;
132 
133 	/* jump to section header */
134 	section_info = info->section_info;
135 	for (i = 0; i < info->section_num; i++) {
136 		section = &fw_hdr->sections[i];
137 		section_info->type =
138 			le32_get_bits(section->w1, FWSECTION_HDR_W1_SECTIONTYPE);
139 		if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
140 			section_info->mssc =
141 				le32_get_bits(section->w2, FWSECTION_HDR_W2_MSSC);
142 			mssc_len += section_info->mssc * FWDL_SECURITY_SIGLEN;
143 		} else {
144 			section_info->mssc = 0;
145 		}
146 
147 		section_info->len = le32_get_bits(section->w1, FWSECTION_HDR_W1_SEC_SIZE);
148 		if (le32_get_bits(section->w1, FWSECTION_HDR_W1_CHECKSUM))
149 			section_info->len += FWDL_SECTION_CHKSUM_LEN;
150 		section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_W1_REDL);
151 		section_info->dladdr =
152 			le32_get_bits(section->w0, FWSECTION_HDR_W0_DL_ADDR) & 0x1fffffff;
153 		section_info->addr = bin;
154 		bin += section_info->len;
155 		section_info++;
156 	}
157 
158 	if (fw_end != bin + mssc_len) {
159 		rtw89_err(rtwdev, "[ERR]fw bin size mismatch\n");
160 		return -EINVAL;
161 	}
162 
163 	return 0;
164 }
165 
166 static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
167 				  struct rtw89_fw_bin_info *info)
168 {
169 	const struct rtw89_fw_hdr_v1 *fw_hdr = (const struct rtw89_fw_hdr_v1 *)fw;
170 	struct rtw89_fw_hdr_section_info *section_info;
171 	const struct rtw89_fw_dynhdr_hdr *fwdynhdr;
172 	const struct rtw89_fw_hdr_section_v1 *section;
173 	const u8 *fw_end = fw + len;
174 	const u8 *bin;
175 	u32 base_hdr_len;
176 	u32 mssc_len = 0;
177 	u32 i;
178 
179 	info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_SEC_NUM);
180 	base_hdr_len = struct_size(fw_hdr, sections, info->section_num);
181 	info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_DYN_HDR);
182 
183 	if (info->dynamic_hdr_en) {
184 		info->hdr_len = le32_get_bits(fw_hdr->w5, FW_HDR_V1_W5_HDR_SIZE);
185 		info->dynamic_hdr_len = info->hdr_len - base_hdr_len;
186 		fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len);
187 		if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) {
188 			rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n");
189 			return -EINVAL;
190 		}
191 	} else {
192 		info->hdr_len = base_hdr_len;
193 		info->dynamic_hdr_len = 0;
194 	}
195 
196 	bin = fw + info->hdr_len;
197 
198 	/* jump to section header */
199 	section_info = info->section_info;
200 	for (i = 0; i < info->section_num; i++) {
201 		section = &fw_hdr->sections[i];
202 		section_info->type =
203 			le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SECTIONTYPE);
204 		if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
205 			section_info->mssc =
206 				le32_get_bits(section->w2, FWSECTION_HDR_V1_W2_MSSC);
207 			mssc_len += section_info->mssc * FWDL_SECURITY_SIGLEN;
208 		} else {
209 			section_info->mssc = 0;
210 		}
211 
212 		section_info->len =
213 			le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SEC_SIZE);
214 		if (le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_CHECKSUM))
215 			section_info->len += FWDL_SECTION_CHKSUM_LEN;
216 		section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_REDL);
217 		section_info->dladdr =
218 			le32_get_bits(section->w0, FWSECTION_HDR_V1_W0_DL_ADDR);
219 		section_info->addr = bin;
220 		bin += section_info->len;
221 		section_info++;
222 	}
223 
224 	if (fw_end != bin + mssc_len) {
225 		rtw89_err(rtwdev, "[ERR]fw bin size mismatch\n");
226 		return -EINVAL;
227 	}
228 
229 	return 0;
230 }
231 
232 static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev,
233 			       const struct rtw89_fw_suit *fw_suit,
234 			       struct rtw89_fw_bin_info *info)
235 {
236 	const u8 *fw = fw_suit->data;
237 	u32 len = fw_suit->size;
238 
239 	if (!fw || !len) {
240 		rtw89_err(rtwdev, "fw type %d isn't recognized\n", fw_suit->type);
241 		return -ENOENT;
242 	}
243 
244 	switch (fw_suit->hdr_ver) {
245 	case 0:
246 		return rtw89_fw_hdr_parser_v0(rtwdev, fw, len, info);
247 	case 1:
248 		return rtw89_fw_hdr_parser_v1(rtwdev, fw, len, info);
249 	default:
250 		return -ENOENT;
251 	}
252 }
253 
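/* Locate the requested firmware type inside the multi-firmware (MFW)
 * container. A blob without the MFW signature is treated as legacy,
 * single-image firmware and can only satisfy RTW89_FW_NORMAL. MFW entries
 * are matched against the chip cut version (hal.cv), except the log-format
 * entry, which is accepted for any cut.
 */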
254 static
255 int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
256 			struct rtw89_fw_suit *fw_suit, bool nowarn)
257 {
258 	struct rtw89_fw_info *fw_info = &rtwdev->fw;
259 	const struct firmware *firmware = fw_info->req.firmware;
260 	const u8 *mfw = firmware->data;
261 	u32 mfw_len = firmware->size;
262 	const struct rtw89_mfw_hdr *mfw_hdr = (const struct rtw89_mfw_hdr *)mfw;
263 	const struct rtw89_mfw_info *mfw_info;
264 	int i;
265 
266 	if (mfw_hdr->sig != RTW89_MFW_SIG) {
267 		rtw89_debug(rtwdev, RTW89_DBG_FW, "use legacy firmware\n");
268 		/* legacy firmware supports the normal type only */
269 		if (type != RTW89_FW_NORMAL)
270 			return -EINVAL;
271 		fw_suit->data = mfw;
272 		fw_suit->size = mfw_len;
273 		return 0;
274 	}
275 
276 	for (i = 0; i < mfw_hdr->fw_nr; i++) {
277 		mfw_info = &mfw_hdr->info[i];
278 		if (mfw_info->type == type) {
279 			if (mfw_info->cv == rtwdev->hal.cv && !mfw_info->mp)
280 				goto found;
281 			if (type == RTW89_FW_LOGFMT)
282 				goto found;
283 		}
284 	}
285 
286 	if (!nowarn)
287 		rtw89_err(rtwdev, "no suitable firmware found\n");
288 	return -ENOENT;
289 
290 found:
291 	fw_suit->data = mfw + le32_to_cpu(mfw_info->shift);
292 	fw_suit->size = le32_to_cpu(mfw_info->size);
293 	return 0;
294 }
295 
296 static u32 rtw89_mfw_get_size(struct rtw89_dev *rtwdev)
297 {
298 	struct rtw89_fw_info *fw_info = &rtwdev->fw;
299 	const struct firmware *firmware = fw_info->req.firmware;
300 	const struct rtw89_mfw_hdr *mfw_hdr =
301 		(const struct rtw89_mfw_hdr *)firmware->data;
302 	const struct rtw89_mfw_info *mfw_info;
303 	u32 size;
304 
305 	if (mfw_hdr->sig != RTW89_MFW_SIG) {
306 		rtw89_warn(rtwdev, "not mfw format\n");
307 		return 0;
308 	}
309 
310 	mfw_info = &mfw_hdr->info[mfw_hdr->fw_nr - 1];
311 	size = le32_to_cpu(mfw_info->shift) + le32_to_cpu(mfw_info->size);
312 
313 	return size;
314 }
315 
316 static void rtw89_fw_update_ver_v0(struct rtw89_dev *rtwdev,
317 				   struct rtw89_fw_suit *fw_suit,
318 				   const struct rtw89_fw_hdr *hdr)
319 {
320 	fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MAJOR_VERSION);
321 	fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MINOR_VERSION);
322 	fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_W1_SUBVERSION);
323 	fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_W1_SUBINDEX);
324 	fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_W2_COMMITID);
325 	fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_W5_YEAR);
326 	fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_W4_MONTH);
327 	fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_W4_DATE);
328 	fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_W4_HOUR);
329 	fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_W4_MIN);
330 	fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_W7_CMD_VERSERION);
331 }
332 
333 static void rtw89_fw_update_ver_v1(struct rtw89_dev *rtwdev,
334 				   struct rtw89_fw_suit *fw_suit,
335 				   const struct rtw89_fw_hdr_v1 *hdr)
336 {
337 	fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MAJOR_VERSION);
338 	fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MINOR_VERSION);
339 	fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBVERSION);
340 	fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBINDEX);
341 	fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_V1_W2_COMMITID);
342 	fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_V1_W5_YEAR);
343 	fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MONTH);
344 	fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_V1_W4_DATE);
345 	fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_V1_W4_HOUR);
346 	fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MIN);
347 	fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_V1_W3_CMD_VERSERION);
348 }
349 
350 static int rtw89_fw_update_ver(struct rtw89_dev *rtwdev,
351 			       enum rtw89_fw_type type,
352 			       struct rtw89_fw_suit *fw_suit)
353 {
354 	const struct rtw89_fw_hdr *v0 = (const struct rtw89_fw_hdr *)fw_suit->data;
355 	const struct rtw89_fw_hdr_v1 *v1 = (const struct rtw89_fw_hdr_v1 *)fw_suit->data;
356 
357 	if (type == RTW89_FW_LOGFMT)
358 		return 0;
359 
360 	fw_suit->type = type;
361 	fw_suit->hdr_ver = le32_get_bits(v0->w3, FW_HDR_W3_HDR_VER);
362 
363 	switch (fw_suit->hdr_ver) {
364 	case 0:
365 		rtw89_fw_update_ver_v0(rtwdev, fw_suit, v0);
366 		break;
367 	case 1:
368 		rtw89_fw_update_ver_v1(rtwdev, fw_suit, v1);
369 		break;
370 	default:
371 		rtw89_err(rtwdev, "Unknown firmware header version %u\n",
372 			  fw_suit->hdr_ver);
373 		return -ENOENT;
374 	}
375 
376 	rtw89_info(rtwdev,
377 		   "Firmware version %u.%u.%u.%u (%08x), cmd version %u, type %u\n",
378 		   fw_suit->major_ver, fw_suit->minor_ver, fw_suit->sub_ver,
379 		   fw_suit->sub_idex, fw_suit->commitid, fw_suit->cmd_ver, type);
380 
381 	return 0;
382 }
383 
384 static
385 int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
386 			 bool nowarn)
387 {
388 	struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
389 	int ret;
390 
391 	ret = rtw89_mfw_recognize(rtwdev, type, fw_suit, nowarn);
392 	if (ret)
393 		return ret;
394 
395 	return rtw89_fw_update_ver(rtwdev, type, fw_suit);
396 }
397 
398 static
399 int __rtw89_fw_recognize_from_elm(struct rtw89_dev *rtwdev,
400 				  const struct rtw89_fw_element_hdr *elm,
401 				  const union rtw89_fw_element_arg arg)
402 {
403 	enum rtw89_fw_type type = arg.fw_type;
404 	struct rtw89_hal *hal = &rtwdev->hal;
405 	struct rtw89_fw_suit *fw_suit;
406 
407 	if (hal->cv != elm->u.bbmcu.cv)
408 		return 1; /* ignore this element */
409 
410 	fw_suit = rtw89_fw_suit_get(rtwdev, type);
411 	fw_suit->data = elm->u.bbmcu.contents;
412 	fw_suit->size = le32_to_cpu(elm->size);
413 
414 	return rtw89_fw_update_ver(rtwdev, type, fw_suit);
415 }
416 
417 #define __DEF_FW_FEAT_COND(__cond, __op) \
418 static bool __fw_feat_cond_ ## __cond(u32 suit_ver_code, u32 comp_ver_code) \
419 { \
420 	return suit_ver_code __op comp_ver_code; \
421 }
422 
423 __DEF_FW_FEAT_COND(ge, >=); /* greater or equal */
424 __DEF_FW_FEAT_COND(le, <=); /* less or equal */
425 __DEF_FW_FEAT_COND(lt, <); /* less than */
426 
427 struct __fw_feat_cfg {
428 	enum rtw89_core_chip_id chip_id;
429 	enum rtw89_fw_feature feature;
430 	u32 ver_code;
431 	bool (*cond)(u32 suit_ver_code, u32 comp_ver_code);
432 };
433 
434 #define __CFG_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat) \
435 	{ \
436 		.chip_id = _chip, \
437 		.feature = RTW89_FW_FEATURE_ ## _feat, \
438 		.ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \
439 		.cond = __fw_feat_cond_ ## _cond, \
440 	}
441 
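/* Per-chip firmware feature table: a feature flag is set when the loaded
 * firmware version satisfies the listed condition (ge/le/lt) against the
 * given major.minor.sub.index version code.
 */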
442 static const struct __fw_feat_cfg fw_feat_tbl[] = {
443 	__CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, TX_WAKE),
444 	__CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, SCAN_OFFLOAD),
445 	__CFG_FW_FEAT(RTL8851B, ge, 0, 29, 41, 0, CRASH_TRIGGER),
446 	__CFG_FW_FEAT(RTL8852A, le, 0, 13, 29, 0, OLD_HT_RA_FORMAT),
447 	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD),
448 	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE),
449 	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER),
450 	__CFG_FW_FEAT(RTL8852A, lt, 0, 13, 38, 0, NO_PACKET_DROP),
451 	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, NO_LPS_PG),
452 	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, TX_WAKE),
453 	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, CRASH_TRIGGER),
454 	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, SCAN_OFFLOAD),
455 	__CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS),
456 	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE),
457 	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD),
458 	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER),
459 	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 56, 10, BEACON_FILTER),
460 	__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 30, 0, CRASH_TRIGGER),
461 	__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 11, 0, MACID_PAUSE_SLEEP),
462 };
463 
464 static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw,
465 					 const struct rtw89_chip_info *chip,
466 					 u32 ver_code)
467 {
468 	int i;
469 
470 	for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) {
471 		const struct __fw_feat_cfg *ent = &fw_feat_tbl[i];
472 
473 		if (chip->chip_id != ent->chip_id)
474 			continue;
475 
476 		if (ent->cond(ver_code, ent->ver_code))
477 			RTW89_SET_FW_FEATURE(ent->feature, fw);
478 	}
479 }
480 
481 static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev)
482 {
483 	const struct rtw89_chip_info *chip = rtwdev->chip;
484 	const struct rtw89_fw_suit *fw_suit;
485 	u32 suit_ver_code;
486 
487 	fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL);
488 	suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit);
489 
490 	rtw89_fw_iterate_feature_cfg(&rtwdev->fw, chip, suit_ver_code);
491 }
492 
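/* Early firmware probe, used before the full driver is initialized: try
 * firmware formats from chip->fw_format_max down to 0, keep the first one
 * that loads, and derive feature flags from its header version when the
 * header can be parsed.
 */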
493 const struct firmware *
494 rtw89_early_fw_feature_recognize(struct device *device,
495 				 const struct rtw89_chip_info *chip,
496 				 struct rtw89_fw_info *early_fw,
497 				 int *used_fw_format)
498 {
499 	const struct firmware *firmware;
500 	char fw_name[64];
501 	int fw_format;
502 	u32 ver_code;
503 	int ret;
504 
505 	for (fw_format = chip->fw_format_max; fw_format >= 0; fw_format--) {
506 		rtw89_fw_get_filename(fw_name, sizeof(fw_name),
507 				      chip->fw_basename, fw_format);
508 
509 		ret = request_firmware(&firmware, fw_name, device);
510 		if (!ret) {
511 			dev_info(device, "loaded firmware %s\n", fw_name);
512 			*used_fw_format = fw_format;
513 			break;
514 		}
515 	}
516 
517 	if (ret) {
518 		dev_err(device, "failed to early request firmware: %d\n", ret);
519 		return NULL;
520 	}
521 
522 	ver_code = rtw89_compat_fw_hdr_ver_code(firmware->data);
523 
524 	if (!ver_code)
525 		goto out;
526 
527 	rtw89_fw_iterate_feature_cfg(early_fw, chip, ver_code);
528 
529 out:
530 	return firmware;
531 }
532 
533 int rtw89_fw_recognize(struct rtw89_dev *rtwdev)
534 {
535 	const struct rtw89_chip_info *chip = rtwdev->chip;
536 	int ret;
537 
538 	if (chip->try_ce_fw) {
539 		ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL_CE, true);
540 		if (!ret)
541 			goto normal_done;
542 	}
543 
544 	ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL, false);
545 	if (ret)
546 		return ret;
547 
548 normal_done:
549 	/* It still works even if the wowlan firmware doesn't exist. */
550 	__rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN, false);
551 
552 	/* It still works even if the log format file doesn't exist. */
553 	__rtw89_fw_recognize(rtwdev, RTW89_FW_LOGFMT, true);
554 
555 	rtw89_fw_recognize_features(rtwdev);
556 
557 	rtw89_coex_recognize_ver(rtwdev);
558 
559 	return 0;
560 }
561 
562 static
563 int rtw89_build_phy_tbl_from_elm(struct rtw89_dev *rtwdev,
564 				 const struct rtw89_fw_element_hdr *elm,
565 				 const union rtw89_fw_element_arg arg)
566 {
567 	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
568 	struct rtw89_phy_table *tbl;
569 	struct rtw89_reg2_def *regs;
570 	enum rtw89_rf_path rf_path;
571 	u32 n_regs, i;
572 	u8 idx;
573 
574 	tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
575 	if (!tbl)
576 		return -ENOMEM;
577 
578 	switch (le32_to_cpu(elm->id)) {
579 	case RTW89_FW_ELEMENT_ID_BB_REG:
580 		elm_info->bb_tbl = tbl;
581 		break;
582 	case RTW89_FW_ELEMENT_ID_BB_GAIN:
583 		elm_info->bb_gain = tbl;
584 		break;
585 	case RTW89_FW_ELEMENT_ID_RADIO_A:
586 	case RTW89_FW_ELEMENT_ID_RADIO_B:
587 	case RTW89_FW_ELEMENT_ID_RADIO_C:
588 	case RTW89_FW_ELEMENT_ID_RADIO_D:
589 		rf_path = arg.rf_path;
590 		idx = elm->u.reg2.idx;
591 
592 		elm_info->rf_radio[idx] = tbl;
593 		tbl->rf_path = rf_path;
594 		tbl->config = rtw89_phy_config_rf_reg_v1;
595 		break;
596 	case RTW89_FW_ELEMENT_ID_RF_NCTL:
597 		elm_info->rf_nctl = tbl;
598 		break;
599 	default:
600 		kfree(tbl);
601 		return -ENOENT;
602 	}
603 
604 	n_regs = le32_to_cpu(elm->size) / sizeof(tbl->regs[0]);
605 	regs = kcalloc(n_regs, sizeof(tbl->regs[0]), GFP_KERNEL);
606 	if (!regs)
607 		goto out;
608 
609 	for (i = 0; i < n_regs; i++) {
610 		regs[i].addr = le32_to_cpu(elm->u.reg2.regs[i].addr);
611 		regs[i].data = le32_to_cpu(elm->u.reg2.regs[i].data);
612 	}
613 
614 	tbl->n_regs = n_regs;
615 	tbl->regs = regs;
616 
617 	return 0;
618 
619 out:
620 	kfree(tbl);
621 	return -ENOMEM;
622 }
623 
624 static
625 int rtw89_fw_recognize_txpwr_from_elm(struct rtw89_dev *rtwdev,
626 				      const struct rtw89_fw_element_hdr *elm,
627 				      const union rtw89_fw_element_arg arg)
628 {
629 	const struct __rtw89_fw_txpwr_element *txpwr_elm = &elm->u.txpwr;
630 	const unsigned long offset = arg.offset;
631 	struct rtw89_efuse *efuse = &rtwdev->efuse;
632 	struct rtw89_txpwr_conf *conf;
633 
634 	if (!rtwdev->rfe_data) {
635 		rtwdev->rfe_data = kzalloc(sizeof(*rtwdev->rfe_data), GFP_KERNEL);
636 		if (!rtwdev->rfe_data)
637 			return -ENOMEM;
638 	}
639 
640 	conf = (void *)rtwdev->rfe_data + offset;
641 
642 	/* if multiple entries match, the last one takes effect */
643 	if (txpwr_elm->rfe_type == efuse->rfe_type)
644 		goto setup;
645 
646 	/* if none is matched, accept the default */
647 	if (txpwr_elm->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE &&
648 	    (!rtw89_txpwr_conf_valid(conf) ||
649 	     conf->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE))
650 		goto setup;
651 
652 	rtw89_debug(rtwdev, RTW89_DBG_FW, "skip txpwr element ID %u RFE %u\n",
653 		    elm->id, txpwr_elm->rfe_type);
654 	return 0;
655 
656 setup:
657 	rtw89_debug(rtwdev, RTW89_DBG_FW, "take txpwr element ID %u RFE %u\n",
658 		    elm->id, txpwr_elm->rfe_type);
659 
660 	conf->rfe_type = txpwr_elm->rfe_type;
661 	conf->ent_sz = txpwr_elm->ent_sz;
662 	conf->num_ents = le32_to_cpu(txpwr_elm->num_ents);
663 	conf->data = txpwr_elm->content;
664 	return 0;
665 }
666 
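/* Build the TX power tracking table from a firmware element. The element's
 * bitmap must cover every band the chip supports; the delta tables are then
 * sliced out of the element contents, with 6 GHz, 5 GHz and 2 GHz types
 * spanning 4, 3 and 1 subbands respectively.
 */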
667 static
668 int rtw89_build_txpwr_trk_tbl_from_elm(struct rtw89_dev *rtwdev,
669 				       const struct rtw89_fw_element_hdr *elm,
670 				       const union rtw89_fw_element_arg arg)
671 {
672 	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
673 	const struct rtw89_chip_info *chip = rtwdev->chip;
674 	u32 needed_bitmap = 0;
675 	u32 offset = 0;
676 	int subband;
677 	u32 bitmap;
678 	int type;
679 
680 	if (chip->support_bands & BIT(NL80211_BAND_6GHZ))
681 		needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_6GHZ;
682 	if (chip->support_bands & BIT(NL80211_BAND_5GHZ))
683 		needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_5GHZ;
684 	if (chip->support_bands & BIT(NL80211_BAND_2GHZ))
685 		needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_2GHZ;
686 
687 	bitmap = le32_to_cpu(elm->u.txpwr_trk.bitmap);
688 
689 	if ((bitmap & needed_bitmap) != needed_bitmap) {
690 		rtw89_warn(rtwdev, "needed txpwr trk bitmap %08x but %08x\n",
691 			   needed_bitmap, bitmap);
692 		return -ENOENT;
693 	}
694 
695 	elm_info->txpwr_trk = kzalloc(sizeof(*elm_info->txpwr_trk), GFP_KERNEL);
696 	if (!elm_info->txpwr_trk)
697 		return -ENOMEM;
698 
699 	for (type = 0; bitmap; type++, bitmap >>= 1) {
700 		if (!(bitmap & BIT(0)))
701 			continue;
702 
703 		if (type >= __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_START &&
704 		    type <= __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_MAX)
705 			subband = 4;
706 		else if (type >= __RTW89_FW_TXPWR_TRK_TYPE_5GHZ_START &&
707 			 type <= __RTW89_FW_TXPWR_TRK_TYPE_5GHZ_MAX)
708 			subband = 3;
709 		else if (type >= __RTW89_FW_TXPWR_TRK_TYPE_2GHZ_START &&
710 			 type <= __RTW89_FW_TXPWR_TRK_TYPE_2GHZ_MAX)
711 			subband = 1;
712 		else
713 			break;
714 
715 		elm_info->txpwr_trk->delta[type] = &elm->u.txpwr_trk.contents[offset];
716 
717 		offset += subband;
718 		if (offset * DELTA_SWINGIDX_SIZE > le32_to_cpu(elm->size))
719 			goto err;
720 	}
721 
722 	return 0;
723 
724 err:
725 	rtw89_warn(rtwdev, "unexpected txpwr trk offset %d over size %d\n",
726 		   offset, le32_to_cpu(elm->size));
727 	kfree(elm_info->txpwr_trk);
728 	elm_info->txpwr_trk = NULL;
729 
730 	return -EFAULT;
731 }
732 
733 static
734 int rtw89_build_rfk_log_fmt_from_elm(struct rtw89_dev *rtwdev,
735 				     const struct rtw89_fw_element_hdr *elm,
736 				     const union rtw89_fw_element_arg arg)
737 {
738 	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
739 	u8 rfk_id;
740 
741 	if (elm_info->rfk_log_fmt)
742 		goto allocated;
743 
744 	elm_info->rfk_log_fmt = kzalloc(sizeof(*elm_info->rfk_log_fmt), GFP_KERNEL);
745 	if (!elm_info->rfk_log_fmt)
746 		return 1; /* this is an optional element, so just ignore this */
747 
748 allocated:
749 	rfk_id = elm->u.rfk_log_fmt.rfk_id;
750 	if (rfk_id >= RTW89_PHY_C2H_RFK_LOG_FUNC_NUM)
751 		return 1;
752 
753 	elm_info->rfk_log_fmt->elm[rfk_id] = elm;
754 
755 	return 0;
756 }
757 
758 static const struct rtw89_fw_element_handler __fw_element_handlers[] = {
759 	[RTW89_FW_ELEMENT_ID_BBMCU0] = {__rtw89_fw_recognize_from_elm,
760 					{ .fw_type = RTW89_FW_BBMCU0 }, NULL},
761 	[RTW89_FW_ELEMENT_ID_BBMCU1] = {__rtw89_fw_recognize_from_elm,
762 					{ .fw_type = RTW89_FW_BBMCU1 }, NULL},
763 	[RTW89_FW_ELEMENT_ID_BB_REG] = {rtw89_build_phy_tbl_from_elm, {}, "BB"},
764 	[RTW89_FW_ELEMENT_ID_BB_GAIN] = {rtw89_build_phy_tbl_from_elm, {}, NULL},
765 	[RTW89_FW_ELEMENT_ID_RADIO_A] = {rtw89_build_phy_tbl_from_elm,
766 					 { .rf_path =  RF_PATH_A }, "radio A"},
767 	[RTW89_FW_ELEMENT_ID_RADIO_B] = {rtw89_build_phy_tbl_from_elm,
768 					 { .rf_path =  RF_PATH_B }, NULL},
769 	[RTW89_FW_ELEMENT_ID_RADIO_C] = {rtw89_build_phy_tbl_from_elm,
770 					 { .rf_path =  RF_PATH_C }, NULL},
771 	[RTW89_FW_ELEMENT_ID_RADIO_D] = {rtw89_build_phy_tbl_from_elm,
772 					 { .rf_path =  RF_PATH_D }, NULL},
773 	[RTW89_FW_ELEMENT_ID_RF_NCTL] = {rtw89_build_phy_tbl_from_elm, {}, "NCTL"},
774 	[RTW89_FW_ELEMENT_ID_TXPWR_BYRATE] = {
775 		rtw89_fw_recognize_txpwr_from_elm,
776 		{ .offset = offsetof(struct rtw89_rfe_data, byrate.conf) }, "TXPWR",
777 	},
778 	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_2GHZ] = {
779 		rtw89_fw_recognize_txpwr_from_elm,
780 		{ .offset = offsetof(struct rtw89_rfe_data, lmt_2ghz.conf) }, NULL,
781 	},
782 	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_5GHZ] = {
783 		rtw89_fw_recognize_txpwr_from_elm,
784 		{ .offset = offsetof(struct rtw89_rfe_data, lmt_5ghz.conf) }, NULL,
785 	},
786 	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_6GHZ] = {
787 		rtw89_fw_recognize_txpwr_from_elm,
788 		{ .offset = offsetof(struct rtw89_rfe_data, lmt_6ghz.conf) }, NULL,
789 	},
790 	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_2GHZ] = {
791 		rtw89_fw_recognize_txpwr_from_elm,
792 		{ .offset = offsetof(struct rtw89_rfe_data, lmt_ru_2ghz.conf) }, NULL,
793 	},
794 	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_5GHZ] = {
795 		rtw89_fw_recognize_txpwr_from_elm,
796 		{ .offset = offsetof(struct rtw89_rfe_data, lmt_ru_5ghz.conf) }, NULL,
797 	},
798 	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_6GHZ] = {
799 		rtw89_fw_recognize_txpwr_from_elm,
800 		{ .offset = offsetof(struct rtw89_rfe_data, lmt_ru_6ghz.conf) }, NULL,
801 	},
802 	[RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT] = {
803 		rtw89_fw_recognize_txpwr_from_elm,
804 		{ .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt.conf) }, NULL,
805 	},
806 	[RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT_RU] = {
807 		rtw89_fw_recognize_txpwr_from_elm,
808 		{ .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt_ru.conf) }, NULL,
809 	},
810 	[RTW89_FW_ELEMENT_ID_TXPWR_TRK] = {
811 		rtw89_build_txpwr_trk_tbl_from_elm, {}, "PWR_TRK",
812 	},
813 	[RTW89_FW_ELEMENT_ID_RFKLOG_FMT] = {
814 		rtw89_build_rfk_log_fmt_from_elm, {}, NULL,
815 	},
816 };
817 
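/* Walk the optional elements appended after the MFW container. Each element
 * starts at an RTW89_FW_ELEMENT_ALIGN-aligned offset and is dispatched to a
 * handler from the table above; every element the chip marks as needed
 * (chip->needed_fw_elms) must be recognized, otherwise -ENOENT is returned.
 */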
818 int rtw89_fw_recognize_elements(struct rtw89_dev *rtwdev)
819 {
820 	struct rtw89_fw_info *fw_info = &rtwdev->fw;
821 	const struct firmware *firmware = fw_info->req.firmware;
822 	const struct rtw89_chip_info *chip = rtwdev->chip;
823 	u32 unrecognized_elements = chip->needed_fw_elms;
824 	const struct rtw89_fw_element_handler *handler;
825 	const struct rtw89_fw_element_hdr *hdr;
826 	u32 elm_size;
827 	u32 elem_id;
828 	u32 offset;
829 	int ret;
830 
831 	BUILD_BUG_ON(sizeof(chip->needed_fw_elms) * 8 < RTW89_FW_ELEMENT_ID_NUM);
832 
833 	offset = rtw89_mfw_get_size(rtwdev);
834 	offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN);
835 	if (offset == 0)
836 		return -EINVAL;
837 
838 	while (offset + sizeof(*hdr) < firmware->size) {
839 		hdr = (const struct rtw89_fw_element_hdr *)(firmware->data + offset);
840 
841 		elm_size = le32_to_cpu(hdr->size);
842 		if (offset + elm_size >= firmware->size) {
843 			rtw89_warn(rtwdev, "firmware element size exceeds firmware size\n");
844 			break;
845 		}
846 
847 		elem_id = le32_to_cpu(hdr->id);
848 		if (elem_id >= ARRAY_SIZE(__fw_element_handlers))
849 			goto next;
850 
851 		handler = &__fw_element_handlers[elem_id];
852 		if (!handler->fn)
853 			goto next;
854 
855 		ret = handler->fn(rtwdev, hdr, handler->arg);
856 		if (ret == 1) /* ignore this element */
857 			goto next;
858 		if (ret)
859 			return ret;
860 
861 		if (handler->name)
862 			rtw89_info(rtwdev, "Firmware element %s version: %4ph\n",
863 				   handler->name, hdr->ver);
864 
865 		unrecognized_elements &= ~BIT(elem_id);
866 next:
867 		offset += sizeof(*hdr) + elm_size;
868 		offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN);
869 	}
870 
871 	if (unrecognized_elements) {
872 		rtw89_err(rtwdev, "Firmware elements 0x%08x are unrecognized\n",
873 			  unrecognized_elements);
874 		return -ENOENT;
875 	}
876 
877 	return 0;
878 }
879 
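/* Prepend the two-word firmware command header to an H2C skb: word 0 carries
 * the delivery type, category, class, function and a rolling sequence number;
 * word 1 carries the total length and the REC/DONE ack request bits. A
 * receive-ack is forced whenever the sequence number is a multiple of four.
 */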
880 void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb,
881 			   u8 type, u8 cat, u8 class, u8 func,
882 			   bool rack, bool dack, u32 len)
883 {
884 	struct fwcmd_hdr *hdr;
885 
886 	hdr = (struct fwcmd_hdr *)skb_push(skb, 8);
887 
888 	if (!(rtwdev->fw.h2c_seq % 4))
889 		rack = true;
890 	hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
891 				FIELD_PREP(H2C_HDR_CAT, cat) |
892 				FIELD_PREP(H2C_HDR_CLASS, class) |
893 				FIELD_PREP(H2C_HDR_FUNC, func) |
894 				FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));
895 
896 	hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
897 					   len + H2C_HEADER_LEN) |
898 				(rack ? H2C_HDR_REC_ACK : 0) |
899 				(dack ? H2C_HDR_DONE_ACK : 0));
900 
901 	rtwdev->fw.h2c_seq++;
902 }
903 
904 static void rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev *rtwdev,
905 				       struct sk_buff *skb,
906 				       u8 type, u8 cat, u8 class, u8 func,
907 				       u32 len)
908 {
909 	struct fwcmd_hdr *hdr;
910 
911 	hdr = (struct fwcmd_hdr *)skb_push(skb, 8);
912 
913 	hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
914 				FIELD_PREP(H2C_HDR_CAT, cat) |
915 				FIELD_PREP(H2C_HDR_CLASS, class) |
916 				FIELD_PREP(H2C_HDR_FUNC, func) |
917 				FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));
918 
919 	hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
920 					   len + H2C_HEADER_LEN));
921 }
922 
923 static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len)
924 {
925 	struct sk_buff *skb;
926 	int ret = 0;
927 
928 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
929 	if (!skb) {
930 		rtw89_err(rtwdev, "failed to alloc skb for fw hdr dl\n");
931 		return -ENOMEM;
932 	}
933 
934 	skb_put_data(skb, fw, len);
935 	SET_FW_HDR_PART_SIZE(skb->data, FWDL_SECTION_PER_PKT_LEN);
936 	rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C,
937 				   H2C_CAT_MAC, H2C_CL_MAC_FWDL,
938 				   H2C_FUNC_MAC_FWHDR_DL, len);
939 
940 	ret = rtw89_h2c_tx(rtwdev, skb, false);
941 	if (ret) {
942 		rtw89_err(rtwdev, "failed to send h2c\n");
943 		ret = -1;
944 		goto fail;
945 	}
946 
947 	return 0;
948 fail:
949 	dev_kfree_skb_any(skb);
950 
951 	return ret;
952 }
953 
954 static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len)
955 {
956 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
957 	int ret;
958 
959 	ret = __rtw89_fw_download_hdr(rtwdev, fw, len);
960 	if (ret) {
961 		rtw89_err(rtwdev, "[ERR]FW header download\n");
962 		return ret;
963 	}
964 
965 	ret = mac->fwdl_check_path_ready(rtwdev, false);
966 	if (ret) {
967 		rtw89_err(rtwdev, "[ERR]FWDL path ready\n");
968 		return ret;
969 	}
970 
971 	rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0);
972 	rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0);
973 
974 	return 0;
975 }
976 
977 static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev,
978 				    struct rtw89_fw_hdr_section_info *info)
979 {
980 	struct sk_buff *skb;
981 	const u8 *section = info->addr;
982 	u32 residue_len = info->len;
983 	u32 pkt_len;
984 	int ret;
985 
986 	while (residue_len) {
987 		if (residue_len >= FWDL_SECTION_PER_PKT_LEN)
988 			pkt_len = FWDL_SECTION_PER_PKT_LEN;
989 		else
990 			pkt_len = residue_len;
991 
992 		skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, pkt_len);
993 		if (!skb) {
994 			rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
995 			return -ENOMEM;
996 		}
997 		skb_put_data(skb, section, pkt_len);
998 
999 		ret = rtw89_h2c_tx(rtwdev, skb, true);
1000 		if (ret) {
1001 			rtw89_err(rtwdev, "failed to send h2c\n");
1002 			ret = -1;
1003 			goto fail;
1004 		}
1005 
1006 		section += pkt_len;
1007 		residue_len -= pkt_len;
1008 	}
1009 
1010 	return 0;
1011 fail:
1012 	dev_kfree_skb_any(skb);
1013 
1014 	return ret;
1015 }
1016 
1017 static enum rtw89_fwdl_check_type
1018 rtw89_fw_get_fwdl_chk_type_from_suit(struct rtw89_dev *rtwdev,
1019 				     const struct rtw89_fw_suit *fw_suit)
1020 {
1021 	switch (fw_suit->type) {
1022 	case RTW89_FW_BBMCU0:
1023 		return RTW89_FWDL_CHECK_BB0_FWDL_DONE;
1024 	case RTW89_FW_BBMCU1:
1025 		return RTW89_FWDL_CHECK_BB1_FWDL_DONE;
1026 	default:
1027 		return RTW89_FWDL_CHECK_WCPU_FWDL_DONE;
1028 	}
1029 }
1030 
1031 static int rtw89_fw_download_main(struct rtw89_dev *rtwdev,
1032 				  const struct rtw89_fw_suit *fw_suit,
1033 				  struct rtw89_fw_bin_info *info)
1034 {
1035 	struct rtw89_fw_hdr_section_info *section_info = info->section_info;
1036 	const struct rtw89_chip_info *chip = rtwdev->chip;
1037 	enum rtw89_fwdl_check_type chk_type;
1038 	u8 section_num = info->section_num;
1039 	int ret;
1040 
1041 	while (section_num--) {
1042 		ret = __rtw89_fw_download_main(rtwdev, section_info);
1043 		if (ret)
1044 			return ret;
1045 		section_info++;
1046 	}
1047 
1048 	if (chip->chip_gen == RTW89_CHIP_AX)
1049 		return 0;
1050 
1051 	chk_type = rtw89_fw_get_fwdl_chk_type_from_suit(rtwdev, fw_suit);
1052 	ret = rtw89_fw_check_rdy(rtwdev, chk_type);
1053 	if (ret) {
1054 		rtw89_warn(rtwdev, "failed to download firmware type %u\n",
1055 			   fw_suit->type);
1056 		return ret;
1057 	}
1058 
1059 	return 0;
1060 }
1061 
1062 static void rtw89_fw_prog_cnt_dump(struct rtw89_dev *rtwdev)
1063 {
1064 	enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
1065 	u32 addr = R_AX_DBG_PORT_SEL;
1066 	u32 val32;
1067 	u16 index;
1068 
1069 	if (chip_gen == RTW89_CHIP_BE) {
1070 		addr = R_BE_WLCPU_PORT_PC;
1071 		goto dump;
1072 	}
1073 
1074 	rtw89_write32(rtwdev, R_AX_DBG_CTRL,
1075 		      FIELD_PREP(B_AX_DBG_SEL0, FW_PROG_CNTR_DBG_SEL) |
1076 		      FIELD_PREP(B_AX_DBG_SEL1, FW_PROG_CNTR_DBG_SEL));
1077 	rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_SEL_0XC0_MASK, MAC_DBG_SEL);
1078 
1079 dump:
1080 	for (index = 0; index < 15; index++) {
1081 		val32 = rtw89_read32(rtwdev, addr);
1082 		rtw89_err(rtwdev, "[ERR]fw PC = 0x%x\n", val32);
1083 		fsleep(10);
1084 	}
1085 }
1086 
1087 static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev)
1088 {
1089 	u32 val32;
1090 	u16 val16;
1091 
1092 	val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL);
1093 	rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32);
1094 
1095 	val16 = rtw89_read16(rtwdev, R_AX_BOOT_DBG + 2);
1096 	rtw89_err(rtwdev, "[ERR]fwdl 0x83F2 = 0x%x\n", val16);
1097 
1098 	rtw89_fw_prog_cnt_dump(rtwdev);
1099 }
1100 
1101 static int rtw89_fw_download_suit(struct rtw89_dev *rtwdev,
1102 				  struct rtw89_fw_suit *fw_suit)
1103 {
1104 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
1105 	struct rtw89_fw_bin_info info;
1106 	int ret;
1107 
1108 	ret = rtw89_fw_hdr_parser(rtwdev, fw_suit, &info);
1109 	if (ret) {
1110 		rtw89_err(rtwdev, "parse fw header fail\n");
1111 		return ret;
1112 	}
1113 
1114 	if (rtwdev->chip->chip_id == RTL8922A &&
1115 	    (fw_suit->type == RTW89_FW_NORMAL || fw_suit->type == RTW89_FW_WOWLAN))
1116 		rtw89_write32(rtwdev, R_BE_SECURE_BOOT_MALLOC_INFO, 0x20248000);
1117 
1118 	ret = mac->fwdl_check_path_ready(rtwdev, true);
1119 	if (ret) {
1120 		rtw89_err(rtwdev, "[ERR]H2C path ready\n");
1121 		return ret;
1122 	}
1123 
1124 	ret = rtw89_fw_download_hdr(rtwdev, fw_suit->data, info.hdr_len -
1125 							   info.dynamic_hdr_len);
1126 	if (ret)
1127 		return ret;
1128 
1129 	ret = rtw89_fw_download_main(rtwdev, fw_suit, &info);
1130 	if (ret)
1131 		return ret;
1132 
1133 	return 0;
1134 }
1135 
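/* Full firmware download sequence: halt the WCPU, re-enable it in download
 * mode, push the requested firmware suit (plus the BBMCU images when
 * include_bb is set), reset the H2C/C2H bookkeeping, and wait for the
 * FreeRTOS-ready indication. On failure, download status registers are
 * dumped for diagnosis.
 */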
1136 int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
1137 		      bool include_bb)
1138 {
1139 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
1140 	struct rtw89_fw_info *fw_info = &rtwdev->fw;
1141 	struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
1142 	u8 bbmcu_nr = rtwdev->chip->bbmcu_nr;
1143 	int ret;
1144 	int i;
1145 
1146 	mac->disable_cpu(rtwdev);
1147 	ret = mac->fwdl_enable_wcpu(rtwdev, 0, true, include_bb);
1148 	if (ret)
1149 		return ret;
1150 
1151 	ret = rtw89_fw_download_suit(rtwdev, fw_suit);
1152 	if (ret)
1153 		goto fwdl_err;
1154 
1155 	for (i = 0; i < bbmcu_nr && include_bb; i++) {
1156 		fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_BBMCU0 + i);
1157 
1158 		ret = rtw89_fw_download_suit(rtwdev, fw_suit);
1159 		if (ret)
1160 			goto fwdl_err;
1161 	}
1162 
1163 	fw_info->h2c_seq = 0;
1164 	fw_info->rec_seq = 0;
1165 	fw_info->h2c_counter = 0;
1166 	fw_info->c2h_counter = 0;
1167 	rtwdev->mac.rpwm_seq_num = RPWM_SEQ_NUM_MAX;
1168 	rtwdev->mac.cpwm_seq_num = CPWM_SEQ_NUM_MAX;
1169 
1170 	mdelay(5);
1171 
1172 	ret = rtw89_fw_check_rdy(rtwdev, RTW89_FWDL_CHECK_FREERTOS_DONE);
1173 	if (ret) {
1174 		rtw89_warn(rtwdev, "download firmware fail\n");
1175 		return ret;
1176 	}
1177 
1178 	return ret;
1179 
1180 fwdl_err:
1181 	rtw89_fw_dl_fail_dump(rtwdev);
1182 	return ret;
1183 }
1184 
1185 int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev)
1186 {
1187 	struct rtw89_fw_info *fw = &rtwdev->fw;
1188 
1189 	wait_for_completion(&fw->req.completion);
1190 	if (!fw->req.firmware)
1191 		return -EINVAL;
1192 
1193 	return 0;
1194 }
1195 
1196 static int rtw89_load_firmware_req(struct rtw89_dev *rtwdev,
1197 				   struct rtw89_fw_req_info *req,
1198 				   const char *fw_name, bool nowarn)
1199 {
1200 	int ret;
1201 
1202 	if (req->firmware) {
1203 		rtw89_debug(rtwdev, RTW89_DBG_FW,
1204 			    "full firmware has been early requested\n");
1205 		complete_all(&req->completion);
1206 		return 0;
1207 	}
1208 
1209 	if (nowarn)
1210 		ret = firmware_request_nowarn(&req->firmware, fw_name, rtwdev->dev);
1211 	else
1212 		ret = request_firmware(&req->firmware, fw_name, rtwdev->dev);
1213 
1214 	complete_all(&req->completion);
1215 
1216 	return ret;
1217 }
1218 
1219 void rtw89_load_firmware_work(struct work_struct *work)
1220 {
1221 	struct rtw89_dev *rtwdev =
1222 		container_of(work, struct rtw89_dev, load_firmware_work);
1223 	const struct rtw89_chip_info *chip = rtwdev->chip;
1224 	char fw_name[64];
1225 
1226 	rtw89_fw_get_filename(fw_name, sizeof(fw_name),
1227 			      chip->fw_basename, rtwdev->fw.fw_format);
1228 
1229 	rtw89_load_firmware_req(rtwdev, &rtwdev->fw.req, fw_name, false);
1230 }
1231 
1232 static void rtw89_free_phy_tbl_from_elm(struct rtw89_phy_table *tbl)
1233 {
1234 	if (!tbl)
1235 		return;
1236 
1237 	kfree(tbl->regs);
1238 	kfree(tbl);
1239 }
1240 
1241 static void rtw89_unload_firmware_elements(struct rtw89_dev *rtwdev)
1242 {
1243 	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
1244 	int i;
1245 
1246 	rtw89_free_phy_tbl_from_elm(elm_info->bb_tbl);
1247 	rtw89_free_phy_tbl_from_elm(elm_info->bb_gain);
1248 	for (i = 0; i < ARRAY_SIZE(elm_info->rf_radio); i++)
1249 		rtw89_free_phy_tbl_from_elm(elm_info->rf_radio[i]);
1250 	rtw89_free_phy_tbl_from_elm(elm_info->rf_nctl);
1251 
1252 	kfree(elm_info->txpwr_trk);
1253 	kfree(elm_info->rfk_log_fmt);
1254 }
1255 
1256 void rtw89_unload_firmware(struct rtw89_dev *rtwdev)
1257 {
1258 	struct rtw89_fw_info *fw = &rtwdev->fw;
1259 
1260 	cancel_work_sync(&rtwdev->load_firmware_work);
1261 
1262 	if (fw->req.firmware) {
1263 		release_firmware(fw->req.firmware);
1264 
1265 		/* assign NULL back in case rtw89_free_ieee80211_hw()
1266 		 * tries to release the same one again.
1267 		 */
1268 		fw->req.firmware = NULL;
1269 	}
1270 
1271 	kfree(fw->log.fmts);
1272 	rtw89_unload_firmware_elements(rtwdev);
1273 }
1274 
1275 static u32 rtw89_fw_log_get_fmt_idx(struct rtw89_dev *rtwdev, u32 fmt_id)
1276 {
1277 	struct rtw89_fw_log *fw_log = &rtwdev->fw.log;
1278 	u32 i;
1279 
1280 	if (fmt_id > fw_log->last_fmt_id)
1281 		return 0;
1282 
1283 	for (i = 0; i < fw_log->fmt_count; i++) {
1284 		if (le32_to_cpu(fw_log->fmt_ids[i]) == fmt_id)
1285 			return i;
1286 	}
1287 	return 0;
1288 }
1289 
1290 static int rtw89_fw_log_create_fmts_dict(struct rtw89_dev *rtwdev)
1291 {
1292 	struct rtw89_fw_log *log = &rtwdev->fw.log;
1293 	const struct rtw89_fw_logsuit_hdr *suit_hdr;
1294 	struct rtw89_fw_suit *suit = &log->suit;
1295 	const void *fmts_ptr, *fmts_end_ptr;
1296 	u32 fmt_count;
1297 	int i;
1298 
1299 	suit_hdr = (const struct rtw89_fw_logsuit_hdr *)suit->data;
1300 	fmt_count = le32_to_cpu(suit_hdr->count);
1301 	log->fmt_ids = suit_hdr->ids;
1302 	fmts_ptr = &suit_hdr->ids[fmt_count];
1303 	fmts_end_ptr = suit->data + suit->size;
1304 	log->fmts = kcalloc(fmt_count, sizeof(char *), GFP_KERNEL);
1305 	if (!log->fmts)
1306 		return -ENOMEM;
1307 
1308 	for (i = 0; i < fmt_count; i++) {
1309 		fmts_ptr = memchr_inv(fmts_ptr, 0, fmts_end_ptr - fmts_ptr);
1310 		if (!fmts_ptr)
1311 			break;
1312 
1313 		(*log->fmts)[i] = fmts_ptr;
1314 		log->last_fmt_id = le32_to_cpu(log->fmt_ids[i]);
1315 		log->fmt_count++;
1316 		fmts_ptr += strlen(fmts_ptr);
1317 	}
1318 
1319 	return 0;
1320 }
1321 
1322 int rtw89_fw_log_prepare(struct rtw89_dev *rtwdev)
1323 {
1324 	struct rtw89_fw_log *log = &rtwdev->fw.log;
1325 	struct rtw89_fw_suit *suit = &log->suit;
1326 
1327 	if (!suit || !suit->data) {
1328 		rtw89_debug(rtwdev, RTW89_DBG_FW, "no log format file\n");
1329 		return -EINVAL;
1330 	}
1331 	if (log->fmts)
1332 		return 0;
1333 
1334 	return rtw89_fw_log_create_fmts_dict(rtwdev);
1335 }
1336 
1337 static void rtw89_fw_log_dump_data(struct rtw89_dev *rtwdev,
1338 				   const struct rtw89_fw_c2h_log_fmt *log_fmt,
1339 				   u32 fmt_idx, u8 para_int, bool raw_data)
1340 {
1341 	const char *(*fmts)[] = rtwdev->fw.log.fmts;
1342 	char str_buf[RTW89_C2H_FW_LOG_STR_BUF_SIZE];
1343 	u32 args[RTW89_C2H_FW_LOG_MAX_PARA_NUM] = {0};
1344 	int i;
1345 
1346 	if (log_fmt->argc > RTW89_C2H_FW_LOG_MAX_PARA_NUM) {
1347 		rtw89_warn(rtwdev, "C2H log: Arg count is unexpected %d\n",
1348 			   log_fmt->argc);
1349 		return;
1350 	}
1351 
1352 	if (para_int)
1353 		for (i = 0 ; i < log_fmt->argc; i++)
1354 			args[i] = le32_to_cpu(log_fmt->u.argv[i]);
1355 
1356 	if (raw_data) {
1357 		if (para_int)
1358 			snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE,
1359 				 "fw_enc(%d, %d, %d) %*ph", le32_to_cpu(log_fmt->fmt_id),
1360 				 para_int, log_fmt->argc, (int)sizeof(args), args);
1361 		else
1362 			snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE,
1363 				 "fw_enc(%d, %d, %d, %s)", le32_to_cpu(log_fmt->fmt_id),
1364 				 para_int, log_fmt->argc, log_fmt->u.raw);
1365 	} else {
1366 		snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, (*fmts)[fmt_idx],
1367 			 args[0x0], args[0x1], args[0x2], args[0x3], args[0x4],
1368 			 args[0x5], args[0x6], args[0x7], args[0x8], args[0x9],
1369 			 args[0xa], args[0xb], args[0xc], args[0xd], args[0xe],
1370 			 args[0xf]);
1371 	}
1372 
1373 	rtw89_info(rtwdev, "C2H log: %s", str_buf);
1374 }
1375 
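/* Dump a C2H firmware log. Payloads that are too short or lack the
 * formatted-log signature are printed as plain text; formatted logs are
 * rendered through the dictionary built from the log-format firmware file,
 * falling back to a raw dump when the format id or arguments can't be
 * mapped.
 */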
1376 void rtw89_fw_log_dump(struct rtw89_dev *rtwdev, u8 *buf, u32 len)
1377 {
1378 	const struct rtw89_fw_c2h_log_fmt *log_fmt;
1379 	u8 para_int;
1380 	u32 fmt_idx;
1381 
1382 	if (len < RTW89_C2H_HEADER_LEN) {
1383 		rtw89_err(rtwdev, "c2h log length is wrong!\n");
1384 		return;
1385 	}
1386 
1387 	buf += RTW89_C2H_HEADER_LEN;
1388 	len -= RTW89_C2H_HEADER_LEN;
1389 	log_fmt = (const struct rtw89_fw_c2h_log_fmt *)buf;
1390 
1391 	if (len < RTW89_C2H_FW_FORMATTED_LOG_MIN_LEN)
1392 		goto plain_log;
1393 
1394 	if (log_fmt->signature != cpu_to_le16(RTW89_C2H_FW_LOG_SIGNATURE))
1395 		goto plain_log;
1396 
1397 	if (!rtwdev->fw.log.fmts)
1398 		return;
1399 
1400 	para_int = u8_get_bits(log_fmt->feature, RTW89_C2H_FW_LOG_FEATURE_PARA_INT);
1401 	fmt_idx = rtw89_fw_log_get_fmt_idx(rtwdev, le32_to_cpu(log_fmt->fmt_id));
1402 
1403 	if (!para_int && log_fmt->argc != 0 && fmt_idx != 0)
1404 		rtw89_info(rtwdev, "C2H log: %s%s",
1405 			   (*rtwdev->fw.log.fmts)[fmt_idx], log_fmt->u.raw);
1406 	else if (fmt_idx != 0 && para_int)
1407 		rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, false);
1408 	else
1409 		rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, true);
1410 	return;
1411 
1412 plain_log:
1413 	rtw89_info(rtwdev, "C2H log: %.*s", len, buf);
1414 
1415 }
1416 
1417 #define H2C_CAM_LEN 60
1418 int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
1419 		     struct rtw89_sta *rtwsta, const u8 *scan_mac_addr)
1420 {
1421 	struct sk_buff *skb;
1422 	int ret;
1423 
1424 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CAM_LEN);
1425 	if (!skb) {
1426 		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
1427 		return -ENOMEM;
1428 	}
1429 	skb_put(skb, H2C_CAM_LEN);
1430 	rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif, rtwsta, scan_mac_addr, skb->data);
1431 	rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif, rtwsta, skb->data);
1432 
1433 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1434 			      H2C_CAT_MAC,
1435 			      H2C_CL_MAC_ADDR_CAM_UPDATE,
1436 			      H2C_FUNC_MAC_ADDR_CAM_UPD, 0, 1,
1437 			      H2C_CAM_LEN);
1438 
1439 	ret = rtw89_h2c_tx(rtwdev, skb, false);
1440 	if (ret) {
1441 		rtw89_err(rtwdev, "failed to send h2c\n");
1442 		goto fail;
1443 	}
1444 
1445 	return 0;
1446 fail:
1447 	dev_kfree_skb_any(skb);
1448 
1449 	return ret;
1450 }
1451 
1452 #define H2C_DCTL_SEC_CAM_LEN 68
1453 int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev,
1454 				 struct rtw89_vif *rtwvif,
1455 				 struct rtw89_sta *rtwsta)
1456 {
1457 	struct sk_buff *skb;
1458 	int ret;
1459 
1460 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DCTL_SEC_CAM_LEN);
1461 	if (!skb) {
1462 		rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n");
1463 		return -ENOMEM;
1464 	}
1465 	skb_put(skb, H2C_DCTL_SEC_CAM_LEN);
1466 
1467 	rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif, rtwsta, skb->data);
1468 
1469 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1470 			      H2C_CAT_MAC,
1471 			      H2C_CL_MAC_FR_EXCHG,
1472 			      H2C_FUNC_MAC_DCTLINFO_UD_V1, 0, 0,
1473 			      H2C_DCTL_SEC_CAM_LEN);
1474 
1475 	ret = rtw89_h2c_tx(rtwdev, skb, false);
1476 	if (ret) {
1477 		rtw89_err(rtwdev, "failed to send h2c\n");
1478 		goto fail;
1479 	}
1480 
1481 	return 0;
1482 fail:
1483 	dev_kfree_skb_any(skb);
1484 
1485 	return ret;
1486 }
1487 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1);
1488 
1489 int rtw89_fw_h2c_dctl_sec_cam_v2(struct rtw89_dev *rtwdev,
1490 				 struct rtw89_vif *rtwvif,
1491 				 struct rtw89_sta *rtwsta)
1492 {
1493 	struct rtw89_h2c_dctlinfo_ud_v2 *h2c;
1494 	u32 len = sizeof(*h2c);
1495 	struct sk_buff *skb;
1496 	int ret;
1497 
1498 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
1499 	if (!skb) {
1500 		rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n");
1501 		return -ENOMEM;
1502 	}
1503 	skb_put(skb, len);
1504 	h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data;
1505 
1506 	rtw89_cam_fill_dctl_sec_cam_info_v2(rtwdev, rtwvif, rtwsta, h2c);
1507 
1508 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1509 			      H2C_CAT_MAC,
1510 			      H2C_CL_MAC_FR_EXCHG,
1511 			      H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0,
1512 			      len);
1513 
1514 	ret = rtw89_h2c_tx(rtwdev, skb, false);
1515 	if (ret) {
1516 		rtw89_err(rtwdev, "failed to send h2c\n");
1517 		goto fail;
1518 	}
1519 
1520 	return 0;
1521 fail:
1522 	dev_kfree_skb_any(skb);
1523 
1524 	return ret;
1525 }
1526 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v2);
1527 
1528 int rtw89_fw_h2c_default_dmac_tbl_v2(struct rtw89_dev *rtwdev,
1529 				     struct rtw89_vif *rtwvif,
1530 				     struct rtw89_sta *rtwsta)
1531 {
1532 	u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
1533 	struct rtw89_h2c_dctlinfo_ud_v2 *h2c;
1534 	u32 len = sizeof(*h2c);
1535 	struct sk_buff *skb;
1536 	int ret;
1537 
1538 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
1539 	if (!skb) {
1540 		rtw89_err(rtwdev, "failed to alloc skb for dctl v2\n");
1541 		return -ENOMEM;
1542 	}
1543 	skb_put(skb, len);
1544 	h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data;
1545 
1546 	h2c->c0 = le32_encode_bits(mac_id, DCTLINFO_V2_C0_MACID) |
1547 		  le32_encode_bits(1, DCTLINFO_V2_C0_OP);
1548 
1549 	h2c->m0 = cpu_to_le32(DCTLINFO_V2_W0_ALL);
1550 	h2c->m1 = cpu_to_le32(DCTLINFO_V2_W1_ALL);
1551 	h2c->m2 = cpu_to_le32(DCTLINFO_V2_W2_ALL);
1552 	h2c->m3 = cpu_to_le32(DCTLINFO_V2_W3_ALL);
1553 	h2c->m4 = cpu_to_le32(DCTLINFO_V2_W4_ALL);
1554 	h2c->m5 = cpu_to_le32(DCTLINFO_V2_W5_ALL);
1555 	h2c->m6 = cpu_to_le32(DCTLINFO_V2_W6_ALL);
1556 	h2c->m7 = cpu_to_le32(DCTLINFO_V2_W7_ALL);
1557 	h2c->m8 = cpu_to_le32(DCTLINFO_V2_W8_ALL);
1558 	h2c->m9 = cpu_to_le32(DCTLINFO_V2_W9_ALL);
1559 	h2c->m10 = cpu_to_le32(DCTLINFO_V2_W10_ALL);
1560 	h2c->m11 = cpu_to_le32(DCTLINFO_V2_W11_ALL);
1561 	h2c->m12 = cpu_to_le32(DCTLINFO_V2_W12_ALL);
1562 
1563 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1564 			      H2C_CAT_MAC,
1565 			      H2C_CL_MAC_FR_EXCHG,
1566 			      H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0,
1567 			      len);
1568 
1569 	ret = rtw89_h2c_tx(rtwdev, skb, false);
1570 	if (ret) {
1571 		rtw89_err(rtwdev, "failed to send h2c\n");
1572 		goto fail;
1573 	}
1574 
1575 	return 0;
1576 fail:
1577 	dev_kfree_skb_any(skb);
1578 
1579 	return ret;
1580 }
1581 EXPORT_SYMBOL(rtw89_fw_h2c_default_dmac_tbl_v2);
1582 
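/* Program a block-ack CAM entry for a (sta, tid) pair. A static entry is
 * acquired (or released when @valid is false) first; if none is available
 * the function returns 0 and relies on the hardware creating a dynamic BA
 * CAM entry instead.
 */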
1583 int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
1584 			bool valid, struct ieee80211_ampdu_params *params)
1585 {
1586 	const struct rtw89_chip_info *chip = rtwdev->chip;
1587 	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
1588 	struct rtw89_h2c_ba_cam *h2c;
1589 	u8 macid = rtwsta->mac_id;
1590 	u32 len = sizeof(*h2c);
1591 	struct sk_buff *skb;
1592 	u8 entry_idx;
1593 	int ret;
1594 
1595 	ret = valid ?
1596 	      rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx) :
1597 	      rtw89_core_release_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx);
1598 	if (ret) {
1599 		/* it still works even if we don't have static BA CAM, because
1600 		 * hardware can create dynamic BA CAM automatically.
1601 		 */
1602 		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
1603 			    "failed to %s entry tid=%d for h2c ba cam\n",
1604 			    valid ? "alloc" : "free", params->tid);
1605 		return 0;
1606 	}
1607 
1608 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
1609 	if (!skb) {
1610 		rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n");
1611 		return -ENOMEM;
1612 	}
1613 	skb_put(skb, len);
1614 	h2c = (struct rtw89_h2c_ba_cam *)skb->data;
1615 
1616 	h2c->w0 = le32_encode_bits(macid, RTW89_H2C_BA_CAM_W0_MACID);
1617 	if (chip->bacam_ver == RTW89_BACAM_V0_EXT)
1618 		h2c->w1 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1);
1619 	else
1620 		h2c->w0 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W0_ENTRY_IDX);
1621 	if (!valid)
1622 		goto end;
1623 	h2c->w0 |= le32_encode_bits(valid, RTW89_H2C_BA_CAM_W0_VALID) |
1624 		   le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_W0_TID);
1625 	if (params->buf_size > 64)
1626 		h2c->w0 |= le32_encode_bits(4, RTW89_H2C_BA_CAM_W0_BMAP_SIZE);
1627 	else
1628 		h2c->w0 |= le32_encode_bits(0, RTW89_H2C_BA_CAM_W0_BMAP_SIZE);
1629 	/* If init req is set, hw will set the ssn */
1630 	h2c->w0 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_INIT_REQ) |
1631 		   le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_W0_SSN);
1632 
1633 	if (chip->bacam_ver == RTW89_BACAM_V0_EXT) {
1634 		h2c->w1 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W1_STD_EN) |
1635 			   le32_encode_bits(rtwvif->mac_idx, RTW89_H2C_BA_CAM_W1_BAND);
1636 	}
1637 
1638 end:
1639 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1640 			      H2C_CAT_MAC,
1641 			      H2C_CL_BA_CAM,
1642 			      H2C_FUNC_MAC_BA_CAM, 0, 1,
1643 			      len);
1644 
1645 	ret = rtw89_h2c_tx(rtwdev, skb, false);
1646 	if (ret) {
1647 		rtw89_err(rtwdev, "failed to send h2c\n");
1648 		goto fail;
1649 	}
1650 
1651 	return 0;
1652 fail:
1653 	dev_kfree_skb_any(skb);
1654 
1655 	return ret;
1656 }
1657 EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam);
1658 
1659 static int rtw89_fw_h2c_init_ba_cam_v0_ext(struct rtw89_dev *rtwdev,
1660 					   u8 entry_idx, u8 uid)
1661 {
1662 	struct rtw89_h2c_ba_cam *h2c;
1663 	u32 len = sizeof(*h2c);
1664 	struct sk_buff *skb;
1665 	int ret;
1666 
1667 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
1668 	if (!skb) {
1669 		rtw89_err(rtwdev, "failed to alloc skb for dynamic h2c ba cam\n");
1670 		return -ENOMEM;
1671 	}
1672 	skb_put(skb, len);
1673 	h2c = (struct rtw89_h2c_ba_cam *)skb->data;
1674 
1675 	h2c->w0 = le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_VALID);
1676 	h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1) |
1677 		  le32_encode_bits(uid, RTW89_H2C_BA_CAM_W1_UID) |
1678 		  le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_BAND) |
1679 		  le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_STD_EN);
1680 
1681 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1682 			      H2C_CAT_MAC,
1683 			      H2C_CL_BA_CAM,
1684 			      H2C_FUNC_MAC_BA_CAM, 0, 1,
1685 			      len);
1686 
1687 	ret = rtw89_h2c_tx(rtwdev, skb, false);
1688 	if (ret) {
1689 		rtw89_err(rtwdev, "failed to send h2c\n");
1690 		goto fail;
1691 	}
1692 
1693 	return 0;
1694 fail:
1695 	dev_kfree_skb_any(skb);
1696 
1697 	return ret;
1698 }
1699 
1700 void rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(struct rtw89_dev *rtwdev)
1701 {
1702 	const struct rtw89_chip_info *chip = rtwdev->chip;
1703 	u8 entry_idx = chip->bacam_num;
1704 	u8 uid = 0;
1705 	int i;
1706 
1707 	for (i = 0; i < chip->bacam_dynamic_num; i++) {
1708 		rtw89_fw_h2c_init_ba_cam_v0_ext(rtwdev, entry_idx, uid);
1709 		entry_idx++;
1710 		uid++;
1711 	}
1712 }
1713 
1714 int rtw89_fw_h2c_ba_cam_v1(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
1715 			   bool valid, struct ieee80211_ampdu_params *params)
1716 {
1717 	const struct rtw89_chip_info *chip = rtwdev->chip;
1718 	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
1719 	struct rtw89_h2c_ba_cam_v1 *h2c;
1720 	u8 macid = rtwsta->mac_id;
1721 	u32 len = sizeof(*h2c);
1722 	struct sk_buff *skb;
1723 	u8 entry_idx;
1724 	u8 bmap_size;
1725 	int ret;
1726 
1727 	ret = valid ?
1728 	      rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx) :
1729 	      rtw89_core_release_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx);
1730 	if (ret) {
1731 		/* it still works even if we don't have static BA CAM, because
1732 		 * hardware can create dynamic BA CAM automatically.
1733 		 */
1734 		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
1735 			    "failed to %s entry tid=%d for h2c ba cam\n",
1736 			    valid ? "alloc" : "free", params->tid);
1737 		return 0;
1738 	}
1739 
1740 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
1741 	if (!skb) {
1742 		rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n");
1743 		return -ENOMEM;
1744 	}
1745 	skb_put(skb, len);
1746 	h2c = (struct rtw89_h2c_ba_cam_v1 *)skb->data;
1747 
1748 	if (params->buf_size > 512)
1749 		bmap_size = 10;
1750 	else if (params->buf_size > 256)
1751 		bmap_size = 8;
1752 	else if (params->buf_size > 64)
1753 		bmap_size = 4;
1754 	else
1755 		bmap_size = 0;
1756 
1757 	h2c->w0 = le32_encode_bits(valid, RTW89_H2C_BA_CAM_V1_W0_VALID) |
1758 		  le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W0_INIT_REQ) |
1759 		  le32_encode_bits(macid, RTW89_H2C_BA_CAM_V1_W0_MACID_MASK) |
1760 		  le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_V1_W0_TID_MASK) |
1761 		  le32_encode_bits(bmap_size, RTW89_H2C_BA_CAM_V1_W0_BMAP_SIZE_MASK) |
1762 		  le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_V1_W0_SSN_MASK);
1763 
1764 	entry_idx += chip->bacam_dynamic_num; /* std entry right after dynamic ones */
1765 	h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_V1_W1_ENTRY_IDX_MASK) |
1766 		  le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W1_STD_ENTRY_EN) |
1767 		  le32_encode_bits(!!rtwvif->mac_idx, RTW89_H2C_BA_CAM_V1_W1_BAND_SEL);
1768 
1769 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1770 			      H2C_CAT_MAC,
1771 			      H2C_CL_BA_CAM,
1772 			      H2C_FUNC_MAC_BA_CAM_V1, 0, 1,
1773 			      len);
1774 
1775 	ret = rtw89_h2c_tx(rtwdev, skb, false);
1776 	if (ret) {
1777 		rtw89_err(rtwdev, "failed to send h2c\n");
1778 		goto fail;
1779 	}
1780 
1781 	return 0;
1782 fail:
1783 	dev_kfree_skb_any(skb);
1784 
1785 	return ret;
1786 }
1787 EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam_v1);
1788 
1789 int rtw89_fw_h2c_init_ba_cam_users(struct rtw89_dev *rtwdev, u8 users,
1790 				   u8 offset, u8 mac_idx)
1791 {
1792 	struct rtw89_h2c_ba_cam_init *h2c;
1793 	u32 len = sizeof(*h2c);
1794 	struct sk_buff *skb;
1795 	int ret;
1796 
1797 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
1798 	if (!skb) {
1799 		rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam init\n");
1800 		return -ENOMEM;
1801 	}
1802 	skb_put(skb, len);
1803 	h2c = (struct rtw89_h2c_ba_cam_init *)skb->data;
1804 
1805 	h2c->w0 = le32_encode_bits(users, RTW89_H2C_BA_CAM_INIT_USERS_MASK) |
1806 		  le32_encode_bits(offset, RTW89_H2C_BA_CAM_INIT_OFFSET_MASK) |
1807 		  le32_encode_bits(mac_idx, RTW89_H2C_BA_CAM_INIT_BAND_SEL);
1808 
1809 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1810 			      H2C_CAT_MAC,
1811 			      H2C_CL_BA_CAM,
1812 			      H2C_FUNC_MAC_BA_CAM_INIT, 0, 1,
1813 			      len);
1814 
1815 	ret = rtw89_h2c_tx(rtwdev, skb, false);
1816 	if (ret) {
1817 		rtw89_err(rtwdev, "failed to send h2c\n");
1818 		goto fail;
1819 	}
1820 
1821 	return 0;
1822 fail:
1823 	dev_kfree_skb_any(skb);
1824 
1825 	return ret;
1826 }
1827 
1828 #define H2C_LOG_CFG_LEN 12
1829 int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable)
1830 {
1831 	struct sk_buff *skb;
1832 	u32 comp = enable ? BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) |
1833 			    BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) : 0;
1834 	int ret;
1835 
1836 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN);
1837 	if (!skb) {
1838 		rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n");
1839 		return -ENOMEM;
1840 	}
1841 
1842 	skb_put(skb, H2C_LOG_CFG_LEN);
1843 	SET_LOG_CFG_LEVEL(skb->data, RTW89_FW_LOG_LEVEL_LOUD);
1844 	SET_LOG_CFG_PATH(skb->data, BIT(RTW89_FW_LOG_LEVEL_C2H));
1845 	SET_LOG_CFG_COMP(skb->data, comp);
1846 	SET_LOG_CFG_COMP_EXT(skb->data, 0);
1847 
1848 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1849 			      H2C_CAT_MAC,
1850 			      H2C_CL_FW_INFO,
1851 			      H2C_FUNC_LOG_CFG, 0, 0,
1852 			      H2C_LOG_CFG_LEN);
1853 
1854 	ret = rtw89_h2c_tx(rtwdev, skb, false);
1855 	if (ret) {
1856 		rtw89_err(rtwdev, "failed to send h2c\n");
1857 		goto fail;
1858 	}
1859 
1860 	return 0;
1861 fail:
1862 	dev_kfree_skb_any(skb);
1863 
1864 	return ret;
1865 }
1866 
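/* Build a template frame (PS-Poll, probe response, null or QoS-null) via
 * mac80211, offload it to firmware, and track the resulting offload id on
 * the vif's general packet list.
 */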
1867 static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev,
1868 					struct rtw89_vif *rtwvif,
1869 					enum rtw89_fw_pkt_ofld_type type,
1870 					u8 *id)
1871 {
1872 	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
1873 	struct rtw89_pktofld_info *info;
1874 	struct sk_buff *skb;
1875 	int ret;
1876 
1877 	info = kzalloc(sizeof(*info), GFP_KERNEL);
1878 	if (!info)
1879 		return -ENOMEM;
1880 
1881 	switch (type) {
1882 	case RTW89_PKT_OFLD_TYPE_PS_POLL:
1883 		skb = ieee80211_pspoll_get(rtwdev->hw, vif);
1884 		break;
1885 	case RTW89_PKT_OFLD_TYPE_PROBE_RSP:
1886 		skb = ieee80211_proberesp_get(rtwdev->hw, vif);
1887 		break;
1888 	case RTW89_PKT_OFLD_TYPE_NULL_DATA:
1889 		skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, false);
1890 		break;
1891 	case RTW89_PKT_OFLD_TYPE_QOS_NULL:
1892 		skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, true);
1893 		break;
1894 	default:
1895 		goto err;
1896 	}
1897 
1898 	if (!skb)
1899 		goto err;
1900 
1901 	ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb);
1902 	kfree_skb(skb);
1903 
1904 	if (ret)
1905 		goto err;
1906 
1907 	list_add_tail(&info->list, &rtwvif->general_pkt_list);
1908 	*id = info->id;
1909 	return 0;
1910 
1911 err:
1912 	kfree(info);
1913 	return -ENOMEM;
1914 }
1915 
1916 void rtw89_fw_release_general_pkt_list_vif(struct rtw89_dev *rtwdev,
1917 					   struct rtw89_vif *rtwvif, bool notify_fw)
1918 {
1919 	struct list_head *pkt_list = &rtwvif->general_pkt_list;
1920 	struct rtw89_pktofld_info *info, *tmp;
1921 
1922 	list_for_each_entry_safe(info, tmp, pkt_list, list) {
1923 		if (notify_fw)
1924 			rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
1925 		else
1926 			rtw89_core_release_bit_map(rtwdev->pkt_offload, info->id);
1927 		list_del(&info->list);
1928 		kfree(info);
1929 	}
1930 }
1931 
1932 void rtw89_fw_release_general_pkt_list(struct rtw89_dev *rtwdev, bool notify_fw)
1933 {
1934 	struct rtw89_vif *rtwvif;
1935 
1936 	rtw89_for_each_rtwvif(rtwdev, rtwvif)
1937 		rtw89_fw_release_general_pkt_list_vif(rtwdev, rtwvif, notify_fw);
1938 }
1939 
1940 #define H2C_GENERAL_PKT_LEN 6
1941 #define H2C_GENERAL_PKT_ID_UND 0xff
1942 int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev,
1943 			     struct rtw89_vif *rtwvif, u8 macid)
1944 {
1945 	u8 pkt_id_ps_poll = H2C_GENERAL_PKT_ID_UND;
1946 	u8 pkt_id_null = H2C_GENERAL_PKT_ID_UND;
1947 	u8 pkt_id_qos_null = H2C_GENERAL_PKT_ID_UND;
1948 	struct sk_buff *skb;
1949 	int ret;
1950 
1951 	rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
1952 				     RTW89_PKT_OFLD_TYPE_PS_POLL, &pkt_id_ps_poll);
1953 	rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
1954 				     RTW89_PKT_OFLD_TYPE_NULL_DATA, &pkt_id_null);
1955 	rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
1956 				     RTW89_PKT_OFLD_TYPE_QOS_NULL, &pkt_id_qos_null);
1957 
1958 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_GENERAL_PKT_LEN);
1959 	if (!skb) {
1960 		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
1961 		return -ENOMEM;
1962 	}
1963 	skb_put(skb, H2C_GENERAL_PKT_LEN);
1964 	SET_GENERAL_PKT_MACID(skb->data, macid);
1965 	SET_GENERAL_PKT_PROBRSP_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
1966 	SET_GENERAL_PKT_PSPOLL_ID(skb->data, pkt_id_ps_poll);
1967 	SET_GENERAL_PKT_NULL_ID(skb->data, pkt_id_null);
1968 	SET_GENERAL_PKT_QOS_NULL_ID(skb->data, pkt_id_qos_null);
1969 	SET_GENERAL_PKT_CTS2SELF_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
1970 
1971 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
1972 			      H2C_CAT_MAC,
1973 			      H2C_CL_FW_INFO,
1974 			      H2C_FUNC_MAC_GENERAL_PKT, 0, 1,
1975 			      H2C_GENERAL_PKT_LEN);
1976 
1977 	ret = rtw89_h2c_tx(rtwdev, skb, false);
1978 	if (ret) {
1979 		rtw89_err(rtwdev, "failed to send h2c\n");
1980 		goto fail;
1981 	}
1982 
1983 	return 0;
1984 fail:
1985 	dev_kfree_skb_any(skb);
1986 
1987 	return ret;
1988 }
1989 
1990 #define H2C_LPS_PARM_LEN 8
1991 int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
1992 			  struct rtw89_lps_parm *lps_param)
1993 {
1994 	struct sk_buff *skb;
1995 	int ret;
1996 
1997 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LPS_PARM_LEN);
1998 	if (!skb) {
1999 		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
2000 		return -ENOMEM;
2001 	}
2002 	skb_put(skb, H2C_LPS_PARM_LEN);
2003 
2004 	SET_LPS_PARM_MACID(skb->data, lps_param->macid);
2005 	SET_LPS_PARM_PSMODE(skb->data, lps_param->psmode);
2006 	SET_LPS_PARM_LASTRPWM(skb->data, lps_param->lastrpwm);
2007 	SET_LPS_PARM_RLBM(skb->data, 1);
2008 	SET_LPS_PARM_SMARTPS(skb->data, 1);
2009 	SET_LPS_PARM_AWAKEINTERVAL(skb->data, 1);
2010 	SET_LPS_PARM_VOUAPSD(skb->data, 0);
2011 	SET_LPS_PARM_VIUAPSD(skb->data, 0);
2012 	SET_LPS_PARM_BEUAPSD(skb->data, 0);
2013 	SET_LPS_PARM_BKUAPSD(skb->data, 0);
2014 
2015 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2016 			      H2C_CAT_MAC,
2017 			      H2C_CL_MAC_PS,
2018 			      H2C_FUNC_MAC_LPS_PARM, 0, 1,
2019 			      H2C_LPS_PARM_LEN);
2020 
2021 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2022 	if (ret) {
2023 		rtw89_err(rtwdev, "failed to send h2c\n");
2024 		goto fail;
2025 	}
2026 
2027 	return 0;
2028 fail:
2029 	dev_kfree_skb_any(skb);
2030 
2031 	return ret;
2032 }
2033 
2034 #define H2C_P2P_ACT_LEN 20
2035 int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
2036 			 struct ieee80211_p2p_noa_desc *desc,
2037 			 u8 act, u8 noa_id)
2038 {
2039 	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
2040 	bool p2p_type_gc = rtwvif->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT;
2041 	u8 ctwindow_oppps = vif->bss_conf.p2p_noa_attr.oppps_ctwindow;
2042 	struct sk_buff *skb;
2043 	u8 *cmd;
2044 	int ret;
2045 
2046 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_P2P_ACT_LEN);
2047 	if (!skb) {
2048 		rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n");
2049 		return -ENOMEM;
2050 	}
2051 	skb_put(skb, H2C_P2P_ACT_LEN);
2052 	cmd = skb->data;
2053 
2054 	RTW89_SET_FWCMD_P2P_MACID(cmd, rtwvif->mac_id);
2055 	RTW89_SET_FWCMD_P2P_P2PID(cmd, 0);
2056 	RTW89_SET_FWCMD_P2P_NOAID(cmd, noa_id);
2057 	RTW89_SET_FWCMD_P2P_ACT(cmd, act);
2058 	RTW89_SET_FWCMD_P2P_TYPE(cmd, p2p_type_gc);
2059 	RTW89_SET_FWCMD_P2P_ALL_SLEP(cmd, 0);
2060 	if (desc) {
2061 		RTW89_SET_FWCMD_NOA_START_TIME(cmd, desc->start_time);
2062 		RTW89_SET_FWCMD_NOA_INTERVAL(cmd, desc->interval);
2063 		RTW89_SET_FWCMD_NOA_DURATION(cmd, desc->duration);
2064 		RTW89_SET_FWCMD_NOA_COUNT(cmd, desc->count);
2065 		RTW89_SET_FWCMD_NOA_CTWINDOW(cmd, ctwindow_oppps);
2066 	}
2067 
2068 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2069 			      H2C_CAT_MAC, H2C_CL_MAC_PS,
2070 			      H2C_FUNC_P2P_ACT, 0, 0,
2071 			      H2C_P2P_ACT_LEN);
2072 
2073 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2074 	if (ret) {
2075 		rtw89_err(rtwdev, "failed to send h2c\n");
2076 		goto fail;
2077 	}
2078 
2079 	return 0;
2080 fail:
2081 	dev_kfree_skb_any(skb);
2082 
2083 	return ret;
2084 }
2085 
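/* Fill the CMAC table TX path fields according to the configured TX antenna;
 * single-path chips are fixed to path A.
 */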
2086 static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev,
2087 				       struct sk_buff *skb)
2088 {
2089 	const struct rtw89_chip_info *chip = rtwdev->chip;
2090 	struct rtw89_hal *hal = &rtwdev->hal;
2091 	u8 ntx_path;
2092 	u8 map_b;
2093 
2094 	if (chip->rf_path_num == 1) {
2095 		ntx_path = RF_A;
2096 		map_b = 0;
2097 	} else {
2098 		ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B;
2099 		map_b = hal->antenna_tx == RF_AB ? 1 : 0;
2100 	}
2101 
2102 	SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path);
2103 	SET_CMC_TBL_PATH_MAP_A(skb->data, 0);
2104 	SET_CMC_TBL_PATH_MAP_B(skb->data, map_b);
2105 	SET_CMC_TBL_PATH_MAP_C(skb->data, 0);
2106 	SET_CMC_TBL_PATH_MAP_D(skb->data, 0);
2107 }
2108 
2109 #define H2C_CMC_TBL_LEN 68
2110 int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
2111 				  struct rtw89_vif *rtwvif,
2112 				  struct rtw89_sta *rtwsta)
2113 {
2114 	const struct rtw89_chip_info *chip = rtwdev->chip;
2115 	u8 macid = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
2116 	struct sk_buff *skb;
2117 	int ret;
2118 
2119 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
2120 	if (!skb) {
2121 		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
2122 		return -ENOMEM;
2123 	}
2124 	skb_put(skb, H2C_CMC_TBL_LEN);
2125 	SET_CTRL_INFO_MACID(skb->data, macid);
2126 	SET_CTRL_INFO_OPERATION(skb->data, 1);
2127 	if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) {
2128 		SET_CMC_TBL_TXPWR_MODE(skb->data, 0);
2129 		__rtw89_fw_h2c_set_tx_path(rtwdev, skb);
2130 		SET_CMC_TBL_ANTSEL_A(skb->data, 0);
2131 		SET_CMC_TBL_ANTSEL_B(skb->data, 0);
2132 		SET_CMC_TBL_ANTSEL_C(skb->data, 0);
2133 		SET_CMC_TBL_ANTSEL_D(skb->data, 0);
2134 	}
2135 	SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0);
2136 	SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0);
2137 	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
2138 		SET_CMC_TBL_DATA_DCM(skb->data, 0);
2139 
2140 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2141 			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
2142 			      chip->h2c_cctl_func_id, 0, 1,
2143 			      H2C_CMC_TBL_LEN);
2144 
2145 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2146 	if (ret) {
2147 		rtw89_err(rtwdev, "failed to send h2c\n");
2148 		goto fail;
2149 	}
2150 
2151 	return 0;
2152 fail:
2153 	dev_kfree_skb_any(skb);
2154 
2155 	return ret;
2156 }
2157 EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl);
2158 
2159 int rtw89_fw_h2c_default_cmac_tbl_g7(struct rtw89_dev *rtwdev,
2160 				     struct rtw89_vif *rtwvif,
2161 				     struct rtw89_sta *rtwsta)
2162 {
2163 	u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
2164 	struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
2165 	u32 len = sizeof(*h2c);
2166 	struct sk_buff *skb;
2167 	int ret;
2168 
2169 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2170 	if (!skb) {
2171 		rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n");
2172 		return -ENOMEM;
2173 	}
2174 	skb_put(skb, len);
2175 	h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data;
2176 
2177 	h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) |
2178 		  le32_encode_bits(1, CCTLINFO_G7_C0_OP);
2179 
2180 	h2c->w0 = le32_encode_bits(4, CCTLINFO_G7_W0_DATARATE);
2181 	h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_ALL);
2182 
2183 	h2c->w1 = le32_encode_bits(4, CCTLINFO_G7_W1_DATA_RTY_LOWEST_RATE) |
2184 		  le32_encode_bits(0xa, CCTLINFO_G7_W1_RTSRATE) |
2185 		  le32_encode_bits(4, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE);
2186 	h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_ALL);
2187 
2188 	h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_ALL);
2189 
2190 	h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_ALL);
2191 
2192 	h2c->w4 = le32_encode_bits(0xFFFF, CCTLINFO_G7_W4_ACT_SUBCH_CBW);
2193 	h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_ALL);
2194 
2195 	h2c->w5 = le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) |
2196 		  le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) |
2197 		  le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) |
2198 		  le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) |
2199 		  le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4);
2200 	h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_ALL);
2201 
2202 	h2c->w6 = le32_encode_bits(0xb, CCTLINFO_G7_W6_RESP_REF_RATE);
2203 	h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ALL);
2204 
2205 	h2c->w7 = le32_encode_bits(1, CCTLINFO_G7_W7_NC) |
2206 		  le32_encode_bits(1, CCTLINFO_G7_W7_NR) |
2207 		  le32_encode_bits(1, CCTLINFO_G7_W7_CB) |
2208 		  le32_encode_bits(0x1, CCTLINFO_G7_W7_CSI_PARA_EN) |
2209 		  le32_encode_bits(0xb, CCTLINFO_G7_W7_CSI_FIX_RATE);
2210 	h2c->m7 = cpu_to_le32(CCTLINFO_G7_W7_ALL);
2211 
2212 	h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_ALL);
2213 
2214 	h2c->w14 = le32_encode_bits(0, CCTLINFO_G7_W14_VO_CURR_RATE) |
2215 		   le32_encode_bits(0, CCTLINFO_G7_W14_VI_CURR_RATE) |
2216 		   le32_encode_bits(0, CCTLINFO_G7_W14_BE_CURR_RATE_L);
2217 	h2c->m14 = cpu_to_le32(CCTLINFO_G7_W14_ALL);
2218 
2219 	h2c->w15 = le32_encode_bits(0, CCTLINFO_G7_W15_BE_CURR_RATE_H) |
2220 		   le32_encode_bits(0, CCTLINFO_G7_W15_BK_CURR_RATE) |
2221 		   le32_encode_bits(0, CCTLINFO_G7_W15_MGNT_CURR_RATE);
2222 	h2c->m15 = cpu_to_le32(CCTLINFO_G7_W15_ALL);
2223 
2224 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2225 			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
2226 			      H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1,
2227 			      len);
2228 
2229 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2230 	if (ret) {
2231 		rtw89_err(rtwdev, "failed to send h2c\n");
2232 		goto fail;
2233 	}
2234 
2235 	return 0;
2236 fail:
2237 	dev_kfree_skb_any(skb);
2238 
2239 	return ret;
2240 }
2241 EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl_g7);
2242 
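/* Derive the per-bandwidth nominal packet padding from the peer's HE PPE
 * thresholds field, or from the common nominal padding capability when no
 * PPE thresholds are present.
 */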
2243 static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev,
2244 				     struct ieee80211_sta *sta, u8 *pads)
2245 {
2246 	bool ppe_th;
2247 	u8 ppe16, ppe8;
2248 	u8 nss = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1;
2249 	u8 ppe_thres_hdr = sta->deflink.he_cap.ppe_thres[0];
2250 	u8 ru_bitmap;
2251 	u8 n, idx, sh;
2252 	u16 ppe;
2253 	int i;
2254 
2255 	ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
2256 			   sta->deflink.he_cap.he_cap_elem.phy_cap_info[6]);
2257 	if (!ppe_th) {
2258 		u8 pad;
2259 
2260 		pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK,
2261 				sta->deflink.he_cap.he_cap_elem.phy_cap_info[9]);
2262 
2263 		for (i = 0; i < RTW89_PPE_BW_NUM; i++)
2264 			pads[i] = pad;
2265 
2266 		return;
2267 	}
2268 
2269 	ru_bitmap = FIELD_GET(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, ppe_thres_hdr);
2270 	n = hweight8(ru_bitmap);
2271 	n = 7 + (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) * nss;
2272 
2273 	for (i = 0; i < RTW89_PPE_BW_NUM; i++) {
2274 		if (!(ru_bitmap & BIT(i))) {
2275 			pads[i] = 1;
2276 			continue;
2277 		}
2278 
2279 		idx = n >> 3;
2280 		sh = n & 7;
2281 		n += IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2;
2282 
2283 		ppe = le16_to_cpu(*((__le16 *)&sta->deflink.he_cap.ppe_thres[idx]));
2284 		ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
2285 		sh += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
2286 		ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
2287 
2288 		if (ppe16 != 7 && ppe8 == 7)
2289 			pads[i] = 2;
2290 		else if (ppe8 != 7)
2291 			pads[i] = 1;
2292 		else
2293 			pads[i] = 0;
2294 	}
2295 }
2296 
2297 int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
2298 				struct ieee80211_vif *vif,
2299 				struct ieee80211_sta *sta)
2300 {
2301 	const struct rtw89_chip_info *chip = rtwdev->chip;
2302 	struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
2303 	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
2304 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
2305 						       rtwvif->sub_entity_idx);
2306 	struct sk_buff *skb;
2307 	u8 pads[RTW89_PPE_BW_NUM];
2308 	u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
2309 	u16 lowest_rate;
2310 	int ret;
2311 
2312 	memset(pads, 0, sizeof(pads));
2313 	if (sta && sta->deflink.he_cap.has_he)
2314 		__get_sta_he_pkt_padding(rtwdev, sta, pads);
2315 
2316 	if (vif->p2p)
2317 		lowest_rate = RTW89_HW_RATE_OFDM6;
2318 	else if (chan->band_type == RTW89_BAND_2G)
2319 		lowest_rate = RTW89_HW_RATE_CCK1;
2320 	else
2321 		lowest_rate = RTW89_HW_RATE_OFDM6;
2322 
2323 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
2324 	if (!skb) {
2325 		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
2326 		return -ENOMEM;
2327 	}
2328 	skb_put(skb, H2C_CMC_TBL_LEN);
2329 	SET_CTRL_INFO_MACID(skb->data, mac_id);
2330 	SET_CTRL_INFO_OPERATION(skb->data, 1);
2331 	SET_CMC_TBL_DISRTSFB(skb->data, 1);
2332 	SET_CMC_TBL_DISDATAFB(skb->data, 1);
2333 	SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, lowest_rate);
2334 	SET_CMC_TBL_RTS_TXCNT_LMT_SEL(skb->data, 0);
2335 	SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 0);
2336 	if (vif->type == NL80211_IFTYPE_STATION)
2337 		SET_CMC_TBL_ULDL(skb->data, 1);
2338 	else
2339 		SET_CMC_TBL_ULDL(skb->data, 0);
2340 	SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif->port);
2341 	if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD_V1) {
2342 		SET_CMC_TBL_NOMINAL_PKT_PADDING_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
2343 		SET_CMC_TBL_NOMINAL_PKT_PADDING40_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
2344 		SET_CMC_TBL_NOMINAL_PKT_PADDING80_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
2345 		SET_CMC_TBL_NOMINAL_PKT_PADDING160_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_160]);
2346 	} else if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) {
2347 		SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
2348 		SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
2349 		SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
2350 		SET_CMC_TBL_NOMINAL_PKT_PADDING160(skb->data, pads[RTW89_CHANNEL_WIDTH_160]);
2351 	}
2352 	if (sta)
2353 		SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data,
2354 						  sta->deflink.he_cap.has_he);
2355 	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
2356 		SET_CMC_TBL_DATA_DCM(skb->data, 0);
2357 
2358 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2359 			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
2360 			      chip->h2c_cctl_func_id, 0, 1,
2361 			      H2C_CMC_TBL_LEN);
2362 
2363 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2364 	if (ret) {
2365 		rtw89_err(rtwdev, "failed to send h2c\n");
2366 		goto fail;
2367 	}
2368 
2369 	return 0;
2370 fail:
2371 	dev_kfree_skb_any(skb);
2372 
2373 	return ret;
2374 }
2375 EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl);
2376 
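/* Same as __get_sta_he_pkt_padding(), but parsing the EHT PPE thresholds. */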
2377 static void __get_sta_eht_pkt_padding(struct rtw89_dev *rtwdev,
2378 				      struct ieee80211_sta *sta, u8 *pads)
2379 {
2380 	u8 nss = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1;
2381 	u16 ppe_thres_hdr;
2382 	u8 ppe16, ppe8;
2383 	u8 n, idx, sh;
2384 	u8 ru_bitmap;
2385 	bool ppe_th;
2386 	u16 ppe;
2387 	int i;
2388 
2389 	ppe_th = !!u8_get_bits(sta->deflink.eht_cap.eht_cap_elem.phy_cap_info[5],
2390 			       IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT);
2391 	if (!ppe_th) {
2392 		u8 pad;
2393 
2394 		pad = u8_get_bits(sta->deflink.eht_cap.eht_cap_elem.phy_cap_info[5],
2395 				  IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK);
2396 
2397 		for (i = 0; i < RTW89_PPE_BW_NUM; i++)
2398 			pads[i] = pad;
2399 
2400 		return;
2401 	}
2402 
2403 	ppe_thres_hdr = get_unaligned_le16(sta->deflink.eht_cap.eht_ppe_thres);
2404 	ru_bitmap = u16_get_bits(ppe_thres_hdr,
2405 				 IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK);
2406 	n = hweight8(ru_bitmap);
2407 	n = IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE +
2408 	    (n * IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2) * nss;
2409 
2410 	for (i = 0; i < RTW89_PPE_BW_NUM; i++) {
2411 		if (!(ru_bitmap & BIT(i))) {
2412 			pads[i] = 1;
2413 			continue;
2414 		}
2415 
2416 		idx = n >> 3;
2417 		sh = n & 7;
2418 		n += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2;
2419 
2420 		ppe = get_unaligned_le16(sta->deflink.eht_cap.eht_ppe_thres + idx);
2421 		ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
2422 		sh += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE;
2423 		ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
2424 
2425 		if (ppe16 != 7 && ppe8 == 7)
2426 			pads[i] = 2;
2427 		else if (ppe8 != 7)
2428 			pads[i] = 1;
2429 		else
2430 			pads[i] = 0;
2431 	}
2432 }
2433 
2434 int rtw89_fw_h2c_assoc_cmac_tbl_g7(struct rtw89_dev *rtwdev,
2435 				   struct ieee80211_vif *vif,
2436 				   struct ieee80211_sta *sta)
2437 {
2438 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
2439 	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
2440 	struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
2441 	u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
2442 	struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
2443 	u8 pads[RTW89_PPE_BW_NUM];
2444 	u32 len = sizeof(*h2c);
2445 	struct sk_buff *skb;
2446 	u16 lowest_rate;
2447 	int ret;
2448 
2449 	memset(pads, 0, sizeof(pads));
2450 	if (sta) {
2451 		if (sta->deflink.eht_cap.has_eht)
2452 			__get_sta_eht_pkt_padding(rtwdev, sta, pads);
2453 		else if (sta->deflink.he_cap.has_he)
2454 			__get_sta_he_pkt_padding(rtwdev, sta, pads);
2455 	}
2456 
2457 	if (vif->p2p)
2458 		lowest_rate = RTW89_HW_RATE_OFDM6;
2459 	else if (chan->band_type == RTW89_BAND_2G)
2460 		lowest_rate = RTW89_HW_RATE_CCK1;
2461 	else
2462 		lowest_rate = RTW89_HW_RATE_OFDM6;
2463 
2464 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2465 	if (!skb) {
2466 		rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n");
2467 		return -ENOMEM;
2468 	}
2469 	skb_put(skb, len);
2470 	h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data;
2471 
2472 	h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) |
2473 		  le32_encode_bits(1, CCTLINFO_G7_C0_OP);
2474 
2475 	h2c->w0 = le32_encode_bits(1, CCTLINFO_G7_W0_DISRTSFB) |
2476 		  le32_encode_bits(1, CCTLINFO_G7_W0_DISDATAFB);
2477 	h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_DISRTSFB |
2478 			      CCTLINFO_G7_W0_DISDATAFB);
2479 
2480 	h2c->w1 = le32_encode_bits(lowest_rate, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE);
2481 	h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE);
2482 
2483 	h2c->w2 = le32_encode_bits(0, CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL);
2484 	h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL);
2485 
2486 	h2c->w3 = le32_encode_bits(0, CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL);
2487 	h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL);
2488 
2489 	h2c->w4 = le32_encode_bits(rtwvif->port, CCTLINFO_G7_W4_MULTI_PORT_ID);
2490 	h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_MULTI_PORT_ID);
2491 
2492 	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) {
2493 		h2c->w4 |= le32_encode_bits(0, CCTLINFO_G7_W4_DATA_DCM);
2494 		h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_DATA_DCM);
2495 	}
2496 
2497 	if (vif->bss_conf.eht_support) {
2498 		h2c->w4 |= le32_encode_bits(~vif->bss_conf.eht_puncturing,
2499 					    CCTLINFO_G7_W4_ACT_SUBCH_CBW);
2500 		h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_ACT_SUBCH_CBW);
2501 	}
2502 
2503 	h2c->w5 = le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_20],
2504 				   CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) |
2505 		  le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_40],
2506 				   CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) |
2507 		  le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_80],
2508 				   CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) |
2509 		  le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_160],
2510 				   CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) |
2511 		  le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_320],
2512 				   CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4);
2513 	h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0 |
2514 			      CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1 |
2515 			      CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2 |
2516 			      CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3 |
2517 			      CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4);
2518 
2519 	h2c->w6 = le32_encode_bits(vif->type == NL80211_IFTYPE_STATION ? 1 : 0,
2520 				   CCTLINFO_G7_W6_ULDL);
2521 	h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ULDL);
2522 
2523 	if (sta) {
2524 		h2c->w8 = le32_encode_bits(sta->deflink.he_cap.has_he,
2525 					   CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT);
2526 		h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT);
2527 	}
2528 
2529 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2530 			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
2531 			      H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1,
2532 			      len);
2533 
2534 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2535 	if (ret) {
2536 		rtw89_err(rtwdev, "failed to send h2c\n");
2537 		goto fail;
2538 	}
2539 
2540 	return 0;
2541 fail:
2542 	dev_kfree_skb_any(skb);
2543 
2544 	return ret;
2545 }
2546 EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl_g7);
2547 
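/* Update the CMAC table BA bitmap size for a station based on the smallest
 * A-MPDU aggregation number negotiated across its active TIDs.
 */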
2548 int rtw89_fw_h2c_ampdu_cmac_tbl_g7(struct rtw89_dev *rtwdev,
2549 				   struct ieee80211_vif *vif,
2550 				   struct ieee80211_sta *sta)
2551 {
2552 	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
2553 	struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
2554 	u32 len = sizeof(*h2c);
2555 	struct sk_buff *skb;
2556 	u16 agg_num = 0;
2557 	u8 ba_bmap = 0;
2558 	int ret;
2559 	u8 tid;
2560 
2561 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2562 	if (!skb) {
2563 		rtw89_err(rtwdev, "failed to alloc skb for ampdu cmac g7\n");
2564 		return -ENOMEM;
2565 	}
2566 	skb_put(skb, len);
2567 	h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data;
2568 
2569 	for_each_set_bit(tid, rtwsta->ampdu_map, IEEE80211_NUM_TIDS) {
2570 		if (agg_num == 0)
2571 			agg_num = rtwsta->ampdu_params[tid].agg_num;
2572 		else
2573 			agg_num = min(agg_num, rtwsta->ampdu_params[tid].agg_num);
2574 	}
2575 
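	/* firmware encoding of the BA bitmap size is not monotonic in agg_num */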
2576 	if (agg_num <= 0x20)
2577 		ba_bmap = 3;
2578 	else if (agg_num > 0x20 && agg_num <= 0x40)
2579 		ba_bmap = 0;
2580 	else if (agg_num > 0x40 && agg_num <= 0x80)
2581 		ba_bmap = 1;
2582 	else if (agg_num > 0x80 && agg_num <= 0x100)
2583 		ba_bmap = 2;
2584 	else if (agg_num > 0x100 && agg_num <= 0x200)
2585 		ba_bmap = 4;
2586 	else if (agg_num > 0x200 && agg_num <= 0x400)
2587 		ba_bmap = 5;
2588 
2589 	h2c->c0 = le32_encode_bits(rtwsta->mac_id, CCTLINFO_G7_C0_MACID) |
2590 		  le32_encode_bits(1, CCTLINFO_G7_C0_OP);
2591 
2592 	h2c->w3 = le32_encode_bits(ba_bmap, CCTLINFO_G7_W3_BA_BMAP);
2593 	h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_BA_BMAP);
2594 
2595 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2596 			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
2597 			      H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 0,
2598 			      len);
2599 
2600 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2601 	if (ret) {
2602 		rtw89_err(rtwdev, "failed to send h2c\n");
2603 		goto fail;
2604 	}
2605 
2606 	return 0;
2607 fail:
2608 	dev_kfree_skb_any(skb);
2609 
2610 	return ret;
2611 }
2612 EXPORT_SYMBOL(rtw89_fw_h2c_ampdu_cmac_tbl_g7);
2613 
2614 int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
2615 				 struct rtw89_sta *rtwsta)
2616 {
2617 	const struct rtw89_chip_info *chip = rtwdev->chip;
2618 	struct sk_buff *skb;
2619 	int ret;
2620 
2621 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
2622 	if (!skb) {
2623 		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
2624 		return -ENOMEM;
2625 	}
2626 	skb_put(skb, H2C_CMC_TBL_LEN);
2627 	SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id);
2628 	SET_CTRL_INFO_OPERATION(skb->data, 1);
2629 	if (rtwsta->cctl_tx_time) {
2630 		SET_CMC_TBL_AMPDU_TIME_SEL(skb->data, 1);
2631 		SET_CMC_TBL_AMPDU_MAX_TIME(skb->data, rtwsta->ampdu_max_time);
2632 	}
2633 	if (rtwsta->cctl_tx_retry_limit) {
2634 		SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 1);
2635 		SET_CMC_TBL_DATA_TX_CNT_LMT(skb->data, rtwsta->data_tx_cnt_lmt);
2636 	}
2637 
2638 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2639 			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
2640 			      chip->h2c_cctl_func_id, 0, 1,
2641 			      H2C_CMC_TBL_LEN);
2642 
2643 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2644 	if (ret) {
2645 		rtw89_err(rtwdev, "failed to send h2c\n");
2646 		goto fail;
2647 	}
2648 
2649 	return 0;
2650 fail:
2651 	dev_kfree_skb_any(skb);
2652 
2653 	return ret;
2654 }
2655 
2656 int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev,
2657 				 struct rtw89_sta *rtwsta)
2658 {
2659 	const struct rtw89_chip_info *chip = rtwdev->chip;
2660 	struct sk_buff *skb;
2661 	int ret;
2662 
2663 	if (chip->h2c_cctl_func_id != H2C_FUNC_MAC_CCTLINFO_UD)
2664 		return 0;
2665 
2666 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
2667 	if (!skb) {
2668 		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
2669 		return -ENOMEM;
2670 	}
2671 	skb_put(skb, H2C_CMC_TBL_LEN);
2672 	SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id);
2673 	SET_CTRL_INFO_OPERATION(skb->data, 1);
2674 
2675 	__rtw89_fw_h2c_set_tx_path(rtwdev, skb);
2676 
2677 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2678 			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
2679 			      H2C_FUNC_MAC_CCTLINFO_UD, 0, 1,
2680 			      H2C_CMC_TBL_LEN);
2681 
2682 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2683 	if (ret) {
2684 		rtw89_err(rtwdev, "failed to send h2c\n");
2685 		goto fail;
2686 	}
2687 
2688 	return 0;
2689 fail:
2690 	dev_kfree_skb_any(skb);
2691 
2692 	return ret;
2693 }
2694 
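/* Push the current beacon template, with any P2P NoA attribute appended,
 * to firmware.
 */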
2695 int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
2696 			       struct rtw89_vif *rtwvif)
2697 {
2698 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
2699 						       rtwvif->sub_entity_idx);
2700 	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
2701 	struct rtw89_h2c_bcn_upd *h2c;
2702 	struct sk_buff *skb_beacon;
2703 	struct ieee80211_hdr *hdr;
2704 	u32 len = sizeof(*h2c);
2705 	struct sk_buff *skb;
2706 	int bcn_total_len;
2707 	u16 beacon_rate;
2708 	u16 tim_offset;
2709 	void *noa_data;
2710 	u8 noa_len;
2711 	int ret;
2712 
2713 	if (vif->p2p)
2714 		beacon_rate = RTW89_HW_RATE_OFDM6;
2715 	else if (chan->band_type == RTW89_BAND_2G)
2716 		beacon_rate = RTW89_HW_RATE_CCK1;
2717 	else
2718 		beacon_rate = RTW89_HW_RATE_OFDM6;
2719 
2720 	skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset,
2721 					      NULL, 0);
2722 	if (!skb_beacon) {
2723 		rtw89_err(rtwdev, "failed to get beacon skb\n");
2724 		return -ENOMEM;
2725 	}
2726 
2727 	noa_len = rtw89_p2p_noa_fetch(rtwvif, &noa_data);
2728 	if (noa_len &&
2729 	    (noa_len <= skb_tailroom(skb_beacon) ||
2730 	     pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) {
2731 		skb_put_data(skb_beacon, noa_data, noa_len);
2732 	}
2733 
2734 	hdr = (struct ieee80211_hdr *)skb_beacon->data;
2735 	tim_offset -= ieee80211_hdrlen(hdr->frame_control);
2736 
2737 	bcn_total_len = len + skb_beacon->len;
2738 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len);
2739 	if (!skb) {
2740 		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
2741 		dev_kfree_skb_any(skb_beacon);
2742 		return -ENOMEM;
2743 	}
2744 	skb_put(skb, len);
2745 	h2c = (struct rtw89_h2c_bcn_upd *)skb->data;
2746 
2747 	h2c->w0 = le32_encode_bits(rtwvif->port, RTW89_H2C_BCN_UPD_W0_PORT) |
2748 		  le32_encode_bits(0, RTW89_H2C_BCN_UPD_W0_MBSSID) |
2749 		  le32_encode_bits(rtwvif->mac_idx, RTW89_H2C_BCN_UPD_W0_BAND) |
2750 		  le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_W0_GRP_IE_OFST);
2751 	h2c->w1 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_BCN_UPD_W1_MACID) |
2752 		  le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_W1_SSN_SEL) |
2753 		  le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_W1_SSN_MODE) |
2754 		  le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_W1_RATE);
2755 
2756 	skb_put_data(skb, skb_beacon->data, skb_beacon->len);
2757 	dev_kfree_skb_any(skb_beacon);
2758 
2759 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2760 			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
2761 			      H2C_FUNC_MAC_BCN_UPD, 0, 1,
2762 			      bcn_total_len);
2763 
2764 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2765 	if (ret) {
2766 		rtw89_err(rtwdev, "failed to send h2c\n");
2767 		dev_kfree_skb_any(skb);
2768 		return ret;
2769 	}
2770 
2771 	return 0;
2772 }
2773 EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon);
2774 
2775 int rtw89_fw_h2c_update_beacon_be(struct rtw89_dev *rtwdev,
2776 				  struct rtw89_vif *rtwvif)
2777 {
2778 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
2779 	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
2780 	struct rtw89_h2c_bcn_upd_be *h2c;
2781 	struct sk_buff *skb_beacon;
2782 	struct ieee80211_hdr *hdr;
2783 	u32 len = sizeof(*h2c);
2784 	struct sk_buff *skb;
2785 	int bcn_total_len;
2786 	u16 beacon_rate;
2787 	u16 tim_offset;
2788 	void *noa_data;
2789 	u8 noa_len;
2790 	int ret;
2791 
2792 	if (vif->p2p)
2793 		beacon_rate = RTW89_HW_RATE_OFDM6;
2794 	else if (chan->band_type == RTW89_BAND_2G)
2795 		beacon_rate = RTW89_HW_RATE_CCK1;
2796 	else
2797 		beacon_rate = RTW89_HW_RATE_OFDM6;
2798 
2799 	skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset,
2800 					      NULL, 0);
2801 	if (!skb_beacon) {
2802 		rtw89_err(rtwdev, "failed to get beacon skb\n");
2803 		return -ENOMEM;
2804 	}
2805 
2806 	noa_len = rtw89_p2p_noa_fetch(rtwvif, &noa_data);
2807 	if (noa_len &&
2808 	    (noa_len <= skb_tailroom(skb_beacon) ||
2809 	     pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) {
2810 		skb_put_data(skb_beacon, noa_data, noa_len);
2811 	}
2812 
2813 	hdr = (struct ieee80211_hdr *)skb_beacon->data;
2814 	tim_offset -= ieee80211_hdrlen(hdr->frame_control);
2815 
2816 	bcn_total_len = len + skb_beacon->len;
2817 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len);
2818 	if (!skb) {
2819 		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
2820 		dev_kfree_skb_any(skb_beacon);
2821 		return -ENOMEM;
2822 	}
2823 	skb_put(skb, len);
2824 	h2c = (struct rtw89_h2c_bcn_upd_be *)skb->data;
2825 
2826 	h2c->w0 = le32_encode_bits(rtwvif->port, RTW89_H2C_BCN_UPD_BE_W0_PORT) |
2827 		  le32_encode_bits(0, RTW89_H2C_BCN_UPD_BE_W0_MBSSID) |
2828 		  le32_encode_bits(rtwvif->mac_idx, RTW89_H2C_BCN_UPD_BE_W0_BAND) |
2829 		  le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_BE_W0_GRP_IE_OFST);
2830 	h2c->w1 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_BCN_UPD_BE_W1_MACID) |
2831 		  le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_BE_W1_SSN_SEL) |
2832 		  le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_BE_W1_SSN_MODE) |
2833 		  le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_BE_W1_RATE);
2834 
2835 	skb_put_data(skb, skb_beacon->data, skb_beacon->len);
2836 	dev_kfree_skb_any(skb_beacon);
2837 
2838 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2839 			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
2840 			      H2C_FUNC_MAC_BCN_UPD_BE, 0, 1,
2841 			      bcn_total_len);
2842 
2843 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2844 	if (ret) {
2845 		rtw89_err(rtwdev, "failed to send h2c\n");
2846 		goto fail;
2847 	}
2848 
2849 	return 0;
2850 
2851 fail:
2852 	dev_kfree_skb_any(skb);
2853 
2854 	return ret;
2855 }
2856 EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon_be);
2857 
2858 #define H2C_ROLE_MAINTAIN_LEN 4
2859 int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
2860 			       struct rtw89_vif *rtwvif,
2861 			       struct rtw89_sta *rtwsta,
2862 			       enum rtw89_upd_mode upd_mode)
2863 {
2864 	struct sk_buff *skb;
2865 	u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
2866 	u8 self_role;
2867 	int ret;
2868 
2869 	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) {
2870 		if (rtwsta)
2871 			self_role = RTW89_SELF_ROLE_AP_CLIENT;
2872 		else
2873 			self_role = rtwvif->self_role;
2874 	} else {
2875 		self_role = rtwvif->self_role;
2876 	}
2877 
2878 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ROLE_MAINTAIN_LEN);
2879 	if (!skb) {
2880 		rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
2881 		return -ENOMEM;
2882 	}
2883 	skb_put(skb, H2C_ROLE_MAINTAIN_LEN);
2884 	SET_FWROLE_MAINTAIN_MACID(skb->data, mac_id);
2885 	SET_FWROLE_MAINTAIN_SELF_ROLE(skb->data, self_role);
2886 	SET_FWROLE_MAINTAIN_UPD_MODE(skb->data, upd_mode);
2887 	SET_FWROLE_MAINTAIN_WIFI_ROLE(skb->data, rtwvif->wifi_role);
2888 
2889 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2890 			      H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
2891 			      H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1,
2892 			      H2C_ROLE_MAINTAIN_LEN);
2893 
2894 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2895 	if (ret) {
2896 		rtw89_err(rtwdev, "failed to send h2c\n");
2897 		goto fail;
2898 	}
2899 
2900 	return 0;
2901 fail:
2902 	dev_kfree_skb_any(skb);
2903 
2904 	return ret;
2905 }
2906 
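/* Classify the peer generation (802.11be, 802.11ax or older) from the
 * station's capabilities, falling back to the vif's BSS capabilities when
 * no station is given.
 */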
2907 static enum rtw89_fw_sta_type
2908 rtw89_fw_get_sta_type(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
2909 		      struct rtw89_sta *rtwsta)
2910 {
2911 	struct ieee80211_sta *sta = rtwsta_to_sta_safe(rtwsta);
2912 	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
2913 
2914 	if (!sta)
2915 		goto by_vif;
2916 
2917 	if (sta->deflink.eht_cap.has_eht)
2918 		return RTW89_FW_BE_STA;
2919 	else if (sta->deflink.he_cap.has_he)
2920 		return RTW89_FW_AX_STA;
2921 	else
2922 		return RTW89_FW_N_AC_STA;
2923 
2924 by_vif:
2925 	if (vif->bss_conf.eht_support)
2926 		return RTW89_FW_BE_STA;
2927 	else if (vif->bss_conf.he_support)
2928 		return RTW89_FW_AX_STA;
2929 	else
2930 		return RTW89_FW_N_AC_STA;
2931 }
2932 
2933 int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
2934 			   struct rtw89_sta *rtwsta, bool dis_conn)
2935 {
2936 	struct sk_buff *skb;
2937 	u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
2938 	u8 self_role = rtwvif->self_role;
2939 	enum rtw89_fw_sta_type sta_type;
2940 	u8 net_type = rtwvif->net_type;
2941 	struct rtw89_h2c_join_v1 *h2c_v1;
2942 	struct rtw89_h2c_join *h2c;
2943 	u32 len = sizeof(*h2c);
2944 	bool format_v1 = false;
2945 	int ret;
2946 
2947 	if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) {
2948 		len = sizeof(*h2c_v1);
2949 		format_v1 = true;
2950 	}
2951 
2952 	if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta) {
2953 		self_role = RTW89_SELF_ROLE_AP_CLIENT;
2954 		net_type = dis_conn ? RTW89_NET_TYPE_NO_LINK : net_type;
2955 	}
2956 
2957 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2958 	if (!skb) {
2959 		rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
2960 		return -ENOMEM;
2961 	}
2962 	skb_put(skb, len);
2963 	h2c = (struct rtw89_h2c_join *)skb->data;
2964 
2965 	h2c->w0 = le32_encode_bits(mac_id, RTW89_H2C_JOININFO_W0_MACID) |
2966 		  le32_encode_bits(dis_conn, RTW89_H2C_JOININFO_W0_OP) |
2967 		  le32_encode_bits(rtwvif->mac_idx, RTW89_H2C_JOININFO_W0_BAND) |
2968 		  le32_encode_bits(rtwvif->wmm, RTW89_H2C_JOININFO_W0_WMM) |
2969 		  le32_encode_bits(rtwvif->trigger, RTW89_H2C_JOININFO_W0_TGR) |
2970 		  le32_encode_bits(0, RTW89_H2C_JOININFO_W0_ISHESTA) |
2971 		  le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DLBW) |
2972 		  le32_encode_bits(0, RTW89_H2C_JOININFO_W0_TF_MAC_PAD) |
2973 		  le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DL_T_PE) |
2974 		  le32_encode_bits(rtwvif->port, RTW89_H2C_JOININFO_W0_PORT_ID) |
2975 		  le32_encode_bits(net_type, RTW89_H2C_JOININFO_W0_NET_TYPE) |
2976 		  le32_encode_bits(rtwvif->wifi_role, RTW89_H2C_JOININFO_W0_WIFI_ROLE) |
2977 		  le32_encode_bits(self_role, RTW89_H2C_JOININFO_W0_SELF_ROLE);
2978 
2979 	if (!format_v1)
2980 		goto done;
2981 
2982 	h2c_v1 = (struct rtw89_h2c_join_v1 *)skb->data;
2983 
2984 	sta_type = rtw89_fw_get_sta_type(rtwdev, rtwvif, rtwsta);
2985 
2986 	h2c_v1->w1 = le32_encode_bits(sta_type, RTW89_H2C_JOININFO_W1_STA_TYPE);
2987 	h2c_v1->w2 = 0;
2988 
2989 done:
2990 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2991 			      H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
2992 			      H2C_FUNC_MAC_JOININFO, 0, 1,
2993 			      len);
2994 
2995 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2996 	if (ret) {
2997 		rtw89_err(rtwdev, "failed to send h2c\n");
2998 		goto fail;
2999 	}
3000 
3001 	return 0;
3002 fail:
3003 	dev_kfree_skb_any(skb);
3004 
3005 	return ret;
3006 }
3007 
3008 int rtw89_fw_h2c_notify_dbcc(struct rtw89_dev *rtwdev, bool en)
3009 {
3010 	struct rtw89_h2c_notify_dbcc *h2c;
3011 	u32 len = sizeof(*h2c);
3012 	struct sk_buff *skb;
3013 	int ret;
3014 
3015 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
3016 	if (!skb) {
3017 		rtw89_err(rtwdev, "failed to alloc skb for h2c notify dbcc\n");
3018 		return -ENOMEM;
3019 	}
3020 	skb_put(skb, len);
3021 	h2c = (struct rtw89_h2c_notify_dbcc *)skb->data;
3022 
3023 	h2c->w0 = le32_encode_bits(en, RTW89_H2C_NOTIFY_DBCC_EN);
3024 
3025 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3026 			      H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
3027 			      H2C_FUNC_NOTIFY_DBCC, 0, 1,
3028 			      len);
3029 
3030 	ret = rtw89_h2c_tx(rtwdev, skb, false);
3031 	if (ret) {
3032 		rtw89_err(rtwdev, "failed to send h2c\n");
3033 		goto fail;
3034 	}
3035 
3036 	return 0;
3037 fail:
3038 	dev_kfree_skb_any(skb);
3039 
3040 	return ret;
3041 }
3042 
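/* Pause or resume TX for one macid. When the MACID_PAUSE_SLEEP firmware
 * feature is present, the combined pause/sleep command is used and both
 * bits are updated together.
 */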
3043 int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
3044 			     bool pause)
3045 {
3046 	struct rtw89_fw_macid_pause_sleep_grp *h2c_new;
3047 	struct rtw89_fw_macid_pause_grp *h2c;
3048 	__le32 set = cpu_to_le32(BIT(sh));
3049 	u8 h2c_macid_pause_id;
3050 	struct sk_buff *skb;
3051 	u32 len;
3052 	int ret;
3053 
3054 	if (RTW89_CHK_FW_FEATURE(MACID_PAUSE_SLEEP, &rtwdev->fw)) {
3055 		h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE_SLEEP;
3056 		len = sizeof(*h2c_new);
3057 	} else {
3058 		h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE;
3059 		len = sizeof(*h2c);
3060 	}
3061 
3062 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
3063 	if (!skb) {
3064 		rtw89_err(rtwdev, "failed to alloc skb for h2c macid pause\n");
3065 		return -ENOMEM;
3066 	}
3067 	skb_put(skb, len);
3068 
3069 	if (h2c_macid_pause_id == H2C_FUNC_MAC_MACID_PAUSE_SLEEP) {
3070 		h2c_new = (struct rtw89_fw_macid_pause_sleep_grp *)skb->data;
3071 
3072 		h2c_new->n[0].pause_mask_grp[grp] = set;
3073 		h2c_new->n[0].sleep_mask_grp[grp] = set;
3074 		if (pause) {
3075 			h2c_new->n[0].pause_grp[grp] = set;
3076 			h2c_new->n[0].sleep_grp[grp] = set;
3077 		}
3078 	} else {
3079 		h2c = (struct rtw89_fw_macid_pause_grp *)skb->data;
3080 
3081 		h2c->mask_grp[grp] = set;
3082 		if (pause)
3083 			h2c->pause_grp[grp] = set;
3084 	}
3085 
3086 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3087 			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
3088 			      h2c_macid_pause_id, 1, 0,
3089 			      len);
3090 
3091 	ret = rtw89_h2c_tx(rtwdev, skb, false);
3092 	if (ret) {
3093 		rtw89_err(rtwdev, "failed to send h2c\n");
3094 		goto fail;
3095 	}
3096 
3097 	return 0;
3098 fail:
3099 	dev_kfree_skb_any(skb);
3100 
3101 	return ret;
3102 }
3103 
3104 #define H2C_EDCA_LEN 12
3105 int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
3106 			  u8 ac, u32 val)
3107 {
3108 	struct sk_buff *skb;
3109 	int ret;
3110 
3111 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_EDCA_LEN);
3112 	if (!skb) {
3113 		rtw89_err(rtwdev, "failed to alloc skb for h2c edca\n");
3114 		return -ENOMEM;
3115 	}
3116 	skb_put(skb, H2C_EDCA_LEN);
3117 	RTW89_SET_EDCA_SEL(skb->data, 0);
3118 	RTW89_SET_EDCA_BAND(skb->data, rtwvif->mac_idx);
3119 	RTW89_SET_EDCA_WMM(skb->data, 0);
3120 	RTW89_SET_EDCA_AC(skb->data, ac);
3121 	RTW89_SET_EDCA_PARAM(skb->data, val);
3122 
3123 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3124 			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
3125 			      H2C_FUNC_USR_EDCA, 0, 1,
3126 			      H2C_EDCA_LEN);
3127 
3128 	ret = rtw89_h2c_tx(rtwdev, skb, false);
3129 	if (ret) {
3130 		rtw89_err(rtwdev, "failed to send h2c\n");
3131 		goto fail;
3132 	}
3133 
3134 	return 0;
3135 fail:
3136 	dev_kfree_skb_any(skb);
3137 
3138 	return ret;
3139 }
3140 
3141 #define H2C_TSF32_TOGL_LEN 4
3142 int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
3143 			      bool en)
3144 {
3145 	struct sk_buff *skb;
3146 	u16 early_us = en ? 2000 : 0;
3147 	u8 *cmd;
3148 	int ret;
3149 
3150 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_TSF32_TOGL_LEN);
3151 	if (!skb) {
3152 		rtw89_err(rtwdev, "failed to alloc skb for h2c tsf32 toggle\n");
3153 		return -ENOMEM;
3154 	}
3155 	skb_put(skb, H2C_TSF32_TOGL_LEN);
3156 	cmd = skb->data;
3157 
3158 	RTW89_SET_FWCMD_TSF32_TOGL_BAND(cmd, rtwvif->mac_idx);
3159 	RTW89_SET_FWCMD_TSF32_TOGL_EN(cmd, en);
3160 	RTW89_SET_FWCMD_TSF32_TOGL_PORT(cmd, rtwvif->port);
3161 	RTW89_SET_FWCMD_TSF32_TOGL_EARLY(cmd, early_us);
3162 
3163 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3164 			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
3165 			      H2C_FUNC_TSF32_TOGL, 0, 0,
3166 			      H2C_TSF32_TOGL_LEN);
3167 
3168 	ret = rtw89_h2c_tx(rtwdev, skb, false);
3169 	if (ret) {
3170 		rtw89_err(rtwdev, "failed to send h2c\n");
3171 		goto fail;
3172 	}
3173 
3174 	return 0;
3175 fail:
3176 	dev_kfree_skb_any(skb);
3177 
3178 	return ret;
3179 }
3180 
3181 #define H2C_OFLD_CFG_LEN 8
3182 int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev)
3183 {
3184 	static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00};
3185 	struct sk_buff *skb;
3186 	int ret;
3187 
3188 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_OFLD_CFG_LEN);
3189 	if (!skb) {
3190 		rtw89_err(rtwdev, "failed to alloc skb for h2c ofld\n");
3191 		return -ENOMEM;
3192 	}
3193 	skb_put_data(skb, cfg, H2C_OFLD_CFG_LEN);
3194 
3195 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3196 			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
3197 			      H2C_FUNC_OFLD_CFG, 0, 1,
3198 			      H2C_OFLD_CFG_LEN);
3199 
3200 	ret = rtw89_h2c_tx(rtwdev, skb, false);
3201 	if (ret) {
3202 		rtw89_err(rtwdev, "failed to send h2c\n");
3203 		goto fail;
3204 	}
3205 
3206 	return 0;
3207 fail:
3208 	dev_kfree_skb_any(skb);
3209 
3210 	return ret;
3211 }
3212 
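/* Configure firmware beacon filtering and connection-quality monitoring for
 * a station interface, using the CQM threshold/hysteresis from mac80211 when
 * set, otherwise driver defaults.
 */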
3213 int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev,
3214 				  struct ieee80211_vif *vif,
3215 				  bool connect)
3216 {
3217 	struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif);
3218 	struct ieee80211_bss_conf *bss_conf = vif ? &vif->bss_conf : NULL;
3219 	s32 thold = RTW89_DEFAULT_CQM_THOLD;
3220 	u32 hyst = RTW89_DEFAULT_CQM_HYST;
3221 	struct rtw89_h2c_bcnfltr *h2c;
3222 	u32 len = sizeof(*h2c);
3223 	struct sk_buff *skb;
3224 	int ret;
3225 
3226 	if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw))
3227 		return -EINVAL;
3228 
3229 	if (!rtwvif || !bss_conf || rtwvif->net_type != RTW89_NET_TYPE_INFRA)
3230 		return -EINVAL;
3231 
3232 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
3233 	if (!skb) {
3234 		rtw89_err(rtwdev, "failed to alloc skb for h2c bcn filter\n");
3235 		return -ENOMEM;
3236 	}
3237 
3238 	skb_put(skb, len);
3239 	h2c = (struct rtw89_h2c_bcnfltr *)skb->data;
3240 
3241 	if (bss_conf->cqm_rssi_hyst)
3242 		hyst = bss_conf->cqm_rssi_hyst;
3243 	if (bss_conf->cqm_rssi_thold)
3244 		thold = bss_conf->cqm_rssi_thold;
3245 
3246 	h2c->w0 = le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_RSSI) |
3247 		  le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_BCN) |
3248 		  le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_EN) |
3249 		  le32_encode_bits(RTW89_BCN_FLTR_OFFLOAD_MODE_DEFAULT,
3250 				   RTW89_H2C_BCNFLTR_W0_MODE) |
3251 		  le32_encode_bits(RTW89_BCN_LOSS_CNT, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT) |
3252 		  le32_encode_bits(hyst, RTW89_H2C_BCNFLTR_W0_RSSI_HYST) |
3253 		  le32_encode_bits(thold + MAX_RSSI,
3254 				   RTW89_H2C_BCNFLTR_W0_RSSI_THRESHOLD) |
3255 		  le32_encode_bits(rtwvif->mac_id, RTW89_H2C_BCNFLTR_W0_MAC_ID);
3256 
3257 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3258 			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
3259 			      H2C_FUNC_CFG_BCNFLTR, 0, 1, len);
3260 
3261 	ret = rtw89_h2c_tx(rtwdev, skb, false);
3262 	if (ret) {
3263 		rtw89_err(rtwdev, "failed to send h2c\n");
3264 		goto fail;
3265 	}
3266 
3267 	return 0;
3268 fail:
3269 	dev_kfree_skb_any(skb);
3270 
3271 	return ret;
3272 }
3273 
3274 int rtw89_fw_h2c_rssi_offload(struct rtw89_dev *rtwdev,
3275 			      struct rtw89_rx_phy_ppdu *phy_ppdu)
3276 {
3277 	struct rtw89_h2c_ofld_rssi *h2c;
3278 	u32 len = sizeof(*h2c);
3279 	struct sk_buff *skb;
3280 	s8 rssi;
3281 	int ret;
3282 
3283 	if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw))
3284 		return -EINVAL;
3285 
3286 	if (!phy_ppdu)
3287 		return -EINVAL;
3288 
3289 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
3290 	if (!skb) {
3291 		rtw89_err(rtwdev, "failed to alloc skb for h2c rssi\n");
3292 		return -ENOMEM;
3293 	}
3294 
3295 	rssi = phy_ppdu->rssi_avg >> RSSI_FACTOR;
3296 	skb_put(skb, len);
3297 	h2c = (struct rtw89_h2c_ofld_rssi *)skb->data;
3298 
3299 	h2c->w0 = le32_encode_bits(phy_ppdu->mac_id, RTW89_H2C_OFLD_RSSI_W0_MACID) |
3300 		  le32_encode_bits(1, RTW89_H2C_OFLD_RSSI_W0_NUM);
3301 	h2c->w1 = le32_encode_bits(rssi, RTW89_H2C_OFLD_RSSI_W1_VAL);
3302 
3303 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3304 			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
3305 			      H2C_FUNC_OFLD_RSSI, 0, 1, len);
3306 
3307 	ret = rtw89_h2c_tx(rtwdev, skb, false);
3308 	if (ret) {
3309 		rtw89_err(rtwdev, "failed to send h2c\n");
3310 		goto fail;
3311 	}
3312 
3313 	return 0;
3314 fail:
3315 	dev_kfree_skb_any(skb);
3316 
3317 	return ret;
3318 }
3319 
3320 int rtw89_fw_h2c_tp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
3321 {
3322 	struct rtw89_traffic_stats *stats = &rtwvif->stats;
3323 	struct rtw89_h2c_ofld *h2c;
3324 	u32 len = sizeof(*h2c);
3325 	struct sk_buff *skb;
3326 	int ret;
3327 
3328 	if (rtwvif->net_type != RTW89_NET_TYPE_INFRA)
3329 		return -EINVAL;
3330 
3331 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
3332 	if (!skb) {
3333 		rtw89_err(rtwdev, "failed to alloc skb for h2c tp\n");
3334 		return -ENOMEM;
3335 	}
3336 
3337 	skb_put(skb, len);
3338 	h2c = (struct rtw89_h2c_ofld *)skb->data;
3339 
3340 	h2c->w0 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_OFLD_W0_MAC_ID) |
3341 		  le32_encode_bits(stats->tx_throughput, RTW89_H2C_OFLD_W0_TX_TP) |
3342 		  le32_encode_bits(stats->rx_throughput, RTW89_H2C_OFLD_W0_RX_TP);
3343 
3344 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3345 			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
3346 			      H2C_FUNC_OFLD_TP, 0, 1, len);
3347 
3348 	ret = rtw89_h2c_tx(rtwdev, skb, false);
3349 	if (ret) {
3350 		rtw89_err(rtwdev, "failed to send h2c\n");
3351 		goto fail;
3352 	}
3353 
3354 	return 0;
3355 fail:
3356 	dev_kfree_skb_any(skb);
3357 
3358 	return ret;
3359 }
3360 
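/* Program the firmware rate-adaptation engine for one macid from @ra; when
 * @csi is set, also carry the fixed CSI rate settings.
 */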
3361 int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi)
3362 {
3363 	const struct rtw89_chip_info *chip = rtwdev->chip;
3364 	struct rtw89_h2c_ra_v1 *h2c_v1;
3365 	struct rtw89_h2c_ra *h2c;
3366 	u32 len = sizeof(*h2c);
3367 	bool format_v1 = false;
3368 	struct sk_buff *skb;
3369 	int ret;
3370 
3371 	if (chip->chip_gen == RTW89_CHIP_BE) {
3372 		len = sizeof(*h2c_v1);
3373 		format_v1 = true;
3374 	}
3375 
3376 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
3377 	if (!skb) {
3378 		rtw89_err(rtwdev, "failed to alloc skb for h2c ra\n");
3379 		return -ENOMEM;
3380 	}
3381 	skb_put(skb, len);
3382 	h2c = (struct rtw89_h2c_ra *)skb->data;
3383 	rtw89_debug(rtwdev, RTW89_DBG_RA,
3384 		    "ra cmd msk: %llx ", ra->ra_mask);
3385 
3386 	h2c->w0 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_W0_MODE) |
3387 		  le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_W0_BW_CAP) |
3388 		  le32_encode_bits(ra->macid, RTW89_H2C_RA_W0_MACID) |
3389 		  le32_encode_bits(ra->dcm_cap, RTW89_H2C_RA_W0_DCM) |
3390 		  le32_encode_bits(ra->er_cap, RTW89_H2C_RA_W0_ER) |
3391 		  le32_encode_bits(ra->init_rate_lv, RTW89_H2C_RA_W0_INIT_RATE_LV) |
3392 		  le32_encode_bits(ra->upd_all, RTW89_H2C_RA_W0_UPD_ALL) |
3393 		  le32_encode_bits(ra->en_sgi, RTW89_H2C_RA_W0_SGI) |
3394 		  le32_encode_bits(ra->ldpc_cap, RTW89_H2C_RA_W0_LDPC) |
3395 		  le32_encode_bits(ra->stbc_cap, RTW89_H2C_RA_W0_STBC) |
3396 		  le32_encode_bits(ra->ss_num, RTW89_H2C_RA_W0_SS_NUM) |
3397 		  le32_encode_bits(ra->giltf, RTW89_H2C_RA_W0_GILTF) |
3398 		  le32_encode_bits(ra->upd_bw_nss_mask, RTW89_H2C_RA_W0_UPD_BW_NSS_MASK) |
3399 		  le32_encode_bits(ra->upd_mask, RTW89_H2C_RA_W0_UPD_MASK);
3400 	h2c->w1 = le32_encode_bits(ra->ra_mask, RTW89_H2C_RA_W1_RAMASK_LO32);
3401 	h2c->w2 = le32_encode_bits(ra->ra_mask >> 32, RTW89_H2C_RA_W2_RAMASK_HI32);
3402 	h2c->w3 = le32_encode_bits(ra->fix_giltf_en, RTW89_H2C_RA_W3_FIX_GILTF_EN) |
3403 		  le32_encode_bits(ra->fix_giltf, RTW89_H2C_RA_W3_FIX_GILTF);
3404 
3405 	if (!format_v1)
3406 		goto csi;
3407 
3408 	h2c_v1 = (struct rtw89_h2c_ra_v1 *)h2c;
3409 	h2c_v1->w4 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_V1_W4_MODE_EHT) |
3410 		     le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_V1_W4_BW_EHT);
3411 
3412 csi:
3413 	if (!csi)
3414 		goto done;
3415 
3416 	h2c->w2 |= le32_encode_bits(1, RTW89_H2C_RA_W2_BFEE_CSI_CTL);
3417 	h2c->w3 |= le32_encode_bits(ra->band_num, RTW89_H2C_RA_W3_BAND_NUM) |
3418 		   le32_encode_bits(ra->cr_tbl_sel, RTW89_H2C_RA_W3_CR_TBL_SEL) |
3419 		   le32_encode_bits(ra->fixed_csi_rate_en, RTW89_H2C_RA_W3_FIXED_CSI_RATE_EN) |
3420 		   le32_encode_bits(ra->ra_csi_rate_en, RTW89_H2C_RA_W3_RA_CSI_RATE_EN) |
3421 		   le32_encode_bits(ra->csi_mcs_ss_idx, RTW89_H2C_RA_W3_FIXED_CSI_MCS_SS_IDX) |
3422 		   le32_encode_bits(ra->csi_mode, RTW89_H2C_RA_W3_FIXED_CSI_MODE) |
3423 		   le32_encode_bits(ra->csi_gi_ltf, RTW89_H2C_RA_W3_FIXED_CSI_GI_LTF) |
3424 		   le32_encode_bits(ra->csi_bw, RTW89_H2C_RA_W3_FIXED_CSI_BW);
3425 
3426 done:
3427 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3428 			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RA,
3429 			      H2C_FUNC_OUTSRC_RA_MACIDCFG, 0, 0,
3430 			      len);
3431 
3432 	ret = rtw89_h2c_tx(rtwdev, skb, false);
3433 	if (ret) {
3434 		rtw89_err(rtwdev, "failed to send h2c\n");
3435 		goto fail;
3436 	}
3437 
3438 	return 0;
3439 fail:
3440 	dev_kfree_skb_any(skb);
3441 
3442 	return ret;
3443 }
3444 
3445 int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev)
3446 {
3447 	struct rtw89_btc *btc = &rtwdev->btc;
3448 	struct rtw89_btc_dm *dm = &btc->dm;
3449 	struct rtw89_btc_init_info *init_info = &dm->init_info;
3450 	struct rtw89_btc_module *module = &init_info->module;
3451 	struct rtw89_btc_ant_info *ant = &module->ant;
3452 	struct rtw89_h2c_cxinit *h2c;
3453 	u32 len = sizeof(*h2c);
3454 	struct sk_buff *skb;
3455 	int ret;
3456 
3457 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
3458 	if (!skb) {
3459 		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init\n");
3460 		return -ENOMEM;
3461 	}
3462 	skb_put(skb, len);
3463 	h2c = (struct rtw89_h2c_cxinit *)skb->data;
3464 
3465 	h2c->hdr.type = CXDRVINFO_INIT;
3466 	h2c->hdr.len = len - H2C_LEN_CXDRVHDR;
3467 
3468 	h2c->ant_type = ant->type;
3469 	h2c->ant_num = ant->num;
3470 	h2c->ant_iso = ant->isolation;
3471 	h2c->ant_info =
3472 		u8_encode_bits(ant->single_pos, RTW89_H2C_CXINIT_ANT_INFO_POS) |
3473 		u8_encode_bits(ant->diversity, RTW89_H2C_CXINIT_ANT_INFO_DIVERSITY) |
3474 		u8_encode_bits(ant->btg_pos, RTW89_H2C_CXINIT_ANT_INFO_BTG_POS) |
3475 		u8_encode_bits(ant->stream_cnt, RTW89_H2C_CXINIT_ANT_INFO_STREAM_CNT);
3476 
3477 	h2c->mod_rfe = module->rfe_type;
3478 	h2c->mod_cv = module->cv;
3479 	h2c->mod_info =
3480 		u8_encode_bits(module->bt_solo, RTW89_H2C_CXINIT_MOD_INFO_BT_SOLO) |
3481 		u8_encode_bits(module->bt_pos, RTW89_H2C_CXINIT_MOD_INFO_BT_POS) |
3482 		u8_encode_bits(module->switch_type, RTW89_H2C_CXINIT_MOD_INFO_SW_TYPE) |
3483 		u8_encode_bits(module->wa_type, RTW89_H2C_CXINIT_MOD_INFO_WA_TYPE);
3484 	h2c->mod_adie_kt = module->kt_ver_adie;
3485 	h2c->wl_gch = init_info->wl_guard_ch;
3486 
3487 	h2c->info =
3488 		u8_encode_bits(init_info->wl_only, RTW89_H2C_CXINIT_INFO_WL_ONLY) |
3489 		u8_encode_bits(init_info->wl_init_ok, RTW89_H2C_CXINIT_INFO_WL_INITOK) |
3490 		u8_encode_bits(init_info->dbcc_en, RTW89_H2C_CXINIT_INFO_DBCC_EN) |
3491 		u8_encode_bits(init_info->cx_other, RTW89_H2C_CXINIT_INFO_CX_OTHER) |
3492 		u8_encode_bits(init_info->bt_only, RTW89_H2C_CXINIT_INFO_BT_ONLY);
3493 
3494 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3495 			      H2C_CAT_OUTSRC, BTFC_SET,
3496 			      SET_DRV_INFO, 0, 0,
3497 			      len);
3498 
3499 	ret = rtw89_h2c_tx(rtwdev, skb, false);
3500 	if (ret) {
3501 		rtw89_err(rtwdev, "failed to send h2c\n");
3502 		goto fail;
3503 	}
3504 
3505 	return 0;
3506 fail:
3507 	dev_kfree_skb_any(skb);
3508 
3509 	return ret;
3510 }
3511 
3512 #define PORT_DATA_OFFSET 4
3513 #define H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN 12
3514 #define H2C_LEN_CXDRVINFO_ROLE_SIZE(max_role_num) \
3515 	(4 + 12 * (max_role_num) + H2C_LEN_CXDRVHDR)
3516 
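/* CXDRVINFO_ROLE reports every WiFi role to the coex firmware: a 4-byte
 * header (connect count, link mode, role bitmap) followed by one 12-byte
 * active-role entry per port, see H2C_LEN_CXDRVINFO_ROLE_SIZE above.
 */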
3517 int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev)
3518 {
3519 	struct rtw89_btc *btc = &rtwdev->btc;
3520 	const struct rtw89_btc_ver *ver = btc->ver;
3521 	struct rtw89_btc_wl_info *wl = &btc->cx.wl;
3522 	struct rtw89_btc_wl_role_info *role_info = &wl->role_info;
3523 	struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
3524 	struct rtw89_btc_wl_active_role *active = role_info->active_role;
3525 	struct sk_buff *skb;
3526 	u32 len;
3527 	u8 offset = 0;
3528 	u8 *cmd;
3529 	int ret;
3530 	int i;
3531 
3532 	len = H2C_LEN_CXDRVINFO_ROLE_SIZE(ver->max_role_num);
3533 
3534 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
3535 	if (!skb) {
3536 		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
3537 		return -ENOMEM;
3538 	}
3539 	skb_put(skb, len);
3540 	cmd = skb->data;
3541 
3542 	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE);
3543 	RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);
3544 
3545 	RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
3546 	RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);
3547 
3548 	RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none);
3549 	RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station);
3550 	RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap);
3551 	RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap);
3552 	RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc);
3553 	RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master);
3554 	RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh);
3555 	RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter);
3556 	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device);
3557 	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc);
3558 	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
3559 	RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);
3560 
3561 	for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
3562 		RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset);
3563 		RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset);
3564 		RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset);
3565 		RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset);
3566 		RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset);
3567 		RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset);
3568 		RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset);
3569 		RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset);
3570 		RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset);
3571 		RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset);
3572 		RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset);
3573 		RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset);
3574 		RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset);
3575 	}
3576 
3577 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3578 			      H2C_CAT_OUTSRC, BTFC_SET,
3579 			      SET_DRV_INFO, 0, 0,
3580 			      len);
3581 
3582 	ret = rtw89_h2c_tx(rtwdev, skb, false);
3583 	if (ret) {
3584 		rtw89_err(rtwdev, "failed to send h2c\n");
3585 		goto fail;
3586 	}
3587 
3588 	return 0;
3589 fail:
3590 	dev_kfree_skb_any(skb);
3591 
3592 	return ret;
3593 }
3594 
3595 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(max_role_num) \
3596 	(4 + 16 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR)
3597 
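/* v1 of CXDRVINFO_ROLE: the per-port entry grows to 16 bytes (adds the NOA
 * duration) and a H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN multi-role/DBCC block is
 * appended at the end of the command.
 */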
3598 int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev)
3599 {
3600 	struct rtw89_btc *btc = &rtwdev->btc;
3601 	const struct rtw89_btc_ver *ver = btc->ver;
3602 	struct rtw89_btc_wl_info *wl = &btc->cx.wl;
3603 	struct rtw89_btc_wl_role_info_v1 *role_info = &wl->role_info_v1;
3604 	struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
3605 	struct rtw89_btc_wl_active_role_v1 *active = role_info->active_role_v1;
3606 	struct sk_buff *skb;
3607 	u32 len;
3608 	u8 *cmd, offset;
3609 	int ret;
3610 	int i;
3611 
3612 	len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(ver->max_role_num);
3613 
3614 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
3615 	if (!skb) {
3616 		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
3617 		return -ENOMEM;
3618 	}
3619 	skb_put(skb, len);
3620 	cmd = skb->data;
3621 
3622 	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE);
3623 	RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);
3624 
3625 	RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
3626 	RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);
3627 
3628 	RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none);
3629 	RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station);
3630 	RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap);
3631 	RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap);
3632 	RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc);
3633 	RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master);
3634 	RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh);
3635 	RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter);
3636 	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device);
3637 	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc);
3638 	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
3639 	RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);
3640 
3641 	offset = PORT_DATA_OFFSET;
3642 	for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
3643 		RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset);
3644 		RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset);
3645 		RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset);
3646 		RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset);
3647 		RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset);
3648 		RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset);
3649 		RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset);
3650 		RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset);
3651 		RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset);
3652 		RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset);
3653 		RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset);
3654 		RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset);
3655 		RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset);
3656 		RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR(cmd, active->noa_duration, i, offset);
3657 	}
3658 
3659 	offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN;
3660 	RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset);
3661 	RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset);
3662 	RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset);
3663 	RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset);
3664 	RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset);
3665 	RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset);
3666 
3667 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3668 			      H2C_CAT_OUTSRC, BTFC_SET,
3669 			      SET_DRV_INFO, 0, 0,
3670 			      len);
3671 
3672 	ret = rtw89_h2c_tx(rtwdev, skb, false);
3673 	if (ret) {
3674 		rtw89_err(rtwdev, "failed to send h2c\n");
3675 		goto fail;
3676 	}
3677 
3678 	return 0;
3679 fail:
3680 	dev_kfree_skb_any(skb);
3681 
3682 	return ret;
3683 }
3684 
3685 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(max_role_num) \
3686 	(4 + 8 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR)
3687 
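/* v2 of CXDRVINFO_ROLE: the per-port entry shrinks to 8 bytes (tx/rx level
 * and rate fields are dropped) while the NOA duration and the trailing
 * multi-role/DBCC block are kept.
 */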
3688 int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev)
3689 {
3690 	struct rtw89_btc *btc = &rtwdev->btc;
3691 	const struct rtw89_btc_ver *ver = btc->ver;
3692 	struct rtw89_btc_wl_info *wl = &btc->cx.wl;
3693 	struct rtw89_btc_wl_role_info_v2 *role_info = &wl->role_info_v2;
3694 	struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
3695 	struct rtw89_btc_wl_active_role_v2 *active = role_info->active_role_v2;
3696 	struct sk_buff *skb;
3697 	u32 len;
3698 	u8 *cmd, offset;
3699 	int ret;
3700 	int i;
3701 
3702 	len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(ver->max_role_num);
3703 
3704 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
3705 	if (!skb) {
3706 		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
3707 		return -ENOMEM;
3708 	}
3709 	skb_put(skb, len);
3710 	cmd = skb->data;
3711 
3712 	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE);
3713 	RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);
3714 
3715 	RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
3716 	RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);
3717 
3718 	RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none);
3719 	RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station);
3720 	RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap);
3721 	RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap);
3722 	RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc);
3723 	RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master);
3724 	RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh);
3725 	RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter);
3726 	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device);
3727 	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc);
3728 	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
3729 	RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);
3730 
3731 	offset = PORT_DATA_OFFSET;
3732 	for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
3733 		RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED_V2(cmd, active->connected, i, offset);
3734 		RTW89_SET_FWCMD_CXROLE_ACT_PID_V2(cmd, active->pid, i, offset);
3735 		RTW89_SET_FWCMD_CXROLE_ACT_PHY_V2(cmd, active->phy, i, offset);
3736 		RTW89_SET_FWCMD_CXROLE_ACT_NOA_V2(cmd, active->noa, i, offset);
3737 		RTW89_SET_FWCMD_CXROLE_ACT_BAND_V2(cmd, active->band, i, offset);
3738 		RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS_V2(cmd, active->client_ps, i, offset);
3739 		RTW89_SET_FWCMD_CXROLE_ACT_BW_V2(cmd, active->bw, i, offset);
3740 		RTW89_SET_FWCMD_CXROLE_ACT_ROLE_V2(cmd, active->role, i, offset);
3741 		RTW89_SET_FWCMD_CXROLE_ACT_CH_V2(cmd, active->ch, i, offset);
3742 		RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR_V2(cmd, active->noa_duration, i, offset);
3743 	}
3744 
3745 	offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN;
3746 	RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset);
3747 	RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset);
3748 	RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset);
3749 	RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset);
3750 	RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset);
3751 	RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset);
3752 
3753 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3754 			      H2C_CAT_OUTSRC, BTFC_SET,
3755 			      SET_DRV_INFO, 0, 0,
3756 			      len);
3757 
3758 	ret = rtw89_h2c_tx(rtwdev, skb, false);
3759 	if (ret) {
3760 		rtw89_err(rtwdev, "failed to send h2c\n");
3761 		goto fail;
3762 	}
3763 
3764 	return 0;
3765 fail:
3766 	dev_kfree_skb_any(skb);
3767 
3768 	return ret;
3769 }
3770 
3771 #define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR)
3772 int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev)
3773 {
3774 	struct rtw89_btc *btc = &rtwdev->btc;
3775 	const struct rtw89_btc_ver *ver = btc->ver;
3776 	struct rtw89_btc_ctrl *ctrl = &btc->ctrl;
3777 	struct sk_buff *skb;
3778 	u8 *cmd;
3779 	int ret;
3780 
3781 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_CTRL);
3782 	if (!skb) {
3783 		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n");
3784 		return -ENOMEM;
3785 	}
3786 	skb_put(skb, H2C_LEN_CXDRVINFO_CTRL);
3787 	cmd = skb->data;
3788 
3789 	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_CTRL);
3790 	RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_CTRL - H2C_LEN_CXDRVHDR);
3791 
3792 	RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual);
3793 	RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, ctrl->igno_bt);
3794 	RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, ctrl->always_freerun);
3795 	if (ver->fcxctrl == 0)
3796 		RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step);
3797 
3798 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3799 			      H2C_CAT_OUTSRC, BTFC_SET,
3800 			      SET_DRV_INFO, 0, 0,
3801 			      H2C_LEN_CXDRVINFO_CTRL);
3802 
3803 	ret = rtw89_h2c_tx(rtwdev, skb, false);
3804 	if (ret) {
3805 		rtw89_err(rtwdev, "failed to send h2c\n");
3806 		goto fail;
3807 	}
3808 
3809 	return 0;
3810 fail:
3811 	dev_kfree_skb_any(skb);
3812 
3813 	return ret;
3814 }
3815 
3816 #define H2C_LEN_CXDRVINFO_TRX (28 + H2C_LEN_CXDRVHDR)
3817 int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev)
3818 {
3819 	struct rtw89_btc *btc = &rtwdev->btc;
3820 	struct rtw89_btc_trx_info *trx = &btc->dm.trx_info;
3821 	struct sk_buff *skb;
3822 	u8 *cmd;
3823 	int ret;
3824 
3825 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_TRX);
3826 	if (!skb) {
3827 		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_trx\n");
3828 		return -ENOMEM;
3829 	}
3830 	skb_put(skb, H2C_LEN_CXDRVINFO_TRX);
3831 	cmd = skb->data;
3832 
3833 	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_TRX);
3834 	RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_TRX - H2C_LEN_CXDRVHDR);
3835 
3836 	RTW89_SET_FWCMD_CXTRX_TXLV(cmd, trx->tx_lvl);
3837 	RTW89_SET_FWCMD_CXTRX_RXLV(cmd, trx->rx_lvl);
3838 	RTW89_SET_FWCMD_CXTRX_WLRSSI(cmd, trx->wl_rssi);
3839 	RTW89_SET_FWCMD_CXTRX_BTRSSI(cmd, trx->bt_rssi);
3840 	RTW89_SET_FWCMD_CXTRX_TXPWR(cmd, trx->tx_power);
3841 	RTW89_SET_FWCMD_CXTRX_RXGAIN(cmd, trx->rx_gain);
3842 	RTW89_SET_FWCMD_CXTRX_BTTXPWR(cmd, trx->bt_tx_power);
3843 	RTW89_SET_FWCMD_CXTRX_BTRXGAIN(cmd, trx->bt_rx_gain);
3844 	RTW89_SET_FWCMD_CXTRX_CN(cmd, trx->cn);
3845 	RTW89_SET_FWCMD_CXTRX_NHM(cmd, trx->nhm);
3846 	RTW89_SET_FWCMD_CXTRX_BTPROFILE(cmd, trx->bt_profile);
3847 	RTW89_SET_FWCMD_CXTRX_RSVD2(cmd, trx->rsvd2);
3848 	RTW89_SET_FWCMD_CXTRX_TXRATE(cmd, trx->tx_rate);
3849 	RTW89_SET_FWCMD_CXTRX_RXRATE(cmd, trx->rx_rate);
3850 	RTW89_SET_FWCMD_CXTRX_TXTP(cmd, trx->tx_tp);
3851 	RTW89_SET_FWCMD_CXTRX_RXTP(cmd, trx->rx_tp);
3852 	RTW89_SET_FWCMD_CXTRX_RXERRRA(cmd, trx->rx_err_ratio);
3853 
3854 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3855 			      H2C_CAT_OUTSRC, BTFC_SET,
3856 			      SET_DRV_INFO, 0, 0,
3857 			      H2C_LEN_CXDRVINFO_TRX);
3858 
3859 	ret = rtw89_h2c_tx(rtwdev, skb, false);
3860 	if (ret) {
3861 		rtw89_err(rtwdev, "failed to send h2c\n");
3862 		goto fail;
3863 	}
3864 
3865 	return 0;
3866 fail:
3867 	dev_kfree_skb_any(skb);
3868 
3869 	return ret;
3870 }
3871 
3872 #define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR)
3873 int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev)
3874 {
3875 	struct rtw89_btc *btc = &rtwdev->btc;
3876 	struct rtw89_btc_wl_info *wl = &btc->cx.wl;
3877 	struct rtw89_btc_wl_rfk_info *rfk_info = &wl->rfk_info;
3878 	struct sk_buff *skb;
3879 	u8 *cmd;
3880 	int ret;
3881 
3882 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_RFK);
3883 	if (!skb) {
3884 		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_rfk\n");
3885 		return -ENOMEM;
3886 	}
3887 	skb_put(skb, H2C_LEN_CXDRVINFO_RFK);
3888 	cmd = skb->data;
3889 
3890 	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_RFK);
3891 	RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_RFK - H2C_LEN_CXDRVHDR);
3892 
3893 	RTW89_SET_FWCMD_CXRFK_STATE(cmd, rfk_info->state);
3894 	RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, rfk_info->path_map);
3895 	RTW89_SET_FWCMD_CXRFK_PHY_MAP(cmd, rfk_info->phy_map);
3896 	RTW89_SET_FWCMD_CXRFK_BAND(cmd, rfk_info->band);
3897 	RTW89_SET_FWCMD_CXRFK_TYPE(cmd, rfk_info->type);
3898 
3899 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3900 			      H2C_CAT_OUTSRC, BTFC_SET,
3901 			      SET_DRV_INFO, 0, 0,
3902 			      H2C_LEN_CXDRVINFO_RFK);
3903 
3904 	ret = rtw89_h2c_tx(rtwdev, skb, false);
3905 	if (ret) {
3906 		rtw89_err(rtwdev, "failed to send h2c\n");
3907 		goto fail;
3908 	}
3909 
3910 	return 0;
3911 fail:
3912 	dev_kfree_skb_any(skb);
3913 
3914 	return ret;
3915 }
3916 
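/* Packet offload: template packets (e.g. probe requests, NULL data) are
 * downloaded to firmware and later referenced by the id allocated from the
 * pkt_offload bitmap. Both add and del wait for the firmware acknowledgement
 * on fw_ofld_wait.
 */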
3917 #define H2C_LEN_PKT_OFLD 4
3918 int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id)
3919 {
3920 	struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
3921 	struct sk_buff *skb;
3922 	unsigned int cond;
3923 	u8 *cmd;
3924 	int ret;
3925 
3926 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD);
3927 	if (!skb) {
3928 		rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n");
3929 		return -ENOMEM;
3930 	}
3931 	skb_put(skb, H2C_LEN_PKT_OFLD);
3932 	cmd = skb->data;
3933 
3934 	RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, id);
3935 	RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_DEL);
3936 
3937 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3938 			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
3939 			      H2C_FUNC_PACKET_OFLD, 1, 1,
3940 			      H2C_LEN_PKT_OFLD);
3941 
3942 	cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(id, RTW89_PKT_OFLD_OP_DEL);
3943 
3944 	ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
3945 	if (ret < 0) {
3946 		rtw89_debug(rtwdev, RTW89_DBG_FW,
3947 			    "failed to del pkt ofld: id %d, ret %d\n",
3948 			    id, ret);
3949 		return ret;
3950 	}
3951 
3952 	rtw89_core_release_bit_map(rtwdev->pkt_offload, id);
3953 	return 0;
3954 }
3955 
3956 int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
3957 				 struct sk_buff *skb_ofld)
3958 {
3959 	struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
3960 	struct sk_buff *skb;
3961 	unsigned int cond;
3962 	u8 *cmd;
3963 	u8 alloc_id;
3964 	int ret;
3965 
3966 	alloc_id = rtw89_core_acquire_bit_map(rtwdev->pkt_offload,
3967 					      RTW89_MAX_PKT_OFLD_NUM);
3968 	if (alloc_id == RTW89_MAX_PKT_OFLD_NUM)
3969 		return -ENOSPC;
3970 
3971 	*id = alloc_id;
3972 
3973 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD + skb_ofld->len);
3974 	if (!skb) {
3975 		rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n");
3976 		rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id);
3977 		return -ENOMEM;
3978 	}
3979 	skb_put(skb, H2C_LEN_PKT_OFLD);
3980 	cmd = skb->data;
3981 
3982 	RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, alloc_id);
3983 	RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_ADD);
3984 	RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(cmd, skb_ofld->len);
3985 	skb_put_data(skb, skb_ofld->data, skb_ofld->len);
3986 
3987 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3988 			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
3989 			      H2C_FUNC_PACKET_OFLD, 1, 1,
3990 			      H2C_LEN_PKT_OFLD + skb_ofld->len);
3991 
3992 	cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(alloc_id, RTW89_PKT_OFLD_OP_ADD);
3993 
3994 	ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
3995 	if (ret < 0) {
3996 		rtw89_debug(rtwdev, RTW89_DBG_FW,
3997 			    "failed to add pkt ofld: id %d, ret %d\n",
3998 			    alloc_id, ret);
3999 		rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id);
4000 		return ret;
4001 	}
4002 
4003 	return 0;
4004 }
4005 
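/* Serialize the prepared scan channel list into fixed-size rtw89_h2c_chinfo
 * elements and download it with ADD_SCANOFLD_CH, waiting for the firmware
 * acknowledgement.
 */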
4006 int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int ch_num,
4007 				   struct list_head *chan_list)
4008 {
4009 	struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
4010 	struct rtw89_h2c_chinfo_elem *elem;
4011 	struct rtw89_mac_chinfo *ch_info;
4012 	struct rtw89_h2c_chinfo *h2c;
4013 	struct sk_buff *skb;
4014 	unsigned int cond;
4015 	int skb_len;
4016 	int ret;
4017 
4018 	static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE);
4019 
4020 	skb_len = struct_size(h2c, elem, ch_num);
4021 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len);
4022 	if (!skb) {
4023 		rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n");
4024 		return -ENOMEM;
4025 	}
4026 	skb_put(skb, sizeof(*h2c));
4027 	h2c = (struct rtw89_h2c_chinfo *)skb->data;
4028 
4029 	h2c->ch_num = ch_num;
4030 	h2c->elem_size = sizeof(*elem) / 4; /* in units of 4 bytes */
4031 
4032 	list_for_each_entry(ch_info, chan_list, list) {
4033 		elem = (struct rtw89_h2c_chinfo_elem *)skb_put(skb, sizeof(*elem));
4034 
4035 		elem->w0 = le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_W0_PERIOD) |
4036 			   le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_W0_DWELL) |
4037 			   le32_encode_bits(ch_info->central_ch, RTW89_H2C_CHINFO_W0_CENTER_CH) |
4038 			   le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_W0_PRI_CH);
4039 
4040 		elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_W1_BW) |
4041 			   le32_encode_bits(ch_info->notify_action, RTW89_H2C_CHINFO_W1_ACTION) |
4042 			   le32_encode_bits(ch_info->num_pkt, RTW89_H2C_CHINFO_W1_NUM_PKT) |
4043 			   le32_encode_bits(ch_info->tx_pkt, RTW89_H2C_CHINFO_W1_TX) |
4044 			   le32_encode_bits(ch_info->pause_data, RTW89_H2C_CHINFO_W1_PAUSE_DATA) |
4045 			   le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_W1_BAND) |
4046 			   le32_encode_bits(ch_info->probe_id, RTW89_H2C_CHINFO_W1_PKT_ID) |
4047 			   le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_W1_DFS) |
4048 			   le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_W1_TX_NULL) |
4049 			   le32_encode_bits(ch_info->rand_seq_num, RTW89_H2C_CHINFO_W1_RANDOM);
4050 
4051 		elem->w2 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_W2_PKT0) |
4052 			   le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_W2_PKT1) |
4053 			   le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_W2_PKT2) |
4054 			   le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_W2_PKT3);
4055 
4056 		elem->w3 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_W3_PKT4) |
4057 			   le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_W3_PKT5) |
4058 			   le32_encode_bits(ch_info->pkt_id[6], RTW89_H2C_CHINFO_W3_PKT6) |
4059 			   le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_W3_PKT7);
4060 	}
4061 
4062 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4063 			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
4064 			      H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len);
4065 
4066 	cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH;
4067 
4068 	ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
4069 	if (ret) {
4070 		rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n");
4071 		return ret;
4072 	}
4073 
4074 	return 0;
4075 }
4076 
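/* Start or stop the firmware scan offload. When a vif is connected,
 * target_ch_mode passes the current operating channel along so firmware can
 * go back to it while the scan runs.
 */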
4077 int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev,
4078 			      struct rtw89_scan_option *option,
4079 			      struct rtw89_vif *rtwvif)
4080 {
4081 	struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
4082 	struct rtw89_chan *op = &rtwdev->scan_info.op_chan;
4083 	struct rtw89_h2c_scanofld *h2c;
4084 	u32 len = sizeof(*h2c);
4085 	struct sk_buff *skb;
4086 	unsigned int cond;
4087 	int ret;
4088 
4089 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4090 	if (!skb) {
4091 		rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n");
4092 		return -ENOMEM;
4093 	}
4094 	skb_put(skb, len);
4095 	h2c = (struct rtw89_h2c_scanofld *)skb->data;
4096 
4097 	h2c->w0 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_SCANOFLD_W0_MACID) |
4098 		  le32_encode_bits(rtwvif->port, RTW89_H2C_SCANOFLD_W0_PORT_ID) |
4099 		  le32_encode_bits(RTW89_PHY_0, RTW89_H2C_SCANOFLD_W0_BAND) |
4100 		  le32_encode_bits(option->enable, RTW89_H2C_SCANOFLD_W0_OPERATION);
4101 
4102 	h2c->w1 = le32_encode_bits(true, RTW89_H2C_SCANOFLD_W1_NOTIFY_END) |
4103 		  le32_encode_bits(option->target_ch_mode,
4104 				   RTW89_H2C_SCANOFLD_W1_TARGET_CH_MODE) |
4105 		  le32_encode_bits(RTW89_SCAN_IMMEDIATE,
4106 				   RTW89_H2C_SCANOFLD_W1_START_MODE) |
4107 		  le32_encode_bits(RTW89_SCAN_ONCE, RTW89_H2C_SCANOFLD_W1_SCAN_TYPE);
4108 
4109 	if (option->target_ch_mode) {
4110 		h2c->w1 |= le32_encode_bits(op->band_width,
4111 					    RTW89_H2C_SCANOFLD_W1_TARGET_CH_BW) |
4112 			   le32_encode_bits(op->primary_channel,
4113 					    RTW89_H2C_SCANOFLD_W1_TARGET_PRI_CH) |
4114 			   le32_encode_bits(op->channel,
4115 					    RTW89_H2C_SCANOFLD_W1_TARGET_CENTRAL_CH);
4116 		h2c->w0 |= le32_encode_bits(op->band_type,
4117 					    RTW89_H2C_SCANOFLD_W0_TARGET_CH_BAND);
4118 	}
4119 
4120 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4121 			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
4122 			      H2C_FUNC_SCANOFLD, 1, 1,
4123 			      len);
4124 
4125 	if (option->enable)
4126 		cond = RTW89_SCANOFLD_WAIT_COND_START;
4127 	else
4128 		cond = RTW89_SCANOFLD_WAIT_COND_STOP;
4129 
4130 	ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
4131 	if (ret) {
4132 		rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan ofld\n");
4133 		return ret;
4134 	}
4135 
4136 	return 0;
4137 }
4138 
4139 int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
4140 			struct rtw89_fw_h2c_rf_reg_info *info,
4141 			u16 len, u8 page)
4142 {
4143 	struct sk_buff *skb;
4144 	u8 class = info->rf_path == RF_PATH_A ?
4145 		   H2C_CL_OUTSRC_RF_REG_A : H2C_CL_OUTSRC_RF_REG_B;
4146 	int ret;
4147 
4148 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4149 	if (!skb) {
4150 		rtw89_err(rtwdev, "failed to alloc skb for h2c rf reg\n");
4151 		return -ENOMEM;
4152 	}
4153 	skb_put_data(skb, info->rtw89_phy_config_rf_h2c[page], len);
4154 
4155 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4156 			      H2C_CAT_OUTSRC, class, page, 0, 0,
4157 			      len);
4158 
4159 	ret = rtw89_h2c_tx(rtwdev, skb, false);
4160 	if (ret) {
4161 		rtw89_err(rtwdev, "failed to send h2c\n");
4162 		goto fail;
4163 	}
4164 
4165 	return 0;
4166 fail:
4167 	dev_kfree_skb_any(skb);
4168 
4169 	return ret;
4170 }
4171 
4172 int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev)
4173 {
4174 	struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
4175 	struct rtw89_fw_h2c_rf_get_mccch *mccch;
4176 	struct sk_buff *skb;
4177 	int ret;
4178 	u8 idx;
4179 
4180 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, sizeof(*mccch));
4181 	if (!skb) {
4182 		rtw89_err(rtwdev, "failed to alloc skb for h2c rf ntfy mcc\n");
4183 		return -ENOMEM;
4184 	}
4185 	skb_put(skb, sizeof(*mccch));
4186 	mccch = (struct rtw89_fw_h2c_rf_get_mccch *)skb->data;
4187 
4188 	idx = rfk_mcc->table_idx;
4189 	mccch->ch_0 = cpu_to_le32(rfk_mcc->ch[0]);
4190 	mccch->ch_1 = cpu_to_le32(rfk_mcc->ch[1]);
4191 	mccch->band_0 = cpu_to_le32(rfk_mcc->band[0]);
4192 	mccch->band_1 = cpu_to_le32(rfk_mcc->band[1]);
4193 	mccch->current_channel = cpu_to_le32(rfk_mcc->ch[idx]);
4194 	mccch->current_band_type = cpu_to_le32(rfk_mcc->band[idx]);
4195 
4196 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4197 			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY,
4198 			      H2C_FUNC_OUTSRC_RF_GET_MCCCH, 0, 0,
4199 			      sizeof(*mccch));
4200 
4201 	ret = rtw89_h2c_tx(rtwdev, skb, false);
4202 	if (ret) {
4203 		rtw89_err(rtwdev, "failed to send h2c\n");
4204 		goto fail;
4205 	}
4206 
4207 	return 0;
4208 fail:
4209 	dev_kfree_skb_any(skb);
4210 
4211 	return ret;
4212 }
4213 EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc);
4214 
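/* Send a caller-built H2C payload. The _with_hdr variant prepends a regular
 * outsrc H2C header (class/func/rack/dack supplied by the caller), while
 * rtw89_fw_h2c_raw() below transmits the buffer as-is. A hypothetical use
 * (buffer contents made up purely for illustration):
 *
 *	u8 buf[8] = {};
 *
 *	rtw89_fw_h2c_raw_with_hdr(rtwdev, H2C_CL_OUTSRC_RF_REG_A, 0,
 *				  buf, sizeof(buf), false, false);
 */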
4215 int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
4216 			      u8 h2c_class, u8 h2c_func, u8 *buf, u16 len,
4217 			      bool rack, bool dack)
4218 {
4219 	struct sk_buff *skb;
4220 	int ret;
4221 
4222 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4223 	if (!skb) {
4224 		rtw89_err(rtwdev, "failed to alloc skb for raw with hdr\n");
4225 		return -ENOMEM;
4226 	}
4227 	skb_put_data(skb, buf, len);
4228 
4229 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4230 			      H2C_CAT_OUTSRC, h2c_class, h2c_func, rack, dack,
4231 			      len);
4232 
4233 	ret = rtw89_h2c_tx(rtwdev, skb, false);
4234 	if (ret) {
4235 		rtw89_err(rtwdev, "failed to send h2c\n");
4236 		goto fail;
4237 	}
4238 
4239 	return 0;
4240 fail:
4241 	dev_kfree_skb_any(skb);
4242 
4243 	return ret;
4244 }
4245 
4246 int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len)
4247 {
4248 	struct sk_buff *skb;
4249 	int ret;
4250 
4251 	skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, len);
4252 	if (!skb) {
4253 		rtw89_err(rtwdev, "failed to alloc skb for h2c raw\n");
4254 		return -ENOMEM;
4255 	}
4256 	skb_put_data(skb, buf, len);
4257 
4258 	ret = rtw89_h2c_tx(rtwdev, skb, false);
4259 	if (ret) {
4260 		rtw89_err(rtwdev, "failed to send h2c\n");
4261 		goto fail;
4262 	}
4263 
4264 	return 0;
4265 fail:
4266 	dev_kfree_skb_any(skb);
4267 
4268 	return ret;
4269 }
4270 
4271 void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev)
4272 {
4273 	struct rtw89_early_h2c *early_h2c;
4274 
4275 	lockdep_assert_held(&rtwdev->mutex);
4276 
4277 	list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) {
4278 		rtw89_fw_h2c_raw(rtwdev, early_h2c->h2c, early_h2c->h2c_len);
4279 	}
4280 }
4281 
4282 void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev)
4283 {
4284 	struct rtw89_early_h2c *early_h2c, *tmp;
4285 
4286 	mutex_lock(&rtwdev->mutex);
4287 	list_for_each_entry_safe(early_h2c, tmp, &rtwdev->early_h2c_list, list) {
4288 		list_del(&early_h2c->list);
4289 		kfree(early_h2c->h2c);
4290 		kfree(early_h2c);
4291 	}
4292 	mutex_unlock(&rtwdev->mutex);
4293 }
4294 
4295 static void rtw89_fw_c2h_parse_attr(struct sk_buff *c2h)
4296 {
4297 	const struct rtw89_c2h_hdr *hdr = (const struct rtw89_c2h_hdr *)c2h->data;
4298 	struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h);
4299 
4300 	attr->category = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CATEGORY);
4301 	attr->class = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CLASS);
4302 	attr->func = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_FUNC);
4303 	attr->len = le32_get_bits(hdr->w1, RTW89_C2H_HDR_W1_LEN);
4304 }
4305 
4306 static bool rtw89_fw_c2h_chk_atomic(struct rtw89_dev *rtwdev,
4307 				    struct sk_buff *c2h)
4308 {
4309 	struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h);
4310 	u8 category = attr->category;
4311 	u8 class = attr->class;
4312 	u8 func = attr->func;
4313 
4314 	switch (category) {
4315 	default:
4316 		return false;
4317 	case RTW89_C2H_CAT_MAC:
4318 		return rtw89_mac_c2h_chk_atomic(rtwdev, c2h, class, func);
4319 	case RTW89_C2H_CAT_OUTSRC:
4320 		return rtw89_phy_c2h_chk_atomic(rtwdev, class, func);
4321 	}
4322 }
4323 
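/* C2H entry point from the interrupt path: events whose handlers are safe in
 * atomic context are handled right away; everything else is queued to
 * c2h_work and handled later under rtwdev->mutex.
 */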
4324 void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h)
4325 {
4326 	rtw89_fw_c2h_parse_attr(c2h);
4327 	if (!rtw89_fw_c2h_chk_atomic(rtwdev, c2h))
4328 		goto enqueue;
4329 
4330 	rtw89_fw_c2h_cmd_handle(rtwdev, c2h);
4331 	dev_kfree_skb_any(c2h);
4332 	return;
4333 
4334 enqueue:
4335 	skb_queue_tail(&rtwdev->c2h_queue, c2h);
4336 	ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work);
4337 }
4338 
4339 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
4340 				    struct sk_buff *skb)
4341 {
4342 	struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(skb);
4343 	u8 category = attr->category;
4344 	u8 class = attr->class;
4345 	u8 func = attr->func;
4346 	u16 len = attr->len;
4347 	bool dump = true;
4348 
4349 	if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags))
4350 		return;
4351 
4352 	switch (category) {
4353 	case RTW89_C2H_CAT_TEST:
4354 		break;
4355 	case RTW89_C2H_CAT_MAC:
4356 		rtw89_mac_c2h_handle(rtwdev, skb, len, class, func);
4357 		if (class == RTW89_MAC_C2H_CLASS_INFO &&
4358 		    func == RTW89_MAC_C2H_FUNC_C2H_LOG)
4359 			dump = false;
4360 		break;
4361 	case RTW89_C2H_CAT_OUTSRC:
4362 		if (class >= RTW89_PHY_C2H_CLASS_BTC_MIN &&
4363 		    class <= RTW89_PHY_C2H_CLASS_BTC_MAX)
4364 			rtw89_btc_c2h_handle(rtwdev, skb, len, class, func);
4365 		else
4366 			rtw89_phy_c2h_handle(rtwdev, skb, len, class, func);
4367 		break;
4368 	}
4369 
4370 	if (dump)
4371 		rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "C2H: ", skb->data, skb->len);
4372 }
4373 
4374 void rtw89_fw_c2h_work(struct work_struct *work)
4375 {
4376 	struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
4377 						c2h_work);
4378 	struct sk_buff *skb, *tmp;
4379 
4380 	skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) {
4381 		skb_unlink(skb, &rtwdev->c2h_queue);
4382 		mutex_lock(&rtwdev->mutex);
4383 		rtw89_fw_c2h_cmd_handle(rtwdev, skb);
4384 		mutex_unlock(&rtwdev->mutex);
4385 		dev_kfree_skb_any(skb);
4386 	}
4387 }
4388 
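/* Register-based H2C/C2H path: short messages are exchanged through a small
 * bank of MAC registers rather than the TX/RX rings. The header length field
 * is counted in 4-byte words and B_AX_H2CREG_TRIGGER kicks the firmware once
 * the registers are written.
 */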
4389 static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev,
4390 				  struct rtw89_mac_h2c_info *info)
4391 {
4392 	const struct rtw89_chip_info *chip = rtwdev->chip;
4393 	struct rtw89_fw_info *fw_info = &rtwdev->fw;
4394 	const u32 *h2c_reg = chip->h2c_regs;
4395 	u8 i, val, len;
4396 	int ret;
4397 
4398 	ret = read_poll_timeout(rtw89_read8, val, val == 0, 1000, 5000, false,
4399 				rtwdev, chip->h2c_ctrl_reg);
4400 	if (ret) {
4401 		rtw89_warn(rtwdev, "FW does not process h2c registers\n");
4402 		return ret;
4403 	}
4404 
4405 	len = DIV_ROUND_UP(info->content_len + RTW89_H2CREG_HDR_LEN,
4406 			   sizeof(info->u.h2creg[0]));
4407 
4408 	u32p_replace_bits(&info->u.hdr.w0, info->id, RTW89_H2CREG_HDR_FUNC_MASK);
4409 	u32p_replace_bits(&info->u.hdr.w0, len, RTW89_H2CREG_HDR_LEN_MASK);
4410 
4411 	for (i = 0; i < RTW89_H2CREG_MAX; i++)
4412 		rtw89_write32(rtwdev, h2c_reg[i], info->u.h2creg[i]);
4413 
4414 	fw_info->h2c_counter++;
4415 	rtw89_write8_mask(rtwdev, chip->h2c_counter_reg.addr,
4416 			  chip->h2c_counter_reg.mask, fw_info->h2c_counter);
4417 	rtw89_write8(rtwdev, chip->h2c_ctrl_reg, B_AX_H2CREG_TRIGGER);
4418 
4419 	return 0;
4420 }
4421 
4422 static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev,
4423 				 struct rtw89_mac_c2h_info *info)
4424 {
4425 	const struct rtw89_chip_info *chip = rtwdev->chip;
4426 	struct rtw89_fw_info *fw_info = &rtwdev->fw;
4427 	const u32 *c2h_reg = chip->c2h_regs;
4428 	u32 ret;
4429 	u8 i, val;
4430 
4431 	info->id = RTW89_FWCMD_C2HREG_FUNC_NULL;
4432 
4433 	ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1,
4434 				       RTW89_C2H_TIMEOUT, false, rtwdev,
4435 				       chip->c2h_ctrl_reg);
4436 	if (ret) {
4437 		rtw89_warn(rtwdev, "c2h reg timeout\n");
4438 		return ret;
4439 	}
4440 
4441 	for (i = 0; i < RTW89_C2HREG_MAX; i++)
4442 		info->u.c2hreg[i] = rtw89_read32(rtwdev, c2h_reg[i]);
4443 
4444 	rtw89_write8(rtwdev, chip->c2h_ctrl_reg, 0);
4445 
4446 	info->id = u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_FUNC_MASK);
4447 	info->content_len =
4448 		(u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_LEN_MASK) << 2) -
4449 		RTW89_C2HREG_HDR_LEN;
4450 
4451 	fw_info->c2h_counter++;
4452 	rtw89_write8_mask(rtwdev, chip->c2h_counter_reg.addr,
4453 			  chip->c2h_counter_reg.mask, fw_info->c2h_counter);
4454 
4455 	return 0;
4456 }
4457 
4458 int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev,
4459 		     struct rtw89_mac_h2c_info *h2c_info,
4460 		     struct rtw89_mac_c2h_info *c2h_info)
4461 {
4462 	u32 ret;
4463 
4464 	if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE)
4465 		lockdep_assert_held(&rtwdev->mutex);
4466 
4467 	if (!h2c_info && !c2h_info)
4468 		return -EINVAL;
4469 
4470 	if (!h2c_info)
4471 		goto recv_c2h;
4472 
4473 	ret = rtw89_fw_write_h2c_reg(rtwdev, h2c_info);
4474 	if (ret)
4475 		return ret;
4476 
4477 recv_c2h:
4478 	if (!c2h_info)
4479 		return 0;
4480 
4481 	ret = rtw89_fw_read_c2h_reg(rtwdev, c2h_info);
4482 	if (ret)
4483 		return ret;
4484 
4485 	return 0;
4486 }
4487 
4488 void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev)
4489 {
4490 	if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) {
4491 		rtw89_err(rtwdev, "[ERR]pwr is off\n");
4492 		return;
4493 	}
4494 
4495 	rtw89_info(rtwdev, "FW status = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM0));
4496 	rtw89_info(rtwdev, "FW BADADDR = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM1));
4497 	rtw89_info(rtwdev, "FW EPC/RA = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM2));
4498 	rtw89_info(rtwdev, "FW MISC = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM3));
4499 	rtw89_info(rtwdev, "R_AX_HALT_C2H = 0x%x\n",
4500 		   rtw89_read32(rtwdev, R_AX_HALT_C2H));
4501 	rtw89_info(rtwdev, "R_AX_SER_DBG_INFO = 0x%x\n",
4502 		   rtw89_read32(rtwdev, R_AX_SER_DBG_INFO));
4503 
4504 	rtw89_fw_prog_cnt_dump(rtwdev);
4505 }
4506 
4507 static void rtw89_release_pkt_list(struct rtw89_dev *rtwdev)
4508 {
4509 	struct list_head *pkt_list = rtwdev->scan_info.pkt_list;
4510 	struct rtw89_pktofld_info *info, *tmp;
4511 	u8 idx;
4512 
4513 	for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) {
4514 		if (!(rtwdev->chip->support_bands & BIT(idx)))
4515 			continue;
4516 
4517 		list_for_each_entry_safe(info, tmp, &pkt_list[idx], list) {
4518 			if (test_bit(info->id, rtwdev->pkt_offload))
4519 				rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
4520 			list_del(&info->list);
4521 			kfree(info);
4522 		}
4523 	}
4524 }
4525 
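/* A 6 GHz probe request with a zero-length SSID is treated as a wildcard and
 * not offloaded; otherwise the SSID is recorded in the pktofld info and the
 * frame is offloaded as usual.
 */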
4526 static bool rtw89_is_6ghz_wildcard_probe_req(struct rtw89_dev *rtwdev,
4527 					     struct rtw89_vif *rtwvif,
4528 					     struct rtw89_pktofld_info *info,
4529 					     enum nl80211_band band, u8 ssid_idx)
4530 {
4531 	struct cfg80211_scan_request *req = rtwvif->scan_req;
4532 
4533 	if (band != NL80211_BAND_6GHZ)
4534 		return false;
4535 
4536 	if (req->ssids[ssid_idx].ssid_len) {
4537 		memcpy(info->ssid, req->ssids[ssid_idx].ssid,
4538 		       req->ssids[ssid_idx].ssid_len);
4539 		info->ssid_len = req->ssids[ssid_idx].ssid_len;
4540 		return false;
4541 	} else {
4542 		return true;
4543 	}
4544 }
4545 
4546 static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev,
4547 				     struct rtw89_vif *rtwvif,
4548 				     struct sk_buff *skb, u8 ssid_idx)
4549 {
4550 	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
4551 	struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
4552 	struct rtw89_pktofld_info *info;
4553 	struct sk_buff *new;
4554 	int ret = 0;
4555 	u8 band;
4556 
4557 	for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
4558 		if (!(rtwdev->chip->support_bands & BIT(band)))
4559 			continue;
4560 
4561 		new = skb_copy(skb, GFP_KERNEL);
4562 		if (!new) {
4563 			ret = -ENOMEM;
4564 			goto out;
4565 		}
4566 		skb_put_data(new, ies->ies[band], ies->len[band]);
4567 		skb_put_data(new, ies->common_ies, ies->common_ie_len);
4568 
4569 		info = kzalloc(sizeof(*info), GFP_KERNEL);
4570 		if (!info) {
4571 			ret = -ENOMEM;
4572 			kfree_skb(new);
4573 			goto out;
4574 		}
4575 
4576 		if (rtw89_is_6ghz_wildcard_probe_req(rtwdev, rtwvif, info, band,
4577 						     ssid_idx)) {
4578 			kfree_skb(new);
4579 			kfree(info);
4580 			goto out;
4581 		}
4582 
4583 		ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new);
4584 		if (ret) {
4585 			kfree_skb(new);
4586 			kfree(info);
4587 			goto out;
4588 		}
4589 
4590 		list_add_tail(&info->list, &scan_info->pkt_list[band]);
4591 		kfree_skb(new);
4592 	}
4593 out:
4594 	return ret;
4595 }
4596 
4597 static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev,
4598 					  struct rtw89_vif *rtwvif)
4599 {
4600 	struct cfg80211_scan_request *req = rtwvif->scan_req;
4601 	struct sk_buff *skb;
4602 	u8 num = req->n_ssids, i;
4603 	int ret;
4604 
4605 	for (i = 0; i < num; i++) {
4606 		skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr,
4607 					     req->ssids[i].ssid,
4608 					     req->ssids[i].ssid_len,
4609 					     req->ie_len);
4610 		if (!skb)
4611 			return -ENOMEM;
4612 
4613 		ret = rtw89_append_probe_req_ie(rtwdev, rtwvif, skb, i);
4614 		kfree_skb(skb);
4615 
4616 		if (ret)
4617 			return ret;
4618 	}
4619 
4620 	return 0;
4621 }
4622 
4623 static int rtw89_update_6ghz_rnr_chan(struct rtw89_dev *rtwdev,
4624 				      struct cfg80211_scan_request *req,
4625 				      struct rtw89_mac_chinfo *ch_info)
4626 {
4627 	struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif;
4628 	struct list_head *pkt_list = rtwdev->scan_info.pkt_list;
4629 	struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif);
4630 	struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
4631 	struct cfg80211_scan_6ghz_params *params;
4632 	struct rtw89_pktofld_info *info, *tmp;
4633 	struct ieee80211_hdr *hdr;
4634 	struct sk_buff *skb;
4635 	bool found;
4636 	int ret = 0;
4637 	u8 i;
4638 
4639 	if (!req->n_6ghz_params)
4640 		return 0;
4641 
4642 	for (i = 0; i < req->n_6ghz_params; i++) {
4643 		params = &req->scan_6ghz_params[i];
4644 
4645 		if (req->channels[params->channel_idx]->hw_value !=
4646 		    ch_info->pri_ch)
4647 			continue;
4648 
4649 		found = false;
4650 		list_for_each_entry(tmp, &pkt_list[NL80211_BAND_6GHZ], list) {
4651 			if (ether_addr_equal(tmp->bssid, params->bssid)) {
4652 				found = true;
4653 				break;
4654 			}
4655 		}
4656 		if (found)
4657 			continue;
4658 
4659 		skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr,
4660 					     NULL, 0, req->ie_len);
		/* ieee80211_probereq_get() can fail; don't dereference a NULL skb */
		if (!skb) {
			ret = -ENOMEM;
			goto out;
		}
4661 		skb_put_data(skb, ies->ies[NL80211_BAND_6GHZ], ies->len[NL80211_BAND_6GHZ]);
4662 		skb_put_data(skb, ies->common_ies, ies->common_ie_len);
4663 		hdr = (struct ieee80211_hdr *)skb->data;
4664 		ether_addr_copy(hdr->addr3, params->bssid);
4665 
4666 		info = kzalloc(sizeof(*info), GFP_KERNEL);
4667 		if (!info) {
4668 			ret = -ENOMEM;
4669 			kfree_skb(skb);
4670 			goto out;
4671 		}
4672 
4673 		ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb);
4674 		if (ret) {
4675 			kfree_skb(skb);
4676 			kfree(info);
4677 			goto out;
4678 		}
4679 
4680 		ether_addr_copy(info->bssid, params->bssid);
4681 		info->channel_6ghz = req->channels[params->channel_idx]->hw_value;
4682 		list_add_tail(&info->list, &rtwdev->scan_info.pkt_list[NL80211_BAND_6GHZ]);
4683 
4684 		ch_info->tx_pkt = true;
4685 		ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G;
4686 
4687 		kfree_skb(skb);
4688 	}
4689 
4690 out:
4691 	return ret;
4692 }
4693 
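/* Fill per-channel scan parameters: dwell/period, probe-request packet ids
 * and DFS handling. RTW89_CHAN_OPERATE entries carry the current operating
 * channel with tx_null set and no probe packets.
 */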
4694 static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type,
4695 				   int ssid_num,
4696 				   struct rtw89_mac_chinfo *ch_info)
4697 {
4698 	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
4699 	struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif;
4700 	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
4701 	struct cfg80211_scan_request *req = rtwvif->scan_req;
4702 	struct rtw89_chan *op = &rtwdev->scan_info.op_chan;
4703 	struct rtw89_pktofld_info *info;
4704 	u8 band, probe_count = 0;
4705 	int ret;
4706 
4707 	ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
4708 	ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
4709 	ch_info->bw = RTW89_SCAN_WIDTH;
4710 	ch_info->tx_pkt = true;
4711 	ch_info->cfg_tx_pwr = false;
4712 	ch_info->tx_pwr_idx = 0;
4713 	ch_info->tx_null = false;
4714 	ch_info->pause_data = false;
4715 	ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
4716 
4717 	if (ch_info->ch_band == RTW89_BAND_6G) {
4718 		if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) ||
4719 		    !ch_info->is_psc) {
4720 			ch_info->tx_pkt = false;
4721 			if (!req->duration_mandatory)
4722 				ch_info->period -= RTW89_DWELL_TIME_6G;
4723 		}
4724 	}
4725 
4726 	ret = rtw89_update_6ghz_rnr_chan(rtwdev, req, ch_info);
4727 	if (ret)
4728 		rtw89_warn(rtwdev, "RNR update failed: %d\n", ret);
4729 
4730 	if (ssid_num) {
4731 		band = rtw89_hw_to_nl80211_band(ch_info->ch_band);
4732 
4733 		list_for_each_entry(info, &scan_info->pkt_list[band], list) {
4734 			if (info->channel_6ghz &&
4735 			    ch_info->pri_ch != info->channel_6ghz)
4736 				continue;
4737 			else if (info->channel_6ghz && probe_count != 0)
4738 				ch_info->period += RTW89_CHANNEL_TIME_6G;
4739 			ch_info->pkt_id[probe_count++] = info->id;
4740 			if (probe_count >= RTW89_SCANOFLD_MAX_SSID)
4741 				break;
4742 		}
4743 		ch_info->num_pkt = probe_count;
4744 	}
4745 
4746 	switch (chan_type) {
4747 	case RTW89_CHAN_OPERATE:
4748 		ch_info->central_ch = op->channel;
4749 		ch_info->pri_ch = op->primary_channel;
4750 		ch_info->ch_band = op->band_type;
4751 		ch_info->bw = op->band_width;
4752 		ch_info->tx_null = true;
4753 		ch_info->num_pkt = 0;
4754 		break;
4755 	case RTW89_CHAN_DFS:
4756 		if (ch_info->ch_band != RTW89_BAND_6G)
4757 			ch_info->period = max_t(u8, ch_info->period,
4758 						RTW89_DFS_CHAN_TIME);
4759 		ch_info->dwell_time = RTW89_DWELL_TIME;
4760 		break;
4761 	case RTW89_CHAN_ACTIVE:
4762 		break;
4763 	default:
4764 		rtw89_err(rtwdev, "Channel type out of bounds\n");
4765 	}
4766 }
4767 
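/* Build the channel list for one scan-offload burst. At most
 * RTW89_SCAN_LIST_LIMIT entries are queued per call (last_chan_idx records
 * where to resume), and while connected an operating-channel entry is
 * inserted whenever the accumulated off-channel time would exceed
 * RTW89_OFF_CHAN_TIME.
 */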
4768 static int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev,
4769 				       struct rtw89_vif *rtwvif, bool connected)
4770 {
4771 	struct cfg80211_scan_request *req = rtwvif->scan_req;
4772 	struct rtw89_mac_chinfo	*ch_info, *tmp;
4773 	struct ieee80211_channel *channel;
4774 	struct list_head chan_list;
4775 	bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN;
4776 	int list_len, off_chan_time = 0;
4777 	enum rtw89_chan_type type;
4778 	int ret = 0;
4779 	u32 idx;
4780 
4781 	INIT_LIST_HEAD(&chan_list);
4782 	for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0;
4783 	     idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT;
4784 	     idx++, list_len++) {
4785 		channel = req->channels[idx];
4786 		ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
4787 		if (!ch_info) {
4788 			ret = -ENOMEM;
4789 			goto out;
4790 		}
4791 
4792 		if (req->duration_mandatory)
4793 			ch_info->period = req->duration;
4794 		else if (channel->band == NL80211_BAND_6GHZ)
4795 			ch_info->period = RTW89_CHANNEL_TIME_6G +
4796 					  RTW89_DWELL_TIME_6G;
4797 		else
4798 			ch_info->period = RTW89_CHANNEL_TIME;
4799 
4800 		ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band);
4801 		ch_info->central_ch = channel->hw_value;
4802 		ch_info->pri_ch = channel->hw_value;
4803 		ch_info->rand_seq_num = random_seq;
4804 		ch_info->is_psc = cfg80211_channel_is_psc(channel);
4805 
4806 		if (channel->flags &
4807 		    (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
4808 			type = RTW89_CHAN_DFS;
4809 		else
4810 			type = RTW89_CHAN_ACTIVE;
4811 		rtw89_hw_scan_add_chan(rtwdev, type, req->n_ssids, ch_info);
4812 
4813 		if (connected &&
4814 		    off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME) {
4815 			tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
4816 			if (!tmp) {
4817 				ret = -ENOMEM;
4818 				kfree(ch_info);
4819 				goto out;
4820 			}
4821 
4822 			type = RTW89_CHAN_OPERATE;
4823 			tmp->period = req->duration_mandatory ?
4824 				      req->duration : RTW89_CHANNEL_TIME;
4825 			rtw89_hw_scan_add_chan(rtwdev, type, 0, tmp);
4826 			list_add_tail(&tmp->list, &chan_list);
4827 			off_chan_time = 0;
4828 			list_len++;
4829 		}
4830 		list_add_tail(&ch_info->list, &chan_list);
4831 		off_chan_time += ch_info->period;
4832 	}
4833 	rtwdev->scan_info.last_chan_idx = idx;
4834 	ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list);
4835 
4836 out:
4837 	list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
4838 		list_del(&ch_info->list);
4839 		kfree(ch_info);
4840 	}
4841 
4842 	return ret;
4843 }
4844 
4845 static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev,
4846 				   struct rtw89_vif *rtwvif, bool connected)
4847 {
4848 	int ret;
4849 
4850 	ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif);
4851 	if (ret) {
4852 		rtw89_err(rtwdev, "Update probe request failed\n");
4853 		goto out;
4854 	}
4855 	ret = rtw89_hw_scan_add_chan_list(rtwdev, rtwvif, connected);
4856 out:
4857 	return ret;
4858 }
4859 
4860 void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
4861 			 struct ieee80211_scan_request *scan_req)
4862 {
4863 	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
4864 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
4865 	struct cfg80211_scan_request *req = &scan_req->req;
4866 	u32 rx_fltr = rtwdev->hal.rx_fltr;
4867 	u8 mac_addr[ETH_ALEN];
4868 
4869 	rtw89_get_channel(rtwdev, rtwvif, &rtwdev->scan_info.op_chan);
4870 	rtwdev->scan_info.scanning_vif = vif;
4871 	rtwdev->scan_info.last_chan_idx = 0;
4872 	rtwdev->scan_info.abort = false;
4873 	rtwvif->scan_ies = &scan_req->ies;
4874 	rtwvif->scan_req = req;
4875 	ieee80211_stop_queues(rtwdev->hw);
4876 	rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif, false);
4877 
4878 	if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
4879 		get_random_mask_addr(mac_addr, req->mac_addr,
4880 				     req->mac_addr_mask);
4881 	else
4882 		ether_addr_copy(mac_addr, vif->addr);
4883 	rtw89_core_scan_start(rtwdev, rtwvif, mac_addr, true);
4884 
4885 	rx_fltr &= ~B_AX_A_BCN_CHK_EN;
4886 	rx_fltr &= ~B_AX_A_BC;
4887 	rx_fltr &= ~B_AX_A_A1_MATCH;
4888 	rtw89_write32_mask(rtwdev,
4889 			   rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_0),
4890 			   B_AX_RX_FLTR_CFG_MASK,
4891 			   rx_fltr);
4892 
4893 	rtw89_chanctx_pause(rtwdev, RTW89_CHANCTX_PAUSE_REASON_HW_SCAN);
4894 }
4895 
4896 void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
4897 			    bool aborted)
4898 {
4899 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
4900 	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
4901 	struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif);
4902 	struct cfg80211_scan_info info = {
4903 		.aborted = aborted,
4904 	};
4905 
4906 	if (!vif)
4907 		return;
4908 
4909 	rtw89_write32_mask(rtwdev,
4910 			   rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_0),
4911 			   B_AX_RX_FLTR_CFG_MASK,
4912 			   rtwdev->hal.rx_fltr);
4913 
4914 	rtw89_core_scan_complete(rtwdev, vif, true);
4915 	ieee80211_scan_completed(rtwdev->hw, &info);
4916 	ieee80211_wake_queues(rtwdev->hw);
4917 	rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif, true);
4918 	rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, true);
4919 
4920 	rtw89_release_pkt_list(rtwdev);
4921 	rtwvif->scan_req = NULL;
4922 	rtwvif->scan_ies = NULL;
4923 	scan_info->last_chan_idx = 0;
4924 	scan_info->scanning_vif = NULL;
4925 	scan_info->abort = false;
4926 
4927 	rtw89_chanctx_proceed(rtwdev);
4928 }
4929 
4930 void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
4931 {
4932 	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
4933 	int ret;
4934 
4935 	scan_info->abort = true;
4936 
4937 	ret = rtw89_hw_scan_offload(rtwdev, vif, false);
4938 	if (ret)
4939 		rtw89_hw_scan_complete(rtwdev, vif, true);
4940 }
4941 
4942 static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev)
4943 {
4944 	struct rtw89_vif *rtwvif;
4945 
4946 	rtw89_for_each_rtwvif(rtwdev, rtwvif) {
4947 		/* A non-zero bssid means the vif is connected or attempting to connect */
4948 		if (!is_zero_ether_addr(rtwvif->bssid))
4949 			return true;
4950 	}
4951 
4952 	return false;
4953 }
4954 
4955 int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
4956 			  bool enable)
4957 {
4958 	struct rtw89_scan_option opt = {0};
4959 	struct rtw89_vif *rtwvif;
4960 	bool connected;
4961 	int ret = 0;
4962 
4963 	rtwvif = vif ? (struct rtw89_vif *)vif->drv_priv : NULL;
4964 	if (!rtwvif)
4965 		return -EINVAL;
4966 
4967 	connected = rtw89_is_any_vif_connected_or_connecting(rtwdev);
4968 	opt.enable = enable;
4969 	opt.target_ch_mode = connected;
4970 	if (enable) {
4971 		ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif, connected);
4972 		if (ret)
4973 			goto out;
4974 	}
4975 	ret = rtw89_fw_h2c_scan_offload(rtwdev, &opt, rtwvif);
4976 out:
4977 	return ret;
4978 }
4979 
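/* Debug helper: ask firmware to raise a CPU exception on purpose, presumably
 * to exercise the firmware crash/recovery handling. 0x5566 is the default
 * exception type defined below.
 */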
4980 #define H2C_FW_CPU_EXCEPTION_LEN 4
4981 #define H2C_FW_CPU_EXCEPTION_TYPE_DEF 0x5566
4982 int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev)
4983 {
4984 	struct sk_buff *skb;
4985 	int ret;
4986 
4987 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_FW_CPU_EXCEPTION_LEN);
4988 	if (!skb) {
4989 		rtw89_err(rtwdev,
4990 			  "failed to alloc skb for fw cpu exception\n");
4991 		return -ENOMEM;
4992 	}
4993 
4994 	skb_put(skb, H2C_FW_CPU_EXCEPTION_LEN);
4995 	RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(skb->data,
4996 					   H2C_FW_CPU_EXCEPTION_TYPE_DEF);
4997 
4998 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4999 			      H2C_CAT_TEST,
5000 			      H2C_CL_FW_STATUS_TEST,
5001 			      H2C_FUNC_CPU_EXCEPTION, 0, 0,
5002 			      H2C_FW_CPU_EXCEPTION_LEN);
5003 
5004 	ret = rtw89_h2c_tx(rtwdev, skb, false);
5005 	if (ret) {
5006 		rtw89_err(rtwdev, "failed to send h2c\n");
5007 		goto fail;
5008 	}
5009 
5010 	return 0;
5011 
5012 fail:
5013 	dev_kfree_skb_any(skb);
5014 	return ret;
5015 }
5016 
5017 #define H2C_PKT_DROP_LEN 24
5018 int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev,
5019 			  const struct rtw89_pkt_drop_params *params)
5020 {
5021 	struct sk_buff *skb;
5022 	int ret;
5023 
5024 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_PKT_DROP_LEN);
5025 	if (!skb) {
5026 		rtw89_err(rtwdev,
5027 			  "failed to alloc skb for packet drop\n");
5028 		return -ENOMEM;
5029 	}
5030 
5031 	switch (params->sel) {
5032 	case RTW89_PKT_DROP_SEL_MACID_BE_ONCE:
5033 	case RTW89_PKT_DROP_SEL_MACID_BK_ONCE:
5034 	case RTW89_PKT_DROP_SEL_MACID_VI_ONCE:
5035 	case RTW89_PKT_DROP_SEL_MACID_VO_ONCE:
5036 	case RTW89_PKT_DROP_SEL_BAND_ONCE:
5037 		break;
5038 	default:
5039 		rtw89_debug(rtwdev, RTW89_DBG_FW,
5040 			    "pkt drop H2C may not fully support sel %d yet\n",
5041 			    params->sel);
5042 		break;
5043 	}
5044 
5045 	skb_put(skb, H2C_PKT_DROP_LEN);
5046 	RTW89_SET_FWCMD_PKT_DROP_SEL(skb->data, params->sel);
5047 	RTW89_SET_FWCMD_PKT_DROP_MACID(skb->data, params->macid);
5048 	RTW89_SET_FWCMD_PKT_DROP_BAND(skb->data, params->mac_band);
5049 	RTW89_SET_FWCMD_PKT_DROP_PORT(skb->data, params->port);
5050 	RTW89_SET_FWCMD_PKT_DROP_MBSSID(skb->data, params->mbssid);
5051 	RTW89_SET_FWCMD_PKT_DROP_ROLE_A_INFO_TF_TRS(skb->data, params->tf_trs);
5052 	RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_0(skb->data,
5053 						  params->macid_band_sel[0]);
5054 	RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_1(skb->data,
5055 						  params->macid_band_sel[1]);
5056 	RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_2(skb->data,
5057 						  params->macid_band_sel[2]);
5058 	RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_3(skb->data,
5059 						  params->macid_band_sel[3]);
5060 
5061 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5062 			      H2C_CAT_MAC,
5063 			      H2C_CL_MAC_FW_OFLD,
5064 			      H2C_FUNC_PKT_DROP, 0, 0,
5065 			      H2C_PKT_DROP_LEN);
5066 
5067 	ret = rtw89_h2c_tx(rtwdev, skb, false);
5068 	if (ret) {
5069 		rtw89_err(rtwdev, "failed to send h2c\n");
5070 		goto fail;
5071 	}
5072 
5073 	return 0;
5074 
5075 fail:
5076 	dev_kfree_skb_any(skb);
5077 	return ret;
5078 }
5079 
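/* Wake-on-WLAN related H2Cs: keep-alive NULL data, disconnect detection,
 * the global WoW switch, wakeup-source control and pattern CAM updates.
 */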
5080 #define H2C_KEEP_ALIVE_LEN 4
5081 int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
5082 			    bool enable)
5083 {
5084 	struct sk_buff *skb;
5085 	u8 pkt_id = 0;
5086 	int ret;
5087 
5088 	if (enable) {
5089 		ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
5090 						   RTW89_PKT_OFLD_TYPE_NULL_DATA,
5091 						   &pkt_id);
5092 		if (ret)
5093 			return -EPERM;
5094 	}
5095 
5096 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_KEEP_ALIVE_LEN);
5097 	if (!skb) {
5098 		rtw89_err(rtwdev, "failed to alloc skb for keep alive\n");
5099 		return -ENOMEM;
5100 	}
5101 
5102 	skb_put(skb, H2C_KEEP_ALIVE_LEN);
5103 
5104 	RTW89_SET_KEEP_ALIVE_ENABLE(skb->data, enable);
5105 	RTW89_SET_KEEP_ALIVE_PKT_NULL_ID(skb->data, pkt_id);
5106 	RTW89_SET_KEEP_ALIVE_PERIOD(skb->data, 5);
5107 	RTW89_SET_KEEP_ALIVE_MACID(skb->data, rtwvif->mac_id);
5108 
5109 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5110 			      H2C_CAT_MAC,
5111 			      H2C_CL_MAC_WOW,
5112 			      H2C_FUNC_KEEP_ALIVE, 0, 1,
5113 			      H2C_KEEP_ALIVE_LEN);
5114 
5115 	ret = rtw89_h2c_tx(rtwdev, skb, false);
5116 	if (ret) {
5117 		rtw89_err(rtwdev, "failed to send h2c\n");
5118 		goto fail;
5119 	}
5120 
5121 	return 0;
5122 
5123 fail:
5124 	dev_kfree_skb_any(skb);
5125 
5126 	return ret;
5127 }
5128 
5129 #define H2C_DISCONNECT_DETECT_LEN 8
5130 int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev,
5131 				   struct rtw89_vif *rtwvif, bool enable)
5132 {
5133 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
5134 	struct sk_buff *skb;
5135 	u8 macid = rtwvif->mac_id;
5136 	int ret;
5137 
5138 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DISCONNECT_DETECT_LEN);
5139 	if (!skb) {
5140 		rtw89_err(rtwdev, "failed to alloc skb for disconnect detect\n");
5141 		return -ENOMEM;
5142 	}
5143 
5144 	skb_put(skb, H2C_DISCONNECT_DETECT_LEN);
5145 
5146 	if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) {
5147 		RTW89_SET_DISCONNECT_DETECT_ENABLE(skb->data, enable);
5148 		RTW89_SET_DISCONNECT_DETECT_DISCONNECT(skb->data, !enable);
5149 		RTW89_SET_DISCONNECT_DETECT_MAC_ID(skb->data, macid);
5150 		RTW89_SET_DISCONNECT_DETECT_CHECK_PERIOD(skb->data, 100);
5151 		RTW89_SET_DISCONNECT_DETECT_TRY_PKT_COUNT(skb->data, 5);
5152 	}
5153 
5154 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5155 			      H2C_CAT_MAC,
5156 			      H2C_CL_MAC_WOW,
5157 			      H2C_FUNC_DISCONNECT_DETECT, 0, 1,
5158 			      H2C_DISCONNECT_DETECT_LEN);
5159 
5160 	ret = rtw89_h2c_tx(rtwdev, skb, false);
5161 	if (ret) {
5162 		rtw89_err(rtwdev, "failed to send h2c\n");
5163 		goto fail;
5164 	}
5165 
5166 	return 0;
5167 
5168 fail:
5169 	dev_kfree_skb_any(skb);
5170 
5171 	return ret;
5172 }
5173 
5174 #define H2C_WOW_GLOBAL_LEN 8
5175 int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
5176 			    bool enable)
5177 {
5178 	struct sk_buff *skb;
5179 	u8 macid = rtwvif->mac_id;
5180 	int ret;
5181 
5182 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_GLOBAL_LEN);
5183 	if (!skb) {
5184 		rtw89_err(rtwdev, "failed to alloc skb for wow global\n");
5185 		return -ENOMEM;
5186 	}
5187 
5188 	skb_put(skb, H2C_WOW_GLOBAL_LEN);
5189 
5190 	RTW89_SET_WOW_GLOBAL_ENABLE(skb->data, enable);
5191 	RTW89_SET_WOW_GLOBAL_MAC_ID(skb->data, macid);
5192 
5193 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5194 			      H2C_CAT_MAC,
5195 			      H2C_CL_MAC_WOW,
5196 			      H2C_FUNC_WOW_GLOBAL, 0, 1,
5197 			      H2C_WOW_GLOBAL_LEN);
5198 
5199 	ret = rtw89_h2c_tx(rtwdev, skb, false);
5200 	if (ret) {
5201 		rtw89_err(rtwdev, "failed to send h2c\n");
5202 		goto fail;
5203 	}
5204 
5205 	return 0;
5206 
5207 fail:
5208 	dev_kfree_skb_any(skb);
5209 
5210 	return ret;
5211 }
5212 
5213 #define H2C_WAKEUP_CTRL_LEN 4
5214 int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev,
5215 				 struct rtw89_vif *rtwvif,
5216 				 bool enable)
5217 {
5218 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
5219 	struct sk_buff *skb;
5220 	u8 macid = rtwvif->mac_id;
5221 	int ret;
5222 
5223 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WAKEUP_CTRL_LEN);
5224 	if (!skb) {
5225 		rtw89_err(rtwdev, "failed to alloc skb for wakeup ctrl\n");
5226 		return -ENOMEM;
5227 	}
5228 
5229 	skb_put(skb, H2C_WAKEUP_CTRL_LEN);
5230 
5231 	if (rtw_wow->pattern_cnt)
5232 		RTW89_SET_WOW_WAKEUP_CTRL_PATTERN_MATCH_ENABLE(skb->data, enable);
5233 	if (test_bit(RTW89_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags))
5234 		RTW89_SET_WOW_WAKEUP_CTRL_MAGIC_ENABLE(skb->data, enable);
5235 	if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags))
5236 		RTW89_SET_WOW_WAKEUP_CTRL_DEAUTH_ENABLE(skb->data, enable);
5237 
5238 	RTW89_SET_WOW_WAKEUP_CTRL_MAC_ID(skb->data, macid);
5239 
5240 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5241 			      H2C_CAT_MAC,
5242 			      H2C_CL_MAC_WOW,
5243 			      H2C_FUNC_WAKEUP_CTRL, 0, 1,
5244 			      H2C_WAKEUP_CTRL_LEN);
5245 
5246 	ret = rtw89_h2c_tx(rtwdev, skb, false);
5247 	if (ret) {
5248 		rtw89_err(rtwdev, "failed to send h2c\n");
5249 		goto fail;
5250 	}
5251 
5252 	return 0;
5253 
5254 fail:
5255 	dev_kfree_skb_any(skb);
5256 
5257 	return ret;
5258 }
5259 
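/* Program one wake-on-WLAN pattern CAM entry. The wake-frame masks, CRC and
 * match-control bits are written only when the entry is marked valid; an
 * entry with valid == 0 just updates the valid bit of the slot at @idx.
 */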
5260 #define H2C_WOW_CAM_UPD_LEN 24
5261 int rtw89_fw_wow_cam_update(struct rtw89_dev *rtwdev,
5262 			    struct rtw89_wow_cam_info *cam_info)
5263 {
5264 	struct sk_buff *skb;
5265 	int ret;
5266 
5267 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_CAM_UPD_LEN);
5268 	if (!skb) {
5269 		rtw89_err(rtwdev, "failed to alloc skb for wow cam update\n");
5270 		return -ENOMEM;
5271 	}
5272 
5273 	skb_put(skb, H2C_WOW_CAM_UPD_LEN);
5274 
5275 	RTW89_SET_WOW_CAM_UPD_R_W(skb->data, cam_info->r_w);
5276 	RTW89_SET_WOW_CAM_UPD_IDX(skb->data, cam_info->idx);
5277 	if (cam_info->valid) {
5278 		RTW89_SET_WOW_CAM_UPD_WKFM1(skb->data, cam_info->mask[0]);
5279 		RTW89_SET_WOW_CAM_UPD_WKFM2(skb->data, cam_info->mask[1]);
5280 		RTW89_SET_WOW_CAM_UPD_WKFM3(skb->data, cam_info->mask[2]);
5281 		RTW89_SET_WOW_CAM_UPD_WKFM4(skb->data, cam_info->mask[3]);
5282 		RTW89_SET_WOW_CAM_UPD_CRC(skb->data, cam_info->crc);
5283 		RTW89_SET_WOW_CAM_UPD_NEGATIVE_PATTERN_MATCH(skb->data,
5284 							     cam_info->negative_pattern_match);
5285 		RTW89_SET_WOW_CAM_UPD_SKIP_MAC_HDR(skb->data,
5286 						   cam_info->skip_mac_hdr);
5287 		RTW89_SET_WOW_CAM_UPD_UC(skb->data, cam_info->uc);
5288 		RTW89_SET_WOW_CAM_UPD_MC(skb->data, cam_info->mc);
5289 		RTW89_SET_WOW_CAM_UPD_BC(skb->data, cam_info->bc);
5290 	}
5291 	RTW89_SET_WOW_CAM_UPD_VALID(skb->data, cam_info->valid);
5292 
5293 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5294 			      H2C_CAT_MAC,
5295 			      H2C_CL_MAC_WOW,
5296 			      H2C_FUNC_WOW_CAM_UPD, 0, 1,
5297 			      H2C_WOW_CAM_UPD_LEN);
5298 
5299 	ret = rtw89_h2c_tx(rtwdev, skb, false);
5300 	if (ret) {
5301 		rtw89_err(rtwdev, "failed to send h2c\n");
5302 		goto fail;
5303 	}
5304 
5305 	return 0;

5306 fail:
5307 	dev_kfree_skb_any(skb);
5308 
5309 	return ret;
5310 }
5311 
5312 /* Return < 0 if a failure happens while waiting for the condition.
5313  * Return 0 when waiting for the condition succeeds.
5314  * Return > 0 if the wait is considered unreachable by driver/FW design,
5315  * where 1 means SER (system error recovery) is in progress.
5316  */
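/* Note: @skb is consumed on every path; on TX failure it is freed here, so
 * callers must not free it themselves.
 */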
5317 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
5318 				 struct rtw89_wait_info *wait, unsigned int cond)
5319 {
5320 	int ret;
5321 
5322 	ret = rtw89_h2c_tx(rtwdev, skb, false);
5323 	if (ret) {
5324 		rtw89_err(rtwdev, "failed to send h2c\n");
5325 		dev_kfree_skb_any(skb);
5326 		return -EBUSY;
5327 	}
5328 
5329 	if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags))
5330 		return 1;
5331 
5332 	return rtw89_wait_for_cond(wait, cond);
5333 }
5334 
5335 #define H2C_ADD_MCC_LEN 16
5336 int rtw89_fw_h2c_add_mcc(struct rtw89_dev *rtwdev,
5337 			 const struct rtw89_fw_mcc_add_req *p)
5338 {
5339 	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
5340 	struct sk_buff *skb;
5341 	unsigned int cond;
5342 
5343 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ADD_MCC_LEN);
5344 	if (!skb) {
5345 		rtw89_err(rtwdev,
5346 			  "failed to alloc skb for add mcc\n");
5347 		return -ENOMEM;
5348 	}
5349 
5350 	skb_put(skb, H2C_ADD_MCC_LEN);
5351 	RTW89_SET_FWCMD_ADD_MCC_MACID(skb->data, p->macid);
5352 	RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG0(skb->data, p->central_ch_seg0);
5353 	RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG1(skb->data, p->central_ch_seg1);
5354 	RTW89_SET_FWCMD_ADD_MCC_PRIMARY_CH(skb->data, p->primary_ch);
5355 	RTW89_SET_FWCMD_ADD_MCC_BANDWIDTH(skb->data, p->bandwidth);
5356 	RTW89_SET_FWCMD_ADD_MCC_GROUP(skb->data, p->group);
5357 	RTW89_SET_FWCMD_ADD_MCC_C2H_RPT(skb->data, p->c2h_rpt);
5358 	RTW89_SET_FWCMD_ADD_MCC_DIS_TX_NULL(skb->data, p->dis_tx_null);
5359 	RTW89_SET_FWCMD_ADD_MCC_DIS_SW_RETRY(skb->data, p->dis_sw_retry);
5360 	RTW89_SET_FWCMD_ADD_MCC_IN_CURR_CH(skb->data, p->in_curr_ch);
5361 	RTW89_SET_FWCMD_ADD_MCC_SW_RETRY_COUNT(skb->data, p->sw_retry_count);
5362 	RTW89_SET_FWCMD_ADD_MCC_TX_NULL_EARLY(skb->data, p->tx_null_early);
5363 	RTW89_SET_FWCMD_ADD_MCC_BTC_IN_2G(skb->data, p->btc_in_2g);
5364 	RTW89_SET_FWCMD_ADD_MCC_PTA_EN(skb->data, p->pta_en);
5365 	RTW89_SET_FWCMD_ADD_MCC_RFK_BY_PASS(skb->data, p->rfk_by_pass);
5366 	RTW89_SET_FWCMD_ADD_MCC_CH_BAND_TYPE(skb->data, p->ch_band_type);
5367 	RTW89_SET_FWCMD_ADD_MCC_DURATION(skb->data, p->duration);
5368 	RTW89_SET_FWCMD_ADD_MCC_COURTESY_EN(skb->data, p->courtesy_en);
5369 	RTW89_SET_FWCMD_ADD_MCC_COURTESY_NUM(skb->data, p->courtesy_num);
5370 	RTW89_SET_FWCMD_ADD_MCC_COURTESY_TARGET(skb->data, p->courtesy_target);
5371 
5372 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5373 			      H2C_CAT_MAC,
5374 			      H2C_CL_MCC,
5375 			      H2C_FUNC_ADD_MCC, 0, 0,
5376 			      H2C_ADD_MCC_LEN);
5377 
5378 	cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_ADD_MCC);
5379 	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
5380 }
5381 
5382 #define H2C_START_MCC_LEN 12
5383 int rtw89_fw_h2c_start_mcc(struct rtw89_dev *rtwdev,
5384 			   const struct rtw89_fw_mcc_start_req *p)
5385 {
5386 	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
5387 	struct sk_buff *skb;
5388 	unsigned int cond;
5389 
5390 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_START_MCC_LEN);
5391 	if (!skb) {
5392 		rtw89_err(rtwdev,
5393 			  "failed to alloc skb for start mcc\n");
5394 		return -ENOMEM;
5395 	}
5396 
5397 	skb_put(skb, H2C_START_MCC_LEN);
5398 	RTW89_SET_FWCMD_START_MCC_GROUP(skb->data, p->group);
5399 	RTW89_SET_FWCMD_START_MCC_BTC_IN_GROUP(skb->data, p->btc_in_group);
5400 	RTW89_SET_FWCMD_START_MCC_OLD_GROUP_ACTION(skb->data, p->old_group_action);
5401 	RTW89_SET_FWCMD_START_MCC_OLD_GROUP(skb->data, p->old_group);
5402 	RTW89_SET_FWCMD_START_MCC_NOTIFY_CNT(skb->data, p->notify_cnt);
5403 	RTW89_SET_FWCMD_START_MCC_NOTIFY_RXDBG_EN(skb->data, p->notify_rxdbg_en);
5404 	RTW89_SET_FWCMD_START_MCC_MACID(skb->data, p->macid);
5405 	RTW89_SET_FWCMD_START_MCC_TSF_LOW(skb->data, p->tsf_low);
5406 	RTW89_SET_FWCMD_START_MCC_TSF_HIGH(skb->data, p->tsf_high);
5407 
5408 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5409 			      H2C_CAT_MAC,
5410 			      H2C_CL_MCC,
5411 			      H2C_FUNC_START_MCC, 0, 0,
5412 			      H2C_START_MCC_LEN);
5413 
5414 	cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_START_MCC);
5415 	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
5416 }
5417 
5418 #define H2C_STOP_MCC_LEN 4
5419 int rtw89_fw_h2c_stop_mcc(struct rtw89_dev *rtwdev, u8 group, u8 macid,
5420 			  bool prev_groups)
5421 {
5422 	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
5423 	struct sk_buff *skb;
5424 	unsigned int cond;
5425 
5426 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_STOP_MCC_LEN);
5427 	if (!skb) {
5428 		rtw89_err(rtwdev,
5429 			  "failed to alloc skb for stop mcc\n");
5430 		return -ENOMEM;
5431 	}
5432 
5433 	skb_put(skb, H2C_STOP_MCC_LEN);
5434 	RTW89_SET_FWCMD_STOP_MCC_MACID(skb->data, macid);
5435 	RTW89_SET_FWCMD_STOP_MCC_GROUP(skb->data, group);
5436 	RTW89_SET_FWCMD_STOP_MCC_PREV_GROUPS(skb->data, prev_groups);
5437 
5438 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5439 			      H2C_CAT_MAC,
5440 			      H2C_CL_MCC,
5441 			      H2C_FUNC_STOP_MCC, 0, 0,
5442 			      H2C_STOP_MCC_LEN);
5443 
5444 	cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_STOP_MCC);
5445 	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
5446 }
5447 
5448 #define H2C_DEL_MCC_GROUP_LEN 4
5449 int rtw89_fw_h2c_del_mcc_group(struct rtw89_dev *rtwdev, u8 group,
5450 			       bool prev_groups)
5451 {
5452 	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
5453 	struct sk_buff *skb;
5454 	unsigned int cond;
5455 
5456 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DEL_MCC_GROUP_LEN);
5457 	if (!skb) {
5458 		rtw89_err(rtwdev,
5459 			  "failed to alloc skb for del mcc group\n");
5460 		return -ENOMEM;
5461 	}
5462 
5463 	skb_put(skb, H2C_DEL_MCC_GROUP_LEN);
5464 	RTW89_SET_FWCMD_DEL_MCC_GROUP_GROUP(skb->data, group);
5465 	RTW89_SET_FWCMD_DEL_MCC_GROUP_PREV_GROUPS(skb->data, prev_groups);
5466 
5467 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5468 			      H2C_CAT_MAC,
5469 			      H2C_CL_MCC,
5470 			      H2C_FUNC_DEL_MCC_GROUP, 0, 0,
5471 			      H2C_DEL_MCC_GROUP_LEN);
5472 
5473 	cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_DEL_MCC_GROUP);
5474 	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
5475 }
5476 
5477 #define H2C_RESET_MCC_GROUP_LEN 4
5478 int rtw89_fw_h2c_reset_mcc_group(struct rtw89_dev *rtwdev, u8 group)
5479 {
5480 	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
5481 	struct sk_buff *skb;
5482 	unsigned int cond;
5483 
5484 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RESET_MCC_GROUP_LEN);
5485 	if (!skb) {
5486 		rtw89_err(rtwdev,
5487 			  "failed to alloc skb for reset mcc group\n");
5488 		return -ENOMEM;
5489 	}
5490 
5491 	skb_put(skb, H2C_RESET_MCC_GROUP_LEN);
5492 	RTW89_SET_FWCMD_RESET_MCC_GROUP_GROUP(skb->data, group);
5493 
5494 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5495 			      H2C_CAT_MAC,
5496 			      H2C_CL_MCC,
5497 			      H2C_FUNC_RESET_MCC_GROUP, 0, 0,
5498 			      H2C_RESET_MCC_GROUP_LEN);
5499 
5500 	cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_RESET_MCC_GROUP);
5501 	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
5502 }
5503 
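/* Request the TSF values of two MCC roles. The report is delivered by the
 * corresponding C2H event and read back from the wait context once the wait
 * completes successfully.
 */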
5504 #define H2C_MCC_REQ_TSF_LEN 4
5505 int rtw89_fw_h2c_mcc_req_tsf(struct rtw89_dev *rtwdev,
5506 			     const struct rtw89_fw_mcc_tsf_req *req,
5507 			     struct rtw89_mac_mcc_tsf_rpt *rpt)
5508 {
5509 	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
5510 	struct rtw89_mac_mcc_tsf_rpt *tmp;
5511 	struct sk_buff *skb;
5512 	unsigned int cond;
5513 	int ret;
5514 
5515 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_REQ_TSF_LEN);
5516 	if (!skb) {
5517 		rtw89_err(rtwdev,
5518 			  "failed to alloc skb for mcc req tsf\n");
5519 		return -ENOMEM;
5520 	}
5521 
5522 	skb_put(skb, H2C_MCC_REQ_TSF_LEN);
5523 	RTW89_SET_FWCMD_MCC_REQ_TSF_GROUP(skb->data, req->group);
5524 	RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_X(skb->data, req->macid_x);
5525 	RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_Y(skb->data, req->macid_y);
5526 
5527 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5528 			      H2C_CAT_MAC,
5529 			      H2C_CL_MCC,
5530 			      H2C_FUNC_MCC_REQ_TSF, 0, 0,
5531 			      H2C_MCC_REQ_TSF_LEN);
5532 
5533 	cond = RTW89_MCC_WAIT_COND(req->group, H2C_FUNC_MCC_REQ_TSF);
5534 	ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
5535 	if (ret)
5536 		return ret;
5537 
5538 	tmp = (struct rtw89_mac_mcc_tsf_rpt *)wait->data.buf;
5539 	*rpt = *tmp;
5540 
5541 	return 0;
5542 }
5543 
5544 #define H2C_MCC_MACID_BITMAP_DSC_LEN 4
5545 int rtw89_fw_h2c_mcc_macid_bitmap(struct rtw89_dev *rtwdev, u8 group, u8 macid,
5546 				  u8 *bitmap)
5547 {
5548 	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
5549 	struct sk_buff *skb;
5550 	unsigned int cond;
5551 	u8 map_len;
5552 	u8 h2c_len;
5553 
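	/* The bitmap is transferred as whole bytes, so the maximum macid
	 * count must be a multiple of 8 for map_len to be exact.
	 */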
5554 	BUILD_BUG_ON(RTW89_MAX_MAC_ID_NUM % 8);
5555 	map_len = RTW89_MAX_MAC_ID_NUM / 8;
5556 	h2c_len = H2C_MCC_MACID_BITMAP_DSC_LEN + map_len;
5557 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, h2c_len);
5558 	if (!skb) {
5559 		rtw89_err(rtwdev,
5560 			  "failed to alloc skb for mcc macid bitmap\n");
5561 		return -ENOMEM;
5562 	}
5563 
5564 	skb_put(skb, h2c_len);
5565 	RTW89_SET_FWCMD_MCC_MACID_BITMAP_GROUP(skb->data, group);
5566 	RTW89_SET_FWCMD_MCC_MACID_BITMAP_MACID(skb->data, macid);
5567 	RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP_LENGTH(skb->data, map_len);
5568 	RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP(skb->data, bitmap, map_len);
5569 
5570 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5571 			      H2C_CAT_MAC,
5572 			      H2C_CL_MCC,
5573 			      H2C_FUNC_MCC_MACID_BITMAP, 0, 0,
5574 			      h2c_len);
5575 
5576 	cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_MACID_BITMAP);
5577 	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
5578 }
5579 
5580 #define H2C_MCC_SYNC_LEN 4
5581 int rtw89_fw_h2c_mcc_sync(struct rtw89_dev *rtwdev, u8 group, u8 source,
5582 			  u8 target, u8 offset)
5583 {
5584 	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
5585 	struct sk_buff *skb;
5586 	unsigned int cond;
5587 
5588 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SYNC_LEN);
5589 	if (!skb) {
5590 		rtw89_err(rtwdev,
5591 			  "failed to alloc skb for mcc sync\n");
5592 		return -ENOMEM;
5593 	}
5594 
5595 	skb_put(skb, H2C_MCC_SYNC_LEN);
5596 	RTW89_SET_FWCMD_MCC_SYNC_GROUP(skb->data, group);
5597 	RTW89_SET_FWCMD_MCC_SYNC_MACID_SOURCE(skb->data, source);
5598 	RTW89_SET_FWCMD_MCC_SYNC_MACID_TARGET(skb->data, target);
5599 	RTW89_SET_FWCMD_MCC_SYNC_SYNC_OFFSET(skb->data, offset);
5600 
5601 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5602 			      H2C_CAT_MAC,
5603 			      H2C_CL_MCC,
5604 			      H2C_FUNC_MCC_SYNC, 0, 0,
5605 			      H2C_MCC_SYNC_LEN);
5606 
5607 	cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_SYNC);
5608 	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
5609 }
5610 
5611 #define H2C_MCC_SET_DURATION_LEN 20
5612 int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev,
5613 				  const struct rtw89_fw_mcc_duration *p)
5614 {
5615 	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
5616 	struct sk_buff *skb;
5617 	unsigned int cond;
5618 
5619 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SET_DURATION_LEN);
5620 	if (!skb) {
5621 		rtw89_err(rtwdev,
5622 			  "failed to alloc skb for mcc set duration\n");
5623 		return -ENOMEM;
5624 	}
5625 
5626 	skb_put(skb, H2C_MCC_SET_DURATION_LEN);
5627 	RTW89_SET_FWCMD_MCC_SET_DURATION_GROUP(skb->data, p->group);
5628 	RTW89_SET_FWCMD_MCC_SET_DURATION_BTC_IN_GROUP(skb->data, p->btc_in_group);
5629 	RTW89_SET_FWCMD_MCC_SET_DURATION_START_MACID(skb->data, p->start_macid);
5630 	RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_X(skb->data, p->macid_x);
5631 	RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_Y(skb->data, p->macid_y);
5632 	RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_LOW(skb->data,
5633 						       p->start_tsf_low);
5634 	RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_HIGH(skb->data,
5635 							p->start_tsf_high);
5636 	RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_X(skb->data, p->duration_x);
5637 	RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_Y(skb->data, p->duration_y);
5638 
5639 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5640 			      H2C_CAT_MAC,
5641 			      H2C_CL_MCC,
5642 			      H2C_FUNC_MCC_SET_DURATION, 0, 0,
5643 			      H2C_MCC_SET_DURATION_LEN);
5644 
5645 	cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_MCC_SET_DURATION);
5646 	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
5647 }
5648 
5649 static bool __fw_txpwr_entry_zero_ext(const void *ext_ptr, u8 ext_len)
5650 {
5651 	static const u8 zeros[U8_MAX] = {};
5652 
5653 	return memcmp(ext_ptr, zeros, ext_len) == 0;
5654 }
5655 
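/* Accept an entry when the struct being parsed covers the whole on-file
 * entry, or when every byte beyond the struct is zero, which lets an older
 * struct layout tolerate zero-extended entries from a newer firmware file.
 */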
5656 #define __fw_txpwr_entry_acceptable(e, cursor, ent_sz)	\
5657 ({							\
5658 	u8 __var_sz = sizeof(*(e));			\
5659 	bool __accept;					\
5660 	if (__var_sz >= (ent_sz))			\
5661 		__accept = true;			\
5662 	else						\
5663 		__accept = __fw_txpwr_entry_zero_ext((cursor) + __var_sz,\
5664 						     (ent_sz) - __var_sz);\
5665 	__accept;					\
5666 })
5667 
5668 static bool
5669 fw_txpwr_byrate_entry_valid(const struct rtw89_fw_txpwr_byrate_entry *e,
5670 			    const void *cursor,
5671 			    const struct rtw89_txpwr_conf *conf)
5672 {
5673 	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
5674 		return false;
5675 
5676 	if (e->band >= RTW89_BAND_NUM || e->bw >= RTW89_BYR_BW_NUM)
5677 		return false;
5678 
5679 	switch (e->rs) {
5680 	case RTW89_RS_CCK:
5681 		if (e->shf + e->len > RTW89_RATE_CCK_NUM)
5682 			return false;
5683 		break;
5684 	case RTW89_RS_OFDM:
5685 		if (e->shf + e->len > RTW89_RATE_OFDM_NUM)
5686 			return false;
5687 		break;
5688 	case RTW89_RS_MCS:
5689 		if (e->shf + e->len > __RTW89_RATE_MCS_NUM ||
5690 		    e->nss >= RTW89_NSS_NUM ||
5691 		    e->ofdma >= RTW89_OFDMA_NUM)
5692 			return false;
5693 		break;
5694 	case RTW89_RS_HEDCM:
5695 		if (e->shf + e->len > RTW89_RATE_HEDCM_NUM ||
5696 		    e->nss >= RTW89_NSS_HEDCM_NUM ||
5697 		    e->ofdma >= RTW89_OFDMA_NUM)
5698 			return false;
5699 		break;
5700 	case RTW89_RS_OFFSET:
5701 		if (e->shf + e->len > __RTW89_RATE_OFFSET_NUM)
5702 			return false;
5703 		break;
5704 	default:
5705 		return false;
5706 	}
5707 
5708 	return true;
5709 }
5710 
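/* Scatter the firmware byrate table into the driver's per-band/bandwidth
 * storage: each valid entry carries consecutive rate values packed in a
 * little-endian u32, taken one byte at a time starting at index entry.shf.
 */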
5711 static
5712 void rtw89_fw_load_txpwr_byrate(struct rtw89_dev *rtwdev,
5713 				const struct rtw89_txpwr_table *tbl)
5714 {
5715 	const struct rtw89_txpwr_conf *conf = tbl->data;
5716 	struct rtw89_fw_txpwr_byrate_entry entry = {};
5717 	struct rtw89_txpwr_byrate *byr_head;
5718 	struct rtw89_rate_desc desc = {};
5719 	const void *cursor;
5720 	u32 data;
5721 	s8 *byr;
5722 	int i;
5723 
5724 	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
5725 		if (!fw_txpwr_byrate_entry_valid(&entry, cursor, conf))
5726 			continue;
5727 
5728 		byr_head = &rtwdev->byr[entry.band][entry.bw];
5729 		data = le32_to_cpu(entry.data);
5730 		desc.ofdma = entry.ofdma;
5731 		desc.nss = entry.nss;
5732 		desc.rs = entry.rs;
5733 
5734 		for (i = 0; i < entry.len; i++, data >>= 8) {
5735 			desc.idx = entry.shf + i;
5736 			byr = rtw89_phy_raw_byr_seek(rtwdev, byr_head, &desc);
5737 			*byr = data & 0xff;
5738 		}
5739 	}
5740 }
5741 
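/* The tx-power limit loaders below all follow the same pattern: every index
 * of a firmware-provided entry is validated against the bounds of the
 * destination table before the value is written, so a malformed entry can
 * never index out of range.
 */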
5742 static bool
5743 fw_txpwr_lmt_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_2ghz_entry *e,
5744 			      const void *cursor,
5745 			      const struct rtw89_txpwr_conf *conf)
5746 {
5747 	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
5748 		return false;
5749 
5750 	if (e->bw >= RTW89_2G_BW_NUM)
5751 		return false;
5752 	if (e->nt >= RTW89_NTX_NUM)
5753 		return false;
5754 	if (e->rs >= RTW89_RS_LMT_NUM)
5755 		return false;
5756 	if (e->bf >= RTW89_BF_NUM)
5757 		return false;
5758 	if (e->regd >= RTW89_REGD_NUM)
5759 		return false;
5760 	if (e->ch_idx >= RTW89_2G_CH_NUM)
5761 		return false;
5762 
5763 	return true;
5764 }
5765 
5766 static
5767 void rtw89_fw_load_txpwr_lmt_2ghz(struct rtw89_txpwr_lmt_2ghz_data *data)
5768 {
5769 	const struct rtw89_txpwr_conf *conf = &data->conf;
5770 	struct rtw89_fw_txpwr_lmt_2ghz_entry entry = {};
5771 	const void *cursor;
5772 
5773 	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
5774 		if (!fw_txpwr_lmt_2ghz_entry_valid(&entry, cursor, conf))
5775 			continue;
5776 
5777 		data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd]
5778 		       [entry.ch_idx] = entry.v;
5779 	}
5780 }
5781 
5782 static bool
5783 fw_txpwr_lmt_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_5ghz_entry *e,
5784 			      const void *cursor,
5785 			      const struct rtw89_txpwr_conf *conf)
5786 {
5787 	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
5788 		return false;
5789 
5790 	if (e->bw >= RTW89_5G_BW_NUM)
5791 		return false;
5792 	if (e->nt >= RTW89_NTX_NUM)
5793 		return false;
5794 	if (e->rs >= RTW89_RS_LMT_NUM)
5795 		return false;
5796 	if (e->bf >= RTW89_BF_NUM)
5797 		return false;
5798 	if (e->regd >= RTW89_REGD_NUM)
5799 		return false;
5800 	if (e->ch_idx >= RTW89_5G_CH_NUM)
5801 		return false;
5802 
5803 	return true;
5804 }
5805 
5806 static
5807 void rtw89_fw_load_txpwr_lmt_5ghz(struct rtw89_txpwr_lmt_5ghz_data *data)
5808 {
5809 	const struct rtw89_txpwr_conf *conf = &data->conf;
5810 	struct rtw89_fw_txpwr_lmt_5ghz_entry entry = {};
5811 	const void *cursor;
5812 
5813 	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
5814 		if (!fw_txpwr_lmt_5ghz_entry_valid(&entry, cursor, conf))
5815 			continue;
5816 
5817 		data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd]
5818 		       [entry.ch_idx] = entry.v;
5819 	}
5820 }
5821 
5822 static bool
5823 fw_txpwr_lmt_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_6ghz_entry *e,
5824 			      const void *cursor,
5825 			      const struct rtw89_txpwr_conf *conf)
5826 {
5827 	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
5828 		return false;
5829 
5830 	if (e->bw >= RTW89_6G_BW_NUM)
5831 		return false;
5832 	if (e->nt >= RTW89_NTX_NUM)
5833 		return false;
5834 	if (e->rs >= RTW89_RS_LMT_NUM)
5835 		return false;
5836 	if (e->bf >= RTW89_BF_NUM)
5837 		return false;
5838 	if (e->regd >= RTW89_REGD_NUM)
5839 		return false;
5840 	if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER)
5841 		return false;
5842 	if (e->ch_idx >= RTW89_6G_CH_NUM)
5843 		return false;
5844 
5845 	return true;
5846 }
5847 
5848 static
5849 void rtw89_fw_load_txpwr_lmt_6ghz(struct rtw89_txpwr_lmt_6ghz_data *data)
5850 {
5851 	const struct rtw89_txpwr_conf *conf = &data->conf;
5852 	struct rtw89_fw_txpwr_lmt_6ghz_entry entry = {};
5853 	const void *cursor;
5854 
5855 	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
5856 		if (!fw_txpwr_lmt_6ghz_entry_valid(&entry, cursor, conf))
5857 			continue;
5858 
5859 		data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd]
5860 		       [entry.reg_6ghz_power][entry.ch_idx] = entry.v;
5861 	}
5862 }
5863 
5864 static bool
5865 fw_txpwr_lmt_ru_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_2ghz_entry *e,
5866 				 const void *cursor,
5867 				 const struct rtw89_txpwr_conf *conf)
5868 {
5869 	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
5870 		return false;
5871 
5872 	if (e->ru >= RTW89_RU_NUM)
5873 		return false;
5874 	if (e->nt >= RTW89_NTX_NUM)
5875 		return false;
5876 	if (e->regd >= RTW89_REGD_NUM)
5877 		return false;
5878 	if (e->ch_idx >= RTW89_2G_CH_NUM)
5879 		return false;
5880 
5881 	return true;
5882 }
5883 
5884 static
5885 void rtw89_fw_load_txpwr_lmt_ru_2ghz(struct rtw89_txpwr_lmt_ru_2ghz_data *data)
5886 {
5887 	const struct rtw89_txpwr_conf *conf = &data->conf;
5888 	struct rtw89_fw_txpwr_lmt_ru_2ghz_entry entry = {};
5889 	const void *cursor;
5890 
5891 	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
5892 		if (!fw_txpwr_lmt_ru_2ghz_entry_valid(&entry, cursor, conf))
5893 			continue;
5894 
5895 		data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v;
5896 	}
5897 }
5898 
5899 static bool
5900 fw_txpwr_lmt_ru_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_5ghz_entry *e,
5901 				 const void *cursor,
5902 				 const struct rtw89_txpwr_conf *conf)
5903 {
5904 	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
5905 		return false;
5906 
5907 	if (e->ru >= RTW89_RU_NUM)
5908 		return false;
5909 	if (e->nt >= RTW89_NTX_NUM)
5910 		return false;
5911 	if (e->regd >= RTW89_REGD_NUM)
5912 		return false;
5913 	if (e->ch_idx >= RTW89_5G_CH_NUM)
5914 		return false;
5915 
5916 	return true;
5917 }
5918 
5919 static
5920 void rtw89_fw_load_txpwr_lmt_ru_5ghz(struct rtw89_txpwr_lmt_ru_5ghz_data *data)
5921 {
5922 	const struct rtw89_txpwr_conf *conf = &data->conf;
5923 	struct rtw89_fw_txpwr_lmt_ru_5ghz_entry entry = {};
5924 	const void *cursor;
5925 
5926 	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
5927 		if (!fw_txpwr_lmt_ru_5ghz_entry_valid(&entry, cursor, conf))
5928 			continue;
5929 
5930 		data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v;
5931 	}
5932 }
5933 
5934 static bool
5935 fw_txpwr_lmt_ru_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_6ghz_entry *e,
5936 				 const void *cursor,
5937 				 const struct rtw89_txpwr_conf *conf)
5938 {
5939 	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
5940 		return false;
5941 
5942 	if (e->ru >= RTW89_RU_NUM)
5943 		return false;
5944 	if (e->nt >= RTW89_NTX_NUM)
5945 		return false;
5946 	if (e->regd >= RTW89_REGD_NUM)
5947 		return false;
5948 	if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER)
5949 		return false;
5950 	if (e->ch_idx >= RTW89_6G_CH_NUM)
5951 		return false;
5952 
5953 	return true;
5954 }
5955 
5956 static
5957 void rtw89_fw_load_txpwr_lmt_ru_6ghz(struct rtw89_txpwr_lmt_ru_6ghz_data *data)
5958 {
5959 	const struct rtw89_txpwr_conf *conf = &data->conf;
5960 	struct rtw89_fw_txpwr_lmt_ru_6ghz_entry entry = {};
5961 	const void *cursor;
5962 
5963 	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
5964 		if (!fw_txpwr_lmt_ru_6ghz_entry_valid(&entry, cursor, conf))
5965 			continue;
5966 
5967 		data->v[entry.ru][entry.nt][entry.regd][entry.reg_6ghz_power]
5968 		       [entry.ch_idx] = entry.v;
5969 	}
5970 }
5971 
5972 static bool
5973 fw_tx_shape_lmt_entry_valid(const struct rtw89_fw_tx_shape_lmt_entry *e,
5974 			    const void *cursor,
5975 			    const struct rtw89_txpwr_conf *conf)
5976 {
5977 	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
5978 		return false;
5979 
5980 	if (e->band >= RTW89_BAND_NUM)
5981 		return false;
5982 	if (e->tx_shape_rs >= RTW89_RS_TX_SHAPE_NUM)
5983 		return false;
5984 	if (e->regd >= RTW89_REGD_NUM)
5985 		return false;
5986 
5987 	return true;
5988 }
5989 
5990 static
5991 void rtw89_fw_load_tx_shape_lmt(struct rtw89_tx_shape_lmt_data *data)
5992 {
5993 	const struct rtw89_txpwr_conf *conf = &data->conf;
5994 	struct rtw89_fw_tx_shape_lmt_entry entry = {};
5995 	const void *cursor;
5996 
5997 	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
5998 		if (!fw_tx_shape_lmt_entry_valid(&entry, cursor, conf))
5999 			continue;
6000 
6001 		data->v[entry.band][entry.tx_shape_rs][entry.regd] = entry.v;
6002 	}
6003 }
6004 
6005 static bool
6006 fw_tx_shape_lmt_ru_entry_valid(const struct rtw89_fw_tx_shape_lmt_ru_entry *e,
6007 			       const void *cursor,
6008 			       const struct rtw89_txpwr_conf *conf)
6009 {
6010 	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
6011 		return false;
6012 
6013 	if (e->band >= RTW89_BAND_NUM)
6014 		return false;
6015 	if (e->regd >= RTW89_REGD_NUM)
6016 		return false;
6017 
6018 	return true;
6019 }
6020 
6021 static
6022 void rtw89_fw_load_tx_shape_lmt_ru(struct rtw89_tx_shape_lmt_ru_data *data)
6023 {
6024 	const struct rtw89_txpwr_conf *conf = &data->conf;
6025 	struct rtw89_fw_tx_shape_lmt_ru_entry entry = {};
6026 	const void *cursor;
6027 
6028 	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
6029 		if (!fw_tx_shape_lmt_ru_entry_valid(&entry, cursor, conf))
6030 			continue;
6031 
6032 		data->v[entry.band][entry.regd] = entry.v;
6033 	}
6034 }
6035 
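/* Build the effective RFE parameters: start from @init (when given) and, for
 * every tx-power table whose firmware-loaded conf is valid, point the
 * corresponding rule at the table parsed from the firmware file instead.
 */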
6036 const struct rtw89_rfe_parms *
6037 rtw89_load_rfe_data_from_fw(struct rtw89_dev *rtwdev,
6038 			    const struct rtw89_rfe_parms *init)
6039 {
6040 	struct rtw89_rfe_data *rfe_data = rtwdev->rfe_data;
6041 	struct rtw89_rfe_parms *parms;
6042 
6043 	if (!rfe_data)
6044 		return init;
6045 
6046 	parms = &rfe_data->rfe_parms;
6047 	if (init)
6048 		*parms = *init;
6049 
6050 	if (rtw89_txpwr_conf_valid(&rfe_data->byrate.conf)) {
6051 		rfe_data->byrate.tbl.data = &rfe_data->byrate.conf;
6052 		rfe_data->byrate.tbl.size = 0; /* don't care here */
6053 		rfe_data->byrate.tbl.load = rtw89_fw_load_txpwr_byrate;
6054 		parms->byr_tbl = &rfe_data->byrate.tbl;
6055 	}
6056 
6057 	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_2ghz.conf)) {
6058 		rtw89_fw_load_txpwr_lmt_2ghz(&rfe_data->lmt_2ghz);
6059 		parms->rule_2ghz.lmt = &rfe_data->lmt_2ghz.v;
6060 	}
6061 
6062 	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_5ghz.conf)) {
6063 		rtw89_fw_load_txpwr_lmt_5ghz(&rfe_data->lmt_5ghz);
6064 		parms->rule_5ghz.lmt = &rfe_data->lmt_5ghz.v;
6065 	}
6066 
6067 	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_6ghz.conf)) {
6068 		rtw89_fw_load_txpwr_lmt_6ghz(&rfe_data->lmt_6ghz);
6069 		parms->rule_6ghz.lmt = &rfe_data->lmt_6ghz.v;
6070 	}
6071 
6072 	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_2ghz.conf)) {
6073 		rtw89_fw_load_txpwr_lmt_ru_2ghz(&rfe_data->lmt_ru_2ghz);
6074 		parms->rule_2ghz.lmt_ru = &rfe_data->lmt_ru_2ghz.v;
6075 	}
6076 
6077 	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_5ghz.conf)) {
6078 		rtw89_fw_load_txpwr_lmt_ru_5ghz(&rfe_data->lmt_ru_5ghz);
6079 		parms->rule_5ghz.lmt_ru = &rfe_data->lmt_ru_5ghz.v;
6080 	}
6081 
6082 	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_6ghz.conf)) {
6083 		rtw89_fw_load_txpwr_lmt_ru_6ghz(&rfe_data->lmt_ru_6ghz);
6084 		parms->rule_6ghz.lmt_ru = &rfe_data->lmt_ru_6ghz.v;
6085 	}
6086 
6087 	if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt.conf)) {
6088 		rtw89_fw_load_tx_shape_lmt(&rfe_data->tx_shape_lmt);
6089 		parms->tx_shape.lmt = &rfe_data->tx_shape_lmt.v;
6090 	}
6091 
6092 	if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt_ru.conf)) {
6093 		rtw89_fw_load_tx_shape_lmt_ru(&rfe_data->tx_shape_lmt_ru);
6094 		parms->tx_shape.lmt_ru = &rfe_data->tx_shape_lmt_ru.v;
6095 	}
6096 
6097 	return parms;
6098 }
6099