xref: /linux/drivers/net/wireless/realtek/rtw89/fw.c (revision df9c299371054cb725eef730fd0f1d0fe2ed6bb0)
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright(c) 2019-2020  Realtek Corporation
3  */
4 
5 #include <linux/if_arp.h>
6 #include "cam.h"
7 #include "chan.h"
8 #include "coex.h"
9 #include "debug.h"
10 #include "fw.h"
11 #include "mac.h"
12 #include "phy.h"
13 #include "ps.h"
14 #include "reg.h"
15 #include "util.h"
16 #include "wow.h"
17 
18 static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev);
19 
20 struct rtw89_eapol_2_of_2 {
21 	u8 gtkbody[14];
22 	u8 key_des_ver;
23 	u8 rsvd[92];
24 } __packed;
25 
26 struct rtw89_sa_query {
27 	u8 category;
28 	u8 action;
29 } __packed;
30 
31 struct rtw89_arp_rsp {
32 	u8 llc_hdr[sizeof(rfc1042_header)];
33 	__be16 llc_type;
34 	struct arphdr arp_hdr;
35 	u8 sender_hw[ETH_ALEN];
36 	__be32 sender_ip;
37 	u8 target_hw[ETH_ALEN];
38 	__be32 target_ip;
39 } __packed;
40 
41 static const u8 mss_signature[] = {0x4D, 0x53, 0x53, 0x4B, 0x50, 0x4F, 0x4F, 0x4C};
42 
43 const struct rtw89_fw_blacklist rtw89_fw_blacklist_default = {
44 	.ver = 0x00,
45 	.list = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
46 		 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
47 		 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
48 		 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
49 	},
50 };
51 EXPORT_SYMBOL(rtw89_fw_blacklist_default);
52 
53 union rtw89_fw_element_arg {
54 	size_t offset;
55 	enum rtw89_rf_path rf_path;
56 	enum rtw89_fw_type fw_type;
57 };
58 
59 struct rtw89_fw_element_handler {
60 	int (*fn)(struct rtw89_dev *rtwdev,
61 		  const struct rtw89_fw_element_hdr *elm,
62 		  const union rtw89_fw_element_arg arg);
63 	const union rtw89_fw_element_arg arg;
64 	const char *name;
65 };
66 
67 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
68 				    struct sk_buff *skb);
69 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
70 				 struct rtw89_wait_info *wait, unsigned int cond);
71 static int __parse_security_section(struct rtw89_dev *rtwdev,
72 				    struct rtw89_fw_bin_info *info,
73 				    struct rtw89_fw_hdr_section_info *section_info,
74 				    const void *content,
75 				    u32 *mssc_len);
76 
77 static struct sk_buff *rtw89_fw_h2c_alloc_skb(struct rtw89_dev *rtwdev, u32 len,
78 					      bool header)
79 {
80 	struct sk_buff *skb;
81 	u32 header_len = 0;
82 	u32 h2c_desc_size = rtwdev->chip->h2c_desc_size;
83 
84 	if (header)
85 		header_len = H2C_HEADER_LEN;
86 
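	/* Reserve headroom for the HCI TX descriptor and, when requested, the
	 * H2C command header; callers fill only the payload and the header is
	 * pushed in front of it later (see rtw89_h2c_pkt_set_hdr*()).
	 */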
87 	skb = dev_alloc_skb(len + header_len + h2c_desc_size);
88 	if (!skb)
89 		return NULL;
90 	skb_reserve(skb, header_len + h2c_desc_size);
91 	memset(skb->data, 0, len);
92 
93 	return skb;
94 }
95 
96 struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len)
97 {
98 	return rtw89_fw_h2c_alloc_skb(rtwdev, len, true);
99 }
100 
101 struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len)
102 {
103 	return rtw89_fw_h2c_alloc_skb(rtwdev, len, false);
104 }
105 
106 int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev, enum rtw89_fwdl_check_type type)
107 {
108 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
109 	u8 val;
110 	int ret;
111 
112 	ret = read_poll_timeout_atomic(mac->fwdl_get_status, val,
113 				       val == RTW89_FWDL_WCPU_FW_INIT_RDY,
114 				       1, FWDL_WAIT_CNT, false, rtwdev, type);
115 	if (ret) {
116 		switch (val) {
117 		case RTW89_FWDL_CHECKSUM_FAIL:
118 			rtw89_err(rtwdev, "fw checksum fail\n");
119 			return -EINVAL;
120 
121 		case RTW89_FWDL_SECURITY_FAIL:
122 			rtw89_err(rtwdev, "fw security fail\n");
123 			return -EINVAL;
124 
125 		case RTW89_FWDL_CV_NOT_MATCH:
126 			rtw89_err(rtwdev, "fw cv not match\n");
127 			return -EINVAL;
128 
129 		default:
130 			rtw89_err(rtwdev, "fw unexpected status %d\n", val);
131 			return -EBUSY;
132 		}
133 	}
134 
135 	set_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);
136 
137 	return 0;
138 }
139 
140 static int rtw89_fw_hdr_parser_v0(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
141 				  struct rtw89_fw_bin_info *info)
142 {
143 	const struct rtw89_fw_hdr *fw_hdr = (const struct rtw89_fw_hdr *)fw;
144 	const struct rtw89_chip_info *chip = rtwdev->chip;
145 	struct rtw89_fw_hdr_section_info *section_info;
146 	struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
147 	const struct rtw89_fw_dynhdr_hdr *fwdynhdr;
148 	const struct rtw89_fw_hdr_section *section;
149 	const u8 *fw_end = fw + len;
150 	const u8 *bin;
151 	u32 base_hdr_len;
152 	u32 mssc_len;
153 	int ret;
154 	u32 i;
155 
156 	if (!info)
157 		return -EINVAL;
158 
159 	info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_W6_SEC_NUM);
160 	base_hdr_len = struct_size(fw_hdr, sections, info->section_num);
161 	info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_W7_DYN_HDR);
162 	info->idmem_share_mode = le32_get_bits(fw_hdr->w7, FW_HDR_W7_IDMEM_SHARE_MODE);
163 
164 	if (info->dynamic_hdr_en) {
165 		info->hdr_len = le32_get_bits(fw_hdr->w3, FW_HDR_W3_LEN);
166 		info->dynamic_hdr_len = info->hdr_len - base_hdr_len;
167 		fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len);
168 		if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) {
169 			rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n");
170 			return -EINVAL;
171 		}
172 	} else {
173 		info->hdr_len = base_hdr_len;
174 		info->dynamic_hdr_len = 0;
175 	}
176 
177 	bin = fw + info->hdr_len;
178 
179 	/* jump to section header */
180 	section_info = info->section_info;
181 	for (i = 0; i < info->section_num; i++) {
182 		section = &fw_hdr->sections[i];
183 		section_info->type =
184 			le32_get_bits(section->w1, FWSECTION_HDR_W1_SECTIONTYPE);
185 		section_info->len = le32_get_bits(section->w1, FWSECTION_HDR_W1_SEC_SIZE);
186 
187 		if (le32_get_bits(section->w1, FWSECTION_HDR_W1_CHECKSUM))
188 			section_info->len += FWDL_SECTION_CHKSUM_LEN;
189 		section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_W1_REDL);
190 		section_info->dladdr =
191 			le32_get_bits(section->w0, FWSECTION_HDR_W0_DL_ADDR) & 0x1fffffff;
192 		section_info->addr = bin;
193 
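		/* A security section carries MSS key material appended after the
		 * section body; __parse_security_section() computes its length
		 * (mssc_len) and locates the key that applies to this device.
		 */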
194 		if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
195 			section_info->mssc =
196 				le32_get_bits(section->w2, FWSECTION_HDR_W2_MSSC);
197 
198 			ret = __parse_security_section(rtwdev, info, section_info,
199 						       bin, &mssc_len);
200 			if (ret)
201 				return ret;
202 
203 			if (sec->secure_boot && chip->chip_id == RTL8852B)
204 				section_info->len_override = 960;
205 		} else {
206 			section_info->mssc = 0;
207 			mssc_len = 0;
208 		}
209 
210 		rtw89_debug(rtwdev, RTW89_DBG_FW,
211 			    "section[%d] type=%d len=0x%-6x mssc=%d mssc_len=%d addr=%tx\n",
212 			    i, section_info->type, section_info->len,
213 			    section_info->mssc, mssc_len, bin - fw);
214 		rtw89_debug(rtwdev, RTW89_DBG_FW,
215 			    "           ignore=%d key_addr=%p (0x%tx) key_len=%d key_idx=%d\n",
216 			    section_info->ignore, section_info->key_addr,
217 			    section_info->key_addr ?
218 			    section_info->key_addr - section_info->addr : 0,
219 			    section_info->key_len, section_info->key_idx);
220 
221 		bin += section_info->len + mssc_len;
222 		section_info++;
223 	}
224 
225 	if (fw_end != bin) {
226 		rtw89_err(rtwdev, "[ERR]fw bin size\n");
227 		return -EINVAL;
228 	}
229 
230 	return 0;
231 }
232 
233 static int __get_mssc_key_idx(struct rtw89_dev *rtwdev,
234 			      const struct rtw89_fw_mss_pool_hdr *mss_hdr,
235 			      u32 rmp_tbl_size, u32 *key_idx)
236 {
237 	struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
238 	u32 sel_byte_idx;
239 	u32 mss_sel_idx;
240 	u8 sel_bit_idx;
241 	int i;
242 
243 	if (sec->mss_dev_type == RTW89_FW_MSS_DEV_TYPE_FWSEC_DEF) {
244 		if (!mss_hdr->defen)
245 			return -ENOENT;
246 
247 		mss_sel_idx = sec->mss_cust_idx * le16_to_cpu(mss_hdr->msskey_num_max) +
248 			      sec->mss_key_num;
249 	} else {
250 		if (mss_hdr->defen)
251 			mss_sel_idx = FWDL_MSS_POOL_DEFKEYSETS_SIZE << 3;
252 		else
253 			mss_sel_idx = 0;
254 		mss_sel_idx += sec->mss_dev_type * le16_to_cpu(mss_hdr->msskey_num_max) *
255 						   le16_to_cpu(mss_hdr->msscust_max) +
256 			       sec->mss_cust_idx * le16_to_cpu(mss_hdr->msskey_num_max) +
257 			       sec->mss_key_num;
258 	}
259 
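	/* rmp_tbl is a bitmap of the key slots actually present in the pool.
	 * The real key index is the number of set bits preceding the selected
	 * bit, i.e. a popcount over the bitmap up to mss_sel_idx.
	 */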
260 	sel_byte_idx = mss_sel_idx >> 3;
261 	sel_bit_idx = mss_sel_idx & 0x7;
262 
263 	if (sel_byte_idx >= rmp_tbl_size)
264 		return -EFAULT;
265 
266 	if (!(mss_hdr->rmp_tbl[sel_byte_idx] & BIT(sel_bit_idx)))
267 		return -ENOENT;
268 
269 	*key_idx = hweight8(mss_hdr->rmp_tbl[sel_byte_idx] & (BIT(sel_bit_idx) - 1));
270 
271 	for (i = 0; i < sel_byte_idx; i++)
272 		*key_idx += hweight8(mss_hdr->rmp_tbl[i]);
273 
274 	return 0;
275 }
276 
277 static int __parse_formatted_mssc(struct rtw89_dev *rtwdev,
278 				  struct rtw89_fw_bin_info *info,
279 				  struct rtw89_fw_hdr_section_info *section_info,
280 				  const void *content,
281 				  u32 *mssc_len)
282 {
283 	const struct rtw89_fw_mss_pool_hdr *mss_hdr = content + section_info->len;
284 	const union rtw89_fw_section_mssc_content *section_content = content;
285 	struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
286 	u32 rmp_tbl_size;
287 	u32 key_sign_len;
288 	u32 real_key_idx;
289 	u32 sb_sel_ver;
290 	int ret;
291 
292 	if (memcmp(mss_signature, mss_hdr->signature, sizeof(mss_signature)) != 0) {
293 		rtw89_err(rtwdev, "[ERR] wrong MSS signature\n");
294 		return -ENOENT;
295 	}
296 
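	/* The remap table is a bitmap with one bit per (device type, customer,
	 * key) combination, hence the product of the three maxima divided by
	 * eight; default key sets occupy an extra fixed-size region.
	 */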
297 	if (mss_hdr->rmpfmt == MSS_POOL_RMP_TBL_BITMASK) {
298 		rmp_tbl_size = (le16_to_cpu(mss_hdr->msskey_num_max) *
299 				le16_to_cpu(mss_hdr->msscust_max) *
300 				mss_hdr->mssdev_max) >> 3;
301 		if (mss_hdr->defen)
302 			rmp_tbl_size += FWDL_MSS_POOL_DEFKEYSETS_SIZE;
303 	} else {
304 		rtw89_err(rtwdev, "[ERR] MSS Key Pool Remap Table Format Unsupport:%X\n",
305 			  mss_hdr->rmpfmt);
306 		return -EINVAL;
307 	}
308 
309 	if (rmp_tbl_size + sizeof(*mss_hdr) != le32_to_cpu(mss_hdr->key_raw_offset)) {
310 		rtw89_err(rtwdev, "[ERR] MSS Key Pool Format Error:0x%X + 0x%X != 0x%X\n",
311 			  rmp_tbl_size, (int)sizeof(*mss_hdr),
312 			  le32_to_cpu(mss_hdr->key_raw_offset));
313 		return -EINVAL;
314 	}
315 
316 	key_sign_len = le16_to_cpu(section_content->key_sign_len.v) >> 2;
317 	if (!key_sign_len)
318 		key_sign_len = 512;
319 
320 	if (info->dsp_checksum)
321 		key_sign_len += FWDL_SECURITY_CHKSUM_LEN;
322 
323 	*mssc_len = sizeof(*mss_hdr) + rmp_tbl_size +
324 		    le16_to_cpu(mss_hdr->keypair_num) * key_sign_len;
325 
326 	if (!sec->secure_boot)
327 		goto out;
328 
329 	sb_sel_ver = get_unaligned_le32(&section_content->sb_sel_ver.v);
330 	if (sb_sel_ver && sb_sel_ver != sec->sb_sel_mgn)
331 		goto ignore;
332 
333 	ret = __get_mssc_key_idx(rtwdev, mss_hdr, rmp_tbl_size, &real_key_idx);
334 	if (ret)
335 		goto ignore;
336 
337 	section_info->key_addr = content + section_info->len +
338 				le32_to_cpu(mss_hdr->key_raw_offset) +
339 				key_sign_len * real_key_idx;
340 	section_info->key_len = key_sign_len;
341 	section_info->key_idx = real_key_idx;
342 
343 out:
344 	if (info->secure_section_exist) {
345 		section_info->ignore = true;
346 		return 0;
347 	}
348 
349 	info->secure_section_exist = true;
350 
351 	return 0;
352 
353 ignore:
354 	section_info->ignore = true;
355 
356 	return 0;
357 }
358 
359 static int __check_secure_blacklist(struct rtw89_dev *rtwdev,
360 				    struct rtw89_fw_bin_info *info,
361 				    struct rtw89_fw_hdr_section_info *section_info,
362 				    const void *content)
363 {
364 	const struct rtw89_fw_blacklist *chip_blacklist = rtwdev->chip->fw_blacklist;
365 	const union rtw89_fw_section_mssc_content *section_content = content;
366 	struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
367 	u8 byte_idx;
368 	u8 bit_mask;
369 
370 	if (!sec->secure_boot)
371 		return 0;
372 
373 	if (!info->secure_section_exist || section_info->ignore)
374 		return 0;
375 
376 	if (!chip_blacklist) {
377 		rtw89_warn(rtwdev, "chip no blacklist for secure firmware\n");
378 		return -ENOENT;
379 	}
380 
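	/* bit_in_chip_list selects this firmware's bit in the chip blacklist
	 * bitmap; split it into a byte offset and a bit mask.
	 */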
381 	byte_idx = section_content->blacklist.bit_in_chip_list >> 3;
382 	bit_mask = BIT(section_content->blacklist.bit_in_chip_list & 0x7);
383 
384 	if (section_content->blacklist.ver > chip_blacklist->ver) {
385 		rtw89_warn(rtwdev, "chip blacklist out of date (%u, %u)\n",
386 			   section_content->blacklist.ver, chip_blacklist->ver);
387 		return -EINVAL;
388 	}
389 
390 	if (chip_blacklist->list[byte_idx] & bit_mask) {
391 		rtw89_warn(rtwdev, "firmware %u in chip blacklist\n",
392 			   section_content->blacklist.ver);
393 		return -EPERM;
394 	}
395 
396 	return 0;
397 }
398 
399 static int __parse_security_section(struct rtw89_dev *rtwdev,
400 				    struct rtw89_fw_bin_info *info,
401 				    struct rtw89_fw_hdr_section_info *section_info,
402 				    const void *content,
403 				    u32 *mssc_len)
404 {
405 	struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
406 	int ret;
407 
408 	if ((section_info->mssc & FORMATTED_MSSC_MASK) == FORMATTED_MSSC) {
409 		ret = __parse_formatted_mssc(rtwdev, info, section_info,
410 					     content, mssc_len);
411 		if (ret)
412 			return -EINVAL;
413 	} else {
414 		*mssc_len = section_info->mssc * FWDL_SECURITY_SIGLEN;
415 		if (info->dsp_checksum)
416 			*mssc_len += section_info->mssc * FWDL_SECURITY_CHKSUM_LEN;
417 
418 		if (sec->secure_boot) {
419 			if (sec->mss_idx >= section_info->mssc) {
420 				rtw89_err(rtwdev, "unexpected MSS %d >= %d\n",
421 					  sec->mss_idx, section_info->mssc);
422 				return -EFAULT;
423 			}
424 			section_info->key_addr = content + section_info->len +
425 						 sec->mss_idx * FWDL_SECURITY_SIGLEN;
426 			section_info->key_len = FWDL_SECURITY_SIGLEN;
427 		}
428 
429 		info->secure_section_exist = true;
430 	}
431 
432 	ret = __check_secure_blacklist(rtwdev, info, section_info, content);
433 	WARN_ONCE(ret, "Current firmware in blacklist. Please update firmware.\n");
434 
435 	return 0;
436 }
437 
438 static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
439 				  struct rtw89_fw_bin_info *info)
440 {
441 	const struct rtw89_fw_hdr_v1 *fw_hdr = (const struct rtw89_fw_hdr_v1 *)fw;
442 	struct rtw89_fw_hdr_section_info *section_info;
443 	const struct rtw89_fw_dynhdr_hdr *fwdynhdr;
444 	const struct rtw89_fw_hdr_section_v1 *section;
445 	const u8 *fw_end = fw + len;
446 	const u8 *bin;
447 	u32 base_hdr_len;
448 	u32 mssc_len;
449 	int ret;
450 	u32 i;
451 
452 	info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_SEC_NUM);
453 	info->dsp_checksum = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_DSP_CHKSUM);
454 	base_hdr_len = struct_size(fw_hdr, sections, info->section_num);
455 	info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_DYN_HDR);
456 	info->idmem_share_mode = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_IDMEM_SHARE_MODE);
457 
458 	if (info->dynamic_hdr_en) {
459 		info->hdr_len = le32_get_bits(fw_hdr->w5, FW_HDR_V1_W5_HDR_SIZE);
460 		info->dynamic_hdr_len = info->hdr_len - base_hdr_len;
461 		fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len);
462 		if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) {
463 			rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n");
464 			return -EINVAL;
465 		}
466 	} else {
467 		info->hdr_len = base_hdr_len;
468 		info->dynamic_hdr_len = 0;
469 	}
470 
471 	bin = fw + info->hdr_len;
472 
473 	/* jump to section header */
474 	section_info = info->section_info;
475 	for (i = 0; i < info->section_num; i++) {
476 		section = &fw_hdr->sections[i];
477 
478 		section_info->type =
479 			le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SECTIONTYPE);
480 		section_info->len =
481 			le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SEC_SIZE);
482 		if (le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_CHECKSUM))
483 			section_info->len += FWDL_SECTION_CHKSUM_LEN;
484 		section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_REDL);
485 		section_info->dladdr =
486 			le32_get_bits(section->w0, FWSECTION_HDR_V1_W0_DL_ADDR);
487 		section_info->addr = bin;
488 
489 		if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
490 			section_info->mssc =
491 				le32_get_bits(section->w2, FWSECTION_HDR_V1_W2_MSSC);
492 
493 			ret = __parse_security_section(rtwdev, info, section_info,
494 						       bin, &mssc_len);
495 			if (ret)
496 				return ret;
497 		} else {
498 			section_info->mssc = 0;
499 			mssc_len = 0;
500 		}
501 
502 		rtw89_debug(rtwdev, RTW89_DBG_FW,
503 			    "section[%d] type=%d len=0x%-6x mssc=%d mssc_len=%d addr=%tx\n",
504 			    i, section_info->type, section_info->len,
505 			    section_info->mssc, mssc_len, bin - fw);
506 		rtw89_debug(rtwdev, RTW89_DBG_FW,
507 			    "           ignore=%d key_addr=%p (0x%tx) key_len=%d key_idx=%d\n",
508 			    section_info->ignore, section_info->key_addr,
509 			    section_info->key_addr ?
510 			    section_info->key_addr - section_info->addr : 0,
511 			    section_info->key_len, section_info->key_idx);
512 
513 		bin += section_info->len + mssc_len;
514 		section_info++;
515 	}
516 
517 	if (fw_end != bin) {
518 		rtw89_err(rtwdev, "[ERR]fw bin size\n");
519 		return -EINVAL;
520 	}
521 
522 	if (!info->secure_section_exist)
523 		rtw89_warn(rtwdev, "no firmware secure section\n");
524 
525 	return 0;
526 }
527 
528 static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev,
529 			       const struct rtw89_fw_suit *fw_suit,
530 			       struct rtw89_fw_bin_info *info)
531 {
532 	const u8 *fw = fw_suit->data;
533 	u32 len = fw_suit->size;
534 
535 	if (!fw || !len) {
536 		rtw89_err(rtwdev, "fw type %d isn't recognized\n", fw_suit->type);
537 		return -ENOENT;
538 	}
539 
540 	switch (fw_suit->hdr_ver) {
541 	case 0:
542 		return rtw89_fw_hdr_parser_v0(rtwdev, fw, len, info);
543 	case 1:
544 		return rtw89_fw_hdr_parser_v1(rtwdev, fw, len, info);
545 	default:
546 		return -ENOENT;
547 	}
548 }
549 
550 static
551 const struct rtw89_mfw_hdr *rtw89_mfw_get_hdr_ptr(struct rtw89_dev *rtwdev,
552 						  const struct firmware *firmware)
553 {
554 	const struct rtw89_mfw_hdr *mfw_hdr;
555 
556 	if (sizeof(*mfw_hdr) > firmware->size)
557 		return NULL;
558 
559 	mfw_hdr = (const struct rtw89_mfw_hdr *)&firmware->data[0];
560 
561 	if (mfw_hdr->sig != RTW89_MFW_SIG)
562 		return NULL;
563 
564 	return mfw_hdr;
565 }
566 
567 static int rtw89_mfw_validate_hdr(struct rtw89_dev *rtwdev,
568 				  const struct firmware *firmware,
569 				  const struct rtw89_mfw_hdr *mfw_hdr)
570 {
571 	const void *mfw = firmware->data;
572 	u32 mfw_len = firmware->size;
573 	u8 fw_nr = mfw_hdr->fw_nr;
574 	const void *ptr;
575 
576 	if (fw_nr == 0) {
577 		rtw89_err(rtwdev, "mfw header has no fw entry\n");
578 		return -ENOENT;
579 	}
580 
581 	ptr = &mfw_hdr->info[fw_nr];
582 
583 	if (ptr > mfw + mfw_len) {
584 		rtw89_err(rtwdev, "mfw header out of address\n");
585 		return -EFAULT;
586 	}
587 
588 	return 0;
589 }
590 
591 static
592 int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
593 			struct rtw89_fw_suit *fw_suit, bool nowarn)
594 {
595 	struct rtw89_fw_info *fw_info = &rtwdev->fw;
596 	const struct firmware *firmware = fw_info->req.firmware;
597 	const struct rtw89_mfw_info *mfw_info = NULL, *tmp;
598 	const struct rtw89_mfw_hdr *mfw_hdr;
599 	const u8 *mfw = firmware->data;
600 	u32 mfw_len = firmware->size;
601 	int ret;
602 	int i;
603 
604 	mfw_hdr = rtw89_mfw_get_hdr_ptr(rtwdev, firmware);
605 	if (!mfw_hdr) {
606 		rtw89_debug(rtwdev, RTW89_DBG_FW, "use legacy firmware\n");
607 		/* legacy firmware support normal type only */
608 		/* legacy firmware supports the normal type only */
609 			return -EINVAL;
610 		fw_suit->data = mfw;
611 		fw_suit->size = mfw_len;
612 		return 0;
613 	}
614 
615 	ret = rtw89_mfw_validate_hdr(rtwdev, firmware, mfw_hdr);
616 	if (ret)
617 		return ret;
618 
619 	for (i = 0; i < mfw_hdr->fw_nr; i++) {
620 		tmp = &mfw_hdr->info[i];
621 		if (tmp->type != type)
622 			continue;
623 
624 		if (type == RTW89_FW_LOGFMT) {
625 			mfw_info = tmp;
626 			goto found;
627 		}
628 
629 		/* WiFi firmware entries in the file are not sorted, so walk all of
630 		 * them and take the one whose CV is closest to but not above the chip CV.
631 		 */
632 		if (tmp->cv <= rtwdev->hal.cv && !tmp->mp) {
633 			if (!mfw_info || mfw_info->cv < tmp->cv)
634 				mfw_info = tmp;
635 		}
636 	}
637 
638 	if (mfw_info)
639 		goto found;
640 
641 	if (!nowarn)
642 		rtw89_err(rtwdev, "no suitable firmware found\n");
643 	return -ENOENT;
644 
645 found:
646 	fw_suit->data = mfw + le32_to_cpu(mfw_info->shift);
647 	fw_suit->size = le32_to_cpu(mfw_info->size);
648 
649 	if (fw_suit->data + fw_suit->size > mfw + mfw_len) {
650 		rtw89_err(rtwdev, "fw_suit %d out of address\n", type);
651 		return -EFAULT;
652 	}
653 
654 	return 0;
655 }
656 
657 static u32 rtw89_mfw_get_size(struct rtw89_dev *rtwdev)
658 {
659 	struct rtw89_fw_info *fw_info = &rtwdev->fw;
660 	const struct firmware *firmware = fw_info->req.firmware;
661 	const struct rtw89_mfw_info *mfw_info;
662 	const struct rtw89_mfw_hdr *mfw_hdr;
663 	u32 size;
664 	int ret;
665 
666 	mfw_hdr = rtw89_mfw_get_hdr_ptr(rtwdev, firmware);
667 	if (!mfw_hdr) {
668 		rtw89_warn(rtwdev, "not mfw format\n");
669 		return 0;
670 	}
671 
672 	ret = rtw89_mfw_validate_hdr(rtwdev, firmware, mfw_hdr);
673 	if (ret)
674 		return ret;
675 
676 	mfw_info = &mfw_hdr->info[mfw_hdr->fw_nr - 1];
677 	size = le32_to_cpu(mfw_info->shift) + le32_to_cpu(mfw_info->size);
678 
679 	return size;
680 }
681 
682 static void rtw89_fw_update_ver_v0(struct rtw89_dev *rtwdev,
683 				   struct rtw89_fw_suit *fw_suit,
684 				   const struct rtw89_fw_hdr *hdr)
685 {
686 	fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MAJOR_VERSION);
687 	fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MINOR_VERSION);
688 	fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_W1_SUBVERSION);
689 	fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_W1_SUBINDEX);
690 	fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_W2_COMMITID);
691 	fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_W5_YEAR);
692 	fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_W4_MONTH);
693 	fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_W4_DATE);
694 	fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_W4_HOUR);
695 	fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_W4_MIN);
696 	fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_W7_CMD_VERSERION);
697 }
698 
699 static void rtw89_fw_update_ver_v1(struct rtw89_dev *rtwdev,
700 				   struct rtw89_fw_suit *fw_suit,
701 				   const struct rtw89_fw_hdr_v1 *hdr)
702 {
703 	fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MAJOR_VERSION);
704 	fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MINOR_VERSION);
705 	fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBVERSION);
706 	fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBINDEX);
707 	fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_V1_W2_COMMITID);
708 	fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_V1_W5_YEAR);
709 	fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MONTH);
710 	fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_V1_W4_DATE);
711 	fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_V1_W4_HOUR);
712 	fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MIN);
713 	fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_V1_W3_CMD_VERSERION);
714 }
715 
716 static int rtw89_fw_update_ver(struct rtw89_dev *rtwdev,
717 			       enum rtw89_fw_type type,
718 			       struct rtw89_fw_suit *fw_suit)
719 {
720 	const struct rtw89_fw_hdr *v0 = (const struct rtw89_fw_hdr *)fw_suit->data;
721 	const struct rtw89_fw_hdr_v1 *v1 = (const struct rtw89_fw_hdr_v1 *)fw_suit->data;
722 
723 	if (type == RTW89_FW_LOGFMT)
724 		return 0;
725 
726 	fw_suit->type = type;
727 	fw_suit->hdr_ver = le32_get_bits(v0->w3, FW_HDR_W3_HDR_VER);
728 
729 	switch (fw_suit->hdr_ver) {
730 	case 0:
731 		rtw89_fw_update_ver_v0(rtwdev, fw_suit, v0);
732 		break;
733 	case 1:
734 		rtw89_fw_update_ver_v1(rtwdev, fw_suit, v1);
735 		break;
736 	default:
737 		rtw89_err(rtwdev, "Unknown firmware header version %u\n",
738 			  fw_suit->hdr_ver);
739 		return -ENOENT;
740 	}
741 
742 	rtw89_info(rtwdev,
743 		   "Firmware version %u.%u.%u.%u (%08x), cmd version %u, type %u\n",
744 		   fw_suit->major_ver, fw_suit->minor_ver, fw_suit->sub_ver,
745 		   fw_suit->sub_idex, fw_suit->commitid, fw_suit->cmd_ver, type);
746 
747 	return 0;
748 }
749 
750 static
751 int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
752 			 bool nowarn)
753 {
754 	struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
755 	int ret;
756 
757 	ret = rtw89_mfw_recognize(rtwdev, type, fw_suit, nowarn);
758 	if (ret)
759 		return ret;
760 
761 	return rtw89_fw_update_ver(rtwdev, type, fw_suit);
762 }
763 
764 static
765 int __rtw89_fw_recognize_from_elm(struct rtw89_dev *rtwdev,
766 				  const struct rtw89_fw_element_hdr *elm,
767 				  const union rtw89_fw_element_arg arg)
768 {
769 	enum rtw89_fw_type type = arg.fw_type;
770 	struct rtw89_hal *hal = &rtwdev->hal;
771 	struct rtw89_fw_suit *fw_suit;
772 
773 	/* BB MCU firmware entries are stored in decreasing CV order, so the
774 	 * first entry whose CV is not above the chip CV is the closest match.
775 	 */
776 	if (hal->cv < elm->u.bbmcu.cv)
777 		return 1; /* ignore this element */
778 
779 	fw_suit = rtw89_fw_suit_get(rtwdev, type);
780 	if (fw_suit->data)
781 		return 1; /* ignore this element (a firmware is taken already) */
782 
783 	fw_suit->data = elm->u.bbmcu.contents;
784 	fw_suit->size = le32_to_cpu(elm->size);
785 
786 	return rtw89_fw_update_ver(rtwdev, type, fw_suit);
787 }
788 
789 #define __DEF_FW_FEAT_COND(__cond, __op) \
790 static bool __fw_feat_cond_ ## __cond(u32 suit_ver_code, u32 comp_ver_code) \
791 { \
792 	return suit_ver_code __op comp_ver_code; \
793 }
794 
795 __DEF_FW_FEAT_COND(ge, >=); /* greater or equal */
796 __DEF_FW_FEAT_COND(le, <=); /* less or equal */
797 __DEF_FW_FEAT_COND(lt, <); /* less than */
798 
799 struct __fw_feat_cfg {
800 	enum rtw89_core_chip_id chip_id;
801 	enum rtw89_fw_feature feature;
802 	u32 ver_code;
803 	bool (*cond)(u32 suit_ver_code, u32 comp_ver_code);
804 };
805 
806 #define __CFG_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat) \
807 	{ \
808 		.chip_id = _chip, \
809 		.feature = RTW89_FW_FEATURE_ ## _feat, \
810 		.ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \
811 		.cond = __fw_feat_cond_ ## _cond, \
812 	}
813 
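/* Each entry ties a chip and a firmware version threshold to a feature flag.
 * For example, __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD) expands
 * to
 *   { .chip_id = RTL8852A, .feature = RTW89_FW_FEATURE_SCAN_OFFLOAD,
 *     .ver_code = RTW89_FW_VER_CODE(0, 13, 35, 0), .cond = __fw_feat_cond_ge }
 * so SCAN_OFFLOAD is set once the running firmware version is >= 0.13.35.0.
 */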
814 static const struct __fw_feat_cfg fw_feat_tbl[] = {
815 	__CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, TX_WAKE),
816 	__CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, SCAN_OFFLOAD),
817 	__CFG_FW_FEAT(RTL8851B, ge, 0, 29, 41, 0, CRASH_TRIGGER),
818 	__CFG_FW_FEAT(RTL8852A, le, 0, 13, 29, 0, OLD_HT_RA_FORMAT),
819 	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD),
820 	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE),
821 	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER),
822 	__CFG_FW_FEAT(RTL8852A, lt, 0, 13, 37, 0, NO_WOW_CPU_IO_RX),
823 	__CFG_FW_FEAT(RTL8852A, lt, 0, 13, 38, 0, NO_PACKET_DROP),
824 	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, NO_LPS_PG),
825 	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, TX_WAKE),
826 	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, CRASH_TRIGGER),
827 	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, SCAN_OFFLOAD),
828 	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 7, BEACON_FILTER),
829 	__CFG_FW_FEAT(RTL8852B, lt, 0, 29, 30, 0, NO_WOW_CPU_IO_RX),
830 	__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, NO_LPS_PG),
831 	__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, TX_WAKE),
832 	__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 90, 0, CRASH_TRIGGER),
833 	__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 91, 0, SCAN_OFFLOAD),
834 	__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 110, 0, BEACON_FILTER),
835 	__CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS),
836 	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE),
837 	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD),
838 	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER),
839 	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 56, 10, BEACON_FILTER),
840 	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 80, 0, WOW_REASON_V1),
841 	__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 30, 0, CRASH_TRIGGER),
842 	__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 11, 0, MACID_PAUSE_SLEEP),
843 	__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 35, 0, SCAN_OFFLOAD),
844 	__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 21, 0, SCAN_OFFLOAD_BE_V0),
845 	__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 12, 0, BEACON_FILTER),
846 	__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 22, 0, WOW_REASON_V1),
847 	__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 31, 0, RFK_PRE_NOTIFY_V0),
848 	__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 31, 0, LPS_CH_INFO),
849 	__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 42, 0, RFK_RXDCK_V0),
850 	__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 46, 0, NOTIFY_AP_INFO),
851 	__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 47, 0, CH_INFO_BE_V0),
852 	__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 49, 0, RFK_PRE_NOTIFY_V1),
853 	__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 51, 0, NO_PHYCAP_P1),
854 	__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 64, 0, NO_POWER_DIFFERENCE),
855 	__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 71, 0, BEACON_LOSS_COUNT_V1),
856 };
857 
858 static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw,
859 					 const struct rtw89_chip_info *chip,
860 					 u32 ver_code)
861 {
862 	int i;
863 
864 	for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) {
865 		const struct __fw_feat_cfg *ent = &fw_feat_tbl[i];
866 
867 		if (chip->chip_id != ent->chip_id)
868 			continue;
869 
870 		if (ent->cond(ver_code, ent->ver_code))
871 			RTW89_SET_FW_FEATURE(ent->feature, fw);
872 	}
873 }
874 
875 static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev)
876 {
877 	const struct rtw89_chip_info *chip = rtwdev->chip;
878 	const struct rtw89_fw_suit *fw_suit;
879 	u32 suit_ver_code;
880 
881 	fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL);
882 	suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit);
883 
884 	rtw89_fw_iterate_feature_cfg(&rtwdev->fw, chip, suit_ver_code);
885 }
886 
887 const struct firmware *
888 rtw89_early_fw_feature_recognize(struct device *device,
889 				 const struct rtw89_chip_info *chip,
890 				 struct rtw89_fw_info *early_fw,
891 				 int *used_fw_format)
892 {
893 	const struct firmware *firmware;
894 	char fw_name[64];
895 	int fw_format;
896 	u32 ver_code;
897 	int ret;
898 
899 	for (fw_format = chip->fw_format_max; fw_format >= 0; fw_format--) {
900 		rtw89_fw_get_filename(fw_name, sizeof(fw_name),
901 				      chip->fw_basename, fw_format);
902 
903 		ret = request_firmware(&firmware, fw_name, device);
904 		if (!ret) {
905 			dev_info(device, "loaded firmware %s\n", fw_name);
906 			*used_fw_format = fw_format;
907 			break;
908 		}
909 	}
910 
911 	if (ret) {
912 		dev_err(device, "failed to early request firmware: %d\n", ret);
913 		return NULL;
914 	}
915 
916 	ver_code = rtw89_compat_fw_hdr_ver_code(firmware->data);
917 
918 	if (!ver_code)
919 		goto out;
920 
921 	rtw89_fw_iterate_feature_cfg(early_fw, chip, ver_code);
922 
923 out:
924 	return firmware;
925 }
926 
927 static int rtw89_fw_validate_ver_required(struct rtw89_dev *rtwdev)
928 {
929 	const struct rtw89_chip_variant *variant = rtwdev->variant;
930 	const struct rtw89_fw_suit *fw_suit;
931 	u32 suit_ver_code;
932 
933 	if (!variant)
934 		return 0;
935 
936 	fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL);
937 	suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit);
938 
939 	if (variant->fw_min_ver_code > suit_ver_code) {
940 		rtw89_err(rtwdev, "minimum required firmware version is 0x%x\n",
941 			  variant->fw_min_ver_code);
942 		return -ENOENT;
943 	}
944 
945 	return 0;
946 }
947 
948 int rtw89_fw_recognize(struct rtw89_dev *rtwdev)
949 {
950 	const struct rtw89_chip_info *chip = rtwdev->chip;
951 	int ret;
952 
953 	if (chip->try_ce_fw) {
954 		ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL_CE, true);
955 		if (!ret)
956 			goto normal_done;
957 	}
958 
959 	ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL, false);
960 	if (ret)
961 		return ret;
962 
963 normal_done:
964 	ret = rtw89_fw_validate_ver_required(rtwdev);
965 	if (ret)
966 		return ret;
967 
968 	/* It still works if the wowlan firmware doesn't exist. */
969 	__rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN, false);
970 
971 	/* It still works if the log format file doesn't exist. */
972 	__rtw89_fw_recognize(rtwdev, RTW89_FW_LOGFMT, true);
973 
974 	rtw89_fw_recognize_features(rtwdev);
975 
976 	rtw89_coex_recognize_ver(rtwdev);
977 
978 	return 0;
979 }
980 
981 static
982 int rtw89_build_phy_tbl_from_elm(struct rtw89_dev *rtwdev,
983 				 const struct rtw89_fw_element_hdr *elm,
984 				 const union rtw89_fw_element_arg arg)
985 {
986 	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
987 	struct rtw89_phy_table *tbl;
988 	struct rtw89_reg2_def *regs;
989 	enum rtw89_rf_path rf_path;
990 	u32 n_regs, i;
991 	u8 idx;
992 
993 	tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
994 	if (!tbl)
995 		return -ENOMEM;
996 
997 	switch (le32_to_cpu(elm->id)) {
998 	case RTW89_FW_ELEMENT_ID_BB_REG:
999 		elm_info->bb_tbl = tbl;
1000 		break;
1001 	case RTW89_FW_ELEMENT_ID_BB_GAIN:
1002 		elm_info->bb_gain = tbl;
1003 		break;
1004 	case RTW89_FW_ELEMENT_ID_RADIO_A:
1005 	case RTW89_FW_ELEMENT_ID_RADIO_B:
1006 	case RTW89_FW_ELEMENT_ID_RADIO_C:
1007 	case RTW89_FW_ELEMENT_ID_RADIO_D:
1008 		rf_path = arg.rf_path;
1009 		idx = elm->u.reg2.idx;
1010 
1011 		elm_info->rf_radio[idx] = tbl;
1012 		tbl->rf_path = rf_path;
1013 		tbl->config = rtw89_phy_config_rf_reg_v1;
1014 		break;
1015 	case RTW89_FW_ELEMENT_ID_RF_NCTL:
1016 		elm_info->rf_nctl = tbl;
1017 		break;
1018 	default:
1019 		kfree(tbl);
1020 		return -ENOENT;
1021 	}
1022 
1023 	n_regs = le32_to_cpu(elm->size) / sizeof(tbl->regs[0]);
1024 	regs = kcalloc(n_regs, sizeof(*regs), GFP_KERNEL);
1025 	if (!regs)
1026 		goto out;
1027 
1028 	for (i = 0; i < n_regs; i++) {
1029 		regs[i].addr = le32_to_cpu(elm->u.reg2.regs[i].addr);
1030 		regs[i].data = le32_to_cpu(elm->u.reg2.regs[i].data);
1031 	}
1032 
1033 	tbl->n_regs = n_regs;
1034 	tbl->regs = regs;
1035 
1036 	return 0;
1037 
1038 out:
1039 	kfree(tbl);
1040 	return -ENOMEM;
1041 }
1042 
1043 static
1044 int rtw89_fw_recognize_txpwr_from_elm(struct rtw89_dev *rtwdev,
1045 				      const struct rtw89_fw_element_hdr *elm,
1046 				      const union rtw89_fw_element_arg arg)
1047 {
1048 	const struct __rtw89_fw_txpwr_element *txpwr_elm = &elm->u.txpwr;
1049 	const unsigned long offset = arg.offset;
1050 	struct rtw89_efuse *efuse = &rtwdev->efuse;
1051 	struct rtw89_txpwr_conf *conf;
1052 
1053 	if (!rtwdev->rfe_data) {
1054 		rtwdev->rfe_data = kzalloc(sizeof(*rtwdev->rfe_data), GFP_KERNEL);
1055 		if (!rtwdev->rfe_data)
1056 			return -ENOMEM;
1057 	}
1058 
1059 	conf = (void *)rtwdev->rfe_data + offset;
1060 
1061 	/* if multiple entries match, the last one takes effect */
1062 	if (txpwr_elm->rfe_type == efuse->rfe_type)
1063 		goto setup;
1064 
1065 	/* if no entry matched, accept the default */
1066 	if (txpwr_elm->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE &&
1067 	    (!rtw89_txpwr_conf_valid(conf) ||
1068 	     conf->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE))
1069 		goto setup;
1070 
1071 	rtw89_debug(rtwdev, RTW89_DBG_FW, "skip txpwr element ID %u RFE %u\n",
1072 		    elm->id, txpwr_elm->rfe_type);
1073 	return 0;
1074 
1075 setup:
1076 	rtw89_debug(rtwdev, RTW89_DBG_FW, "take txpwr element ID %u RFE %u\n",
1077 		    elm->id, txpwr_elm->rfe_type);
1078 
1079 	conf->rfe_type = txpwr_elm->rfe_type;
1080 	conf->ent_sz = txpwr_elm->ent_sz;
1081 	conf->num_ents = le32_to_cpu(txpwr_elm->num_ents);
1082 	conf->data = txpwr_elm->content;
1083 	return 0;
1084 }
1085 
1086 static
1087 int rtw89_build_txpwr_trk_tbl_from_elm(struct rtw89_dev *rtwdev,
1088 				       const struct rtw89_fw_element_hdr *elm,
1089 				       const union rtw89_fw_element_arg arg)
1090 {
1091 	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
1092 	const struct rtw89_chip_info *chip = rtwdev->chip;
1093 	u32 needed_bitmap = 0;
1094 	u32 offset = 0;
1095 	int subband;
1096 	u32 bitmap;
1097 	int type;
1098 
1099 	if (chip->support_bands & BIT(NL80211_BAND_6GHZ))
1100 		needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_6GHZ;
1101 	if (chip->support_bands & BIT(NL80211_BAND_5GHZ))
1102 		needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_5GHZ;
1103 	if (chip->support_bands & BIT(NL80211_BAND_2GHZ))
1104 		needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_2GHZ;
1105 
1106 	bitmap = le32_to_cpu(elm->u.txpwr_trk.bitmap);
1107 
1108 	if ((bitmap & needed_bitmap) != needed_bitmap) {
1109 		rtw89_warn(rtwdev, "needed txpwr trk bitmap %08x but %08x\n",
1110 			   needed_bitmap, bitmap);
1111 		return -ENOENT;
1112 	}
1113 
1114 	elm_info->txpwr_trk = kzalloc(sizeof(*elm_info->txpwr_trk), GFP_KERNEL);
1115 	if (!elm_info->txpwr_trk)
1116 		return -ENOMEM;
1117 
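	/* Each set bit selects one tracking-delta table. 6 GHz types cover 4
	 * sub-bands, 5 GHz types 3 and 2 GHz types 1, so the content offset
	 * advances by that many DELTA_SWINGIDX_SIZE entries per table.
	 */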
1118 	for (type = 0; bitmap; type++, bitmap >>= 1) {
1119 		if (!(bitmap & BIT(0)))
1120 			continue;
1121 
1122 		if (type >= __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_START &&
1123 		    type <= __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_MAX)
1124 			subband = 4;
1125 		else if (type >= __RTW89_FW_TXPWR_TRK_TYPE_5GHZ_START &&
1126 			 type <= __RTW89_FW_TXPWR_TRK_TYPE_5GHZ_MAX)
1127 			subband = 3;
1128 		else if (type >= __RTW89_FW_TXPWR_TRK_TYPE_2GHZ_START &&
1129 			 type <= __RTW89_FW_TXPWR_TRK_TYPE_2GHZ_MAX)
1130 			subband = 1;
1131 		else
1132 			break;
1133 
1134 		elm_info->txpwr_trk->delta[type] = &elm->u.txpwr_trk.contents[offset];
1135 
1136 		offset += subband;
1137 		if (offset * DELTA_SWINGIDX_SIZE > le32_to_cpu(elm->size))
1138 			goto err;
1139 	}
1140 
1141 	return 0;
1142 
1143 err:
1144 	rtw89_warn(rtwdev, "unexpected txpwr trk offset %d over size %d\n",
1145 		   offset, le32_to_cpu(elm->size));
1146 	kfree(elm_info->txpwr_trk);
1147 	elm_info->txpwr_trk = NULL;
1148 
1149 	return -EFAULT;
1150 }
1151 
1152 static
1153 int rtw89_build_rfk_log_fmt_from_elm(struct rtw89_dev *rtwdev,
1154 				     const struct rtw89_fw_element_hdr *elm,
1155 				     const union rtw89_fw_element_arg arg)
1156 {
1157 	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
1158 	u8 rfk_id;
1159 
1160 	if (elm_info->rfk_log_fmt)
1161 		goto allocated;
1162 
1163 	elm_info->rfk_log_fmt = kzalloc(sizeof(*elm_info->rfk_log_fmt), GFP_KERNEL);
1164 	if (!elm_info->rfk_log_fmt)
1165 		return 1; /* this is an optional element, so just ignore this */
1166 
1167 allocated:
1168 	rfk_id = elm->u.rfk_log_fmt.rfk_id;
1169 	if (rfk_id >= RTW89_PHY_C2H_RFK_LOG_FUNC_NUM)
1170 		return 1;
1171 
1172 	elm_info->rfk_log_fmt->elm[rfk_id] = elm;
1173 
1174 	return 0;
1175 }
1176 
1177 static bool rtw89_regd_entcpy(struct rtw89_regd *regd, const void *cursor,
1178 			      u8 cursor_size)
1179 {
1180 	/* fill default values if needed for backward compatibility */
1181 	struct rtw89_fw_regd_entry entry = {
1182 		.rule_2ghz = RTW89_NA,
1183 		.rule_5ghz = RTW89_NA,
1184 		.rule_6ghz = RTW89_NA,
1185 		.fmap = cpu_to_le32(0x0),
1186 	};
1187 	u8 valid_size = min_t(u8, sizeof(entry), cursor_size);
1188 	unsigned int i;
1189 	u32 fmap;
1190 
1191 	memcpy(&entry, cursor, valid_size);
1192 	memset(regd, 0, sizeof(*regd));
1193 
1194 	regd->alpha2[0] = entry.alpha2_0;
1195 	regd->alpha2[1] = entry.alpha2_1;
1196 	regd->alpha2[2] = '\0';
1197 
1198 	/* also need to consider forward compatibility */
1199 	regd->txpwr_regd[RTW89_BAND_2G] = entry.rule_2ghz < RTW89_REGD_NUM ?
1200 					  entry.rule_2ghz : RTW89_NA;
1201 	regd->txpwr_regd[RTW89_BAND_5G] = entry.rule_5ghz < RTW89_REGD_NUM ?
1202 					  entry.rule_5ghz : RTW89_NA;
1203 	regd->txpwr_regd[RTW89_BAND_6G] = entry.rule_6ghz < RTW89_REGD_NUM ?
1204 					  entry.rule_6ghz : RTW89_NA;
1205 
1206 	BUILD_BUG_ON(sizeof(fmap) != sizeof(entry.fmap));
1207 	BUILD_BUG_ON(sizeof(fmap) * 8 < NUM_OF_RTW89_REGD_FUNC);
1208 
1209 	fmap = le32_to_cpu(entry.fmap);
1210 	for (i = 0; i < NUM_OF_RTW89_REGD_FUNC; i++) {
1211 		if (fmap & BIT(i))
1212 			set_bit(i, regd->func_bitmap);
1213 	}
1214 
1215 	return true;
1216 }
1217 
1218 #define rtw89_for_each_in_regd_element(regd, element) \
1219 	for (const void *cursor = (element)->content, \
1220 	     *end = (element)->content + \
1221 		    le32_to_cpu((element)->num_ents) * (element)->ent_sz; \
1222 	     cursor < end; cursor += (element)->ent_sz) \
1223 		if (rtw89_regd_entcpy(regd, cursor, (element)->ent_sz))
1224 
1225 static
1226 int rtw89_recognize_regd_from_elm(struct rtw89_dev *rtwdev,
1227 				  const struct rtw89_fw_element_hdr *elm,
1228 				  const union rtw89_fw_element_arg arg)
1229 {
1230 	const struct __rtw89_fw_regd_element *regd_elm = &elm->u.regd;
1231 	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
1232 	u32 num_ents = le32_to_cpu(regd_elm->num_ents);
1233 	struct rtw89_regd_data *p;
1234 	struct rtw89_regd regd;
1235 	u32 i = 0;
1236 
1237 	if (num_ents > RTW89_REGD_MAX_COUNTRY_NUM) {
1238 		rtw89_warn(rtwdev,
1239 			   "regd element ents (%d) are over max num (%d)\n",
1240 			   num_ents, RTW89_REGD_MAX_COUNTRY_NUM);
1241 		rtw89_warn(rtwdev,
1242 			   "regd element ignore and take another/common\n");
1243 		return 1;
1244 	}
1245 
1246 	if (elm_info->regd) {
1247 		rtw89_debug(rtwdev, RTW89_DBG_REGD,
1248 			    "regd element take the latter\n");
1249 		devm_kfree(rtwdev->dev, elm_info->regd);
1250 		elm_info->regd = NULL;
1251 	}
1252 
1253 	p = devm_kzalloc(rtwdev->dev, struct_size(p, map, num_ents), GFP_KERNEL);
1254 	if (!p)
1255 		return -ENOMEM;
1256 
1257 	p->nr = num_ents;
1258 	rtw89_for_each_in_regd_element(&regd, regd_elm)
1259 		p->map[i++] = regd;
1260 
1261 	if (i != num_ents) {
1262 		rtw89_err(rtwdev, "regd element has %d invalid ents\n",
1263 			  num_ents - i);
1264 		devm_kfree(rtwdev->dev, p);
1265 		return -EINVAL;
1266 	}
1267 
1268 	elm_info->regd = p;
1269 	return 0;
1270 }
1271 
1272 static const struct rtw89_fw_element_handler __fw_element_handlers[] = {
1273 	[RTW89_FW_ELEMENT_ID_BBMCU0] = {__rtw89_fw_recognize_from_elm,
1274 					{ .fw_type = RTW89_FW_BBMCU0 }, NULL},
1275 	[RTW89_FW_ELEMENT_ID_BBMCU1] = {__rtw89_fw_recognize_from_elm,
1276 					{ .fw_type = RTW89_FW_BBMCU1 }, NULL},
1277 	[RTW89_FW_ELEMENT_ID_BB_REG] = {rtw89_build_phy_tbl_from_elm, {}, "BB"},
1278 	[RTW89_FW_ELEMENT_ID_BB_GAIN] = {rtw89_build_phy_tbl_from_elm, {}, NULL},
1279 	[RTW89_FW_ELEMENT_ID_RADIO_A] = {rtw89_build_phy_tbl_from_elm,
1280 					 { .rf_path =  RF_PATH_A }, "radio A"},
1281 	[RTW89_FW_ELEMENT_ID_RADIO_B] = {rtw89_build_phy_tbl_from_elm,
1282 					 { .rf_path =  RF_PATH_B }, NULL},
1283 	[RTW89_FW_ELEMENT_ID_RADIO_C] = {rtw89_build_phy_tbl_from_elm,
1284 					 { .rf_path =  RF_PATH_C }, NULL},
1285 	[RTW89_FW_ELEMENT_ID_RADIO_D] = {rtw89_build_phy_tbl_from_elm,
1286 					 { .rf_path =  RF_PATH_D }, NULL},
1287 	[RTW89_FW_ELEMENT_ID_RF_NCTL] = {rtw89_build_phy_tbl_from_elm, {}, "NCTL"},
1288 	[RTW89_FW_ELEMENT_ID_TXPWR_BYRATE] = {
1289 		rtw89_fw_recognize_txpwr_from_elm,
1290 		{ .offset = offsetof(struct rtw89_rfe_data, byrate.conf) }, "TXPWR",
1291 	},
1292 	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_2GHZ] = {
1293 		rtw89_fw_recognize_txpwr_from_elm,
1294 		{ .offset = offsetof(struct rtw89_rfe_data, lmt_2ghz.conf) }, NULL,
1295 	},
1296 	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_5GHZ] = {
1297 		rtw89_fw_recognize_txpwr_from_elm,
1298 		{ .offset = offsetof(struct rtw89_rfe_data, lmt_5ghz.conf) }, NULL,
1299 	},
1300 	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_6GHZ] = {
1301 		rtw89_fw_recognize_txpwr_from_elm,
1302 		{ .offset = offsetof(struct rtw89_rfe_data, lmt_6ghz.conf) }, NULL,
1303 	},
1304 	[RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_2GHZ] = {
1305 		rtw89_fw_recognize_txpwr_from_elm,
1306 		{ .offset = offsetof(struct rtw89_rfe_data, da_lmt_2ghz.conf) }, NULL,
1307 	},
1308 	[RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_5GHZ] = {
1309 		rtw89_fw_recognize_txpwr_from_elm,
1310 		{ .offset = offsetof(struct rtw89_rfe_data, da_lmt_5ghz.conf) }, NULL,
1311 	},
1312 	[RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_6GHZ] = {
1313 		rtw89_fw_recognize_txpwr_from_elm,
1314 		{ .offset = offsetof(struct rtw89_rfe_data, da_lmt_6ghz.conf) }, NULL,
1315 	},
1316 	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_2GHZ] = {
1317 		rtw89_fw_recognize_txpwr_from_elm,
1318 		{ .offset = offsetof(struct rtw89_rfe_data, lmt_ru_2ghz.conf) }, NULL,
1319 	},
1320 	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_5GHZ] = {
1321 		rtw89_fw_recognize_txpwr_from_elm,
1322 		{ .offset = offsetof(struct rtw89_rfe_data, lmt_ru_5ghz.conf) }, NULL,
1323 	},
1324 	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_6GHZ] = {
1325 		rtw89_fw_recognize_txpwr_from_elm,
1326 		{ .offset = offsetof(struct rtw89_rfe_data, lmt_ru_6ghz.conf) }, NULL,
1327 	},
1328 	[RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_2GHZ] = {
1329 		rtw89_fw_recognize_txpwr_from_elm,
1330 		{ .offset = offsetof(struct rtw89_rfe_data, da_lmt_ru_2ghz.conf) }, NULL,
1331 	},
1332 	[RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_5GHZ] = {
1333 		rtw89_fw_recognize_txpwr_from_elm,
1334 		{ .offset = offsetof(struct rtw89_rfe_data, da_lmt_ru_5ghz.conf) }, NULL,
1335 	},
1336 	[RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_6GHZ] = {
1337 		rtw89_fw_recognize_txpwr_from_elm,
1338 		{ .offset = offsetof(struct rtw89_rfe_data, da_lmt_ru_6ghz.conf) }, NULL,
1339 	},
1340 	[RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT] = {
1341 		rtw89_fw_recognize_txpwr_from_elm,
1342 		{ .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt.conf) }, NULL,
1343 	},
1344 	[RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT_RU] = {
1345 		rtw89_fw_recognize_txpwr_from_elm,
1346 		{ .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt_ru.conf) }, NULL,
1347 	},
1348 	[RTW89_FW_ELEMENT_ID_TXPWR_TRK] = {
1349 		rtw89_build_txpwr_trk_tbl_from_elm, {}, "PWR_TRK",
1350 	},
1351 	[RTW89_FW_ELEMENT_ID_RFKLOG_FMT] = {
1352 		rtw89_build_rfk_log_fmt_from_elm, {}, NULL,
1353 	},
1354 	[RTW89_FW_ELEMENT_ID_REGD] = {
1355 		rtw89_recognize_regd_from_elm, {}, "REGD",
1356 	},
1357 };
1358 
1359 int rtw89_fw_recognize_elements(struct rtw89_dev *rtwdev)
1360 {
1361 	struct rtw89_fw_info *fw_info = &rtwdev->fw;
1362 	const struct firmware *firmware = fw_info->req.firmware;
1363 	const struct rtw89_chip_info *chip = rtwdev->chip;
1364 	u32 unrecognized_elements = chip->needed_fw_elms;
1365 	const struct rtw89_fw_element_handler *handler;
1366 	const struct rtw89_fw_element_hdr *hdr;
1367 	u32 elm_size;
1368 	u32 elem_id;
1369 	u32 offset;
1370 	int ret;
1371 
1372 	BUILD_BUG_ON(sizeof(chip->needed_fw_elms) * 8 < RTW89_FW_ELEMENT_ID_NUM);
1373 
1374 	offset = rtw89_mfw_get_size(rtwdev);
1375 	offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN);
1376 	if (offset == 0)
1377 		return -EINVAL;
1378 
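	/* Firmware elements follow the multi-firmware (mfw) container as
	 * RTW89_FW_ELEMENT_ALIGN-aligned records, each led by an { id, size,
	 * version } header; walk them until the end of the file.
	 */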
1379 	while (offset + sizeof(*hdr) < firmware->size) {
1380 		hdr = (const struct rtw89_fw_element_hdr *)(firmware->data + offset);
1381 
1382 		elm_size = le32_to_cpu(hdr->size);
1383 		if (offset + elm_size >= firmware->size) {
1384 			rtw89_warn(rtwdev, "firmware element size exceeds\n");
1385 			break;
1386 		}
1387 
1388 		elem_id = le32_to_cpu(hdr->id);
1389 		if (elem_id >= ARRAY_SIZE(__fw_element_handlers))
1390 			goto next;
1391 
1392 		handler = &__fw_element_handlers[elem_id];
1393 		if (!handler->fn)
1394 			goto next;
1395 
1396 		ret = handler->fn(rtwdev, hdr, handler->arg);
1397 		if (ret == 1) /* ignore this element */
1398 			goto next;
1399 		if (ret)
1400 			return ret;
1401 
1402 		if (handler->name)
1403 			rtw89_info(rtwdev, "Firmware element %s version: %4ph\n",
1404 				   handler->name, hdr->ver);
1405 
1406 		unrecognized_elements &= ~BIT(elem_id);
1407 next:
1408 		offset += sizeof(*hdr) + elm_size;
1409 		offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN);
1410 	}
1411 
1412 	if (unrecognized_elements) {
1413 		rtw89_err(rtwdev, "Firmware elements 0x%08x are unrecognized\n",
1414 			  unrecognized_elements);
1415 		return -ENOENT;
1416 	}
1417 
1418 	return 0;
1419 }
1420 
1421 void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb,
1422 			   u8 type, u8 cat, u8 class, u8 func,
1423 			   bool rack, bool dack, u32 len)
1424 {
1425 	struct fwcmd_hdr *hdr;
1426 
1427 	hdr = (struct fwcmd_hdr *)skb_push(skb, 8);
1428 
1429 	if (!(rtwdev->fw.h2c_seq % 4))
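	/* Force a RECV-ACK request on every fourth H2C sequence number so the
	 * firmware acknowledges command delivery periodically.
	 */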
1430 		rack = true;
1431 	hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
1432 				FIELD_PREP(H2C_HDR_CAT, cat) |
1433 				FIELD_PREP(H2C_HDR_CLASS, class) |
1434 				FIELD_PREP(H2C_HDR_FUNC, func) |
1435 				FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));
1436 
1437 	hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
1438 					   len + H2C_HEADER_LEN) |
1439 				(rack ? H2C_HDR_REC_ACK : 0) |
1440 				(dack ? H2C_HDR_DONE_ACK : 0));
1441 
1442 	rtwdev->fw.h2c_seq++;
1443 }
1444 
1445 static void rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev *rtwdev,
1446 				       struct sk_buff *skb,
1447 				       u8 type, u8 cat, u8 class, u8 func,
1448 				       u32 len)
1449 {
1450 	struct fwcmd_hdr *hdr;
1451 
1452 	hdr = (struct fwcmd_hdr *)skb_push(skb, 8);
1453 
1454 	hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
1455 				FIELD_PREP(H2C_HDR_CAT, cat) |
1456 				FIELD_PREP(H2C_HDR_CLASS, class) |
1457 				FIELD_PREP(H2C_HDR_FUNC, func) |
1458 				FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));
1459 
1460 	hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
1461 					   len + H2C_HEADER_LEN));
1462 }
1463 
1464 static u32 __rtw89_fw_download_tweak_hdr_v0(struct rtw89_dev *rtwdev,
1465 					    struct rtw89_fw_bin_info *info,
1466 					    struct rtw89_fw_hdr *fw_hdr)
1467 {
1468 	struct rtw89_fw_hdr_section_info *section_info;
1469 	struct rtw89_fw_hdr_section *section;
1470 	int i;
1471 
1472 	le32p_replace_bits(&fw_hdr->w7, FWDL_SECTION_PER_PKT_LEN,
1473 			   FW_HDR_W7_PART_SIZE);
1474 
1475 	for (i = 0; i < info->section_num; i++) {
1476 		section_info = &info->section_info[i];
1477 
1478 		if (!section_info->len_override)
1479 			continue;
1480 
1481 		section = &fw_hdr->sections[i];
1482 		le32p_replace_bits(&section->w1, section_info->len_override,
1483 				   FWSECTION_HDR_W1_SEC_SIZE);
1484 	}
1485 
1486 	return 0;
1487 }
1488 
1489 static u32 __rtw89_fw_download_tweak_hdr_v1(struct rtw89_dev *rtwdev,
1490 					    struct rtw89_fw_bin_info *info,
1491 					    struct rtw89_fw_hdr_v1 *fw_hdr)
1492 {
1493 	struct rtw89_fw_hdr_section_info *section_info;
1494 	struct rtw89_fw_hdr_section_v1 *section;
1495 	u8 dst_sec_idx = 0;
1496 	u8 sec_idx;
1497 
1498 	le32p_replace_bits(&fw_hdr->w7, FWDL_SECTION_PER_PKT_LEN,
1499 			   FW_HDR_V1_W7_PART_SIZE);
1500 
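	/* Squeeze out sections marked "ignore" (e.g. MSS key sets meant for
	 * other devices), update the section count accordingly, and report the
	 * number of bytes the caller must trim from the header.
	 */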
1501 	for (sec_idx = 0; sec_idx < info->section_num; sec_idx++) {
1502 		section_info = &info->section_info[sec_idx];
1503 		section = &fw_hdr->sections[sec_idx];
1504 
1505 		if (section_info->ignore)
1506 			continue;
1507 
1508 		if (dst_sec_idx != sec_idx)
1509 			fw_hdr->sections[dst_sec_idx] = *section;
1510 
1511 		dst_sec_idx++;
1512 	}
1513 
1514 	le32p_replace_bits(&fw_hdr->w6, dst_sec_idx, FW_HDR_V1_W6_SEC_NUM);
1515 
1516 	return (info->section_num - dst_sec_idx) * sizeof(*section);
1517 }
1518 
1519 static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev,
1520 				   const struct rtw89_fw_suit *fw_suit,
1521 				   struct rtw89_fw_bin_info *info)
1522 {
1523 	u32 len = info->hdr_len - info->dynamic_hdr_len;
1524 	struct rtw89_fw_hdr_v1 *fw_hdr_v1;
1525 	const u8 *fw = fw_suit->data;
1526 	struct rtw89_fw_hdr *fw_hdr;
1527 	struct sk_buff *skb;
1528 	u32 truncated;
1529 	u32 ret = 0;
1530 
1531 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
1532 	if (!skb) {
1533 		rtw89_err(rtwdev, "failed to alloc skb for fw hdr dl\n");
1534 		return -ENOMEM;
1535 	}
1536 
1537 	skb_put_data(skb, fw, len);
1538 
1539 	switch (fw_suit->hdr_ver) {
1540 	case 0:
1541 		fw_hdr = (struct rtw89_fw_hdr *)skb->data;
1542 		truncated = __rtw89_fw_download_tweak_hdr_v0(rtwdev, info, fw_hdr);
1543 		break;
1544 	case 1:
1545 		fw_hdr_v1 = (struct rtw89_fw_hdr_v1 *)skb->data;
1546 		truncated = __rtw89_fw_download_tweak_hdr_v1(rtwdev, info, fw_hdr_v1);
1547 		break;
1548 	default:
1549 		ret = -EOPNOTSUPP;
1550 		goto fail;
1551 	}
1552 
1553 	if (truncated) {
1554 		len -= truncated;
1555 		skb_trim(skb, len);
1556 	}
1557 
1558 	rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C,
1559 				   H2C_CAT_MAC, H2C_CL_MAC_FWDL,
1560 				   H2C_FUNC_MAC_FWHDR_DL, len);
1561 
1562 	ret = rtw89_h2c_tx(rtwdev, skb, false);
1563 	if (ret) {
1564 		rtw89_err(rtwdev, "failed to send h2c\n");
1565 		goto fail;
1566 	}
1567 
1568 	return 0;
1569 fail:
1570 	dev_kfree_skb_any(skb);
1571 
1572 	return ret;
1573 }
1574 
1575 static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev,
1576 				 const struct rtw89_fw_suit *fw_suit,
1577 				 struct rtw89_fw_bin_info *info)
1578 {
1579 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
1580 	int ret;
1581 
1582 	ret = __rtw89_fw_download_hdr(rtwdev, fw_suit, info);
1583 	if (ret) {
1584 		rtw89_err(rtwdev, "[ERR]FW header download\n");
1585 		return ret;
1586 	}
1587 
1588 	ret = mac->fwdl_check_path_ready(rtwdev, false);
1589 	if (ret) {
1590 		rtw89_err(rtwdev, "[ERR]FWDL path ready\n");
1591 		return ret;
1592 	}
1593 
1594 	rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0);
1595 	rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0);
1596 
1597 	return 0;
1598 }
1599 
1600 static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev,
1601 				    struct rtw89_fw_hdr_section_info *info)
1602 {
1603 	struct sk_buff *skb;
1604 	const u8 *section = info->addr;
1605 	u32 residue_len = info->len;
1606 	bool copy_key = false;
1607 	u32 pkt_len;
1608 	int ret;
1609 
1610 	if (info->ignore)
1611 		return 0;
1612 
1613 	if (info->len_override) {
1614 		if (info->len_override > info->len)
1615 			rtw89_warn(rtwdev, "override length %u larger than original %u\n",
1616 				   info->len_override, info->len);
1617 		else
1618 			residue_len = info->len_override;
1619 	}
1620 
1621 	if (info->key_addr && info->key_len) {
1622 		if (residue_len > FWDL_SECTION_PER_PKT_LEN || info->len < info->key_len)
1623 			rtw89_warn(rtwdev,
1624 				   "ignore to copy key data because of len %d, %d, %d, %d\n",
1625 				   info->len, FWDL_SECTION_PER_PKT_LEN,
1626 				   info->key_len, residue_len);
1627 		else
1628 			copy_key = true;
1629 	}
1630 
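	/* Send the section in FWDL_SECTION_PER_PKT_LEN sized chunks. When a
	 * secure-boot key was selected, the section fits in a single chunk and
	 * the key material overwrites its tail.
	 */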
1631 	while (residue_len) {
1632 		if (residue_len >= FWDL_SECTION_PER_PKT_LEN)
1633 			pkt_len = FWDL_SECTION_PER_PKT_LEN;
1634 		else
1635 			pkt_len = residue_len;
1636 
1637 		skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, pkt_len);
1638 		if (!skb) {
1639 			rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
1640 			return -ENOMEM;
1641 		}
1642 		skb_put_data(skb, section, pkt_len);
1643 
1644 		if (copy_key)
1645 			memcpy(skb->data + pkt_len - info->key_len,
1646 			       info->key_addr, info->key_len);
1647 
1648 		ret = rtw89_h2c_tx(rtwdev, skb, true);
1649 		if (ret) {
1650 			rtw89_err(rtwdev, "failed to send h2c\n");
1651 			goto fail;
1652 		}
1653 
1654 		section += pkt_len;
1655 		residue_len -= pkt_len;
1656 	}
1657 
1658 	return 0;
1659 fail:
1660 	dev_kfree_skb_any(skb);
1661 
1662 	return ret;
1663 }
1664 
1665 static enum rtw89_fwdl_check_type
1666 rtw89_fw_get_fwdl_chk_type_from_suit(struct rtw89_dev *rtwdev,
1667 				     const struct rtw89_fw_suit *fw_suit)
1668 {
1669 	switch (fw_suit->type) {
1670 	case RTW89_FW_BBMCU0:
1671 		return RTW89_FWDL_CHECK_BB0_FWDL_DONE;
1672 	case RTW89_FW_BBMCU1:
1673 		return RTW89_FWDL_CHECK_BB1_FWDL_DONE;
1674 	default:
1675 		return RTW89_FWDL_CHECK_WCPU_FWDL_DONE;
1676 	}
1677 }
1678 
1679 static int rtw89_fw_download_main(struct rtw89_dev *rtwdev,
1680 				  const struct rtw89_fw_suit *fw_suit,
1681 				  struct rtw89_fw_bin_info *info)
1682 {
1683 	struct rtw89_fw_hdr_section_info *section_info = info->section_info;
1684 	const struct rtw89_chip_info *chip = rtwdev->chip;
1685 	enum rtw89_fwdl_check_type chk_type;
1686 	u8 section_num = info->section_num;
1687 	int ret;
1688 
1689 	while (section_num--) {
1690 		ret = __rtw89_fw_download_main(rtwdev, section_info);
1691 		if (ret)
1692 			return ret;
1693 		section_info++;
1694 	}
1695 
1696 	if (chip->chip_gen == RTW89_CHIP_AX)
1697 		return 0;
1698 
1699 	chk_type = rtw89_fw_get_fwdl_chk_type_from_suit(rtwdev, fw_suit);
1700 	ret = rtw89_fw_check_rdy(rtwdev, chk_type);
1701 	if (ret) {
1702 		rtw89_warn(rtwdev, "failed to download firmware type %u\n",
1703 			   fw_suit->type);
1704 		return ret;
1705 	}
1706 
1707 	return 0;
1708 }
1709 
1710 static void rtw89_fw_prog_cnt_dump(struct rtw89_dev *rtwdev)
1711 {
1712 	enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
1713 	u32 addr = R_AX_DBG_PORT_SEL;
1714 	u32 val32;
1715 	u16 index;
1716 
1717 	if (chip_gen == RTW89_CHIP_BE) {
1718 		addr = R_BE_WLCPU_PORT_PC;
1719 		goto dump;
1720 	}
1721 
1722 	rtw89_write32(rtwdev, R_AX_DBG_CTRL,
1723 		      FIELD_PREP(B_AX_DBG_SEL0, FW_PROG_CNTR_DBG_SEL) |
1724 		      FIELD_PREP(B_AX_DBG_SEL1, FW_PROG_CNTR_DBG_SEL));
1725 	rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_SEL_0XC0_MASK, MAC_DBG_SEL);
1726 
1727 dump:
1728 	for (index = 0; index < 15; index++) {
1729 		val32 = rtw89_read32(rtwdev, addr);
1730 		rtw89_err(rtwdev, "[ERR]fw PC = 0x%x\n", val32);
1731 		fsleep(10);
1732 	}
1733 }
1734 
1735 static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev)
1736 {
1737 	u32 val32;
1738 
1739 	val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL);
1740 	rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32);
1741 
1742 	val32 = rtw89_read32(rtwdev, R_AX_BOOT_DBG);
1743 	rtw89_err(rtwdev, "[ERR]fwdl 0x83F0 = 0x%x\n", val32);
1744 
1745 	rtw89_fw_prog_cnt_dump(rtwdev);
1746 }
1747 
1748 static int rtw89_fw_download_suit(struct rtw89_dev *rtwdev,
1749 				  struct rtw89_fw_suit *fw_suit)
1750 {
1751 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
1752 	struct rtw89_fw_bin_info info = {};
1753 	int ret;
1754 
1755 	ret = rtw89_fw_hdr_parser(rtwdev, fw_suit, &info);
1756 	if (ret) {
1757 		rtw89_err(rtwdev, "parse fw header fail\n");
1758 		return ret;
1759 	}
1760 
1761 	rtw89_fwdl_secure_idmem_share_mode(rtwdev, info.idmem_share_mode);
1762 
1763 	if (rtwdev->chip->chip_id == RTL8922A &&
1764 	    (fw_suit->type == RTW89_FW_NORMAL || fw_suit->type == RTW89_FW_WOWLAN))
1765 		rtw89_write32(rtwdev, R_BE_SECURE_BOOT_MALLOC_INFO, 0x20248000);
1766 
1767 	ret = mac->fwdl_check_path_ready(rtwdev, true);
1768 	if (ret) {
1769 		rtw89_err(rtwdev, "[ERR]H2C path not ready\n");
1770 		return ret;
1771 	}
1772 
1773 	ret = rtw89_fw_download_hdr(rtwdev, fw_suit, &info);
1774 	if (ret)
1775 		return ret;
1776 
1777 	ret = rtw89_fw_download_main(rtwdev, fw_suit, &info);
1778 	if (ret)
1779 		return ret;
1780 
1781 	return 0;
1782 }
1783 
1784 static
1785 int __rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
1786 			bool include_bb)
1787 {
1788 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
1789 	struct rtw89_fw_info *fw_info = &rtwdev->fw;
1790 	struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
1791 	u8 bbmcu_nr = rtwdev->chip->bbmcu_nr;
1792 	int ret;
1793 	int i;
1794 
1795 	mac->disable_cpu(rtwdev);
1796 	ret = mac->fwdl_enable_wcpu(rtwdev, 0, true, include_bb);
1797 	if (ret)
1798 		return ret;
1799 
1800 	ret = rtw89_fw_download_suit(rtwdev, fw_suit);
1801 	if (ret)
1802 		goto fwdl_err;
1803 
1804 	for (i = 0; i < bbmcu_nr && include_bb; i++) {
1805 		fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_BBMCU0 + i);
1806 
1807 		ret = rtw89_fw_download_suit(rtwdev, fw_suit);
1808 		if (ret)
1809 			goto fwdl_err;
1810 	}
1811 
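	/* Firmware was just (re)loaded: reset the H2C/C2H sequence
	 * bookkeeping and the RPWM/CPWM sequence numbers.
	 */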
1812 	fw_info->h2c_seq = 0;
1813 	fw_info->rec_seq = 0;
1814 	fw_info->h2c_counter = 0;
1815 	fw_info->c2h_counter = 0;
1816 	rtwdev->mac.rpwm_seq_num = RPWM_SEQ_NUM_MAX;
1817 	rtwdev->mac.cpwm_seq_num = CPWM_SEQ_NUM_MAX;
1818 
1819 	mdelay(5);
1820 
1821 	ret = rtw89_fw_check_rdy(rtwdev, RTW89_FWDL_CHECK_FREERTOS_DONE);
1822 	if (ret) {
1823 		rtw89_warn(rtwdev, "download firmware fail\n");
1824 		goto fwdl_err;
1825 	}
1826 
1827 	return ret;
1828 
1829 fwdl_err:
1830 	rtw89_fw_dl_fail_dump(rtwdev);
1831 	return ret;
1832 }
1833 
1834 int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
1835 		      bool include_bb)
1836 {
1837 	int retry;
1838 	int ret;
1839 
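	/* Retry the whole download sequence up to five times before giving up. */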
1840 	for (retry = 0; retry < 5; retry++) {
1841 		ret = __rtw89_fw_download(rtwdev, type, include_bb);
1842 		if (!ret)
1843 			return 0;
1844 	}
1845 
1846 	return ret;
1847 }
1848 
1849 int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev)
1850 {
1851 	struct rtw89_fw_info *fw = &rtwdev->fw;
1852 
1853 	wait_for_completion(&fw->req.completion);
1854 	if (!fw->req.firmware)
1855 		return -EINVAL;
1856 
1857 	return 0;
1858 }
1859 
1860 static int rtw89_load_firmware_req(struct rtw89_dev *rtwdev,
1861 				   struct rtw89_fw_req_info *req,
1862 				   const char *fw_name, bool nowarn)
1863 {
1864 	int ret;
1865 
1866 	if (req->firmware) {
1867 		rtw89_debug(rtwdev, RTW89_DBG_FW,
1868 			    "full firmware has been early requested\n");
1869 		complete_all(&req->completion);
1870 		return 0;
1871 	}
1872 
1873 	if (nowarn)
1874 		ret = firmware_request_nowarn(&req->firmware, fw_name, rtwdev->dev);
1875 	else
1876 		ret = request_firmware(&req->firmware, fw_name, rtwdev->dev);
1877 
1878 	complete_all(&req->completion);
1879 
1880 	return ret;
1881 }
1882 
1883 void rtw89_load_firmware_work(struct work_struct *work)
1884 {
1885 	struct rtw89_dev *rtwdev =
1886 		container_of(work, struct rtw89_dev, load_firmware_work);
1887 	const struct rtw89_chip_info *chip = rtwdev->chip;
1888 	char fw_name[64];
1889 
1890 	rtw89_fw_get_filename(fw_name, sizeof(fw_name),
1891 			      chip->fw_basename, rtwdev->fw.fw_format);
1892 
1893 	rtw89_load_firmware_req(rtwdev, &rtwdev->fw.req, fw_name, false);
1894 }
1895 
1896 static void rtw89_free_phy_tbl_from_elm(struct rtw89_phy_table *tbl)
1897 {
1898 	if (!tbl)
1899 		return;
1900 
1901 	kfree(tbl->regs);
1902 	kfree(tbl);
1903 }
1904 
1905 static void rtw89_unload_firmware_elements(struct rtw89_dev *rtwdev)
1906 {
1907 	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
1908 	int i;
1909 
1910 	rtw89_free_phy_tbl_from_elm(elm_info->bb_tbl);
1911 	rtw89_free_phy_tbl_from_elm(elm_info->bb_gain);
1912 	for (i = 0; i < ARRAY_SIZE(elm_info->rf_radio); i++)
1913 		rtw89_free_phy_tbl_from_elm(elm_info->rf_radio[i]);
1914 	rtw89_free_phy_tbl_from_elm(elm_info->rf_nctl);
1915 
1916 	kfree(elm_info->txpwr_trk);
1917 	kfree(elm_info->rfk_log_fmt);
1918 }
1919 
1920 void rtw89_unload_firmware(struct rtw89_dev *rtwdev)
1921 {
1922 	struct rtw89_fw_info *fw = &rtwdev->fw;
1923 
1924 	cancel_work_sync(&rtwdev->load_firmware_work);
1925 
1926 	if (fw->req.firmware) {
1927 		release_firmware(fw->req.firmware);
1928 
1929 		/* assign NULL back in case rtw89_free_ieee80211_hw()
1930 		 * tries to release the same one again.
1931 		 */
1932 		fw->req.firmware = NULL;
1933 	}
1934 
1935 	kfree(fw->log.fmts);
1936 	rtw89_unload_firmware_elements(rtwdev);
1937 }
1938 
1939 static u32 rtw89_fw_log_get_fmt_idx(struct rtw89_dev *rtwdev, u32 fmt_id)
1940 {
1941 	struct rtw89_fw_log *fw_log = &rtwdev->fw.log;
1942 	u32 i;
1943 
1944 	if (fmt_id > fw_log->last_fmt_id)
1945 		return 0;
1946 
1947 	for (i = 0; i < fw_log->fmt_count; i++) {
1948 		if (le32_to_cpu(fw_log->fmt_ids[i]) == fmt_id)
1949 			return i;
1950 	}
1951 	return 0;
1952 }
1953 
1954 static int rtw89_fw_log_create_fmts_dict(struct rtw89_dev *rtwdev)
1955 {
1956 	struct rtw89_fw_log *log = &rtwdev->fw.log;
1957 	const struct rtw89_fw_logsuit_hdr *suit_hdr;
1958 	struct rtw89_fw_suit *suit = &log->suit;
1959 	const void *fmts_ptr, *fmts_end_ptr;
1960 	u32 fmt_count;
1961 	int i;
1962 
1963 	suit_hdr = (const struct rtw89_fw_logsuit_hdr *)suit->data;
1964 	fmt_count = le32_to_cpu(suit_hdr->count);
1965 	log->fmt_ids = suit_hdr->ids;
1966 	fmts_ptr = &suit_hdr->ids[fmt_count];
1967 	fmts_end_ptr = suit->data + suit->size;
1968 	log->fmts = kcalloc(fmt_count, sizeof(char *), GFP_KERNEL);
1969 	if (!log->fmts)
1970 		return -ENOMEM;
1971 
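	/* Format strings follow the ID table as consecutive NUL-terminated
	 * strings; memchr_inv() skips the padding NULs between them.
	 */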
1972 	for (i = 0; i < fmt_count; i++) {
1973 		fmts_ptr = memchr_inv(fmts_ptr, 0, fmts_end_ptr - fmts_ptr);
1974 		if (!fmts_ptr)
1975 			break;
1976 
1977 		(*log->fmts)[i] = fmts_ptr;
1978 		log->last_fmt_id = le32_to_cpu(log->fmt_ids[i]);
1979 		log->fmt_count++;
1980 		fmts_ptr += strlen(fmts_ptr);
1981 	}
1982 
1983 	return 0;
1984 }
1985 
1986 int rtw89_fw_log_prepare(struct rtw89_dev *rtwdev)
1987 {
1988 	struct rtw89_fw_log *log = &rtwdev->fw.log;
1989 	struct rtw89_fw_suit *suit = &log->suit;
1990 
1991 	if (!suit || !suit->data) {
1992 		rtw89_debug(rtwdev, RTW89_DBG_FW, "no log format file\n");
1993 		return -EINVAL;
1994 	}
1995 	if (log->fmts)
1996 		return 0;
1997 
1998 	return rtw89_fw_log_create_fmts_dict(rtwdev);
1999 }
2000 
2001 static void rtw89_fw_log_dump_data(struct rtw89_dev *rtwdev,
2002 				   const struct rtw89_fw_c2h_log_fmt *log_fmt,
2003 				   u32 fmt_idx, u8 para_int, bool raw_data)
2004 {
2005 	const char *(*fmts)[] = rtwdev->fw.log.fmts;
2006 	char str_buf[RTW89_C2H_FW_LOG_STR_BUF_SIZE];
2007 	u32 args[RTW89_C2H_FW_LOG_MAX_PARA_NUM] = {0};
2008 	int i;
2009 
2010 	if (log_fmt->argc > RTW89_C2H_FW_LOG_MAX_PARA_NUM) {
2011 		rtw89_warn(rtwdev, "C2H log: unexpected arg count %d\n",
2012 			   log_fmt->argc);
2013 		return;
2014 	}
2015 
2016 	if (para_int)
2017 		for (i = 0; i < log_fmt->argc; i++)
2018 			args[i] = le32_to_cpu(log_fmt->u.argv[i]);
2019 
2020 	if (raw_data) {
2021 		if (para_int)
2022 			snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE,
2023 				 "fw_enc(%d, %d, %d) %*ph", le32_to_cpu(log_fmt->fmt_id),
2024 				 para_int, log_fmt->argc, (int)sizeof(args), args);
2025 		else
2026 			snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE,
2027 				 "fw_enc(%d, %d, %d, %s)", le32_to_cpu(log_fmt->fmt_id),
2028 				 para_int, log_fmt->argc, log_fmt->u.raw);
2029 	} else {
2030 		snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, (*fmts)[fmt_idx],
2031 			 args[0x0], args[0x1], args[0x2], args[0x3], args[0x4],
2032 			 args[0x5], args[0x6], args[0x7], args[0x8], args[0x9],
2033 			 args[0xa], args[0xb], args[0xc], args[0xd], args[0xe],
2034 			 args[0xf]);
2035 	}
2036 
2037 	rtw89_info(rtwdev, "C2H log: %s", str_buf);
2038 }
2039 
2040 void rtw89_fw_log_dump(struct rtw89_dev *rtwdev, u8 *buf, u32 len)
2041 {
2042 	const struct rtw89_fw_c2h_log_fmt *log_fmt;
2043 	u8 para_int;
2044 	u32 fmt_idx;
2045 
2046 	if (len < RTW89_C2H_HEADER_LEN) {
2047 		rtw89_err(rtwdev, "c2h log length is wrong!\n");
2048 		return;
2049 	}
2050 
2051 	buf += RTW89_C2H_HEADER_LEN;
2052 	len -= RTW89_C2H_HEADER_LEN;
2053 	log_fmt = (const struct rtw89_fw_c2h_log_fmt *)buf;
2054 
2055 	if (len < RTW89_C2H_FW_FORMATTED_LOG_MIN_LEN)
2056 		goto plain_log;
2057 
2058 	if (log_fmt->signature != cpu_to_le16(RTW89_C2H_FW_LOG_SIGNATURE))
2059 		goto plain_log;
2060 
2061 	if (!rtwdev->fw.log.fmts)
2062 		return;
2063 
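	/* A formatted entry carries either a raw trailing string or an array
	 * of __le32 arguments (PARA_INT); entries with an unknown format ID
	 * fall back to a raw dump.
	 */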
2064 	para_int = u8_get_bits(log_fmt->feature, RTW89_C2H_FW_LOG_FEATURE_PARA_INT);
2065 	fmt_idx = rtw89_fw_log_get_fmt_idx(rtwdev, le32_to_cpu(log_fmt->fmt_id));
2066 
2067 	if (!para_int && log_fmt->argc != 0 && fmt_idx != 0)
2068 		rtw89_info(rtwdev, "C2H log: %s%s",
2069 			   (*rtwdev->fw.log.fmts)[fmt_idx], log_fmt->u.raw);
2070 	else if (fmt_idx != 0 && para_int)
2071 		rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, false);
2072 	else
2073 		rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, true);
2074 	return;
2075 
2076 plain_log:
2077 	rtw89_info(rtwdev, "C2H log: %.*s", len, buf);
2078 
2079 }
2080 
2081 #define H2C_CAM_LEN 60
2082 int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
2083 		     struct rtw89_sta_link *rtwsta_link, const u8 *scan_mac_addr)
2084 {
2085 	struct sk_buff *skb;
2086 	int ret;
2087 
2088 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CAM_LEN);
2089 	if (!skb) {
2090 		rtw89_err(rtwdev, "failed to alloc skb for h2c cam\n");
2091 		return -ENOMEM;
2092 	}
2093 	skb_put(skb, H2C_CAM_LEN);
2094 	rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif_link, rtwsta_link, scan_mac_addr,
2095 				     skb->data);
2096 	rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif_link, rtwsta_link, skb->data);
2097 
2098 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2099 			      H2C_CAT_MAC,
2100 			      H2C_CL_MAC_ADDR_CAM_UPDATE,
2101 			      H2C_FUNC_MAC_ADDR_CAM_UPD, 0, 1,
2102 			      H2C_CAM_LEN);
2103 
2104 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2105 	if (ret) {
2106 		rtw89_err(rtwdev, "failed to send h2c\n");
2107 		goto fail;
2108 	}
2109 
2110 	return 0;
2111 fail:
2112 	dev_kfree_skb_any(skb);
2113 
2114 	return ret;
2115 }
2116 
2117 int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev,
2118 				 struct rtw89_vif_link *rtwvif_link,
2119 				 struct rtw89_sta_link *rtwsta_link)
2120 {
2121 	struct rtw89_h2c_dctlinfo_ud_v1 *h2c;
2122 	u32 len = sizeof(*h2c);
2123 	struct sk_buff *skb;
2124 	int ret;
2125 
2126 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2127 	if (!skb) {
2128 		rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n");
2129 		return -ENOMEM;
2130 	}
2131 	skb_put(skb, len);
2132 	h2c = (struct rtw89_h2c_dctlinfo_ud_v1 *)skb->data;
2133 
2134 	rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif_link, rtwsta_link, h2c);
2135 
2136 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2137 			      H2C_CAT_MAC,
2138 			      H2C_CL_MAC_FR_EXCHG,
2139 			      H2C_FUNC_MAC_DCTLINFO_UD_V1, 0, 0,
2140 			      len);
2141 
2142 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2143 	if (ret) {
2144 		rtw89_err(rtwdev, "failed to send h2c\n");
2145 		goto fail;
2146 	}
2147 
2148 	return 0;
2149 fail:
2150 	dev_kfree_skb_any(skb);
2151 
2152 	return ret;
2153 }
2154 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1);
2155 
2156 int rtw89_fw_h2c_dctl_sec_cam_v2(struct rtw89_dev *rtwdev,
2157 				 struct rtw89_vif_link *rtwvif_link,
2158 				 struct rtw89_sta_link *rtwsta_link)
2159 {
2160 	struct rtw89_h2c_dctlinfo_ud_v2 *h2c;
2161 	u32 len = sizeof(*h2c);
2162 	struct sk_buff *skb;
2163 	int ret;
2164 
2165 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2166 	if (!skb) {
2167 		rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n");
2168 		return -ENOMEM;
2169 	}
2170 	skb_put(skb, len);
2171 	h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data;
2172 
2173 	rtw89_cam_fill_dctl_sec_cam_info_v2(rtwdev, rtwvif_link, rtwsta_link, h2c);
2174 
2175 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2176 			      H2C_CAT_MAC,
2177 			      H2C_CL_MAC_FR_EXCHG,
2178 			      H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0,
2179 			      len);
2180 
2181 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2182 	if (ret) {
2183 		rtw89_err(rtwdev, "failed to send h2c\n");
2184 		goto fail;
2185 	}
2186 
2187 	return 0;
2188 fail:
2189 	dev_kfree_skb_any(skb);
2190 
2191 	return ret;
2192 }
2193 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v2);
2194 
2195 int rtw89_fw_h2c_default_dmac_tbl_v2(struct rtw89_dev *rtwdev,
2196 				     struct rtw89_vif_link *rtwvif_link,
2197 				     struct rtw89_sta_link *rtwsta_link)
2198 {
2199 	u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
2200 	struct rtw89_h2c_dctlinfo_ud_v2 *h2c;
2201 	u32 len = sizeof(*h2c);
2202 	struct sk_buff *skb;
2203 	int ret;
2204 
2205 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2206 	if (!skb) {
2207 		rtw89_err(rtwdev, "failed to alloc skb for dctl v2\n");
2208 		return -ENOMEM;
2209 	}
2210 	skb_put(skb, len);
2211 	h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data;
2212 
2213 	h2c->c0 = le32_encode_bits(mac_id, DCTLINFO_V2_C0_MACID) |
2214 		  le32_encode_bits(1, DCTLINFO_V2_C0_OP);
2215 
2216 	h2c->m0 = cpu_to_le32(DCTLINFO_V2_W0_ALL);
2217 	h2c->m1 = cpu_to_le32(DCTLINFO_V2_W1_ALL);
2218 	h2c->m2 = cpu_to_le32(DCTLINFO_V2_W2_ALL);
2219 	h2c->m3 = cpu_to_le32(DCTLINFO_V2_W3_ALL);
2220 	h2c->m4 = cpu_to_le32(DCTLINFO_V2_W4_ALL);
2221 	h2c->m5 = cpu_to_le32(DCTLINFO_V2_W5_ALL);
2222 	h2c->m6 = cpu_to_le32(DCTLINFO_V2_W6_ALL);
2223 	h2c->m7 = cpu_to_le32(DCTLINFO_V2_W7_ALL);
2224 	h2c->m8 = cpu_to_le32(DCTLINFO_V2_W8_ALL);
2225 	h2c->m9 = cpu_to_le32(DCTLINFO_V2_W9_ALL);
2226 	h2c->m10 = cpu_to_le32(DCTLINFO_V2_W10_ALL);
2227 	h2c->m11 = cpu_to_le32(DCTLINFO_V2_W11_ALL);
2228 	h2c->m12 = cpu_to_le32(DCTLINFO_V2_W12_ALL);
2229 
2230 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2231 			      H2C_CAT_MAC,
2232 			      H2C_CL_MAC_FR_EXCHG,
2233 			      H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0,
2234 			      len);
2235 
2236 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2237 	if (ret) {
2238 		rtw89_err(rtwdev, "failed to send h2c\n");
2239 		goto fail;
2240 	}
2241 
2242 	return 0;
2243 fail:
2244 	dev_kfree_skb_any(skb);
2245 
2246 	return ret;
2247 }
2248 EXPORT_SYMBOL(rtw89_fw_h2c_default_dmac_tbl_v2);
2249 
2250 int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev,
2251 			struct rtw89_vif_link *rtwvif_link,
2252 			struct rtw89_sta_link *rtwsta_link,
2253 			bool valid, struct ieee80211_ampdu_params *params)
2254 {
2255 	const struct rtw89_chip_info *chip = rtwdev->chip;
2256 	struct rtw89_h2c_ba_cam *h2c;
2257 	u8 macid = rtwsta_link->mac_id;
2258 	u32 len = sizeof(*h2c);
2259 	struct sk_buff *skb;
2260 	u8 entry_idx;
2261 	int ret;
2262 
2263 	ret = valid ?
2264 	      rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta_link, params->tid,
2265 					      &entry_idx) :
2266 	      rtw89_core_release_sta_ba_entry(rtwdev, rtwsta_link, params->tid,
2267 					      &entry_idx);
2268 	if (ret) {
2269 		/* It still works even if we don't have a static BA CAM entry,
2270 		 * because the hardware can create a dynamic one automatically.
2271 		 */
2272 		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
2273 			    "failed to %s entry tid=%d for h2c ba cam\n",
2274 			    valid ? "alloc" : "free", params->tid);
2275 		return 0;
2276 	}
2277 
2278 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2279 	if (!skb) {
2280 		rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n");
2281 		return -ENOMEM;
2282 	}
2283 	skb_put(skb, len);
2284 	h2c = (struct rtw89_h2c_ba_cam *)skb->data;
2285 
2286 	h2c->w0 = le32_encode_bits(macid, RTW89_H2C_BA_CAM_W0_MACID);
2287 	if (chip->bacam_ver == RTW89_BACAM_V0_EXT)
2288 		h2c->w1 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1);
2289 	else
2290 		h2c->w0 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W0_ENTRY_IDX);
2291 	if (!valid)
2292 		goto end;
2293 	h2c->w0 |= le32_encode_bits(valid, RTW89_H2C_BA_CAM_W0_VALID) |
2294 		   le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_W0_TID);
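	/* Use the larger BA bitmap encoding when the reorder buffer exceeds
	 * 64 frames.
	 */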
2295 	if (params->buf_size > 64)
2296 		h2c->w0 |= le32_encode_bits(4, RTW89_H2C_BA_CAM_W0_BMAP_SIZE);
2297 	else
2298 		h2c->w0 |= le32_encode_bits(0, RTW89_H2C_BA_CAM_W0_BMAP_SIZE);
2299 	/* If init req is set, hw will set the ssn */
2300 	h2c->w0 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_INIT_REQ) |
2301 		   le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_W0_SSN);
2302 
2303 	if (chip->bacam_ver == RTW89_BACAM_V0_EXT) {
2304 		h2c->w1 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W1_STD_EN) |
2305 			   le32_encode_bits(rtwvif_link->mac_idx,
2306 					    RTW89_H2C_BA_CAM_W1_BAND);
2307 	}
2308 
2309 end:
2310 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2311 			      H2C_CAT_MAC,
2312 			      H2C_CL_BA_CAM,
2313 			      H2C_FUNC_MAC_BA_CAM, 0, 1,
2314 			      len);
2315 
2316 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2317 	if (ret) {
2318 		rtw89_err(rtwdev, "failed to send h2c\n");
2319 		goto fail;
2320 	}
2321 
2322 	return 0;
2323 fail:
2324 	dev_kfree_skb_any(skb);
2325 
2326 	return ret;
2327 }
2328 EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam);
2329 
2330 static int rtw89_fw_h2c_init_ba_cam_v0_ext(struct rtw89_dev *rtwdev,
2331 					   u8 entry_idx, u8 uid)
2332 {
2333 	struct rtw89_h2c_ba_cam *h2c;
2334 	u32 len = sizeof(*h2c);
2335 	struct sk_buff *skb;
2336 	int ret;
2337 
2338 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2339 	if (!skb) {
2340 		rtw89_err(rtwdev, "failed to alloc skb for dynamic h2c ba cam\n");
2341 		return -ENOMEM;
2342 	}
2343 	skb_put(skb, len);
2344 	h2c = (struct rtw89_h2c_ba_cam *)skb->data;
2345 
2346 	h2c->w0 = le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_VALID);
2347 	h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1) |
2348 		  le32_encode_bits(uid, RTW89_H2C_BA_CAM_W1_UID) |
2349 		  le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_BAND) |
2350 		  le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_STD_EN);
2351 
2352 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2353 			      H2C_CAT_MAC,
2354 			      H2C_CL_BA_CAM,
2355 			      H2C_FUNC_MAC_BA_CAM, 0, 1,
2356 			      len);
2357 
2358 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2359 	if (ret) {
2360 		rtw89_err(rtwdev, "failed to send h2c\n");
2361 		goto fail;
2362 	}
2363 
2364 	return 0;
2365 fail:
2366 	dev_kfree_skb_any(skb);
2367 
2368 	return ret;
2369 }
2370 
2371 void rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(struct rtw89_dev *rtwdev)
2372 {
2373 	const struct rtw89_chip_info *chip = rtwdev->chip;
2374 	u8 entry_idx = chip->bacam_num;
2375 	u8 uid = 0;
2376 	int i;
2377 
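	/* Dynamic BA CAM entries are appended right after the chip's static
	 * ones, each tagged with its own UID.
	 */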
2378 	for (i = 0; i < chip->bacam_dynamic_num; i++) {
2379 		rtw89_fw_h2c_init_ba_cam_v0_ext(rtwdev, entry_idx, uid);
2380 		entry_idx++;
2381 		uid++;
2382 	}
2383 }
2384 
2385 int rtw89_fw_h2c_ba_cam_v1(struct rtw89_dev *rtwdev,
2386 			   struct rtw89_vif_link *rtwvif_link,
2387 			   struct rtw89_sta_link *rtwsta_link,
2388 			   bool valid, struct ieee80211_ampdu_params *params)
2389 {
2390 	const struct rtw89_chip_info *chip = rtwdev->chip;
2391 	struct rtw89_h2c_ba_cam_v1 *h2c;
2392 	u8 macid = rtwsta_link->mac_id;
2393 	u32 len = sizeof(*h2c);
2394 	struct sk_buff *skb;
2395 	u8 entry_idx;
2396 	u8 bmap_size;
2397 	int ret;
2398 
2399 	ret = valid ?
2400 	      rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta_link, params->tid,
2401 					      &entry_idx) :
2402 	      rtw89_core_release_sta_ba_entry(rtwdev, rtwsta_link, params->tid,
2403 					      &entry_idx);
2404 	if (ret) {
2405 		/* It still works even if we don't have a static BA CAM entry,
2406 		 * because the hardware can create a dynamic one automatically.
2407 		 */
2408 		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
2409 			    "failed to %s entry tid=%d for h2c ba cam\n",
2410 			    valid ? "alloc" : "free", params->tid);
2411 		return 0;
2412 	}
2413 
2414 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2415 	if (!skb) {
2416 		rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n");
2417 		return -ENOMEM;
2418 	}
2419 	skb_put(skb, len);
2420 	h2c = (struct rtw89_h2c_ba_cam_v1 *)skb->data;
2421 
2422 	if (params->buf_size > 512)
2423 		bmap_size = 10;
2424 	else if (params->buf_size > 256)
2425 		bmap_size = 8;
2426 	else if (params->buf_size > 64)
2427 		bmap_size = 4;
2428 	else
2429 		bmap_size = 0;
2430 
2431 	h2c->w0 = le32_encode_bits(valid, RTW89_H2C_BA_CAM_V1_W0_VALID) |
2432 		  le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W0_INIT_REQ) |
2433 		  le32_encode_bits(macid, RTW89_H2C_BA_CAM_V1_W0_MACID_MASK) |
2434 		  le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_V1_W0_TID_MASK) |
2435 		  le32_encode_bits(bmap_size, RTW89_H2C_BA_CAM_V1_W0_BMAP_SIZE_MASK) |
2436 		  le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_V1_W0_SSN_MASK);
2437 
2438 	entry_idx += chip->bacam_dynamic_num; /* std entry right after dynamic ones */
2439 	h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_V1_W1_ENTRY_IDX_MASK) |
2440 		  le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W1_STD_ENTRY_EN) |
2441 		  le32_encode_bits(!!rtwvif_link->mac_idx,
2442 				   RTW89_H2C_BA_CAM_V1_W1_BAND_SEL);
2443 
2444 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2445 			      H2C_CAT_MAC,
2446 			      H2C_CL_BA_CAM,
2447 			      H2C_FUNC_MAC_BA_CAM_V1, 0, 1,
2448 			      len);
2449 
2450 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2451 	if (ret) {
2452 		rtw89_err(rtwdev, "failed to send h2c\n");
2453 		goto fail;
2454 	}
2455 
2456 	return 0;
2457 fail:
2458 	dev_kfree_skb_any(skb);
2459 
2460 	return ret;
2461 }
2462 EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam_v1);
2463 
2464 int rtw89_fw_h2c_init_ba_cam_users(struct rtw89_dev *rtwdev, u8 users,
2465 				   u8 offset, u8 mac_idx)
2466 {
2467 	struct rtw89_h2c_ba_cam_init *h2c;
2468 	u32 len = sizeof(*h2c);
2469 	struct sk_buff *skb;
2470 	int ret;
2471 
2472 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2473 	if (!skb) {
2474 		rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam init\n");
2475 		return -ENOMEM;
2476 	}
2477 	skb_put(skb, len);
2478 	h2c = (struct rtw89_h2c_ba_cam_init *)skb->data;
2479 
2480 	h2c->w0 = le32_encode_bits(users, RTW89_H2C_BA_CAM_INIT_USERS_MASK) |
2481 		  le32_encode_bits(offset, RTW89_H2C_BA_CAM_INIT_OFFSET_MASK) |
2482 		  le32_encode_bits(mac_idx, RTW89_H2C_BA_CAM_INIT_BAND_SEL);
2483 
2484 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2485 			      H2C_CAT_MAC,
2486 			      H2C_CL_BA_CAM,
2487 			      H2C_FUNC_MAC_BA_CAM_INIT, 0, 1,
2488 			      len);
2489 
2490 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2491 	if (ret) {
2492 		rtw89_err(rtwdev, "failed to send h2c\n");
2493 		goto fail;
2494 	}
2495 
2496 	return 0;
2497 fail:
2498 	dev_kfree_skb_any(skb);
2499 
2500 	return ret;
2501 }
2502 
2503 #define H2C_LOG_CFG_LEN 12
2504 int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable)
2505 {
2506 	struct sk_buff *skb;
2507 	u32 comp = 0;
2508 	int ret;
2509 
2510 	if (enable)
2511 		comp = BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) |
2512 		       BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) |
2513 		       BIT(RTW89_FW_LOG_COMP_MLO) | BIT(RTW89_FW_LOG_COMP_SCAN);
2514 
2515 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN);
2516 	if (!skb) {
2517 		rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n");
2518 		return -ENOMEM;
2519 	}
2520 
2521 	skb_put(skb, H2C_LOG_CFG_LEN);
2522 	SET_LOG_CFG_LEVEL(skb->data, RTW89_FW_LOG_LEVEL_LOUD);
2523 	SET_LOG_CFG_PATH(skb->data, BIT(RTW89_FW_LOG_LEVEL_C2H));
2524 	SET_LOG_CFG_COMP(skb->data, comp);
2525 	SET_LOG_CFG_COMP_EXT(skb->data, 0);
2526 
2527 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2528 			      H2C_CAT_MAC,
2529 			      H2C_CL_FW_INFO,
2530 			      H2C_FUNC_LOG_CFG, 0, 0,
2531 			      H2C_LOG_CFG_LEN);
2532 
2533 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2534 	if (ret) {
2535 		rtw89_err(rtwdev, "failed to send h2c\n");
2536 		goto fail;
2537 	}
2538 
2539 	return 0;
2540 fail:
2541 	dev_kfree_skb_any(skb);
2542 
2543 	return ret;
2544 }
2545 
2546 static struct sk_buff *rtw89_eapol_get(struct rtw89_dev *rtwdev,
2547 				       struct rtw89_vif_link *rtwvif_link)
2548 {
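	/* LLC/SNAP header, EAPOL ethertype (0x888e) and EAPOL-Key header
	 * template for the group handshake 2/2 reply used by the GTK rekey
	 * offload; key_des_ver is filled in below.
	 */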
2549 	static const u8 gtkbody[] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00, 0x88,
2550 				     0x8E, 0x01, 0x03, 0x00, 0x5F, 0x02, 0x03};
2551 	u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev);
2552 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
2553 	struct rtw89_eapol_2_of_2 *eapol_pkt;
2554 	struct ieee80211_bss_conf *bss_conf;
2555 	struct ieee80211_hdr_3addr *hdr;
2556 	struct sk_buff *skb;
2557 	u8 key_des_ver;
2558 
2559 	if (rtw_wow->ptk_alg == 3)
2560 		key_des_ver = 1;
2561 	else if (rtw_wow->akm == 1 || rtw_wow->akm == 2)
2562 		key_des_ver = 2;
2563 	else if (rtw_wow->akm > 2 && rtw_wow->akm < 7)
2564 		key_des_ver = 3;
2565 	else
2566 		key_des_ver = 0;
2567 
2568 	skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*eapol_pkt));
2569 	if (!skb)
2570 		return NULL;
2571 
2572 	hdr = skb_put_zero(skb, sizeof(*hdr));
2573 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
2574 					 IEEE80211_FCTL_TODS |
2575 					 IEEE80211_FCTL_PROTECTED);
2576 
2577 	rcu_read_lock();
2578 
2579 	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
2580 
2581 	ether_addr_copy(hdr->addr1, bss_conf->bssid);
2582 	ether_addr_copy(hdr->addr2, bss_conf->addr);
2583 	ether_addr_copy(hdr->addr3, bss_conf->bssid);
2584 
2585 	rcu_read_unlock();
2586 
2587 	skb_put_zero(skb, sec_hdr_len);
2588 
2589 	eapol_pkt = skb_put_zero(skb, sizeof(*eapol_pkt));
2590 	memcpy(eapol_pkt->gtkbody, gtkbody, sizeof(gtkbody));
2591 	eapol_pkt->key_des_ver = key_des_ver;
2592 
2593 	return skb;
2594 }
2595 
2596 static struct sk_buff *rtw89_sa_query_get(struct rtw89_dev *rtwdev,
2597 					  struct rtw89_vif_link *rtwvif_link)
2598 {
2599 	u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev);
2600 	struct ieee80211_bss_conf *bss_conf;
2601 	struct ieee80211_hdr_3addr *hdr;
2602 	struct rtw89_sa_query *sa_query;
2603 	struct sk_buff *skb;
2604 
2605 	skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*sa_query));
2606 	if (!skb)
2607 		return NULL;
2608 
2609 	hdr = skb_put_zero(skb, sizeof(*hdr));
2610 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
2611 					 IEEE80211_STYPE_ACTION |
2612 					 IEEE80211_FCTL_PROTECTED);
2613 
2614 	rcu_read_lock();
2615 
2616 	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
2617 
2618 	ether_addr_copy(hdr->addr1, bss_conf->bssid);
2619 	ether_addr_copy(hdr->addr2, bss_conf->addr);
2620 	ether_addr_copy(hdr->addr3, bss_conf->bssid);
2621 
2622 	rcu_read_unlock();
2623 
2624 	skb_put_zero(skb, sec_hdr_len);
2625 
2626 	sa_query = skb_put_zero(skb, sizeof(*sa_query));
2627 	sa_query->category = WLAN_CATEGORY_SA_QUERY;
2628 	sa_query->action = WLAN_ACTION_SA_QUERY_RESPONSE;
2629 
2630 	return skb;
2631 }
2632 
2633 static struct sk_buff *rtw89_arp_response_get(struct rtw89_dev *rtwdev,
2634 					      struct rtw89_vif_link *rtwvif_link)
2635 {
2636 	struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
2637 	u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev);
2638 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
2639 	struct ieee80211_hdr_3addr *hdr;
2640 	struct rtw89_arp_rsp *arp_skb;
2641 	struct arphdr *arp_hdr;
2642 	struct sk_buff *skb;
2643 	__le16 fc;
2644 
2645 	skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*arp_skb));
2646 	if (!skb)
2647 		return NULL;
2648 
2649 	hdr = skb_put_zero(skb, sizeof(*hdr));
2650 
2651 	if (rtw_wow->ptk_alg)
2652 		fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS |
2653 				 IEEE80211_FCTL_PROTECTED);
2654 	else
2655 		fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS);
2656 
2657 	hdr->frame_control = fc;
2658 	ether_addr_copy(hdr->addr1, rtwvif_link->bssid);
2659 	ether_addr_copy(hdr->addr2, rtwvif_link->mac_addr);
2660 	ether_addr_copy(hdr->addr3, rtwvif_link->bssid);
2661 
2662 	skb_put_zero(skb, sec_hdr_len);
2663 
2664 	arp_skb = skb_put_zero(skb, sizeof(*arp_skb));
2665 	memcpy(arp_skb->llc_hdr, rfc1042_header, sizeof(rfc1042_header));
2666 	arp_skb->llc_type = htons(ETH_P_ARP);
2667 
2668 	arp_hdr = &arp_skb->arp_hdr;
2669 	arp_hdr->ar_hrd = htons(ARPHRD_ETHER);
2670 	arp_hdr->ar_pro = htons(ETH_P_IP);
2671 	arp_hdr->ar_hln = ETH_ALEN;
2672 	arp_hdr->ar_pln = 4;
2673 	arp_hdr->ar_op = htons(ARPOP_REPLY);
2674 
2675 	ether_addr_copy(arp_skb->sender_hw, rtwvif_link->mac_addr);
2676 	arp_skb->sender_ip = rtwvif->ip_addr;
2677 
2678 	return skb;
2679 }
2680 
2681 static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev,
2682 					struct rtw89_vif_link *rtwvif_link,
2683 					enum rtw89_fw_pkt_ofld_type type,
2684 					u8 *id)
2685 {
2686 	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
2687 	int link_id = ieee80211_vif_is_mld(vif) ? rtwvif_link->link_id : -1;
2688 	struct rtw89_pktofld_info *info;
2689 	struct sk_buff *skb;
2690 	int ret;
2691 
2692 	info = kzalloc(sizeof(*info), GFP_KERNEL);
2693 	if (!info)
2694 		return -ENOMEM;
2695 
2696 	switch (type) {
2697 	case RTW89_PKT_OFLD_TYPE_PS_POLL:
2698 		skb = ieee80211_pspoll_get(rtwdev->hw, vif);
2699 		break;
2700 	case RTW89_PKT_OFLD_TYPE_PROBE_RSP:
2701 		skb = ieee80211_proberesp_get(rtwdev->hw, vif);
2702 		break;
2703 	case RTW89_PKT_OFLD_TYPE_NULL_DATA:
2704 		skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, false);
2705 		break;
2706 	case RTW89_PKT_OFLD_TYPE_QOS_NULL:
2707 		skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, true);
2708 		break;
2709 	case RTW89_PKT_OFLD_TYPE_EAPOL_KEY:
2710 		skb = rtw89_eapol_get(rtwdev, rtwvif_link);
2711 		break;
2712 	case RTW89_PKT_OFLD_TYPE_SA_QUERY:
2713 		skb = rtw89_sa_query_get(rtwdev, rtwvif_link);
2714 		break;
2715 	case RTW89_PKT_OFLD_TYPE_ARP_RSP:
2716 		skb = rtw89_arp_response_get(rtwdev, rtwvif_link);
2717 		break;
2718 	default:
2719 		goto err;
2720 	}
2721 
2722 	if (!skb)
2723 		goto err;
2724 
2725 	ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb);
2726 	kfree_skb(skb);
2727 
2728 	if (ret)
2729 		goto err;
2730 
2731 	list_add_tail(&info->list, &rtwvif_link->general_pkt_list);
2732 	*id = info->id;
2733 	return 0;
2734 
2735 err:
2736 	kfree(info);
2737 	return -ENOMEM;
2738 }
2739 
2740 void rtw89_fw_release_general_pkt_list_vif(struct rtw89_dev *rtwdev,
2741 					   struct rtw89_vif_link *rtwvif_link,
2742 					   bool notify_fw)
2743 {
2744 	struct list_head *pkt_list = &rtwvif_link->general_pkt_list;
2745 	struct rtw89_pktofld_info *info, *tmp;
2746 
2747 	list_for_each_entry_safe(info, tmp, pkt_list, list) {
2748 		if (notify_fw)
2749 			rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
2750 		else
2751 			rtw89_core_release_bit_map(rtwdev->pkt_offload, info->id);
2752 		list_del(&info->list);
2753 		kfree(info);
2754 	}
2755 }
2756 
2757 void rtw89_fw_release_general_pkt_list(struct rtw89_dev *rtwdev, bool notify_fw)
2758 {
2759 	struct rtw89_vif_link *rtwvif_link;
2760 	struct rtw89_vif *rtwvif;
2761 	unsigned int link_id;
2762 
2763 	rtw89_for_each_rtwvif(rtwdev, rtwvif)
2764 		rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
2765 			rtw89_fw_release_general_pkt_list_vif(rtwdev, rtwvif_link,
2766 							      notify_fw);
2767 }
2768 
2769 #define H2C_GENERAL_PKT_LEN 6
2770 #define H2C_GENERAL_PKT_ID_UND 0xff
2771 int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev,
2772 			     struct rtw89_vif_link *rtwvif_link, u8 macid)
2773 {
2774 	u8 pkt_id_ps_poll = H2C_GENERAL_PKT_ID_UND;
2775 	u8 pkt_id_null = H2C_GENERAL_PKT_ID_UND;
2776 	u8 pkt_id_qos_null = H2C_GENERAL_PKT_ID_UND;
2777 	struct sk_buff *skb;
2778 	int ret;
2779 
2780 	rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
2781 				     RTW89_PKT_OFLD_TYPE_PS_POLL, &pkt_id_ps_poll);
2782 	rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
2783 				     RTW89_PKT_OFLD_TYPE_NULL_DATA, &pkt_id_null);
2784 	rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
2785 				     RTW89_PKT_OFLD_TYPE_QOS_NULL, &pkt_id_qos_null);
2786 
2787 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_GENERAL_PKT_LEN);
2788 	if (!skb) {
2789 		rtw89_err(rtwdev, "failed to alloc skb for h2c general pkt\n");
2790 		return -ENOMEM;
2791 	}
2792 	skb_put(skb, H2C_GENERAL_PKT_LEN);
2793 	SET_GENERAL_PKT_MACID(skb->data, macid);
2794 	SET_GENERAL_PKT_PROBRSP_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
2795 	SET_GENERAL_PKT_PSPOLL_ID(skb->data, pkt_id_ps_poll);
2796 	SET_GENERAL_PKT_NULL_ID(skb->data, pkt_id_null);
2797 	SET_GENERAL_PKT_QOS_NULL_ID(skb->data, pkt_id_qos_null);
2798 	SET_GENERAL_PKT_CTS2SELF_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
2799 
2800 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2801 			      H2C_CAT_MAC,
2802 			      H2C_CL_FW_INFO,
2803 			      H2C_FUNC_MAC_GENERAL_PKT, 0, 1,
2804 			      H2C_GENERAL_PKT_LEN);
2805 
2806 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2807 	if (ret) {
2808 		rtw89_err(rtwdev, "failed to send h2c\n");
2809 		goto fail;
2810 	}
2811 
2812 	return 0;
2813 fail:
2814 	dev_kfree_skb_any(skb);
2815 
2816 	return ret;
2817 }
2818 
2819 #define H2C_LPS_PARM_LEN 8
2820 int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
2821 			  struct rtw89_lps_parm *lps_param)
2822 {
2823 	struct sk_buff *skb;
2824 	int ret;
2825 
2826 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LPS_PARM_LEN);
2827 	if (!skb) {
2828 		rtw89_err(rtwdev, "failed to alloc skb for h2c lps parm\n");
2829 		return -ENOMEM;
2830 	}
2831 	skb_put(skb, H2C_LPS_PARM_LEN);
2832 
2833 	SET_LPS_PARM_MACID(skb->data, lps_param->macid);
2834 	SET_LPS_PARM_PSMODE(skb->data, lps_param->psmode);
2835 	SET_LPS_PARM_LASTRPWM(skb->data, lps_param->lastrpwm);
2836 	SET_LPS_PARM_RLBM(skb->data, 1);
2837 	SET_LPS_PARM_SMARTPS(skb->data, 1);
2838 	SET_LPS_PARM_AWAKEINTERVAL(skb->data, 1);
2839 	SET_LPS_PARM_VOUAPSD(skb->data, 0);
2840 	SET_LPS_PARM_VIUAPSD(skb->data, 0);
2841 	SET_LPS_PARM_BEUAPSD(skb->data, 0);
2842 	SET_LPS_PARM_BKUAPSD(skb->data, 0);
2843 
2844 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2845 			      H2C_CAT_MAC,
2846 			      H2C_CL_MAC_PS,
2847 			      H2C_FUNC_MAC_LPS_PARM, 0, !lps_param->psmode,
2848 			      H2C_LPS_PARM_LEN);
2849 
2850 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2851 	if (ret) {
2852 		rtw89_err(rtwdev, "failed to send h2c\n");
2853 		goto fail;
2854 	}
2855 
2856 	return 0;
2857 fail:
2858 	dev_kfree_skb_any(skb);
2859 
2860 	return ret;
2861 }
2862 
2863 int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
2864 {
2865 	const struct rtw89_chip_info *chip = rtwdev->chip;
2866 	const struct rtw89_chan *chan;
2867 	struct rtw89_vif_link *rtwvif_link;
2868 	struct rtw89_h2c_lps_ch_info *h2c;
2869 	u32 len = sizeof(*h2c);
2870 	unsigned int link_id;
2871 	struct sk_buff *skb;
2872 	bool no_chan = true;
2873 	u8 phy_idx;
2874 	u32 done;
2875 	int ret;
2876 
2877 	if (chip->chip_gen != RTW89_CHIP_BE)
2878 		return 0;
2879 
2880 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2881 	if (!skb) {
2882 		rtw89_err(rtwdev, "failed to alloc skb for h2c lps_ch_info\n");
2883 		return -ENOMEM;
2884 	}
2885 	skb_put(skb, len);
2886 	h2c = (struct rtw89_h2c_lps_ch_info *)skb->data;
2887 
2888 	rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
2889 		phy_idx = rtwvif_link->phy_idx;
2890 		if (phy_idx >= ARRAY_SIZE(h2c->info))
2891 			continue;
2892 
2893 		chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
2894 		no_chan = false;
2895 
2896 		h2c->info[phy_idx].central_ch = chan->channel;
2897 		h2c->info[phy_idx].pri_ch = chan->primary_channel;
2898 		h2c->info[phy_idx].band = chan->band_type;
2899 		h2c->info[phy_idx].bw = chan->band_width;
2900 	}
2901 
2902 	if (no_chan) {
2903 		rtw89_err(rtwdev, "no chan for h2c lps_ch_info\n");
2904 		ret = -ENOENT;
2905 		goto fail;
2906 	}
2907 
2908 	h2c->mlo_dbcc_mode_lps = cpu_to_le32(rtwdev->mlo_dbcc_mode);
2909 
2910 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2911 			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM,
2912 			      H2C_FUNC_FW_LPS_CH_INFO, 0, 0, len);
2913 
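	/* Clear B_CHK_LPS_STAT before sending; it is polled below and gets
	 * set again once the channel info has been consumed.
	 */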
2914 	rtw89_phy_write32_mask(rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT, 0);
2915 	ret = rtw89_h2c_tx(rtwdev, skb, false);
2916 	if (ret) {
2917 		rtw89_err(rtwdev, "failed to send h2c\n");
2918 		goto fail;
2919 	}
2920 
2921 	ret = read_poll_timeout(rtw89_phy_read32_mask, done, done, 50, 5000,
2922 				true, rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT);
2923 	if (ret)
2924 		rtw89_warn(rtwdev, "h2c_lps_ch_info done polling timeout\n");
2925 
2926 	return 0;
2927 fail:
2928 	dev_kfree_skb_any(skb);
2929 
2930 	return ret;
2931 }
2932 
2933 int rtw89_fw_h2c_lps_ml_cmn_info(struct rtw89_dev *rtwdev,
2934 				 struct rtw89_vif *rtwvif)
2935 {
2936 	const struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be;
2937 	struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.cur_pkt_stat;
2938 	static const u8 bcn_bw_ofst[] = {0, 0, 0, 3, 6, 9, 0, 12};
2939 	const struct rtw89_chip_info *chip = rtwdev->chip;
2940 	struct rtw89_efuse *efuse = &rtwdev->efuse;
2941 	struct rtw89_h2c_lps_ml_cmn_info *h2c;
2942 	struct rtw89_vif_link *rtwvif_link;
2943 	const struct rtw89_chan *chan;
2944 	u8 bw_idx = RTW89_BB_BW_20_40;
2945 	u32 len = sizeof(*h2c);
2946 	unsigned int link_id;
2947 	struct sk_buff *skb;
2948 	u8 beacon_bw_ofst;
2949 	u8 gain_band;
2950 	u32 done;
2951 	u8 path;
2952 	int ret;
2953 	int i;
2954 
2955 	if (chip->chip_gen != RTW89_CHIP_BE)
2956 		return 0;
2957 
2958 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2959 	if (!skb) {
2960 		rtw89_err(rtwdev, "failed to alloc skb for h2c lps_ml_cmn_info\n");
2961 		return -ENOMEM;
2962 	}
2963 	skb_put(skb, len);
2964 	h2c = (struct rtw89_h2c_lps_ml_cmn_info *)skb->data;
2965 
2966 	h2c->fmt_id = 0x3;
2967 
2968 	h2c->mlo_dbcc_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);
2969 	h2c->rfe_type = efuse->rfe_type;
2970 
2971 	rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
2972 		path = rtwvif_link->phy_idx == RTW89_PHY_1 ? RF_PATH_B : RF_PATH_A;
2973 		chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
2974 		gain_band = rtw89_subband_to_gain_band_be(chan->subband_type);
2975 
2976 		h2c->central_ch[rtwvif_link->phy_idx] = chan->channel;
2977 		h2c->pri_ch[rtwvif_link->phy_idx] = chan->primary_channel;
2978 		h2c->band[rtwvif_link->phy_idx] = chan->band_type;
2979 		h2c->bw[rtwvif_link->phy_idx] = chan->band_width;
2980 		if (pkt_stat->beacon_rate < RTW89_HW_RATE_OFDM6)
2981 			h2c->bcn_rate_type[rtwvif_link->phy_idx] = 0x1;
2982 		else
2983 			h2c->bcn_rate_type[rtwvif_link->phy_idx] = 0x2;
2984 
2985 		/* Fill BW20 RX gain table for beacon mode */
2986 		for (i = 0; i < TIA_GAIN_NUM; i++) {
2987 			h2c->tia_gain[rtwvif_link->phy_idx][i] =
2988 				cpu_to_le16(gain->tia_gain[gain_band][bw_idx][path][i]);
2989 		}
2990 
2991 		if (rtwvif_link->bcn_bw_idx < ARRAY_SIZE(bcn_bw_ofst)) {
2992 			beacon_bw_ofst = bcn_bw_ofst[rtwvif_link->bcn_bw_idx];
2993 			h2c->dup_bcn_ofst[rtwvif_link->phy_idx] = beacon_bw_ofst;
2994 		}
2995 
2996 		memcpy(h2c->lna_gain[rtwvif_link->phy_idx],
2997 		       gain->lna_gain[gain_band][bw_idx][path],
2998 		       LNA_GAIN_NUM);
2999 		memcpy(h2c->tia_lna_op1db[rtwvif_link->phy_idx],
3000 		       gain->tia_lna_op1db[gain_band][bw_idx][path],
3001 		       LNA_GAIN_NUM + 1);
3002 		memcpy(h2c->lna_op1db[rtwvif_link->phy_idx],
3003 		       gain->lna_op1db[gain_band][bw_idx][path],
3004 		       LNA_GAIN_NUM);
3005 	}
3006 
3007 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3008 			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM,
3009 			      H2C_FUNC_FW_LPS_ML_CMN_INFO, 0, 0, len);
3010 
3011 	rtw89_phy_write32_mask(rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT, 0);
3012 	ret = rtw89_h2c_tx(rtwdev, skb, false);
3013 	if (ret) {
3014 		rtw89_err(rtwdev, "failed to send h2c\n");
3015 		goto fail;
3016 	}
3017 
3018 	ret = read_poll_timeout(rtw89_phy_read32_mask, done, done, 50, 5000,
3019 				true, rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT);
3020 	if (ret)
3021 		rtw89_warn(rtwdev, "h2c_lps_ml_cmn_info done polling timeout\n");
3022 
3023 	return 0;
3024 fail:
3025 	dev_kfree_skb_any(skb);
3026 
3027 	return ret;
3028 }
3029 
3030 #define H2C_P2P_ACT_LEN 20
3031 int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev,
3032 			 struct rtw89_vif_link *rtwvif_link,
3033 			 struct ieee80211_bss_conf *bss_conf,
3034 			 struct ieee80211_p2p_noa_desc *desc,
3035 			 u8 act, u8 noa_id)
3036 {
3037 	bool p2p_type_gc = rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT;
3038 	u8 ctwindow_oppps = bss_conf->p2p_noa_attr.oppps_ctwindow;
3039 	struct sk_buff *skb;
3040 	u8 *cmd;
3041 	int ret;
3042 
3043 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_P2P_ACT_LEN);
3044 	if (!skb) {
3045 		rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n");
3046 		return -ENOMEM;
3047 	}
3048 	skb_put(skb, H2C_P2P_ACT_LEN);
3049 	cmd = skb->data;
3050 
3051 	RTW89_SET_FWCMD_P2P_MACID(cmd, rtwvif_link->mac_id);
3052 	RTW89_SET_FWCMD_P2P_P2PID(cmd, 0);
3053 	RTW89_SET_FWCMD_P2P_NOAID(cmd, noa_id);
3054 	RTW89_SET_FWCMD_P2P_ACT(cmd, act);
3055 	RTW89_SET_FWCMD_P2P_TYPE(cmd, p2p_type_gc);
3056 	RTW89_SET_FWCMD_P2P_ALL_SLEP(cmd, 0);
3057 	if (desc) {
3058 		RTW89_SET_FWCMD_NOA_START_TIME(cmd, desc->start_time);
3059 		RTW89_SET_FWCMD_NOA_INTERVAL(cmd, desc->interval);
3060 		RTW89_SET_FWCMD_NOA_DURATION(cmd, desc->duration);
3061 		RTW89_SET_FWCMD_NOA_COUNT(cmd, desc->count);
3062 		RTW89_SET_FWCMD_NOA_CTWINDOW(cmd, ctwindow_oppps);
3063 	}
3064 
3065 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3066 			      H2C_CAT_MAC, H2C_CL_MAC_PS,
3067 			      H2C_FUNC_P2P_ACT, 0, 0,
3068 			      H2C_P2P_ACT_LEN);
3069 
3070 	ret = rtw89_h2c_tx(rtwdev, skb, false);
3071 	if (ret) {
3072 		rtw89_err(rtwdev, "failed to send h2c\n");
3073 		goto fail;
3074 	}
3075 
3076 	return 0;
3077 fail:
3078 	dev_kfree_skb_any(skb);
3079 
3080 	return ret;
3081 }
3082 
3083 static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev,
3084 				       struct sk_buff *skb)
3085 {
3086 	const struct rtw89_chip_info *chip = rtwdev->chip;
3087 	struct rtw89_hal *hal = &rtwdev->hal;
3088 	u8 ntx_path;
3089 	u8 map_b;
3090 
3091 	if (chip->rf_path_num == 1) {
3092 		ntx_path = RF_A;
3093 		map_b = 0;
3094 	} else {
3095 		ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_AB;
3096 		map_b = ntx_path == RF_AB ? 1 : 0;
3097 	}
3098 
3099 	SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path);
3100 	SET_CMC_TBL_PATH_MAP_A(skb->data, 0);
3101 	SET_CMC_TBL_PATH_MAP_B(skb->data, map_b);
3102 	SET_CMC_TBL_PATH_MAP_C(skb->data, 0);
3103 	SET_CMC_TBL_PATH_MAP_D(skb->data, 0);
3104 }
3105 
3106 #define H2C_CMC_TBL_LEN 68
3107 int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
3108 				  struct rtw89_vif_link *rtwvif_link,
3109 				  struct rtw89_sta_link *rtwsta_link)
3110 {
3111 	const struct rtw89_chip_info *chip = rtwdev->chip;
3112 	u8 macid = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
3113 	struct sk_buff *skb;
3114 	int ret;
3115 
3116 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
3117 	if (!skb) {
3118 		rtw89_err(rtwdev, "failed to alloc skb for h2c cmac tbl\n");
3119 		return -ENOMEM;
3120 	}
3121 	skb_put(skb, H2C_CMC_TBL_LEN);
3122 	SET_CTRL_INFO_MACID(skb->data, macid);
3123 	SET_CTRL_INFO_OPERATION(skb->data, 1);
3124 	if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) {
3125 		SET_CMC_TBL_TXPWR_MODE(skb->data, 0);
3126 		__rtw89_fw_h2c_set_tx_path(rtwdev, skb);
3127 		SET_CMC_TBL_ANTSEL_A(skb->data, 0);
3128 		SET_CMC_TBL_ANTSEL_B(skb->data, 0);
3129 		SET_CMC_TBL_ANTSEL_C(skb->data, 0);
3130 		SET_CMC_TBL_ANTSEL_D(skb->data, 0);
3131 	}
3132 	SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0);
3133 	SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0);
3134 	if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE)
3135 		SET_CMC_TBL_DATA_DCM(skb->data, 0);
3136 
3137 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3138 			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
3139 			      chip->h2c_cctl_func_id, 0, 1,
3140 			      H2C_CMC_TBL_LEN);
3141 
3142 	ret = rtw89_h2c_tx(rtwdev, skb, false);
3143 	if (ret) {
3144 		rtw89_err(rtwdev, "failed to send h2c\n");
3145 		goto fail;
3146 	}
3147 
3148 	return 0;
3149 fail:
3150 	dev_kfree_skb_any(skb);
3151 
3152 	return ret;
3153 }
3154 EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl);
3155 
3156 int rtw89_fw_h2c_default_cmac_tbl_g7(struct rtw89_dev *rtwdev,
3157 				     struct rtw89_vif_link *rtwvif_link,
3158 				     struct rtw89_sta_link *rtwsta_link)
3159 {
3160 	u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
3161 	struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
3162 	u32 len = sizeof(*h2c);
3163 	struct sk_buff *skb;
3164 	int ret;
3165 
3166 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
3167 	if (!skb) {
3168 		rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n");
3169 		return -ENOMEM;
3170 	}
3171 	skb_put(skb, len);
3172 	h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data;
3173 
3174 	h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) |
3175 		  le32_encode_bits(1, CCTLINFO_G7_C0_OP);
3176 
3177 	h2c->w0 = le32_encode_bits(4, CCTLINFO_G7_W0_DATARATE);
3178 	h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_ALL);
3179 
3180 	h2c->w1 = le32_encode_bits(4, CCTLINFO_G7_W1_DATA_RTY_LOWEST_RATE) |
3181 		  le32_encode_bits(0xa, CCTLINFO_G7_W1_RTSRATE) |
3182 		  le32_encode_bits(4, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE);
3183 	h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_ALL);
3184 
3185 	h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_ALL);
3186 
3187 	h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_ALL);
3188 
3189 	h2c->w4 = le32_encode_bits(0xFFFF, CCTLINFO_G7_W4_ACT_SUBCH_CBW);
3190 	h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_ALL);
3191 
3192 	h2c->w5 = le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) |
3193 		  le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) |
3194 		  le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) |
3195 		  le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) |
3196 		  le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4);
3197 	h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_ALL);
3198 
3199 	h2c->w6 = le32_encode_bits(0xb, CCTLINFO_G7_W6_RESP_REF_RATE);
3200 	h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ALL);
3201 
3202 	h2c->w7 = le32_encode_bits(1, CCTLINFO_G7_W7_NC) |
3203 		  le32_encode_bits(1, CCTLINFO_G7_W7_NR) |
3204 		  le32_encode_bits(1, CCTLINFO_G7_W7_CB) |
3205 		  le32_encode_bits(0x1, CCTLINFO_G7_W7_CSI_PARA_EN) |
3206 		  le32_encode_bits(0xb, CCTLINFO_G7_W7_CSI_FIX_RATE);
3207 	h2c->m7 = cpu_to_le32(CCTLINFO_G7_W7_ALL);
3208 
3209 	h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_ALL);
3210 
3211 	h2c->w14 = le32_encode_bits(0, CCTLINFO_G7_W14_VO_CURR_RATE) |
3212 		   le32_encode_bits(0, CCTLINFO_G7_W14_VI_CURR_RATE) |
3213 		   le32_encode_bits(0, CCTLINFO_G7_W14_BE_CURR_RATE_L);
3214 	h2c->m14 = cpu_to_le32(CCTLINFO_G7_W14_ALL);
3215 
3216 	h2c->w15 = le32_encode_bits(0, CCTLINFO_G7_W15_BE_CURR_RATE_H) |
3217 		   le32_encode_bits(0, CCTLINFO_G7_W15_BK_CURR_RATE) |
3218 		   le32_encode_bits(0, CCTLINFO_G7_W15_MGNT_CURR_RATE);
3219 	h2c->m15 = cpu_to_le32(CCTLINFO_G7_W15_ALL);
3220 
3221 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3222 			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
3223 			      H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1,
3224 			      len);
3225 
3226 	ret = rtw89_h2c_tx(rtwdev, skb, false);
3227 	if (ret) {
3228 		rtw89_err(rtwdev, "failed to send h2c\n");
3229 		goto fail;
3230 	}
3231 
3232 	return 0;
3233 fail:
3234 	dev_kfree_skb_any(skb);
3235 
3236 	return ret;
3237 }
3238 EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl_g7);
3239 
3240 static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev,
3241 				     struct ieee80211_link_sta *link_sta,
3242 				     u8 *pads)
3243 {
3244 	bool ppe_th;
3245 	u8 ppe16, ppe8;
3246 	u8 nss = min(link_sta->rx_nss, rtwdev->hal.tx_nss) - 1;
3247 	u8 ppe_thres_hdr = link_sta->he_cap.ppe_thres[0];
3248 	u8 ru_bitmap;
3249 	u8 n, idx, sh;
3250 	u16 ppe;
3251 	int i;
3252 
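	/* Derive the per-bandwidth nominal packet padding from the HE PPE
	 * Thresholds field: a 7-bit header (NSS count + RU index bitmask)
	 * followed by 3-bit PPET16/PPET8 pairs per NSS per applicable RU.
	 */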
3253 	ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
3254 			   link_sta->he_cap.he_cap_elem.phy_cap_info[6]);
3255 	if (!ppe_th) {
3256 		u8 pad;
3257 
3258 		pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK,
3259 				link_sta->he_cap.he_cap_elem.phy_cap_info[9]);
3260 
3261 		for (i = 0; i < RTW89_PPE_BW_NUM; i++)
3262 			pads[i] = pad;
3263 
3264 		return;
3265 	}
3266 
3267 	ru_bitmap = FIELD_GET(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, ppe_thres_hdr);
3268 	n = hweight8(ru_bitmap);
3269 	n = 7 + (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) * nss;
3270 
3271 	for (i = 0; i < RTW89_PPE_BW_NUM; i++) {
3272 		if (!(ru_bitmap & BIT(i))) {
3273 			pads[i] = 1;
3274 			continue;
3275 		}
3276 
3277 		idx = n >> 3;
3278 		sh = n & 7;
3279 		n += IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2;
3280 
3281 		ppe = le16_to_cpu(*((__le16 *)&link_sta->he_cap.ppe_thres[idx]));
3282 		ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
3283 		sh += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
3284 		ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
3285 
3286 		if (ppe16 != 7 && ppe8 == 7)
3287 			pads[i] = RTW89_PE_DURATION_16;
3288 		else if (ppe8 != 7)
3289 			pads[i] = RTW89_PE_DURATION_8;
3290 		else
3291 			pads[i] = RTW89_PE_DURATION_0;
3292 	}
3293 }
3294 
3295 int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
3296 				struct rtw89_vif_link *rtwvif_link,
3297 				struct rtw89_sta_link *rtwsta_link)
3298 {
3299 	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
3300 	const struct rtw89_chip_info *chip = rtwdev->chip;
3301 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
3302 						       rtwvif_link->chanctx_idx);
3303 	struct ieee80211_link_sta *link_sta;
3304 	struct sk_buff *skb;
3305 	u8 pads[RTW89_PPE_BW_NUM];
3306 	u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
3307 	u16 lowest_rate;
3308 	int ret;
3309 
3310 	memset(pads, 0, sizeof(pads));
3311 
3312 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
3313 	if (!skb) {
3314 		rtw89_err(rtwdev, "failed to alloc skb for h2c cmac tbl\n");
3315 		return -ENOMEM;
3316 	}
3317 
3318 	rcu_read_lock();
3319 
3320 	if (rtwsta_link)
3321 		link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
3322 
3323 	if (rtwsta_link && link_sta->he_cap.has_he)
3324 		__get_sta_he_pkt_padding(rtwdev, link_sta, pads);
3325 
3326 	if (vif->p2p)
3327 		lowest_rate = RTW89_HW_RATE_OFDM6;
3328 	else if (chan->band_type == RTW89_BAND_2G)
3329 		lowest_rate = RTW89_HW_RATE_CCK1;
3330 	else
3331 		lowest_rate = RTW89_HW_RATE_OFDM6;
3332 
3333 	skb_put(skb, H2C_CMC_TBL_LEN);
3334 	SET_CTRL_INFO_MACID(skb->data, mac_id);
3335 	SET_CTRL_INFO_OPERATION(skb->data, 1);
3336 	SET_CMC_TBL_DISRTSFB(skb->data, 1);
3337 	SET_CMC_TBL_DISDATAFB(skb->data, 1);
3338 	SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, lowest_rate);
3339 	SET_CMC_TBL_RTS_TXCNT_LMT_SEL(skb->data, 0);
3340 	SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 0);
3341 	if (vif->type == NL80211_IFTYPE_STATION)
3342 		SET_CMC_TBL_ULDL(skb->data, 1);
3343 	else
3344 		SET_CMC_TBL_ULDL(skb->data, 0);
3345 	SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif_link->port);
3346 	if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD_V1) {
3347 		SET_CMC_TBL_NOMINAL_PKT_PADDING_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
3348 		SET_CMC_TBL_NOMINAL_PKT_PADDING40_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
3349 		SET_CMC_TBL_NOMINAL_PKT_PADDING80_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
3350 		SET_CMC_TBL_NOMINAL_PKT_PADDING160_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_160]);
3351 	} else if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) {
3352 		SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
3353 		SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
3354 		SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
3355 		SET_CMC_TBL_NOMINAL_PKT_PADDING160(skb->data, pads[RTW89_CHANNEL_WIDTH_160]);
3356 	}
3357 	if (rtwsta_link)
3358 		SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data,
3359 						  link_sta->he_cap.has_he);
3360 	if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE)
3361 		SET_CMC_TBL_DATA_DCM(skb->data, 0);
3362 
3363 	rcu_read_unlock();
3364 
3365 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3366 			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
3367 			      chip->h2c_cctl_func_id, 0, 1,
3368 			      H2C_CMC_TBL_LEN);
3369 
3370 	ret = rtw89_h2c_tx(rtwdev, skb, false);
3371 	if (ret) {
3372 		rtw89_err(rtwdev, "failed to send h2c\n");
3373 		goto fail;
3374 	}
3375 
3376 	return 0;
3377 fail:
3378 	dev_kfree_skb_any(skb);
3379 
3380 	return ret;
3381 }
3382 EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl);
3383 
3384 static void __get_sta_eht_pkt_padding(struct rtw89_dev *rtwdev,
3385 				      struct ieee80211_link_sta *link_sta,
3386 				      u8 *pads)
3387 {
3388 	u8 nss = min(link_sta->rx_nss, rtwdev->hal.tx_nss) - 1;
3389 	u16 ppe_thres_hdr;
3390 	u8 ppe16, ppe8;
3391 	u8 n, idx, sh;
3392 	u8 ru_bitmap;
3393 	bool ppe_th;
3394 	u16 ppe;
3395 	int i;
3396 
3397 	ppe_th = !!u8_get_bits(link_sta->eht_cap.eht_cap_elem.phy_cap_info[5],
3398 			       IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT);
3399 	if (!ppe_th) {
3400 		u8 pad;
3401 
3402 		pad = u8_get_bits(link_sta->eht_cap.eht_cap_elem.phy_cap_info[5],
3403 				  IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK);
3404 
3405 		for (i = 0; i < RTW89_PPE_BW_NUM; i++)
3406 			pads[i] = pad;
3407 
3408 		return;
3409 	}
3410 
3411 	ppe_thres_hdr = get_unaligned_le16(link_sta->eht_cap.eht_ppe_thres);
3412 	ru_bitmap = u16_get_bits(ppe_thres_hdr,
3413 				 IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK);
3414 	n = hweight8(ru_bitmap);
3415 	n = IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE +
3416 	    (n * IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2) * nss;
3417 
3418 	for (i = 0; i < RTW89_PPE_BW_NUM; i++) {
3419 		if (!(ru_bitmap & BIT(i))) {
3420 			pads[i] = 1;
3421 			continue;
3422 		}
3423 
3424 		idx = n >> 3;
3425 		sh = n & 7;
3426 		n += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2;
3427 
3428 		ppe = get_unaligned_le16(link_sta->eht_cap.eht_ppe_thres + idx);
3429 		ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
3430 		sh += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE;
3431 		ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
3432 
3433 		if (ppe16 != 7 && ppe8 == 7)
3434 			pads[i] = RTW89_PE_DURATION_16_20;
3435 		else if (ppe8 != 7)
3436 			pads[i] = RTW89_PE_DURATION_8;
3437 		else
3438 			pads[i] = RTW89_PE_DURATION_0;
3439 	}
3440 }
3441 
3442 int rtw89_fw_h2c_assoc_cmac_tbl_g7(struct rtw89_dev *rtwdev,
3443 				   struct rtw89_vif_link *rtwvif_link,
3444 				   struct rtw89_sta_link *rtwsta_link)
3445 {
3446 	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
3447 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
3448 	u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
3449 	struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
3450 	struct ieee80211_bss_conf *bss_conf;
3451 	struct ieee80211_link_sta *link_sta;
3452 	u8 pads[RTW89_PPE_BW_NUM];
3453 	u32 len = sizeof(*h2c);
3454 	struct sk_buff *skb;
3455 	u16 lowest_rate;
3456 	int ret;
3457 
3458 	memset(pads, 0, sizeof(pads));
3459 
3460 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
3461 	if (!skb) {
3462 		rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n");
3463 		return -ENOMEM;
3464 	}
3465 
3466 	rcu_read_lock();
3467 
3468 	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
3469 
3470 	if (rtwsta_link) {
3471 		link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
3472 
3473 		if (link_sta->eht_cap.has_eht)
3474 			__get_sta_eht_pkt_padding(rtwdev, link_sta, pads);
3475 		else if (link_sta->he_cap.has_he)
3476 			__get_sta_he_pkt_padding(rtwdev, link_sta, pads);
3477 	}
3478 
3479 	if (vif->p2p)
3480 		lowest_rate = RTW89_HW_RATE_OFDM6;
3481 	else if (chan->band_type == RTW89_BAND_2G)
3482 		lowest_rate = RTW89_HW_RATE_CCK1;
3483 	else
3484 		lowest_rate = RTW89_HW_RATE_OFDM6;
3485 
3486 	skb_put(skb, len);
3487 	h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data;
3488 
3489 	h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) |
3490 		  le32_encode_bits(1, CCTLINFO_G7_C0_OP);
3491 
3492 	h2c->w0 = le32_encode_bits(1, CCTLINFO_G7_W0_DISRTSFB) |
3493 		  le32_encode_bits(1, CCTLINFO_G7_W0_DISDATAFB);
3494 	h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_DISRTSFB |
3495 			      CCTLINFO_G7_W0_DISDATAFB);
3496 
3497 	h2c->w1 = le32_encode_bits(lowest_rate, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE);
3498 	h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE);
3499 
3500 	h2c->w2 = le32_encode_bits(0, CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL);
3501 	h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL);
3502 
3503 	h2c->w3 = le32_encode_bits(0, CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL);
3504 	h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL);
3505 
3506 	h2c->w4 = le32_encode_bits(rtwvif_link->port, CCTLINFO_G7_W4_MULTI_PORT_ID);
3507 	h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_MULTI_PORT_ID);
3508 
3509 	if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) {
3510 		h2c->w4 |= le32_encode_bits(0, CCTLINFO_G7_W4_DATA_DCM);
3511 		h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_DATA_DCM);
3512 	}
3513 
3514 	if (bss_conf->eht_support) {
3515 		u16 punct = bss_conf->chanreq.oper.punctured;
3516 
3517 		h2c->w4 |= le32_encode_bits(~punct,
3518 					    CCTLINFO_G7_W4_ACT_SUBCH_CBW);
3519 		h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_ACT_SUBCH_CBW);
3520 	}
3521 
3522 	h2c->w5 = le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_20],
3523 				   CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) |
3524 		  le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_40],
3525 				   CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) |
3526 		  le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_80],
3527 				   CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) |
3528 		  le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_160],
3529 				   CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) |
3530 		  le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_320],
3531 				   CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4);
3532 	h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0 |
3533 			      CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1 |
3534 			      CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2 |
3535 			      CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3 |
3536 			      CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4);
3537 
3538 	h2c->w6 = le32_encode_bits(vif->cfg.aid, CCTLINFO_G7_W6_AID12_PAID) |
3539 		  le32_encode_bits(vif->type == NL80211_IFTYPE_STATION ? 1 : 0,
3540 				   CCTLINFO_G7_W6_ULDL);
3541 	h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_AID12_PAID | CCTLINFO_G7_W6_ULDL);
3542 
3543 	if (rtwsta_link) {
3544 		h2c->w8 = le32_encode_bits(link_sta->he_cap.has_he,
3545 					   CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT);
3546 		h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT);
3547 	}
3548 
3549 	rcu_read_unlock();
3550 
3551 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3552 			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
3553 			      H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1,
3554 			      len);
3555 
3556 	ret = rtw89_h2c_tx(rtwdev, skb, false);
3557 	if (ret) {
3558 		rtw89_err(rtwdev, "failed to send h2c\n");
3559 		goto fail;
3560 	}
3561 
3562 	return 0;
3563 fail:
3564 	dev_kfree_skb_any(skb);
3565 
3566 	return ret;
3567 }
3568 EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl_g7);
3569 
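/* Added annotation: for G7 chips, program the BA bitmap selector
 * (CCTLINFO_G7_W3_BA_BMAP) from the smallest A-MPDU aggregation size
 * negotiated across all active TIDs; each agg_num range maps onto a
 * firmware-defined BA bitmap encoding.
 */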
3570 int rtw89_fw_h2c_ampdu_cmac_tbl_g7(struct rtw89_dev *rtwdev,
3571 				   struct rtw89_vif_link *rtwvif_link,
3572 				   struct rtw89_sta_link *rtwsta_link)
3573 {
3574 	struct rtw89_sta *rtwsta = rtwsta_link->rtwsta;
3575 	struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
3576 	u32 len = sizeof(*h2c);
3577 	struct sk_buff *skb;
3578 	u16 agg_num = 0;
3579 	u8 ba_bmap = 0;
3580 	int ret;
3581 	u8 tid;
3582 
3583 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
3584 	if (!skb) {
3585 		rtw89_err(rtwdev, "failed to alloc skb for ampdu cmac g7\n");
3586 		return -ENOMEM;
3587 	}
3588 	skb_put(skb, len);
3589 	h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data;
3590 
3591 	for_each_set_bit(tid, rtwsta->ampdu_map, IEEE80211_NUM_TIDS) {
3592 		if (agg_num == 0)
3593 			agg_num = rtwsta->ampdu_params[tid].agg_num;
3594 		else
3595 			agg_num = min(agg_num, rtwsta->ampdu_params[tid].agg_num);
3596 	}
3597 
3598 	if (agg_num <= 0x20)
3599 		ba_bmap = 3;
3600 	else if (agg_num > 0x20 && agg_num <= 0x40)
3601 		ba_bmap = 0;
3602 	else if (agg_num > 0x40 && agg_num <= 0x80)
3603 		ba_bmap = 1;
3604 	else if (agg_num > 0x80 && agg_num <= 0x100)
3605 		ba_bmap = 2;
3606 	else if (agg_num > 0x100 && agg_num <= 0x200)
3607 		ba_bmap = 4;
3608 	else if (agg_num > 0x200 && agg_num <= 0x400)
3609 		ba_bmap = 5;
3610 
3611 	h2c->c0 = le32_encode_bits(rtwsta_link->mac_id, CCTLINFO_G7_C0_MACID) |
3612 		  le32_encode_bits(1, CCTLINFO_G7_C0_OP);
3613 
3614 	h2c->w3 = le32_encode_bits(ba_bmap, CCTLINFO_G7_W3_BA_BMAP);
3615 	h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_BA_BMAP);
3616 
3617 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3618 			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
3619 			      H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 0,
3620 			      len);
3621 
3622 	ret = rtw89_h2c_tx(rtwdev, skb, false);
3623 	if (ret) {
3624 		rtw89_err(rtwdev, "failed to send h2c\n");
3625 		goto fail;
3626 	}
3627 
3628 	return 0;
3629 fail:
3630 	dev_kfree_skb_any(skb);
3631 
3632 	return ret;
3633 }
3634 EXPORT_SYMBOL(rtw89_fw_h2c_ampdu_cmac_tbl_g7);
3635 
3636 int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
3637 				 struct rtw89_sta_link *rtwsta_link)
3638 {
3639 	const struct rtw89_chip_info *chip = rtwdev->chip;
3640 	struct sk_buff *skb;
3641 	int ret;
3642 
3643 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
3644 	if (!skb) {
3645 		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
3646 		return -ENOMEM;
3647 	}
3648 	skb_put(skb, H2C_CMC_TBL_LEN);
3649 	SET_CTRL_INFO_MACID(skb->data, rtwsta_link->mac_id);
3650 	SET_CTRL_INFO_OPERATION(skb->data, 1);
3651 	if (rtwsta_link->cctl_tx_time) {
3652 		SET_CMC_TBL_AMPDU_TIME_SEL(skb->data, 1);
3653 		SET_CMC_TBL_AMPDU_MAX_TIME(skb->data, rtwsta_link->ampdu_max_time);
3654 	}
3655 	if (rtwsta_link->cctl_tx_retry_limit) {
3656 		SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 1);
3657 		SET_CMC_TBL_DATA_TX_CNT_LMT(skb->data, rtwsta_link->data_tx_cnt_lmt);
3658 	}
3659 
3660 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3661 			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
3662 			      chip->h2c_cctl_func_id, 0, 1,
3663 			      H2C_CMC_TBL_LEN);
3664 
3665 	ret = rtw89_h2c_tx(rtwdev, skb, false);
3666 	if (ret) {
3667 		rtw89_err(rtwdev, "failed to send h2c\n");
3668 		goto fail;
3669 	}
3670 
3671 	return 0;
3672 fail:
3673 	dev_kfree_skb_any(skb);
3674 
3675 	return ret;
3676 }
3677 EXPORT_SYMBOL(rtw89_fw_h2c_txtime_cmac_tbl);
3678 
3679 int rtw89_fw_h2c_txtime_cmac_tbl_g7(struct rtw89_dev *rtwdev,
3680 				    struct rtw89_sta_link *rtwsta_link)
3681 {
3682 	struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
3683 	u32 len = sizeof(*h2c);
3684 	struct sk_buff *skb;
3685 	int ret;
3686 
3687 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
3688 	if (!skb) {
3689 		rtw89_err(rtwdev, "failed to alloc skb for txtime_cmac_g7\n");
3690 		return -ENOMEM;
3691 	}
3692 	skb_put(skb, len);
3693 	h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data;
3694 
3695 	h2c->c0 = le32_encode_bits(rtwsta_link->mac_id, CCTLINFO_G7_C0_MACID) |
3696 		  le32_encode_bits(1, CCTLINFO_G7_C0_OP);
3697 
3698 	if (rtwsta_link->cctl_tx_time) {
3699 		h2c->w3 |= le32_encode_bits(1, CCTLINFO_G7_W3_AMPDU_TIME_SEL);
3700 		h2c->m3 |= cpu_to_le32(CCTLINFO_G7_W3_AMPDU_TIME_SEL);
3701 
3702 		h2c->w2 |= le32_encode_bits(rtwsta_link->ampdu_max_time,
3703 					   CCTLINFO_G7_W2_AMPDU_MAX_TIME);
3704 		h2c->m2 |= cpu_to_le32(CCTLINFO_G7_W2_AMPDU_MAX_TIME);
3705 	}
3706 	if (rtwsta_link->cctl_tx_retry_limit) {
3707 		h2c->w2 |= le32_encode_bits(1, CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL) |
3708 			   le32_encode_bits(rtwsta_link->data_tx_cnt_lmt,
3709 					    CCTLINFO_G7_W2_DATA_TX_CNT_LMT);
3710 		h2c->m2 |= cpu_to_le32(CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL |
3711 				       CCTLINFO_G7_W2_DATA_TX_CNT_LMT);
3712 	}
3713 
3714 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3715 			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
3716 			      H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1,
3717 			      len);
3718 
3719 	ret = rtw89_h2c_tx(rtwdev, skb, false);
3720 	if (ret) {
3721 		rtw89_err(rtwdev, "failed to send h2c\n");
3722 		goto fail;
3723 	}
3724 
3725 	return 0;
3726 fail:
3727 	dev_kfree_skb_any(skb);
3728 
3729 	return ret;
3730 }
3731 EXPORT_SYMBOL(rtw89_fw_h2c_txtime_cmac_tbl_g7);
3732 
3733 int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev,
3734 				 struct rtw89_sta_link *rtwsta_link)
3735 {
3736 	const struct rtw89_chip_info *chip = rtwdev->chip;
3737 	struct sk_buff *skb;
3738 	int ret;
3739 
3740 	if (chip->h2c_cctl_func_id != H2C_FUNC_MAC_CCTLINFO_UD)
3741 		return 0;
3742 
3743 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
3744 	if (!skb) {
3745 		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
3746 		return -ENOMEM;
3747 	}
3748 	skb_put(skb, H2C_CMC_TBL_LEN);
3749 	SET_CTRL_INFO_MACID(skb->data, rtwsta_link->mac_id);
3750 	SET_CTRL_INFO_OPERATION(skb->data, 1);
3751 
3752 	__rtw89_fw_h2c_set_tx_path(rtwdev, skb);
3753 
3754 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3755 			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
3756 			      H2C_FUNC_MAC_CCTLINFO_UD, 0, 1,
3757 			      H2C_CMC_TBL_LEN);
3758 
3759 	ret = rtw89_h2c_tx(rtwdev, skb, false);
3760 	if (ret) {
3761 		rtw89_err(rtwdev, "failed to send h2c\n");
3762 		goto fail;
3763 	}
3764 
3765 	return 0;
3766 fail:
3767 	dev_kfree_skb_any(skb);
3768 
3769 	return ret;
3770 }
3771 
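/* Added annotation: upload a beacon template to firmware
 * (H2C_FUNC_MAC_BCN_UPD). The beacon is fetched from mac80211, P2P NoA
 * attributes are appended when present, and the TIM offset (relative to
 * the frame body, with BIT(7) set) plus port/band/rate parameters are
 * encoded ahead of the raw frame.
 */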
3772 int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
3773 			       struct rtw89_vif_link *rtwvif_link)
3774 {
3775 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
3776 						       rtwvif_link->chanctx_idx);
3777 	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
3778 	struct rtw89_h2c_bcn_upd *h2c;
3779 	struct sk_buff *skb_beacon;
3780 	struct ieee80211_hdr *hdr;
3781 	u32 len = sizeof(*h2c);
3782 	struct sk_buff *skb;
3783 	int bcn_total_len;
3784 	u16 beacon_rate;
3785 	u16 tim_offset;
3786 	void *noa_data;
3787 	u8 noa_len;
3788 	int ret;
3789 
3790 	if (vif->p2p)
3791 		beacon_rate = RTW89_HW_RATE_OFDM6;
3792 	else if (chan->band_type == RTW89_BAND_2G)
3793 		beacon_rate = RTW89_HW_RATE_CCK1;
3794 	else
3795 		beacon_rate = RTW89_HW_RATE_OFDM6;
3796 
3797 	skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset,
3798 					      NULL, 0);
3799 	if (!skb_beacon) {
3800 		rtw89_err(rtwdev, "failed to get beacon skb\n");
3801 		return -ENOMEM;
3802 	}
3803 
3804 	noa_len = rtw89_p2p_noa_fetch(rtwvif_link, &noa_data);
3805 	if (noa_len &&
3806 	    (noa_len <= skb_tailroom(skb_beacon) ||
3807 	     pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) {
3808 		skb_put_data(skb_beacon, noa_data, noa_len);
3809 	}
3810 
3811 	hdr = (struct ieee80211_hdr *)skb_beacon->data;
3812 	tim_offset -= ieee80211_hdrlen(hdr->frame_control);
3813 
3814 	bcn_total_len = len + skb_beacon->len;
3815 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len);
3816 	if (!skb) {
3817 		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
3818 		dev_kfree_skb_any(skb_beacon);
3819 		return -ENOMEM;
3820 	}
3821 	skb_put(skb, len);
3822 	h2c = (struct rtw89_h2c_bcn_upd *)skb->data;
3823 
3824 	h2c->w0 = le32_encode_bits(rtwvif_link->port, RTW89_H2C_BCN_UPD_W0_PORT) |
3825 		  le32_encode_bits(0, RTW89_H2C_BCN_UPD_W0_MBSSID) |
3826 		  le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_BCN_UPD_W0_BAND) |
3827 		  le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_W0_GRP_IE_OFST);
3828 	h2c->w1 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCN_UPD_W1_MACID) |
3829 		  le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_W1_SSN_SEL) |
3830 		  le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_W1_SSN_MODE) |
3831 		  le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_W1_RATE);
3832 
3833 	skb_put_data(skb, skb_beacon->data, skb_beacon->len);
3834 	dev_kfree_skb_any(skb_beacon);
3835 
3836 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3837 			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
3838 			      H2C_FUNC_MAC_BCN_UPD, 0, 1,
3839 			      bcn_total_len);
3840 
3841 	ret = rtw89_h2c_tx(rtwdev, skb, false);
3842 	if (ret) {
3843 		rtw89_err(rtwdev, "failed to send h2c\n");
3844 		dev_kfree_skb_any(skb);
3845 		return ret;
3846 	}
3847 
3848 	return 0;
3849 }
3850 EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon);
3851 
3852 int rtw89_fw_h2c_update_beacon_be(struct rtw89_dev *rtwdev,
3853 				  struct rtw89_vif_link *rtwvif_link)
3854 {
3855 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
3856 	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
3857 	struct rtw89_h2c_bcn_upd_be *h2c;
3858 	struct sk_buff *skb_beacon;
3859 	struct ieee80211_hdr *hdr;
3860 	u32 len = sizeof(*h2c);
3861 	struct sk_buff *skb;
3862 	int bcn_total_len;
3863 	u16 beacon_rate;
3864 	u16 tim_offset;
3865 	void *noa_data;
3866 	u8 noa_len;
3867 	int ret;
3868 
3869 	if (vif->p2p)
3870 		beacon_rate = RTW89_HW_RATE_OFDM6;
3871 	else if (chan->band_type == RTW89_BAND_2G)
3872 		beacon_rate = RTW89_HW_RATE_CCK1;
3873 	else
3874 		beacon_rate = RTW89_HW_RATE_OFDM6;
3875 
3876 	skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset,
3877 					      NULL, 0);
3878 	if (!skb_beacon) {
3879 		rtw89_err(rtwdev, "failed to get beacon skb\n");
3880 		return -ENOMEM;
3881 	}
3882 
3883 	noa_len = rtw89_p2p_noa_fetch(rtwvif_link, &noa_data);
3884 	if (noa_len &&
3885 	    (noa_len <= skb_tailroom(skb_beacon) ||
3886 	     pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) {
3887 		skb_put_data(skb_beacon, noa_data, noa_len);
3888 	}
3889 
3890 	hdr = (struct ieee80211_hdr *)skb_beacon->data;
3891 	tim_offset -= ieee80211_hdrlen(hdr->frame_control);
3892 
3893 	bcn_total_len = len + skb_beacon->len;
3894 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len);
3895 	if (!skb) {
3896 		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
3897 		dev_kfree_skb_any(skb_beacon);
3898 		return -ENOMEM;
3899 	}
3900 	skb_put(skb, len);
3901 	h2c = (struct rtw89_h2c_bcn_upd_be *)skb->data;
3902 
3903 	h2c->w0 = le32_encode_bits(rtwvif_link->port, RTW89_H2C_BCN_UPD_BE_W0_PORT) |
3904 		  le32_encode_bits(0, RTW89_H2C_BCN_UPD_BE_W0_MBSSID) |
3905 		  le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_BCN_UPD_BE_W0_BAND) |
3906 		  le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_BE_W0_GRP_IE_OFST);
3907 	h2c->w1 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCN_UPD_BE_W1_MACID) |
3908 		  le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_BE_W1_SSN_SEL) |
3909 		  le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_BE_W1_SSN_MODE) |
3910 		  le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_BE_W1_RATE);
3911 
3912 	skb_put_data(skb, skb_beacon->data, skb_beacon->len);
3913 	dev_kfree_skb_any(skb_beacon);
3914 
3915 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3916 			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
3917 			      H2C_FUNC_MAC_BCN_UPD_BE, 0, 1,
3918 			      bcn_total_len);
3919 
3920 	ret = rtw89_h2c_tx(rtwdev, skb, false);
3921 	if (ret) {
3922 		rtw89_err(rtwdev, "failed to send h2c\n");
3923 		goto fail;
3924 	}
3925 
3926 	return 0;
3927 
3928 fail:
3929 	dev_kfree_skb_any(skb);
3930 
3931 	return ret;
3932 }
3933 EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon_be);
3934 
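/* Added annotation: report a role update (H2C_FUNC_MAC_FWROLE_MAINTAIN)
 * for a vif or one of its stations. In AP mode a station entry is
 * reported as RTW89_SELF_ROLE_AP_CLIENT; otherwise the vif's own
 * self_role is used.
 */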
3935 int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
3936 			       struct rtw89_vif_link *rtwvif_link,
3937 			       struct rtw89_sta_link *rtwsta_link,
3938 			       enum rtw89_upd_mode upd_mode)
3939 {
3940 	u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
3941 	struct rtw89_h2c_role_maintain *h2c;
3942 	u32 len = sizeof(*h2c);
3943 	struct sk_buff *skb;
3944 	u8 self_role;
3945 	int ret;
3946 
3947 	if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) {
3948 		if (rtwsta_link)
3949 			self_role = RTW89_SELF_ROLE_AP_CLIENT;
3950 		else
3951 			self_role = rtwvif_link->self_role;
3952 	} else {
3953 		self_role = rtwvif_link->self_role;
3954 	}
3955 
3956 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
3957 	if (!skb) {
3958 		rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
3959 		return -ENOMEM;
3960 	}
3961 	skb_put(skb, len);
3962 	h2c = (struct rtw89_h2c_role_maintain *)skb->data;
3963 
3964 	h2c->w0 = le32_encode_bits(mac_id, RTW89_H2C_ROLE_MAINTAIN_W0_MACID) |
3965 		  le32_encode_bits(self_role, RTW89_H2C_ROLE_MAINTAIN_W0_SELF_ROLE) |
3966 		  le32_encode_bits(upd_mode, RTW89_H2C_ROLE_MAINTAIN_W0_UPD_MODE) |
3967 		  le32_encode_bits(rtwvif_link->wifi_role,
3968 				   RTW89_H2C_ROLE_MAINTAIN_W0_WIFI_ROLE) |
3969 		  le32_encode_bits(rtwvif_link->mac_idx,
3970 				   RTW89_H2C_ROLE_MAINTAIN_W0_BAND) |
3971 		  le32_encode_bits(rtwvif_link->port, RTW89_H2C_ROLE_MAINTAIN_W0_PORT);
3972 
3973 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3974 			      H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
3975 			      H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1,
3976 			      len);
3977 
3978 	ret = rtw89_h2c_tx(rtwdev, skb, false);
3979 	if (ret) {
3980 		rtw89_err(rtwdev, "failed to send h2c\n");
3981 		goto fail;
3982 	}
3983 
3984 	return 0;
3985 fail:
3986 	dev_kfree_skb_any(skb);
3987 
3988 	return ret;
3989 }
3990 
3991 static enum rtw89_fw_sta_type
3992 rtw89_fw_get_sta_type(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
3993 		      struct rtw89_sta_link *rtwsta_link)
3994 {
3995 	struct ieee80211_bss_conf *bss_conf;
3996 	struct ieee80211_link_sta *link_sta;
3997 	enum rtw89_fw_sta_type type;
3998 
3999 	rcu_read_lock();
4000 
4001 	if (!rtwsta_link)
4002 		goto by_vif;
4003 
4004 	link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
4005 
4006 	if (link_sta->eht_cap.has_eht)
4007 		type = RTW89_FW_BE_STA;
4008 	else if (link_sta->he_cap.has_he)
4009 		type = RTW89_FW_AX_STA;
4010 	else
4011 		type = RTW89_FW_N_AC_STA;
4012 
4013 	goto out;
4014 
4015 by_vif:
4016 	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
4017 
4018 	if (bss_conf->eht_support)
4019 		type = RTW89_FW_BE_STA;
4020 	else if (bss_conf->he_support)
4021 		type = RTW89_FW_AX_STA;
4022 	else
4023 		type = RTW89_FW_N_AC_STA;
4024 
4025 out:
4026 	rcu_read_unlock();
4027 
4028 	return type;
4029 }
4030 
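/* Added annotation: send join info (H2C_FUNC_MAC_JOININFO) when a link
 * connects or disconnects. RTW89_CHIP_BE generations use the extended v1
 * layout, which additionally carries the firmware STA type, MLD flag,
 * main mac_id and MLO/EMLSR parameters.
 */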
4031 int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
4032 			   struct rtw89_sta_link *rtwsta_link, bool dis_conn)
4033 {
4034 	u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
4035 	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
4036 	bool is_mld = ieee80211_vif_is_mld(vif);
4037 	u8 self_role = rtwvif_link->self_role;
4038 	enum rtw89_fw_sta_type sta_type;
4039 	u8 net_type = rtwvif_link->net_type;
4040 	struct rtw89_h2c_join_v1 *h2c_v1;
4041 	struct rtw89_h2c_join *h2c;
4042 	u32 len = sizeof(*h2c);
4043 	bool format_v1 = false;
4044 	struct sk_buff *skb;
4045 	u8 main_mac_id;
4046 	bool init_ps;
4047 	int ret;
4048 
4049 	if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) {
4050 		len = sizeof(*h2c_v1);
4051 		format_v1 = true;
4052 	}
4053 
4054 	if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta_link) {
4055 		self_role = RTW89_SELF_ROLE_AP_CLIENT;
4056 		net_type = dis_conn ? RTW89_NET_TYPE_NO_LINK : net_type;
4057 	}
4058 
4059 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4060 	if (!skb) {
4061 		rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
4062 		return -ENOMEM;
4063 	}
4064 	skb_put(skb, len);
4065 	h2c = (struct rtw89_h2c_join *)skb->data;
4066 
4067 	h2c->w0 = le32_encode_bits(mac_id, RTW89_H2C_JOININFO_W0_MACID) |
4068 		  le32_encode_bits(dis_conn, RTW89_H2C_JOININFO_W0_OP) |
4069 		  le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_JOININFO_W0_BAND) |
4070 		  le32_encode_bits(rtwvif_link->wmm, RTW89_H2C_JOININFO_W0_WMM) |
4071 		  le32_encode_bits(rtwvif_link->trigger, RTW89_H2C_JOININFO_W0_TGR) |
4072 		  le32_encode_bits(0, RTW89_H2C_JOININFO_W0_ISHESTA) |
4073 		  le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DLBW) |
4074 		  le32_encode_bits(0, RTW89_H2C_JOININFO_W0_TF_MAC_PAD) |
4075 		  le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DL_T_PE) |
4076 		  le32_encode_bits(rtwvif_link->port, RTW89_H2C_JOININFO_W0_PORT_ID) |
4077 		  le32_encode_bits(net_type, RTW89_H2C_JOININFO_W0_NET_TYPE) |
4078 		  le32_encode_bits(rtwvif_link->wifi_role,
4079 				   RTW89_H2C_JOININFO_W0_WIFI_ROLE) |
4080 		  le32_encode_bits(self_role, RTW89_H2C_JOININFO_W0_SELF_ROLE);
4081 
4082 	if (!format_v1)
4083 		goto done;
4084 
4085 	h2c_v1 = (struct rtw89_h2c_join_v1 *)skb->data;
4086 
4087 	sta_type = rtw89_fw_get_sta_type(rtwdev, rtwvif_link, rtwsta_link);
4088 	init_ps = rtwvif_link != rtw89_get_designated_link(rtwvif_link->rtwvif);
4089 
4090 	if (rtwsta_link)
4091 		main_mac_id = rtw89_sta_get_main_macid(rtwsta_link->rtwsta);
4092 	else
4093 		main_mac_id = rtw89_vif_get_main_macid(rtwvif_link->rtwvif);
4094 
4095 	h2c_v1->w1 = le32_encode_bits(sta_type, RTW89_H2C_JOININFO_W1_STA_TYPE) |
4096 		     le32_encode_bits(is_mld, RTW89_H2C_JOININFO_W1_IS_MLD) |
4097 		     le32_encode_bits(main_mac_id, RTW89_H2C_JOININFO_W1_MAIN_MACID) |
4098 		     le32_encode_bits(RTW89_H2C_JOININFO_MLO_MODE_MLSR,
4099 				      RTW89_H2C_JOININFO_W1_MLO_MODE) |
4100 		     le32_encode_bits(0, RTW89_H2C_JOININFO_W1_EMLSR_CAB) |
4101 		     le32_encode_bits(0, RTW89_H2C_JOININFO_W1_NSTR_EN) |
4102 		     le32_encode_bits(init_ps, RTW89_H2C_JOININFO_W1_INIT_PWR_STATE) |
4103 		     le32_encode_bits(IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_256US,
4104 				      RTW89_H2C_JOININFO_W1_EMLSR_PADDING) |
4105 		     le32_encode_bits(IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_256US,
4106 				      RTW89_H2C_JOININFO_W1_EMLSR_TRANS_DELAY) |
4107 		     le32_encode_bits(0, RTW89_H2C_JOININFO_W2_MACID_EXT) |
4108 		     le32_encode_bits(0, RTW89_H2C_JOININFO_W2_MAIN_MACID_EXT);
4109 
4110 	h2c_v1->w2 = 0;
4111 
4112 done:
4113 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4114 			      H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
4115 			      H2C_FUNC_MAC_JOININFO, 0, 1,
4116 			      len);
4117 
4118 	ret = rtw89_h2c_tx(rtwdev, skb, false);
4119 	if (ret) {
4120 		rtw89_err(rtwdev, "failed to send h2c\n");
4121 		goto fail;
4122 	}
4123 
4124 	return 0;
4125 fail:
4126 	dev_kfree_skb_any(skb);
4127 
4128 	return ret;
4129 }
4130 
4131 int rtw89_fw_h2c_notify_dbcc(struct rtw89_dev *rtwdev, bool en)
4132 {
4133 	struct rtw89_h2c_notify_dbcc *h2c;
4134 	u32 len = sizeof(*h2c);
4135 	struct sk_buff *skb;
4136 	int ret;
4137 
4138 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4139 	if (!skb) {
4140 		rtw89_err(rtwdev, "failed to alloc skb for h2c notify dbcc\n");
4141 		return -ENOMEM;
4142 	}
4143 	skb_put(skb, len);
4144 	h2c = (struct rtw89_h2c_notify_dbcc *)skb->data;
4145 
4146 	h2c->w0 = le32_encode_bits(en, RTW89_H2C_NOTIFY_DBCC_EN);
4147 
4148 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4149 			      H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
4150 			      H2C_FUNC_NOTIFY_DBCC, 0, 1,
4151 			      len);
4152 
4153 	ret = rtw89_h2c_tx(rtwdev, skb, false);
4154 	if (ret) {
4155 		rtw89_err(rtwdev, "failed to send h2c\n");
4156 		goto fail;
4157 	}
4158 
4159 	return 0;
4160 fail:
4161 	dev_kfree_skb_any(skb);
4162 
4163 	return ret;
4164 }
4165 
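/* Added annotation: pause or resume TX for the macid selected by bit 'sh'
 * of group 'grp'. Firmware with the MACID_PAUSE_SLEEP feature takes the
 * extended command, which also updates the matching sleep bitmap; older
 * firmware only gets the plain pause bitmap.
 */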
4166 int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
4167 			     bool pause)
4168 {
4169 	struct rtw89_fw_macid_pause_sleep_grp *h2c_new;
4170 	struct rtw89_fw_macid_pause_grp *h2c;
4171 	__le32 set = cpu_to_le32(BIT(sh));
4172 	u8 h2c_macid_pause_id;
4173 	struct sk_buff *skb;
4174 	u32 len;
4175 	int ret;
4176 
4177 	if (RTW89_CHK_FW_FEATURE(MACID_PAUSE_SLEEP, &rtwdev->fw)) {
4178 		h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE_SLEEP;
4179 		len = sizeof(*h2c_new);
4180 	} else {
4181 		h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE;
4182 		len = sizeof(*h2c);
4183 	}
4184 
4185 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4186 	if (!skb) {
4187 		rtw89_err(rtwdev, "failed to alloc skb for h2c macid pause\n");
4188 		return -ENOMEM;
4189 	}
4190 	skb_put(skb, len);
4191 
4192 	if (h2c_macid_pause_id == H2C_FUNC_MAC_MACID_PAUSE_SLEEP) {
4193 		h2c_new = (struct rtw89_fw_macid_pause_sleep_grp *)skb->data;
4194 
4195 		h2c_new->n[0].pause_mask_grp[grp] = set;
4196 		h2c_new->n[0].sleep_mask_grp[grp] = set;
4197 		if (pause) {
4198 			h2c_new->n[0].pause_grp[grp] = set;
4199 			h2c_new->n[0].sleep_grp[grp] = set;
4200 		}
4201 	} else {
4202 		h2c = (struct rtw89_fw_macid_pause_grp *)skb->data;
4203 
4204 		h2c->mask_grp[grp] = set;
4205 		if (pause)
4206 			h2c->pause_grp[grp] = set;
4207 	}
4208 
4209 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4210 			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
4211 			      h2c_macid_pause_id, 1, 0,
4212 			      len);
4213 
4214 	ret = rtw89_h2c_tx(rtwdev, skb, false);
4215 	if (ret) {
4216 		rtw89_err(rtwdev, "failed to send h2c\n");
4217 		goto fail;
4218 	}
4219 
4220 	return 0;
4221 fail:
4222 	dev_kfree_skb_any(skb);
4223 
4224 	return ret;
4225 }
4226 
4227 #define H2C_EDCA_LEN 12
4228 int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
4229 			  u8 ac, u32 val)
4230 {
4231 	struct sk_buff *skb;
4232 	int ret;
4233 
4234 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_EDCA_LEN);
4235 	if (!skb) {
4236 		rtw89_err(rtwdev, "failed to alloc skb for h2c edca\n");
4237 		return -ENOMEM;
4238 	}
4239 	skb_put(skb, H2C_EDCA_LEN);
4240 	RTW89_SET_EDCA_SEL(skb->data, 0);
4241 	RTW89_SET_EDCA_BAND(skb->data, rtwvif_link->mac_idx);
4242 	RTW89_SET_EDCA_WMM(skb->data, 0);
4243 	RTW89_SET_EDCA_AC(skb->data, ac);
4244 	RTW89_SET_EDCA_PARAM(skb->data, val);
4245 
4246 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4247 			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
4248 			      H2C_FUNC_USR_EDCA, 0, 1,
4249 			      H2C_EDCA_LEN);
4250 
4251 	ret = rtw89_h2c_tx(rtwdev, skb, false);
4252 	if (ret) {
4253 		rtw89_err(rtwdev, "failed to send h2c\n");
4254 		goto fail;
4255 	}
4256 
4257 	return 0;
4258 fail:
4259 	dev_kfree_skb_any(skb);
4260 
4261 	return ret;
4262 }
4263 
4264 #define H2C_TSF32_TOGL_LEN 4
4265 int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev,
4266 			      struct rtw89_vif_link *rtwvif_link,
4267 			      bool en)
4268 {
4269 	struct sk_buff *skb;
4270 	u16 early_us = en ? 2000 : 0;
4271 	u8 *cmd;
4272 	int ret;
4273 
4274 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_TSF32_TOGL_LEN);
4275 	if (!skb) {
4276 		rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n");
4277 		return -ENOMEM;
4278 	}
4279 	skb_put(skb, H2C_TSF32_TOGL_LEN);
4280 	cmd = skb->data;
4281 
4282 	RTW89_SET_FWCMD_TSF32_TOGL_BAND(cmd, rtwvif_link->mac_idx);
4283 	RTW89_SET_FWCMD_TSF32_TOGL_EN(cmd, en);
4284 	RTW89_SET_FWCMD_TSF32_TOGL_PORT(cmd, rtwvif_link->port);
4285 	RTW89_SET_FWCMD_TSF32_TOGL_EARLY(cmd, early_us);
4286 
4287 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4288 			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
4289 			      H2C_FUNC_TSF32_TOGL, 0, 0,
4290 			      H2C_TSF32_TOGL_LEN);
4291 
4292 	ret = rtw89_h2c_tx(rtwdev, skb, false);
4293 	if (ret) {
4294 		rtw89_err(rtwdev, "failed to send h2c\n");
4295 		goto fail;
4296 	}
4297 
4298 	return 0;
4299 fail:
4300 	dev_kfree_skb_any(skb);
4301 
4302 	return ret;
4303 }
4304 
4305 #define H2C_OFLD_CFG_LEN 8
4306 int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev)
4307 {
4308 	static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00};
4309 	struct sk_buff *skb;
4310 	int ret;
4311 
4312 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_OFLD_CFG_LEN);
4313 	if (!skb) {
4314 		rtw89_err(rtwdev, "failed to alloc skb for h2c ofld\n");
4315 		return -ENOMEM;
4316 	}
4317 	skb_put_data(skb, cfg, H2C_OFLD_CFG_LEN);
4318 
4319 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4320 			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
4321 			      H2C_FUNC_OFLD_CFG, 0, 1,
4322 			      H2C_OFLD_CFG_LEN);
4323 
4324 	ret = rtw89_h2c_tx(rtwdev, skb, false);
4325 	if (ret) {
4326 		rtw89_err(rtwdev, "failed to send h2c\n");
4327 		goto fail;
4328 	}
4329 
4330 	return 0;
4331 fail:
4332 	dev_kfree_skb_any(skb);
4333 
4334 	return ret;
4335 }
4336 
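/* Added annotation: thermal protection TX duty cycle. Each level above
 * zero pauses TX for lv * RTW89_THERMAL_PROT_STEP percent of the
 * interval; level 0 (or an out-of-range level) disables duty cycling via
 * the STOP bit.
 */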
4337 int rtw89_fw_h2c_tx_duty(struct rtw89_dev *rtwdev, u8 lv)
4338 {
4339 	struct rtw89_h2c_tx_duty *h2c;
4340 	u32 len = sizeof(*h2c);
4341 	struct sk_buff *skb;
4342 	u16 pause, active;
4343 	int ret;
4344 
4345 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4346 	if (!skb) {
4347 		rtw89_err(rtwdev, "failed to alloc skb for h2c tx duty\n");
4348 		return -ENOMEM;
4349 	}
4350 
4351 	skb_put(skb, len);
4352 	h2c = (struct rtw89_h2c_tx_duty *)skb->data;
4353 
4354 	static_assert(RTW89_THERMAL_PROT_LV_MAX * RTW89_THERMAL_PROT_STEP < 100);
4355 
4356 	if (lv == 0 || lv > RTW89_THERMAL_PROT_LV_MAX) {
4357 		h2c->w1 = le32_encode_bits(1, RTW89_H2C_TX_DUTY_W1_STOP);
4358 	} else {
4359 		active = 100 - lv * RTW89_THERMAL_PROT_STEP;
4360 		pause = 100 - active;
4361 
4362 		h2c->w0 = le32_encode_bits(pause, RTW89_H2C_TX_DUTY_W0_PAUSE_INTVL_MASK) |
4363 			  le32_encode_bits(active, RTW89_H2C_TX_DUTY_W0_TX_INTVL_MASK);
4364 	}
4365 
4366 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4367 			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
4368 			      H2C_FUNC_TX_DUTY, 0, 0, len);
4369 
4370 	ret = rtw89_h2c_tx(rtwdev, skb, false);
4371 	if (ret) {
4372 		rtw89_err(rtwdev, "failed to send h2c\n");
4373 		goto fail;
4374 	}
4375 
4376 	return 0;
4377 fail:
4378 	dev_kfree_skb_any(skb);
4379 
4380 	return ret;
4381 }
4382 
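/* Added annotation: configure the firmware beacon filter / connection
 * quality monitor for a station-mode link. The CQM threshold and
 * hysteresis come from the vif's bss_conf (falling back to driver
 * defaults), the RSSI threshold is rebased by MAX_RSSI, and the beacon
 * loss count is split across the low/high bitfields of W0.
 */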
4383 int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev,
4384 				  struct rtw89_vif_link *rtwvif_link,
4385 				  bool connect)
4386 {
4387 	struct ieee80211_bss_conf *bss_conf;
4388 	s32 thold = RTW89_DEFAULT_CQM_THOLD;
4389 	u32 hyst = RTW89_DEFAULT_CQM_HYST;
4390 	struct rtw89_h2c_bcnfltr *h2c;
4391 	u32 len = sizeof(*h2c);
4392 	struct sk_buff *skb;
4393 	u8 max_cnt, cnt;
4394 	int ret;
4395 
4396 	if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw))
4397 		return -EINVAL;
4398 
4399 	if (!rtwvif_link || rtwvif_link->net_type != RTW89_NET_TYPE_INFRA)
4400 		return -EINVAL;
4401 
4402 	rcu_read_lock();
4403 
4404 	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, false);
4405 
4406 	if (bss_conf->cqm_rssi_hyst)
4407 		hyst = bss_conf->cqm_rssi_hyst;
4408 	if (bss_conf->cqm_rssi_thold)
4409 		thold = bss_conf->cqm_rssi_thold;
4410 
4411 	rcu_read_unlock();
4412 
4413 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4414 	if (!skb) {
4415 		rtw89_err(rtwdev, "failed to alloc skb for h2c bcn filter\n");
4416 		return -ENOMEM;
4417 	}
4418 
4419 	skb_put(skb, len);
4420 	h2c = (struct rtw89_h2c_bcnfltr *)skb->data;
4421 
4422 	if (RTW89_CHK_FW_FEATURE(BEACON_LOSS_COUNT_V1, &rtwdev->fw))
4423 		max_cnt = BIT(7) - 1;
4424 	else
4425 		max_cnt = BIT(4) - 1;
4426 
4427 	cnt = min(RTW89_BCN_LOSS_CNT, max_cnt);
4428 
4429 	h2c->w0 = le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_RSSI) |
4430 		  le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_BCN) |
4431 		  le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_EN) |
4432 		  le32_encode_bits(RTW89_BCN_FLTR_OFFLOAD_MODE_DEFAULT,
4433 				   RTW89_H2C_BCNFLTR_W0_MODE) |
4434 		  le32_encode_bits(cnt >> 4, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT_H3) |
4435 		  le32_encode_bits(cnt & 0xf, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT_L4) |
4436 		  le32_encode_bits(hyst, RTW89_H2C_BCNFLTR_W0_RSSI_HYST) |
4437 		  le32_encode_bits(thold + MAX_RSSI,
4438 				   RTW89_H2C_BCNFLTR_W0_RSSI_THRESHOLD) |
4439 		  le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCNFLTR_W0_MAC_ID);
4440 
4441 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4442 			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
4443 			      H2C_FUNC_CFG_BCNFLTR, 0, 1, len);
4444 
4445 	ret = rtw89_h2c_tx(rtwdev, skb, false);
4446 	if (ret) {
4447 		rtw89_err(rtwdev, "failed to send h2c\n");
4448 		goto fail;
4449 	}
4450 
4451 	return 0;
4452 fail:
4453 	dev_kfree_skb_any(skb);
4454 
4455 	return ret;
4456 }
4457 
4458 int rtw89_fw_h2c_rssi_offload(struct rtw89_dev *rtwdev,
4459 			      struct rtw89_rx_phy_ppdu *phy_ppdu)
4460 {
4461 	struct rtw89_h2c_ofld_rssi *h2c;
4462 	u32 len = sizeof(*h2c);
4463 	struct sk_buff *skb;
4464 	s8 rssi;
4465 	int ret;
4466 
4467 	if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw))
4468 		return -EINVAL;
4469 
4470 	if (!phy_ppdu)
4471 		return -EINVAL;
4472 
4473 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4474 	if (!skb) {
4475 		rtw89_err(rtwdev, "failed to alloc skb for h2c rssi\n");
4476 		return -ENOMEM;
4477 	}
4478 
4479 	rssi = phy_ppdu->rssi_avg >> RSSI_FACTOR;
4480 	skb_put(skb, len);
4481 	h2c = (struct rtw89_h2c_ofld_rssi *)skb->data;
4482 
4483 	h2c->w0 = le32_encode_bits(phy_ppdu->mac_id, RTW89_H2C_OFLD_RSSI_W0_MACID) |
4484 		  le32_encode_bits(1, RTW89_H2C_OFLD_RSSI_W0_NUM);
4485 	h2c->w1 = le32_encode_bits(rssi, RTW89_H2C_OFLD_RSSI_W1_VAL);
4486 
4487 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4488 			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
4489 			      H2C_FUNC_OFLD_RSSI, 0, 1, len);
4490 
4491 	ret = rtw89_h2c_tx(rtwdev, skb, false);
4492 	if (ret) {
4493 		rtw89_err(rtwdev, "failed to send h2c\n");
4494 		goto fail;
4495 	}
4496 
4497 	return 0;
4498 fail:
4499 	dev_kfree_skb_any(skb);
4500 
4501 	return ret;
4502 }
4503 
4504 int rtw89_fw_h2c_tp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link)
4505 {
4506 	struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
4507 	struct rtw89_traffic_stats *stats = &rtwvif->stats;
4508 	struct rtw89_h2c_ofld *h2c;
4509 	u32 len = sizeof(*h2c);
4510 	struct sk_buff *skb;
4511 	int ret;
4512 
4513 	if (rtwvif_link->net_type != RTW89_NET_TYPE_INFRA)
4514 		return -EINVAL;
4515 
4516 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4517 	if (!skb) {
4518 		rtw89_err(rtwdev, "failed to alloc skb for h2c tp\n");
4519 		return -ENOMEM;
4520 	}
4521 
4522 	skb_put(skb, len);
4523 	h2c = (struct rtw89_h2c_ofld *)skb->data;
4524 
4525 	h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_OFLD_W0_MAC_ID) |
4526 		  le32_encode_bits(stats->tx_throughput, RTW89_H2C_OFLD_W0_TX_TP) |
4527 		  le32_encode_bits(stats->rx_throughput, RTW89_H2C_OFLD_W0_RX_TP);
4528 
4529 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4530 			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
4531 			      H2C_FUNC_OFLD_TP, 0, 1, len);
4532 
4533 	ret = rtw89_h2c_tx(rtwdev, skb, false);
4534 	if (ret) {
4535 		rtw89_err(rtwdev, "failed to send h2c\n");
4536 		goto fail;
4537 	}
4538 
4539 	return 0;
4540 fail:
4541 	dev_kfree_skb_any(skb);
4542 
4543 	return ret;
4544 }
4545 
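/* Added annotation: push a rate adaptation configuration
 * (H2C_FUNC_OUTSRC_RA_MACIDCFG) for one macid. RTW89_CHIP_BE uses the v1
 * layout with an extra word for the EHT mode/bandwidth caps; when 'csi'
 * is set, the BFEE CSI control bit and the fixed CSI rate fields are
 * filled in as well.
 */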
4546 int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi)
4547 {
4548 	const struct rtw89_chip_info *chip = rtwdev->chip;
4549 	struct rtw89_h2c_ra_v1 *h2c_v1;
4550 	struct rtw89_h2c_ra *h2c;
4551 	u32 len = sizeof(*h2c);
4552 	bool format_v1 = false;
4553 	struct sk_buff *skb;
4554 	int ret;
4555 
4556 	if (chip->chip_gen == RTW89_CHIP_BE) {
4557 		len = sizeof(*h2c_v1);
4558 		format_v1 = true;
4559 	}
4560 
4561 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4562 	if (!skb) {
4563 		rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
4564 		return -ENOMEM;
4565 	}
4566 	skb_put(skb, len);
4567 	h2c = (struct rtw89_h2c_ra *)skb->data;
4568 	rtw89_debug(rtwdev, RTW89_DBG_RA,
4569 		    "ra cmd msk: %llx ", ra->ra_mask);
4570 
4571 	h2c->w0 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_W0_MODE) |
4572 		  le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_W0_BW_CAP) |
4573 		  le32_encode_bits(ra->macid, RTW89_H2C_RA_W0_MACID) |
4574 		  le32_encode_bits(ra->dcm_cap, RTW89_H2C_RA_W0_DCM) |
4575 		  le32_encode_bits(ra->er_cap, RTW89_H2C_RA_W0_ER) |
4576 		  le32_encode_bits(ra->init_rate_lv, RTW89_H2C_RA_W0_INIT_RATE_LV) |
4577 		  le32_encode_bits(ra->upd_all, RTW89_H2C_RA_W0_UPD_ALL) |
4578 		  le32_encode_bits(ra->en_sgi, RTW89_H2C_RA_W0_SGI) |
4579 		  le32_encode_bits(ra->ldpc_cap, RTW89_H2C_RA_W0_LDPC) |
4580 		  le32_encode_bits(ra->stbc_cap, RTW89_H2C_RA_W0_STBC) |
4581 		  le32_encode_bits(ra->ss_num, RTW89_H2C_RA_W0_SS_NUM) |
4582 		  le32_encode_bits(ra->giltf, RTW89_H2C_RA_W0_GILTF) |
4583 		  le32_encode_bits(ra->upd_bw_nss_mask, RTW89_H2C_RA_W0_UPD_BW_NSS_MASK) |
4584 		  le32_encode_bits(ra->upd_mask, RTW89_H2C_RA_W0_UPD_MASK);
4585 	h2c->w1 = le32_encode_bits(ra->ra_mask, RTW89_H2C_RA_W1_RAMASK_LO32);
4586 	h2c->w2 = le32_encode_bits(ra->ra_mask >> 32, RTW89_H2C_RA_W2_RAMASK_HI32);
4587 	h2c->w3 = le32_encode_bits(ra->fix_giltf_en, RTW89_H2C_RA_W3_FIX_GILTF_EN) |
4588 		  le32_encode_bits(ra->fix_giltf, RTW89_H2C_RA_W3_FIX_GILTF);
4589 
4590 	if (!format_v1)
4591 		goto csi;
4592 
4593 	h2c_v1 = (struct rtw89_h2c_ra_v1 *)h2c;
4594 	h2c_v1->w4 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_V1_W4_MODE_EHT) |
4595 		     le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_V1_W4_BW_EHT);
4596 
4597 csi:
4598 	if (!csi)
4599 		goto done;
4600 
4601 	h2c->w2 |= le32_encode_bits(1, RTW89_H2C_RA_W2_BFEE_CSI_CTL);
4602 	h2c->w3 |= le32_encode_bits(ra->band_num, RTW89_H2C_RA_W3_BAND_NUM) |
4603 		   le32_encode_bits(ra->cr_tbl_sel, RTW89_H2C_RA_W3_CR_TBL_SEL) |
4604 		   le32_encode_bits(ra->fixed_csi_rate_en, RTW89_H2C_RA_W3_FIXED_CSI_RATE_EN) |
4605 		   le32_encode_bits(ra->ra_csi_rate_en, RTW89_H2C_RA_W3_RA_CSI_RATE_EN) |
4606 		   le32_encode_bits(ra->csi_mcs_ss_idx, RTW89_H2C_RA_W3_FIXED_CSI_MCS_SS_IDX) |
4607 		   le32_encode_bits(ra->csi_mode, RTW89_H2C_RA_W3_FIXED_CSI_MODE) |
4608 		   le32_encode_bits(ra->csi_gi_ltf, RTW89_H2C_RA_W3_FIXED_CSI_GI_LTF) |
4609 		   le32_encode_bits(ra->csi_bw, RTW89_H2C_RA_W3_FIXED_CSI_BW);
4610 
4611 done:
4612 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4613 			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RA,
4614 			      H2C_FUNC_OUTSRC_RA_MACIDCFG, 0, 0,
4615 			      len);
4616 
4617 	ret = rtw89_h2c_tx(rtwdev, skb, false);
4618 	if (ret) {
4619 		rtw89_err(rtwdev, "failed to send h2c\n");
4620 		goto fail;
4621 	}
4622 
4623 	return 0;
4624 fail:
4625 	dev_kfree_skb_any(skb);
4626 
4627 	return ret;
4628 }
4629 
4630 int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev, u8 type)
4631 {
4632 	struct rtw89_btc *btc = &rtwdev->btc;
4633 	struct rtw89_btc_dm *dm = &btc->dm;
4634 	struct rtw89_btc_init_info *init_info = &dm->init_info.init;
4635 	struct rtw89_btc_module *module = &init_info->module;
4636 	struct rtw89_btc_ant_info *ant = &module->ant;
4637 	struct rtw89_h2c_cxinit *h2c;
4638 	u32 len = sizeof(*h2c);
4639 	struct sk_buff *skb;
4640 	int ret;
4641 
4642 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4643 	if (!skb) {
4644 		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init\n");
4645 		return -ENOMEM;
4646 	}
4647 	skb_put(skb, len);
4648 	h2c = (struct rtw89_h2c_cxinit *)skb->data;
4649 
4650 	h2c->hdr.type = type;
4651 	h2c->hdr.len = len - H2C_LEN_CXDRVHDR;
4652 
4653 	h2c->ant_type = ant->type;
4654 	h2c->ant_num = ant->num;
4655 	h2c->ant_iso = ant->isolation;
4656 	h2c->ant_info =
4657 		u8_encode_bits(ant->single_pos, RTW89_H2C_CXINIT_ANT_INFO_POS) |
4658 		u8_encode_bits(ant->diversity, RTW89_H2C_CXINIT_ANT_INFO_DIVERSITY) |
4659 		u8_encode_bits(ant->btg_pos, RTW89_H2C_CXINIT_ANT_INFO_BTG_POS) |
4660 		u8_encode_bits(ant->stream_cnt, RTW89_H2C_CXINIT_ANT_INFO_STREAM_CNT);
4661 
4662 	h2c->mod_rfe = module->rfe_type;
4663 	h2c->mod_cv = module->cv;
4664 	h2c->mod_info =
4665 		u8_encode_bits(module->bt_solo, RTW89_H2C_CXINIT_MOD_INFO_BT_SOLO) |
4666 		u8_encode_bits(module->bt_pos, RTW89_H2C_CXINIT_MOD_INFO_BT_POS) |
4667 		u8_encode_bits(module->switch_type, RTW89_H2C_CXINIT_MOD_INFO_SW_TYPE) |
4668 		u8_encode_bits(module->wa_type, RTW89_H2C_CXINIT_MOD_INFO_WA_TYPE);
4669 	h2c->mod_adie_kt = module->kt_ver_adie;
4670 	h2c->wl_gch = init_info->wl_guard_ch;
4671 
4672 	h2c->info =
4673 		u8_encode_bits(init_info->wl_only, RTW89_H2C_CXINIT_INFO_WL_ONLY) |
4674 		u8_encode_bits(init_info->wl_init_ok, RTW89_H2C_CXINIT_INFO_WL_INITOK) |
4675 		u8_encode_bits(init_info->dbcc_en, RTW89_H2C_CXINIT_INFO_DBCC_EN) |
4676 		u8_encode_bits(init_info->cx_other, RTW89_H2C_CXINIT_INFO_CX_OTHER) |
4677 		u8_encode_bits(init_info->bt_only, RTW89_H2C_CXINIT_INFO_BT_ONLY);
4678 
4679 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4680 			      H2C_CAT_OUTSRC, BTFC_SET,
4681 			      SET_DRV_INFO, 0, 0,
4682 			      len);
4683 
4684 	ret = rtw89_h2c_tx(rtwdev, skb, false);
4685 	if (ret) {
4686 		rtw89_err(rtwdev, "failed to send h2c\n");
4687 		goto fail;
4688 	}
4689 
4690 	return 0;
4691 fail:
4692 	dev_kfree_skb_any(skb);
4693 
4694 	return ret;
4695 }
4696 
4697 int rtw89_fw_h2c_cxdrv_init_v7(struct rtw89_dev *rtwdev, u8 type)
4698 {
4699 	struct rtw89_btc *btc = &rtwdev->btc;
4700 	struct rtw89_btc_dm *dm = &btc->dm;
4701 	struct rtw89_btc_init_info_v7 *init_info = &dm->init_info.init_v7;
4702 	struct rtw89_h2c_cxinit_v7 *h2c;
4703 	u32 len = sizeof(*h2c);
4704 	struct sk_buff *skb;
4705 	int ret;
4706 
4707 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4708 	if (!skb) {
4709 		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init_v7\n");
4710 		return -ENOMEM;
4711 	}
4712 	skb_put(skb, len);
4713 	h2c = (struct rtw89_h2c_cxinit_v7 *)skb->data;
4714 
4715 	h2c->hdr.type = type;
4716 	h2c->hdr.ver = btc->ver->fcxinit;
4717 	h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7;
4718 	h2c->init = *init_info;
4719 
4720 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4721 			      H2C_CAT_OUTSRC, BTFC_SET,
4722 			      SET_DRV_INFO, 0, 0,
4723 			      len);
4724 
4725 	ret = rtw89_h2c_tx(rtwdev, skb, false);
4726 	if (ret) {
4727 		rtw89_err(rtwdev, "failed to send h2c\n");
4728 		goto fail;
4729 	}
4730 
4731 	return 0;
4732 fail:
4733 	dev_kfree_skb_any(skb);
4734 
4735 	return ret;
4736 }
4737 
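/* Added annotation: BTC role info (SET_DRV_INFO) size helpers below
 * appear to cover 4 bytes of common role counters/bitmap, a fixed-size
 * record per role (12 bytes in the base format, 16 in v1, 8 in v2), an
 * optional DBCC trailer for v1/v2, plus the common cxdrv header.
 */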
4738 #define PORT_DATA_OFFSET 4
4739 #define H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN 12
4740 #define H2C_LEN_CXDRVINFO_ROLE_SIZE(max_role_num) \
4741 	(4 + 12 * (max_role_num) + H2C_LEN_CXDRVHDR)
4742 
4743 int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev, u8 type)
4744 {
4745 	struct rtw89_btc *btc = &rtwdev->btc;
4746 	const struct rtw89_btc_ver *ver = btc->ver;
4747 	struct rtw89_btc_wl_info *wl = &btc->cx.wl;
4748 	struct rtw89_btc_wl_role_info *role_info = &wl->role_info;
4749 	struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
4750 	struct rtw89_btc_wl_active_role *active = role_info->active_role;
4751 	struct sk_buff *skb;
4752 	u32 len;
4753 	u8 offset = 0;
4754 	u8 *cmd;
4755 	int ret;
4756 	int i;
4757 
4758 	len = H2C_LEN_CXDRVINFO_ROLE_SIZE(ver->max_role_num);
4759 
4760 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4761 	if (!skb) {
4762 		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
4763 		return -ENOMEM;
4764 	}
4765 	skb_put(skb, len);
4766 	cmd = skb->data;
4767 
4768 	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
4769 	RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);
4770 
4771 	RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
4772 	RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);
4773 
4774 	RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none);
4775 	RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station);
4776 	RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap);
4777 	RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap);
4778 	RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc);
4779 	RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master);
4780 	RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh);
4781 	RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter);
4782 	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device);
4783 	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc);
4784 	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
4785 	RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);
4786 
4787 	for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
4788 		RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset);
4789 		RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset);
4790 		RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset);
4791 		RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset);
4792 		RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset);
4793 		RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset);
4794 		RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset);
4795 		RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset);
4796 		RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset);
4797 		RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset);
4798 		RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset);
4799 		RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset);
4800 		RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset);
4801 	}
4802 
4803 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4804 			      H2C_CAT_OUTSRC, BTFC_SET,
4805 			      SET_DRV_INFO, 0, 0,
4806 			      len);
4807 
4808 	ret = rtw89_h2c_tx(rtwdev, skb, false);
4809 	if (ret) {
4810 		rtw89_err(rtwdev, "failed to send h2c\n");
4811 		goto fail;
4812 	}
4813 
4814 	return 0;
4815 fail:
4816 	dev_kfree_skb_any(skb);
4817 
4818 	return ret;
4819 }
4820 
4821 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(max_role_num) \
4822 	(4 + 16 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR)
4823 
4824 int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev, u8 type)
4825 {
4826 	struct rtw89_btc *btc = &rtwdev->btc;
4827 	const struct rtw89_btc_ver *ver = btc->ver;
4828 	struct rtw89_btc_wl_info *wl = &btc->cx.wl;
4829 	struct rtw89_btc_wl_role_info_v1 *role_info = &wl->role_info_v1;
4830 	struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
4831 	struct rtw89_btc_wl_active_role_v1 *active = role_info->active_role_v1;
4832 	struct sk_buff *skb;
4833 	u32 len;
4834 	u8 *cmd, offset;
4835 	int ret;
4836 	int i;
4837 
4838 	len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(ver->max_role_num);
4839 
4840 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4841 	if (!skb) {
4842 		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
4843 		return -ENOMEM;
4844 	}
4845 	skb_put(skb, len);
4846 	cmd = skb->data;
4847 
4848 	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
4849 	RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);
4850 
4851 	RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
4852 	RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);
4853 
4854 	RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none);
4855 	RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station);
4856 	RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap);
4857 	RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap);
4858 	RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc);
4859 	RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master);
4860 	RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh);
4861 	RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter);
4862 	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device);
4863 	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc);
4864 	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
4865 	RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);
4866 
4867 	offset = PORT_DATA_OFFSET;
4868 	for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
4869 		RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset);
4870 		RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset);
4871 		RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset);
4872 		RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset);
4873 		RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset);
4874 		RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset);
4875 		RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset);
4876 		RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset);
4877 		RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset);
4878 		RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset);
4879 		RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset);
4880 		RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset);
4881 		RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset);
4882 		RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR(cmd, active->noa_duration, i, offset);
4883 	}
4884 
4885 	offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN;
4886 	RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset);
4887 	RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset);
4888 	RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset);
4889 	RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset);
4890 	RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset);
4891 	RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset);
4892 
4893 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4894 			      H2C_CAT_OUTSRC, BTFC_SET,
4895 			      SET_DRV_INFO, 0, 0,
4896 			      len);
4897 
4898 	ret = rtw89_h2c_tx(rtwdev, skb, false);
4899 	if (ret) {
4900 		rtw89_err(rtwdev, "failed to send h2c\n");
4901 		goto fail;
4902 	}
4903 
4904 	return 0;
4905 fail:
4906 	dev_kfree_skb_any(skb);
4907 
4908 	return ret;
4909 }
4910 
4911 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(max_role_num) \
4912 	(4 + 8 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR)
4913 
4914 int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev, u8 type)
4915 {
4916 	struct rtw89_btc *btc = &rtwdev->btc;
4917 	const struct rtw89_btc_ver *ver = btc->ver;
4918 	struct rtw89_btc_wl_info *wl = &btc->cx.wl;
4919 	struct rtw89_btc_wl_role_info_v2 *role_info = &wl->role_info_v2;
4920 	struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
4921 	struct rtw89_btc_wl_active_role_v2 *active = role_info->active_role_v2;
4922 	struct sk_buff *skb;
4923 	u32 len;
4924 	u8 *cmd, offset;
4925 	int ret;
4926 	int i;
4927 
4928 	len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(ver->max_role_num);
4929 
4930 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4931 	if (!skb) {
4932 		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
4933 		return -ENOMEM;
4934 	}
4935 	skb_put(skb, len);
4936 	cmd = skb->data;
4937 
4938 	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
4939 	RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);
4940 
4941 	RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
4942 	RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);
4943 
4944 	RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none);
4945 	RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station);
4946 	RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap);
4947 	RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap);
4948 	RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc);
4949 	RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master);
4950 	RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh);
4951 	RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter);
4952 	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device);
4953 	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc);
4954 	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
4955 	RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);
4956 
4957 	offset = PORT_DATA_OFFSET;
4958 	for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
4959 		RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED_V2(cmd, active->connected, i, offset);
4960 		RTW89_SET_FWCMD_CXROLE_ACT_PID_V2(cmd, active->pid, i, offset);
4961 		RTW89_SET_FWCMD_CXROLE_ACT_PHY_V2(cmd, active->phy, i, offset);
4962 		RTW89_SET_FWCMD_CXROLE_ACT_NOA_V2(cmd, active->noa, i, offset);
4963 		RTW89_SET_FWCMD_CXROLE_ACT_BAND_V2(cmd, active->band, i, offset);
4964 		RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS_V2(cmd, active->client_ps, i, offset);
4965 		RTW89_SET_FWCMD_CXROLE_ACT_BW_V2(cmd, active->bw, i, offset);
4966 		RTW89_SET_FWCMD_CXROLE_ACT_ROLE_V2(cmd, active->role, i, offset);
4967 		RTW89_SET_FWCMD_CXROLE_ACT_CH_V2(cmd, active->ch, i, offset);
4968 		RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR_V2(cmd, active->noa_duration, i, offset);
4969 	}
4970 
4971 	offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN;
4972 	RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset);
4973 	RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset);
4974 	RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset);
4975 	RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset);
4976 	RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset);
4977 	RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset);
4978 
4979 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4980 			      H2C_CAT_OUTSRC, BTFC_SET,
4981 			      SET_DRV_INFO, 0, 0,
4982 			      len);
4983 
4984 	ret = rtw89_h2c_tx(rtwdev, skb, false);
4985 	if (ret) {
4986 		rtw89_err(rtwdev, "failed to send h2c\n");
4987 		goto fail;
4988 	}
4989 
4990 	return 0;
4991 fail:
4992 	dev_kfree_skb_any(skb);
4993 
4994 	return ret;
4995 }
4996 
4997 int rtw89_fw_h2c_cxdrv_role_v7(struct rtw89_dev *rtwdev, u8 type)
4998 {
4999 	struct rtw89_btc *btc = &rtwdev->btc;
5000 	struct rtw89_btc_wl_role_info_v7 *role = &btc->cx.wl.role_info_v7;
5001 	struct rtw89_h2c_cxrole_v7 *h2c;
5002 	u32 len = sizeof(*h2c);
5003 	struct sk_buff *skb;
5004 	int ret;
5005 
5006 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
5007 	if (!skb) {
5008 		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n");
5009 		return -ENOMEM;
5010 	}
5011 	skb_put(skb, len);
5012 	h2c = (struct rtw89_h2c_cxrole_v7 *)skb->data;
5013 
5014 	h2c->hdr.type = type;
5015 	h2c->hdr.ver = btc->ver->fwlrole;
5016 	h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7;
5017 	memcpy(&h2c->_u8, role, sizeof(h2c->_u8));
5018 	h2c->_u32.role_map = cpu_to_le32(role->role_map);
5019 	h2c->_u32.mrole_type = cpu_to_le32(role->mrole_type);
5020 	h2c->_u32.mrole_noa_duration = cpu_to_le32(role->mrole_noa_duration);
5021 	h2c->_u32.dbcc_en = cpu_to_le32(role->dbcc_en);
5022 	h2c->_u32.dbcc_chg = cpu_to_le32(role->dbcc_chg);
5023 	h2c->_u32.dbcc_2g_phy = cpu_to_le32(role->dbcc_2g_phy);
5024 
5025 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5026 			      H2C_CAT_OUTSRC, BTFC_SET,
5027 			      SET_DRV_INFO, 0, 0,
5028 			      len);
5029 
5030 	ret = rtw89_h2c_tx(rtwdev, skb, false);
5031 	if (ret) {
5032 		rtw89_err(rtwdev, "failed to send h2c\n");
5033 		goto fail;
5034 	}
5035 
5036 	return 0;
5037 fail:
5038 	dev_kfree_skb_any(skb);
5039 
5040 	return ret;
5041 }
5042 
5043 int rtw89_fw_h2c_cxdrv_role_v8(struct rtw89_dev *rtwdev, u8 type)
5044 {
5045 	struct rtw89_btc *btc = &rtwdev->btc;
5046 	struct rtw89_btc_wl_role_info_v8 *role = &btc->cx.wl.role_info_v8;
5047 	struct rtw89_h2c_cxrole_v8 *h2c;
5048 	u32 len = sizeof(*h2c);
5049 	struct sk_buff *skb;
5050 	int ret;
5051 
5052 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
5053 	if (!skb) {
5054 		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n");
5055 		return -ENOMEM;
5056 	}
5057 	skb_put(skb, len);
5058 	h2c = (struct rtw89_h2c_cxrole_v8 *)skb->data;
5059 
5060 	h2c->hdr.type = type;
5061 	h2c->hdr.ver = btc->ver->fwlrole;
5062 	h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7;
5063 	memcpy(&h2c->_u8, role, sizeof(h2c->_u8));
5064 	h2c->_u32.role_map = cpu_to_le32(role->role_map);
5065 	h2c->_u32.mrole_type = cpu_to_le32(role->mrole_type);
5066 	h2c->_u32.mrole_noa_duration = cpu_to_le32(role->mrole_noa_duration);
5067 
5068 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5069 			      H2C_CAT_OUTSRC, BTFC_SET,
5070 			      SET_DRV_INFO, 0, 0,
5071 			      len);
5072 
5073 	ret = rtw89_h2c_tx(rtwdev, skb, false);
5074 	if (ret) {
5075 		rtw89_err(rtwdev, "failed to send h2c\n");
5076 		goto fail;
5077 	}
5078 
5079 	return 0;
5080 fail:
5081 	dev_kfree_skb_any(skb);
5082 
5083 	return ret;
5084 }
5085 
5086 #define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR)
5087 int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev, u8 type)
5088 {
5089 	struct rtw89_btc *btc = &rtwdev->btc;
5090 	const struct rtw89_btc_ver *ver = btc->ver;
5091 	struct rtw89_btc_ctrl *ctrl = &btc->ctrl.ctrl;
5092 	struct sk_buff *skb;
5093 	u8 *cmd;
5094 	int ret;
5095 
5096 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_CTRL);
5097 	if (!skb) {
5098 		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n");
5099 		return -ENOMEM;
5100 	}
5101 	skb_put(skb, H2C_LEN_CXDRVINFO_CTRL);
5102 	cmd = skb->data;
5103 
5104 	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
5105 	RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_CTRL - H2C_LEN_CXDRVHDR);
5106 
5107 	RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual);
5108 	RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, ctrl->igno_bt);
5109 	RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, ctrl->always_freerun);
5110 	if (ver->fcxctrl == 0)
5111 		RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step);
5112 
5113 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5114 			      H2C_CAT_OUTSRC, BTFC_SET,
5115 			      SET_DRV_INFO, 0, 0,
5116 			      H2C_LEN_CXDRVINFO_CTRL);
5117 
5118 	ret = rtw89_h2c_tx(rtwdev, skb, false);
5119 	if (ret) {
5120 		rtw89_err(rtwdev, "failed to send h2c\n");
5121 		goto fail;
5122 	}
5123 
5124 	return 0;
5125 fail:
5126 	dev_kfree_skb_any(skb);
5127 
5128 	return ret;
5129 }
5130 
5131 int rtw89_fw_h2c_cxdrv_ctrl_v7(struct rtw89_dev *rtwdev, u8 type)
5132 {
5133 	struct rtw89_btc *btc = &rtwdev->btc;
5134 	struct rtw89_btc_ctrl_v7 *ctrl = &btc->ctrl.ctrl_v7;
5135 	struct rtw89_h2c_cxctrl_v7 *h2c;
5136 	u32 len = sizeof(*h2c);
5137 	struct sk_buff *skb;
5138 	int ret;
5139 
5140 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
5141 	if (!skb) {
5142 		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl_v7\n");
5143 		return -ENOMEM;
5144 	}
5145 	skb_put(skb, len);
5146 	h2c = (struct rtw89_h2c_cxctrl_v7 *)skb->data;
5147 
5148 	h2c->hdr.type = type;
5149 	h2c->hdr.ver = btc->ver->fcxctrl;
5150 	h2c->hdr.len = sizeof(*h2c) - H2C_LEN_CXDRVHDR_V7;
5151 	h2c->ctrl = *ctrl;
5152 
5153 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5154 			      H2C_CAT_OUTSRC, BTFC_SET,
5155 			      SET_DRV_INFO, 0, 0, len);
5156 
5157 	ret = rtw89_h2c_tx(rtwdev, skb, false);
5158 	if (ret) {
5159 		rtw89_err(rtwdev, "failed to send h2c\n");
5160 		goto fail;
5161 	}
5162 
5163 	return 0;
5164 fail:
5165 	dev_kfree_skb_any(skb);
5166 
5167 	return ret;
5168 }
5169 
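/*
 * Report the driver's TRX status snapshot (TX/RX level, WL/BT RSSI,
 * power/gain, CN, NHM, BT profile, rates, throughput and RX error ratio)
 * used by the BT-coexistence mechanism in firmware.
 */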
5170 #define H2C_LEN_CXDRVINFO_TRX (28 + H2C_LEN_CXDRVHDR)
5171 int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev, u8 type)
5172 {
5173 	struct rtw89_btc *btc = &rtwdev->btc;
5174 	struct rtw89_btc_trx_info *trx = &btc->dm.trx_info;
5175 	struct sk_buff *skb;
5176 	u8 *cmd;
5177 	int ret;
5178 
5179 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_TRX);
5180 	if (!skb) {
5181 		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_trx\n");
5182 		return -ENOMEM;
5183 	}
5184 	skb_put(skb, H2C_LEN_CXDRVINFO_TRX);
5185 	cmd = skb->data;
5186 
5187 	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
5188 	RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_TRX - H2C_LEN_CXDRVHDR);
5189 
5190 	RTW89_SET_FWCMD_CXTRX_TXLV(cmd, trx->tx_lvl);
5191 	RTW89_SET_FWCMD_CXTRX_RXLV(cmd, trx->rx_lvl);
5192 	RTW89_SET_FWCMD_CXTRX_WLRSSI(cmd, trx->wl_rssi);
5193 	RTW89_SET_FWCMD_CXTRX_BTRSSI(cmd, trx->bt_rssi);
5194 	RTW89_SET_FWCMD_CXTRX_TXPWR(cmd, trx->tx_power);
5195 	RTW89_SET_FWCMD_CXTRX_RXGAIN(cmd, trx->rx_gain);
5196 	RTW89_SET_FWCMD_CXTRX_BTTXPWR(cmd, trx->bt_tx_power);
5197 	RTW89_SET_FWCMD_CXTRX_BTRXGAIN(cmd, trx->bt_rx_gain);
5198 	RTW89_SET_FWCMD_CXTRX_CN(cmd, trx->cn);
5199 	RTW89_SET_FWCMD_CXTRX_NHM(cmd, trx->nhm);
5200 	RTW89_SET_FWCMD_CXTRX_BTPROFILE(cmd, trx->bt_profile);
5201 	RTW89_SET_FWCMD_CXTRX_RSVD2(cmd, trx->rsvd2);
5202 	RTW89_SET_FWCMD_CXTRX_TXRATE(cmd, trx->tx_rate);
5203 	RTW89_SET_FWCMD_CXTRX_RXRATE(cmd, trx->rx_rate);
5204 	RTW89_SET_FWCMD_CXTRX_TXTP(cmd, trx->tx_tp);
5205 	RTW89_SET_FWCMD_CXTRX_RXTP(cmd, trx->rx_tp);
5206 	RTW89_SET_FWCMD_CXTRX_RXERRRA(cmd, trx->rx_err_ratio);
5207 
5208 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5209 			      H2C_CAT_OUTSRC, BTFC_SET,
5210 			      SET_DRV_INFO, 0, 0,
5211 			      H2C_LEN_CXDRVINFO_TRX);
5212 
5213 	ret = rtw89_h2c_tx(rtwdev, skb, false);
5214 	if (ret) {
5215 		rtw89_err(rtwdev, "failed to send h2c\n");
5216 		goto fail;
5217 	}
5218 
5219 	return 0;
5220 fail:
5221 	dev_kfree_skb_any(skb);
5222 
5223 	return ret;
5224 }
5225 
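/*
 * Notify firmware of the WL RF calibration (RFK) status so coexistence
 * can account for it: state, RF path map, PHY map, band and RFK type.
 */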
5226 #define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR)
5227 int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev, u8 type)
5228 {
5229 	struct rtw89_btc *btc = &rtwdev->btc;
5230 	struct rtw89_btc_wl_info *wl = &btc->cx.wl;
5231 	struct rtw89_btc_wl_rfk_info *rfk_info = &wl->rfk_info;
5232 	struct sk_buff *skb;
5233 	u8 *cmd;
5234 	int ret;
5235 
5236 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_RFK);
5237 	if (!skb) {
5238 		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_rfk\n");
5239 		return -ENOMEM;
5240 	}
5241 	skb_put(skb, H2C_LEN_CXDRVINFO_RFK);
5242 	cmd = skb->data;
5243 
5244 	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
5245 	RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_RFK - H2C_LEN_CXDRVHDR);
5246 
5247 	RTW89_SET_FWCMD_CXRFK_STATE(cmd, rfk_info->state);
5248 	RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, rfk_info->path_map);
5249 	RTW89_SET_FWCMD_CXRFK_PHY_MAP(cmd, rfk_info->phy_map);
5250 	RTW89_SET_FWCMD_CXRFK_BAND(cmd, rfk_info->band);
5251 	RTW89_SET_FWCMD_CXRFK_TYPE(cmd, rfk_info->type);
5252 
5253 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5254 			      H2C_CAT_OUTSRC, BTFC_SET,
5255 			      SET_DRV_INFO, 0, 0,
5256 			      H2C_LEN_CXDRVINFO_RFK);
5257 
5258 	ret = rtw89_h2c_tx(rtwdev, skb, false);
5259 	if (ret) {
5260 		rtw89_err(rtwdev, "failed to send h2c\n");
5261 		goto fail;
5262 	}
5263 
5264 	return 0;
5265 fail:
5266 	dev_kfree_skb_any(skb);
5267 
5268 	return ret;
5269 }
5270 
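/*
 * Ask firmware to delete a previously offloaded packet template and, on
 * success, release its id from the driver's pkt_offload bitmap.
 */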
5271 #define H2C_LEN_PKT_OFLD 4
5272 int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id)
5273 {
5274 	struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
5275 	struct sk_buff *skb;
5276 	unsigned int cond;
5277 	u8 *cmd;
5278 	int ret;
5279 
5280 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD);
5281 	if (!skb) {
5282 		rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n");
5283 		return -ENOMEM;
5284 	}
5285 	skb_put(skb, H2C_LEN_PKT_OFLD);
5286 	cmd = skb->data;
5287 
5288 	RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, id);
5289 	RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_DEL);
5290 
5291 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5292 			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
5293 			      H2C_FUNC_PACKET_OFLD, 1, 1,
5294 			      H2C_LEN_PKT_OFLD);
5295 
5296 	cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(id, RTW89_PKT_OFLD_OP_DEL);
5297 
5298 	ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
5299 	if (ret < 0) {
5300 		rtw89_debug(rtwdev, RTW89_DBG_FW,
5301 			    "failed to del pkt ofld: id %d, ret %d\n",
5302 			    id, ret);
5303 		return ret;
5304 	}
5305 
5306 	rtw89_core_release_bit_map(rtwdev->pkt_offload, id);
5307 	return 0;
5308 }
5309 
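/*
 * Upload a packet template to firmware: reserve an id from the
 * pkt_offload bitmap, append the packet payload after the offload header,
 * and wait for the firmware acknowledgement before returning the id.
 */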
5310 int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
5311 				 struct sk_buff *skb_ofld)
5312 {
5313 	struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
5314 	struct sk_buff *skb;
5315 	unsigned int cond;
5316 	u8 *cmd;
5317 	u8 alloc_id;
5318 	int ret;
5319 
5320 	alloc_id = rtw89_core_acquire_bit_map(rtwdev->pkt_offload,
5321 					      RTW89_MAX_PKT_OFLD_NUM);
5322 	if (alloc_id == RTW89_MAX_PKT_OFLD_NUM)
5323 		return -ENOSPC;
5324 
5325 	*id = alloc_id;
5326 
5327 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD + skb_ofld->len);
5328 	if (!skb) {
5329 		rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n");
5330 		rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id);
5331 		return -ENOMEM;
5332 	}
5333 	skb_put(skb, H2C_LEN_PKT_OFLD);
5334 	cmd = skb->data;
5335 
5336 	RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, alloc_id);
5337 	RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_ADD);
5338 	RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(cmd, skb_ofld->len);
5339 	skb_put_data(skb, skb_ofld->data, skb_ofld->len);
5340 
5341 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5342 			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
5343 			      H2C_FUNC_PACKET_OFLD, 1, 1,
5344 			      H2C_LEN_PKT_OFLD + skb_ofld->len);
5345 
5346 	cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(alloc_id, RTW89_PKT_OFLD_OP_ADD);
5347 
5348 	ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
5349 	if (ret < 0) {
5350 		rtw89_debug(rtwdev, RTW89_DBG_FW,
5351 			    "failed to add pkt ofld: id %d, ret %d\n",
5352 			    alloc_id, ret);
5353 		rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id);
5354 		return ret;
5355 	}
5356 
5357 	return 0;
5358 }
5359 
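/*
 * Download the scan channel list to AX-generation firmware, one
 * rtw89_h2c_chinfo_elem per channel, and wait for the ADD_CH acknowledgement.
 */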
5360 static
5361 int rtw89_fw_h2c_scan_list_offload_ax(struct rtw89_dev *rtwdev, int ch_num,
5362 				      struct list_head *chan_list)
5363 {
5364 	struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
5365 	struct rtw89_h2c_chinfo_elem *elem;
5366 	struct rtw89_mac_chinfo_ax *ch_info;
5367 	struct rtw89_h2c_chinfo *h2c;
5368 	struct sk_buff *skb;
5369 	unsigned int cond;
5370 	int skb_len;
5371 	int ret;
5372 
5373 	static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE);
5374 
5375 	skb_len = struct_size(h2c, elem, ch_num);
5376 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len);
5377 	if (!skb) {
5378 		rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n");
5379 		return -ENOMEM;
5380 	}
5381 	skb_put(skb, sizeof(*h2c));
5382 	h2c = (struct rtw89_h2c_chinfo *)skb->data;
5383 
5384 	h2c->ch_num = ch_num;
5385 	h2c->elem_size = sizeof(*elem) / 4; /* in units of 4 bytes */
5386 
5387 	list_for_each_entry(ch_info, chan_list, list) {
5388 		elem = (struct rtw89_h2c_chinfo_elem *)skb_put(skb, sizeof(*elem));
5389 
5390 		elem->w0 = le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_W0_PERIOD) |
5391 			   le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_W0_DWELL) |
5392 			   le32_encode_bits(ch_info->central_ch, RTW89_H2C_CHINFO_W0_CENTER_CH) |
5393 			   le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_W0_PRI_CH);
5394 
5395 		elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_W1_BW) |
5396 			   le32_encode_bits(ch_info->notify_action, RTW89_H2C_CHINFO_W1_ACTION) |
5397 			   le32_encode_bits(ch_info->num_pkt, RTW89_H2C_CHINFO_W1_NUM_PKT) |
5398 			   le32_encode_bits(ch_info->tx_pkt, RTW89_H2C_CHINFO_W1_TX) |
5399 			   le32_encode_bits(ch_info->pause_data, RTW89_H2C_CHINFO_W1_PAUSE_DATA) |
5400 			   le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_W1_BAND) |
5401 			   le32_encode_bits(ch_info->probe_id, RTW89_H2C_CHINFO_W1_PKT_ID) |
5402 			   le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_W1_DFS) |
5403 			   le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_W1_TX_NULL) |
5404 			   le32_encode_bits(ch_info->rand_seq_num, RTW89_H2C_CHINFO_W1_RANDOM);
5405 
5406 		elem->w2 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_W2_PKT0) |
5407 			   le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_W2_PKT1) |
5408 			   le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_W2_PKT2) |
5409 			   le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_W2_PKT3);
5410 
5411 		elem->w3 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_W3_PKT4) |
5412 			   le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_W3_PKT5) |
5413 			   le32_encode_bits(ch_info->pkt_id[6], RTW89_H2C_CHINFO_W3_PKT6) |
5414 			   le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_W3_PKT7);
5415 	}
5416 
5417 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5418 			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
5419 			      H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len);
5420 
5421 	cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH;
5422 
5423 	ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
5424 	if (ret) {
5425 		rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n");
5426 		return ret;
5427 	}
5428 
5429 	return 0;
5430 }
5431 
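/*
 * BE-generation variant of the scan channel list download. The per-channel
 * scan period lives in w0 when the CH_INFO_BE_V0 firmware feature is set
 * (ver == 0) and in w7 otherwise.
 */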
5432 static
5433 int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num,
5434 				      struct list_head *chan_list,
5435 				      struct rtw89_vif_link *rtwvif_link)
5436 {
5437 	struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
5438 	struct rtw89_h2c_chinfo_elem_be *elem;
5439 	struct rtw89_mac_chinfo_be *ch_info;
5440 	struct rtw89_h2c_chinfo_be *h2c;
5441 	struct sk_buff *skb;
5442 	unsigned int cond;
5443 	u8 ver = U8_MAX;
5444 	int skb_len;
5445 	int ret;
5446 
5447 	static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE_BE);
5448 
5449 	skb_len = struct_size(h2c, elem, ch_num);
5450 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len);
5451 	if (!skb) {
5452 		rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n");
5453 		return -ENOMEM;
5454 	}
5455 
5456 	if (RTW89_CHK_FW_FEATURE(CH_INFO_BE_V0, &rtwdev->fw))
5457 		ver = 0;
5458 
5459 	skb_put(skb, sizeof(*h2c));
5460 	h2c = (struct rtw89_h2c_chinfo_be *)skb->data;
5461 
5462 	h2c->ch_num = ch_num;
5463 	h2c->elem_size = sizeof(*elem) / 4; /* in units of 4 bytes */
5464 	h2c->arg = u8_encode_bits(rtwvif_link->mac_idx,
5465 				  RTW89_H2C_CHINFO_ARG_MAC_IDX_MASK);
5466 
5467 	list_for_each_entry(ch_info, chan_list, list) {
5468 		elem = (struct rtw89_h2c_chinfo_elem_be *)skb_put(skb, sizeof(*elem));
5469 
5470 		elem->w0 = le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_BE_W0_DWELL) |
5471 			   le32_encode_bits(ch_info->central_ch,
5472 					    RTW89_H2C_CHINFO_BE_W0_CENTER_CH) |
5473 			   le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_BE_W0_PRI_CH);
5474 
5475 		elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_BE_W1_BW) |
5476 			   le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_BE_W1_CH_BAND) |
5477 			   le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_BE_W1_DFS) |
5478 			   le32_encode_bits(ch_info->pause_data,
5479 					    RTW89_H2C_CHINFO_BE_W1_PAUSE_DATA) |
5480 			   le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_BE_W1_TX_NULL) |
5481 			   le32_encode_bits(ch_info->rand_seq_num,
5482 					    RTW89_H2C_CHINFO_BE_W1_RANDOM) |
5483 			   le32_encode_bits(ch_info->notify_action,
5484 					    RTW89_H2C_CHINFO_BE_W1_NOTIFY) |
5485 			   le32_encode_bits(ch_info->probe_id != 0xff ? 1 : 0,
5486 					    RTW89_H2C_CHINFO_BE_W1_PROBE) |
5487 			   le32_encode_bits(ch_info->leave_crit,
5488 					    RTW89_H2C_CHINFO_BE_W1_EARLY_LEAVE_CRIT) |
5489 			   le32_encode_bits(ch_info->chkpt_timer,
5490 					    RTW89_H2C_CHINFO_BE_W1_CHKPT_TIMER);
5491 
5492 		elem->w2 = le32_encode_bits(ch_info->leave_time,
5493 					    RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TIME) |
5494 			   le32_encode_bits(ch_info->leave_th,
5495 					    RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TH) |
5496 			   le32_encode_bits(ch_info->tx_pkt_ctrl,
5497 					    RTW89_H2C_CHINFO_BE_W2_TX_PKT_CTRL);
5498 
5499 		elem->w3 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_BE_W3_PKT0) |
5500 			   le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_BE_W3_PKT1) |
5501 			   le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_BE_W3_PKT2) |
5502 			   le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_BE_W3_PKT3);
5503 
5504 		elem->w4 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_BE_W4_PKT4) |
5505 			   le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_BE_W4_PKT5) |
5506 			   le32_encode_bits(ch_info->pkt_id[6], RTW89_H2C_CHINFO_BE_W4_PKT6) |
5507 			   le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_BE_W4_PKT7);
5508 
5509 		elem->w5 = le32_encode_bits(ch_info->sw_def, RTW89_H2C_CHINFO_BE_W5_SW_DEF) |
5510 			   le32_encode_bits(ch_info->fw_probe0_ssids,
5511 					    RTW89_H2C_CHINFO_BE_W5_FW_PROBE0_SSIDS);
5512 
5513 		elem->w6 = le32_encode_bits(ch_info->fw_probe0_shortssids,
5514 					    RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_SHORTSSIDS) |
5515 			   le32_encode_bits(ch_info->fw_probe0_bssids,
5516 					    RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_BSSIDS);
5517 		if (ver == 0)
5518 			elem->w0 |=
5519 			   le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_BE_W0_PERIOD);
5520 		else
5521 			elem->w7 = le32_encode_bits(ch_info->period,
5522 						    RTW89_H2C_CHINFO_BE_W7_PERIOD_V1);
5523 	}
5524 
5525 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5526 			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
5527 			      H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len);
5528 
5529 	cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH;
5530 
5531 	ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
5532 	if (ret) {
5533 		rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n");
5534 		return ret;
5535 	}
5536 
5537 	return 0;
5538 }
5539 
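/*
 * Start or stop scan offload on AX-generation firmware. When a delay is
 * requested, the current port TSF is read and the scan start is scheduled
 * option->delay * RTW89_SCAN_DELAY_TSF_UNIT TSF ticks later.
 */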
5540 #define RTW89_SCAN_DELAY_TSF_UNIT 1000000
5541 int rtw89_fw_h2c_scan_offload_ax(struct rtw89_dev *rtwdev,
5542 				 struct rtw89_scan_option *option,
5543 				 struct rtw89_vif_link *rtwvif_link,
5544 				 bool wowlan)
5545 {
5546 	struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
5547 	struct rtw89_chan *op = &rtwdev->scan_info.op_chan;
5548 	enum rtw89_scan_mode scan_mode = RTW89_SCAN_IMMEDIATE;
5549 	struct rtw89_h2c_scanofld *h2c;
5550 	u32 len = sizeof(*h2c);
5551 	struct sk_buff *skb;
5552 	unsigned int cond;
5553 	u64 tsf = 0;
5554 	int ret;
5555 
5556 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
5557 	if (!skb) {
5558 		rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n");
5559 		return -ENOMEM;
5560 	}
5561 	skb_put(skb, len);
5562 	h2c = (struct rtw89_h2c_scanofld *)skb->data;
5563 
5564 	if (option->delay) {
5565 		ret = rtw89_mac_port_get_tsf(rtwdev, rtwvif_link, &tsf);
5566 		if (ret) {
5567 			rtw89_warn(rtwdev, "NLO failed to get port tsf: %d\n", ret);
5568 			scan_mode = RTW89_SCAN_IMMEDIATE;
5569 		} else {
5570 			scan_mode = RTW89_SCAN_DELAY;
5571 			tsf += (u64)option->delay * RTW89_SCAN_DELAY_TSF_UNIT;
5572 		}
5573 	}
5574 
5575 	h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_SCANOFLD_W0_MACID) |
5576 		  le32_encode_bits(rtwvif_link->port, RTW89_H2C_SCANOFLD_W0_PORT_ID) |
5577 		  le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_SCANOFLD_W0_BAND) |
5578 		  le32_encode_bits(option->enable, RTW89_H2C_SCANOFLD_W0_OPERATION);
5579 
5580 	h2c->w1 = le32_encode_bits(true, RTW89_H2C_SCANOFLD_W1_NOTIFY_END) |
5581 		  le32_encode_bits(option->target_ch_mode,
5582 				   RTW89_H2C_SCANOFLD_W1_TARGET_CH_MODE) |
5583 		  le32_encode_bits(scan_mode, RTW89_H2C_SCANOFLD_W1_START_MODE) |
5584 		  le32_encode_bits(option->repeat, RTW89_H2C_SCANOFLD_W1_SCAN_TYPE);
5585 
5586 	h2c->w2 = le32_encode_bits(option->norm_pd, RTW89_H2C_SCANOFLD_W2_NORM_PD) |
5587 		  le32_encode_bits(option->slow_pd, RTW89_H2C_SCANOFLD_W2_SLOW_PD);
5588 
5589 	if (option->target_ch_mode) {
5590 		h2c->w1 |= le32_encode_bits(op->band_width,
5591 					    RTW89_H2C_SCANOFLD_W1_TARGET_CH_BW) |
5592 			   le32_encode_bits(op->primary_channel,
5593 					    RTW89_H2C_SCANOFLD_W1_TARGET_PRI_CH) |
5594 			   le32_encode_bits(op->channel,
5595 					    RTW89_H2C_SCANOFLD_W1_TARGET_CENTRAL_CH);
5596 		h2c->w0 |= le32_encode_bits(op->band_type,
5597 					    RTW89_H2C_SCANOFLD_W0_TARGET_CH_BAND);
5598 	}
5599 
5600 	h2c->tsf_high = le32_encode_bits(upper_32_bits(tsf),
5601 					 RTW89_H2C_SCANOFLD_W3_TSF_HIGH);
5602 	h2c->tsf_low = le32_encode_bits(lower_32_bits(tsf),
5603 					RTW89_H2C_SCANOFLD_W4_TSF_LOW);
5604 
5605 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5606 			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
5607 			      H2C_FUNC_SCANOFLD, 1, 1,
5608 			      len);
5609 
5610 	if (option->enable)
5611 		cond = RTW89_SCANOFLD_WAIT_COND_START;
5612 	else
5613 		cond = RTW89_SCANOFLD_WAIT_COND_STOP;
5614 
5615 	ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
5616 	if (ret) {
5617 		rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan ofld\n");
5618 		return ret;
5619 	}
5620 
5621 	return 0;
5622 }
5623 
5624 static void rtw89_scan_get_6g_disabled_chan(struct rtw89_dev *rtwdev,
5625 					    struct rtw89_scan_option *option)
5626 {
5627 	struct ieee80211_supported_band *sband;
5628 	struct ieee80211_channel *chan;
5629 	u8 i, idx;
5630 
5631 	sband = rtwdev->hw->wiphy->bands[NL80211_BAND_6GHZ];
5632 	if (!sband) {
5633 		option->prohib_chan = U64_MAX;
5634 		return;
5635 	}
5636 
5637 	for (i = 0; i < sband->n_channels; i++) {
5638 		chan = &sband->channels[i];
5639 		if (chan->flags & IEEE80211_CHAN_DISABLED) {
5640 			idx = (chan->hw_value - 1) / 4;
5641 			option->prohib_chan |= BIT(idx);
5642 		}
5643 	}
5644 }
5645 
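/*
 * BE-generation scan offload command. The fixed configuration words are
 * followed by flexible members: one entry per MAC role and one per
 * operating channel, according to option->num_macc_role and num_opch.
 * Disabled 6 GHz channels are reported via the prohibit-channel bitmap
 * in w6/w7.
 */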
5646 int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
5647 				 struct rtw89_scan_option *option,
5648 				 struct rtw89_vif_link *rtwvif_link,
5649 				 bool wowlan)
5650 {
5651 	struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
5652 	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
5653 	struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
5654 	struct cfg80211_scan_request *req = rtwvif->scan_req;
5655 	struct rtw89_h2c_scanofld_be_macc_role *macc_role;
5656 	struct rtw89_chan *op = &scan_info->op_chan;
5657 	struct rtw89_h2c_scanofld_be_opch *opch;
5658 	struct rtw89_pktofld_info *pkt_info;
5659 	struct rtw89_h2c_scanofld_be *h2c;
5660 	struct sk_buff *skb;
5661 	u8 macc_role_size = sizeof(*macc_role) * option->num_macc_role;
5662 	u8 opch_size = sizeof(*opch) * option->num_opch;
5663 	u8 probe_id[NUM_NL80211_BANDS];
5664 	u8 scan_offload_ver = U8_MAX;
5665 	u8 cfg_len = sizeof(*h2c);
5666 	unsigned int cond;
5667 	u8 ver = U8_MAX;
5668 	void *ptr;
5669 	int ret;
5670 	u32 len;
5671 	u8 i;
5672 
5673 	rtw89_scan_get_6g_disabled_chan(rtwdev, option);
5674 
5675 	if (RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD_BE_V0, &rtwdev->fw)) {
5676 		cfg_len = offsetofend(typeof(*h2c), w8);
5677 		scan_offload_ver = 0;
5678 	}
5679 
5680 	len = cfg_len + macc_role_size + opch_size;
5681 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
5682 	if (!skb) {
5683 		rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n");
5684 		return -ENOMEM;
5685 	}
5686 
5687 	skb_put(skb, len);
5688 	h2c = (struct rtw89_h2c_scanofld_be *)skb->data;
5689 	ptr = skb->data;
5690 
5691 	memset(probe_id, RTW89_SCANOFLD_PKT_NONE, sizeof(probe_id));
5692 
5693 	if (RTW89_CHK_FW_FEATURE(CH_INFO_BE_V0, &rtwdev->fw))
5694 		ver = 0;
5695 
5696 	if (!wowlan) {
5697 		list_for_each_entry(pkt_info, &scan_info->pkt_list[NL80211_BAND_6GHZ], list) {
5698 			if (pkt_info->wildcard_6ghz) {
5699 				/* Provide wildcard as template */
5700 				probe_id[NL80211_BAND_6GHZ] = pkt_info->id;
5701 				break;
5702 			}
5703 		}
5704 	}
5705 
5706 	h2c->w0 = le32_encode_bits(option->operation, RTW89_H2C_SCANOFLD_BE_W0_OP) |
5707 		  le32_encode_bits(option->scan_mode,
5708 				   RTW89_H2C_SCANOFLD_BE_W0_SCAN_MODE) |
5709 		  le32_encode_bits(option->repeat, RTW89_H2C_SCANOFLD_BE_W0_REPEAT) |
5710 		  le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_NOTIFY_END) |
5711 		  le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_LEARN_CH) |
5712 		  le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_SCANOFLD_BE_W0_MACID) |
5713 		  le32_encode_bits(rtwvif_link->port, RTW89_H2C_SCANOFLD_BE_W0_PORT) |
5714 		  le32_encode_bits(option->band, RTW89_H2C_SCANOFLD_BE_W0_BAND);
5715 
5716 	h2c->w1 = le32_encode_bits(option->num_macc_role, RTW89_H2C_SCANOFLD_BE_W1_NUM_MACC_ROLE) |
5717 		  le32_encode_bits(option->num_opch, RTW89_H2C_SCANOFLD_BE_W1_NUM_OP) |
5718 		  le32_encode_bits(option->norm_pd, RTW89_H2C_SCANOFLD_BE_W1_NORM_PD);
5719 
5720 	h2c->w2 = le32_encode_bits(option->slow_pd, RTW89_H2C_SCANOFLD_BE_W2_SLOW_PD) |
5721 		  le32_encode_bits(option->norm_cy, RTW89_H2C_SCANOFLD_BE_W2_NORM_CY) |
5722 		  le32_encode_bits(option->opch_end, RTW89_H2C_SCANOFLD_BE_W2_OPCH_END);
5723 
5724 	h2c->w3 = le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_SSID) |
5725 		  le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_SHORT_SSID) |
5726 		  le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_BSSID) |
5727 		  le32_encode_bits(probe_id[NL80211_BAND_2GHZ], RTW89_H2C_SCANOFLD_BE_W3_PROBEID);
5728 
5729 	h2c->w4 = le32_encode_bits(probe_id[NL80211_BAND_5GHZ],
5730 				   RTW89_H2C_SCANOFLD_BE_W4_PROBE_5G) |
5731 		  le32_encode_bits(probe_id[NL80211_BAND_6GHZ],
5732 				   RTW89_H2C_SCANOFLD_BE_W4_PROBE_6G) |
5733 		  le32_encode_bits(option->delay, RTW89_H2C_SCANOFLD_BE_W4_DELAY_START);
5734 
5735 	h2c->w5 = le32_encode_bits(option->mlo_mode, RTW89_H2C_SCANOFLD_BE_W5_MLO_MODE);
5736 
5737 	h2c->w6 = le32_encode_bits(option->prohib_chan,
5738 				   RTW89_H2C_SCANOFLD_BE_W6_CHAN_PROHIB_LOW);
5739 	h2c->w7 = le32_encode_bits(option->prohib_chan >> 32,
5740 				   RTW89_H2C_SCANOFLD_BE_W7_CHAN_PROHIB_HIGH);
5741 	if (!wowlan && req->no_cck) {
5742 		h2c->w0 |= le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_PROBE_WITH_RATE);
5743 		h2c->w8 = le32_encode_bits(RTW89_HW_RATE_OFDM6,
5744 					   RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_2GHZ) |
5745 			  le32_encode_bits(RTW89_HW_RATE_OFDM6,
5746 					   RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_5GHZ) |
5747 			  le32_encode_bits(RTW89_HW_RATE_OFDM6,
5748 					   RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_6GHZ);
5749 	}
5750 
5751 	if (scan_offload_ver == 0)
5752 		goto flex_member;
5753 
5754 	h2c->w9 = le32_encode_bits(sizeof(*h2c) / sizeof(h2c->w0),
5755 				   RTW89_H2C_SCANOFLD_BE_W9_SIZE_CFG) |
5756 		  le32_encode_bits(sizeof(*macc_role) / sizeof(macc_role->w0),
5757 				   RTW89_H2C_SCANOFLD_BE_W9_SIZE_MACC) |
5758 		  le32_encode_bits(sizeof(*opch) / sizeof(opch->w0),
5759 				   RTW89_H2C_SCANOFLD_BE_W9_SIZE_OP);
5760 
5761 flex_member:
5762 	ptr += cfg_len;
5763 
5764 	for (i = 0; i < option->num_macc_role; i++) {
5765 		macc_role = ptr;
5766 		macc_role->w0 =
5767 			le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_BAND) |
5768 			le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_PORT) |
5769 			le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_MACID) |
5770 			le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_OPCH_END);
5771 		ptr += sizeof(*macc_role);
5772 	}
5773 
5774 	for (i = 0; i < option->num_opch; i++) {
5775 		opch = ptr;
5776 		opch->w0 = le32_encode_bits(rtwvif_link->mac_id,
5777 					    RTW89_H2C_SCANOFLD_BE_OPCH_W0_MACID) |
5778 			   le32_encode_bits(option->band,
5779 					    RTW89_H2C_SCANOFLD_BE_OPCH_W0_BAND) |
5780 			   le32_encode_bits(rtwvif_link->port,
5781 					    RTW89_H2C_SCANOFLD_BE_OPCH_W0_PORT) |
5782 			   le32_encode_bits(RTW89_SCAN_OPMODE_INTV,
5783 					    RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY) |
5784 			   le32_encode_bits(true,
5785 					    RTW89_H2C_SCANOFLD_BE_OPCH_W0_TXNULL) |
5786 			   le32_encode_bits(RTW89_OFF_CHAN_TIME / 10,
5787 					    RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY_VAL);
5788 
5789 		opch->w1 = le32_encode_bits(op->band_type,
5790 					    RTW89_H2C_SCANOFLD_BE_OPCH_W1_CH_BAND) |
5791 			   le32_encode_bits(op->band_width,
5792 					    RTW89_H2C_SCANOFLD_BE_OPCH_W1_BW) |
5793 			   le32_encode_bits(0x3,
5794 					    RTW89_H2C_SCANOFLD_BE_OPCH_W1_NOTIFY) |
5795 			   le32_encode_bits(op->primary_channel,
5796 					    RTW89_H2C_SCANOFLD_BE_OPCH_W1_PRI_CH) |
5797 			   le32_encode_bits(op->channel,
5798 					    RTW89_H2C_SCANOFLD_BE_OPCH_W1_CENTRAL_CH);
5799 
5800 		opch->w2 = le32_encode_bits(0,
5801 					    RTW89_H2C_SCANOFLD_BE_OPCH_W2_PKTS_CTRL) |
5802 			   le32_encode_bits(0,
5803 					    RTW89_H2C_SCANOFLD_BE_OPCH_W2_SW_DEF) |
5804 			   le32_encode_bits(rtw89_is_mlo_1_1(rtwdev) ? 1 : 2,
5805 					    RTW89_H2C_SCANOFLD_BE_OPCH_W2_SS);
5806 
5807 		opch->w3 = le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
5808 					    RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT0) |
5809 			   le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
5810 					    RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT1) |
5811 			   le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
5812 					    RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT2) |
5813 			   le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
5814 					    RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT3);
5815 
5816 		if (ver == 0)
5817 			opch->w1 |= le32_encode_bits(RTW89_CHANNEL_TIME,
5818 						     RTW89_H2C_SCANOFLD_BE_OPCH_W1_DURATION);
5819 		else
5820 			opch->w4 = le32_encode_bits(RTW89_CHANNEL_TIME,
5821 						    RTW89_H2C_SCANOFLD_BE_OPCH_W4_DURATION_V1);
5822 		ptr += sizeof(*opch);
5823 	}
5824 
5825 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5826 			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
5827 			      H2C_FUNC_SCANOFLD_BE, 1, 1,
5828 			      len);
5829 
5830 	if (option->enable)
5831 		cond = RTW89_SCANOFLD_BE_WAIT_COND_START;
5832 	else
5833 		cond = RTW89_SCANOFLD_BE_WAIT_COND_STOP;
5834 
5835 	ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
5836 	if (ret) {
5837 		rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan be ofld\n");
5838 		return ret;
5839 	}
5840 
5841 	return 0;
5842 }
5843 
5844 int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
5845 			struct rtw89_fw_h2c_rf_reg_info *info,
5846 			u16 len, u8 page)
5847 {
5848 	struct sk_buff *skb;
5849 	u8 class = info->rf_path == RF_PATH_A ?
5850 		   H2C_CL_OUTSRC_RF_REG_A : H2C_CL_OUTSRC_RF_REG_B;
5851 	int ret;
5852 
5853 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
5854 	if (!skb) {
5855 		rtw89_err(rtwdev, "failed to alloc skb for h2c rf reg\n");
5856 		return -ENOMEM;
5857 	}
5858 	skb_put_data(skb, info->rtw89_phy_config_rf_h2c[page], len);
5859 
5860 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5861 			      H2C_CAT_OUTSRC, class, page, 0, 0,
5862 			      len);
5863 
5864 	ret = rtw89_h2c_tx(rtwdev, skb, false);
5865 	if (ret) {
5866 		rtw89_err(rtwdev, "failed to send h2c\n");
5867 		goto fail;
5868 	}
5869 
5870 	return 0;
5871 fail:
5872 	dev_kfree_skb_any(skb);
5873 
5874 	return ret;
5875 }
5876 
5877 int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev)
5878 {
5879 	struct rtw89_rfk_mcc_info_data *rfk_mcc = rtwdev->rfk_mcc.data;
5880 	struct rtw89_fw_h2c_rf_get_mccch *mccch;
5881 	struct sk_buff *skb;
5882 	int ret;
5883 	u8 idx;
5884 
5885 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, sizeof(*mccch));
5886 	if (!skb) {
5887 		rtw89_err(rtwdev, "failed to alloc skb for h2c rf_ntfy_mcc\n");
5888 		return -ENOMEM;
5889 	}
5890 	skb_put(skb, sizeof(*mccch));
5891 	mccch = (struct rtw89_fw_h2c_rf_get_mccch *)skb->data;
5892 
5893 	idx = rfk_mcc->table_idx;
5894 	mccch->ch_0 = cpu_to_le32(rfk_mcc->ch[0]);
5895 	mccch->ch_1 = cpu_to_le32(rfk_mcc->ch[1]);
5896 	mccch->band_0 = cpu_to_le32(rfk_mcc->band[0]);
5897 	mccch->band_1 = cpu_to_le32(rfk_mcc->band[1]);
5898 	mccch->current_channel = cpu_to_le32(rfk_mcc->ch[idx]);
5899 	mccch->current_band_type = cpu_to_le32(rfk_mcc->band[idx]);
5900 
5901 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5902 			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY,
5903 			      H2C_FUNC_OUTSRC_RF_GET_MCCCH, 0, 0,
5904 			      sizeof(*mccch));
5905 
5906 	ret = rtw89_h2c_tx(rtwdev, skb, false);
5907 	if (ret) {
5908 		rtw89_err(rtwdev, "failed to send h2c\n");
5909 		goto fail;
5910 	}
5911 
5912 	return 0;
5913 fail:
5914 	dev_kfree_skb_any(skb);
5915 
5916 	return ret;
5917 }
5918 EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc);
5919 
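/*
 * Pre-notify firmware of the per-path RFK MCC channel tables (channel and
 * band per table plus the currently selected entries) ahead of RF
 * calibration. The payload layout depends on the RFK_PRE_NOTIFY_V0/V1
 * firmware features; the v0 form additionally carries IQC table selections
 * and RF mode register snapshots.
 */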
5920 int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
5921 			     enum rtw89_phy_idx phy_idx)
5922 {
5923 	struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
5924 	struct rtw89_fw_h2c_rfk_pre_info_common *common;
5925 	struct rtw89_fw_h2c_rfk_pre_info_v0 *h2c_v0;
5926 	struct rtw89_fw_h2c_rfk_pre_info_v1 *h2c_v1;
5927 	struct rtw89_fw_h2c_rfk_pre_info *h2c;
5928 	u8 tbl_sel[NUM_OF_RTW89_FW_RFK_PATH];
5929 	u32 len = sizeof(*h2c);
5930 	struct sk_buff *skb;
5931 	u8 ver = U8_MAX;
5932 	u8 tbl, path;
5933 	u32 val32;
5934 	int ret;
5935 
5936 	if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V1, &rtwdev->fw)) {
5937 		len = sizeof(*h2c_v1);
5938 		ver = 1;
5939 	} else if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V0, &rtwdev->fw)) {
5940 		len = sizeof(*h2c_v0);
5941 		ver = 0;
5942 	}
5943 
5944 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
5945 	if (!skb) {
5946 		rtw89_err(rtwdev, "failed to alloc skb for h2c rfk_pre_ntfy\n");
5947 		return -ENOMEM;
5948 	}
5949 	skb_put(skb, len);
5950 	h2c = (struct rtw89_fw_h2c_rfk_pre_info *)skb->data;
5951 	common = &h2c->base_v1.common;
5952 
5953 	common->mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);
5954 
5955 	BUILD_BUG_ON(NUM_OF_RTW89_FW_RFK_TBL > RTW89_RFK_CHS_NR);
5956 	BUILD_BUG_ON(ARRAY_SIZE(rfk_mcc->data) < NUM_OF_RTW89_FW_RFK_PATH);
5957 
5958 	for (tbl = 0; tbl < NUM_OF_RTW89_FW_RFK_TBL; tbl++) {
5959 		for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) {
5960 			common->dbcc.ch[path][tbl] =
5961 				cpu_to_le32(rfk_mcc->data[path].ch[tbl]);
5962 			common->dbcc.band[path][tbl] =
5963 				cpu_to_le32(rfk_mcc->data[path].band[tbl]);
5964 		}
5965 	}
5966 
5967 	for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) {
5968 		tbl_sel[path] = rfk_mcc->data[path].table_idx;
5969 
5970 		common->tbl.cur_ch[path] =
5971 			cpu_to_le32(rfk_mcc->data[path].ch[tbl_sel[path]]);
5972 		common->tbl.cur_band[path] =
5973 			cpu_to_le32(rfk_mcc->data[path].band[tbl_sel[path]]);
5974 
5975 		if (ver <= 1)
5976 			continue;
5977 
5978 		h2c->cur_bandwidth[path] =
5979 			cpu_to_le32(rfk_mcc->data[path].bw[tbl_sel[path]]);
5980 	}
5981 
5982 	common->phy_idx = cpu_to_le32(phy_idx);
5983 
5984 	if (ver == 0) { /* RFK_PRE_NOTIFY_V0 */
5985 		h2c_v0 = (struct rtw89_fw_h2c_rfk_pre_info_v0 *)skb->data;
5986 
5987 		h2c_v0->cur_band = cpu_to_le32(rfk_mcc->data[0].band[tbl_sel[0]]);
5988 		h2c_v0->cur_bw = cpu_to_le32(rfk_mcc->data[0].bw[tbl_sel[0]]);
5989 		h2c_v0->cur_center_ch = cpu_to_le32(rfk_mcc->data[0].ch[tbl_sel[0]]);
5990 
5991 		val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL, B_COEF_SEL_IQC_V1);
5992 		h2c_v0->ktbl_sel0 = cpu_to_le32(val32);
5993 		val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL_C1, B_COEF_SEL_IQC_V1);
5994 		h2c_v0->ktbl_sel1 = cpu_to_le32(val32);
5995 		val32 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
5996 		h2c_v0->rfmod0 = cpu_to_le32(val32);
5997 		val32 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK);
5998 		h2c_v0->rfmod1 = cpu_to_le32(val32);
5999 
6000 		if (rtw89_is_mlo_1_1(rtwdev))
6001 			h2c_v0->mlo_1_1 = cpu_to_le32(1);
6002 
6003 		h2c_v0->rfe_type = cpu_to_le32(rtwdev->efuse.rfe_type);
6004 
6005 		goto done;
6006 	}
6007 
6008 	if (rtw89_is_mlo_1_1(rtwdev)) {
6009 		h2c_v1 = &h2c->base_v1;
6010 		h2c_v1->mlo_1_1 = cpu_to_le32(1);
6011 	}
6012 done:
6013 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
6014 			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
6015 			      H2C_FUNC_RFK_PRE_NOTIFY, 0, 0,
6016 			      len);
6017 
6018 	ret = rtw89_h2c_tx(rtwdev, skb, false);
6019 	if (ret) {
6020 		rtw89_err(rtwdev, "failed to send h2c\n");
6021 		goto fail;
6022 	}
6023 
6024 	return 0;
6025 fail:
6026 	dev_kfree_skb_any(skb);
6027 
6028 	return ret;
6029 }
6030 
6031 int rtw89_fw_h2c_rf_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
6032 			 const struct rtw89_chan *chan, enum rtw89_tssi_mode tssi_mode)
6033 {
6034 	struct rtw89_hal *hal = &rtwdev->hal;
6035 	struct rtw89_h2c_rf_tssi *h2c;
6036 	u32 len = sizeof(*h2c);
6037 	struct sk_buff *skb;
6038 	int ret;
6039 
6040 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
6041 	if (!skb) {
6042 		rtw89_err(rtwdev, "failed to alloc skb for h2c RF TSSI\n");
6043 		return -ENOMEM;
6044 	}
6045 	skb_put(skb, len);
6046 	h2c = (struct rtw89_h2c_rf_tssi *)skb->data;
6047 
6048 	h2c->len = cpu_to_le16(len);
6049 	h2c->phy = phy_idx;
6050 	h2c->ch = chan->channel;
6051 	h2c->bw = chan->band_width;
6052 	h2c->band = chan->band_type;
6053 	h2c->hwtx_en = true;
6054 	h2c->cv = hal->cv;
6055 	h2c->tssi_mode = tssi_mode;
6056 
6057 	rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(rtwdev, phy_idx, chan, h2c);
6058 	rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(rtwdev, phy_idx, chan, h2c);
6059 
6060 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
6061 			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
6062 			      H2C_FUNC_RFK_TSSI_OFFLOAD, 0, 0, len);
6063 
6064 	ret = rtw89_h2c_tx(rtwdev, skb, false);
6065 	if (ret) {
6066 		rtw89_err(rtwdev, "failed to send h2c\n");
6067 		goto fail;
6068 	}
6069 
6070 	return 0;
6071 fail:
6072 	dev_kfree_skb_any(skb);
6073 
6074 	return ret;
6075 }
6076 
6077 int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
6078 			const struct rtw89_chan *chan)
6079 {
6080 	struct rtw89_h2c_rf_iqk *h2c;
6081 	u32 len = sizeof(*h2c);
6082 	struct sk_buff *skb;
6083 	int ret;
6084 
6085 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
6086 	if (!skb) {
6087 		rtw89_err(rtwdev, "failed to alloc skb for h2c RF IQK\n");
6088 		return -ENOMEM;
6089 	}
6090 	skb_put(skb, len);
6091 	h2c = (struct rtw89_h2c_rf_iqk *)skb->data;
6092 
6093 	h2c->phy_idx = cpu_to_le32(phy_idx);
6094 	h2c->dbcc = cpu_to_le32(rtwdev->dbcc_en);
6095 
6096 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
6097 			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
6098 			      H2C_FUNC_RFK_IQK_OFFLOAD, 0, 0, len);
6099 
6100 	ret = rtw89_h2c_tx(rtwdev, skb, false);
6101 	if (ret) {
6102 		rtw89_err(rtwdev, "failed to send h2c\n");
6103 		goto fail;
6104 	}
6105 
6106 	return 0;
6107 fail:
6108 	dev_kfree_skb_any(skb);
6109 
6110 	return ret;
6111 }
6112 
6113 int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
6114 			const struct rtw89_chan *chan)
6115 {
6116 	struct rtw89_h2c_rf_dpk *h2c;
6117 	u32 len = sizeof(*h2c);
6118 	struct sk_buff *skb;
6119 	int ret;
6120 
6121 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
6122 	if (!skb) {
6123 		rtw89_err(rtwdev, "failed to alloc skb for h2c RF DPK\n");
6124 		return -ENOMEM;
6125 	}
6126 	skb_put(skb, len);
6127 	h2c = (struct rtw89_h2c_rf_dpk *)skb->data;
6128 
6129 	h2c->len = len;
6130 	h2c->phy = phy_idx;
6131 	h2c->dpk_enable = true;
6132 	h2c->kpath = RF_AB;
6133 	h2c->cur_band = chan->band_type;
6134 	h2c->cur_bw = chan->band_width;
6135 	h2c->cur_ch = chan->channel;
6136 	h2c->dpk_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK);
6137 
6138 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
6139 			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
6140 			      H2C_FUNC_RFK_DPK_OFFLOAD, 0, 0, len);
6141 
6142 	ret = rtw89_h2c_tx(rtwdev, skb, false);
6143 	if (ret) {
6144 		rtw89_err(rtwdev, "failed to send h2c\n");
6145 		goto fail;
6146 	}
6147 
6148 	return 0;
6149 fail:
6150 	dev_kfree_skb_any(skb);
6151 
6152 	return ret;
6153 }
6154 
6155 int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
6156 			   const struct rtw89_chan *chan)
6157 {
6158 	struct rtw89_hal *hal = &rtwdev->hal;
6159 	struct rtw89_h2c_rf_txgapk *h2c;
6160 	u32 len = sizeof(*h2c);
6161 	struct sk_buff *skb;
6162 	int ret;
6163 
6164 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
6165 	if (!skb) {
6166 		rtw89_err(rtwdev, "failed to alloc skb for h2c RF TXGAPK\n");
6167 		return -ENOMEM;
6168 	}
6169 	skb_put(skb, len);
6170 	h2c = (struct rtw89_h2c_rf_txgapk *)skb->data;
6171 
6172 	h2c->len = len;
6173 	h2c->ktype = 2;
6174 	h2c->phy = phy_idx;
6175 	h2c->kpath = RF_AB;
6176 	h2c->band = chan->band_type;
6177 	h2c->bw = chan->band_width;
6178 	h2c->ch = chan->channel;
6179 	h2c->cv = hal->cv;
6180 
6181 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
6182 			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
6183 			      H2C_FUNC_RFK_TXGAPK_OFFLOAD, 0, 0, len);
6184 
6185 	ret = rtw89_h2c_tx(rtwdev, skb, false);
6186 	if (ret) {
6187 		rtw89_err(rtwdev, "failed to send h2c\n");
6188 		goto fail;
6189 	}
6190 
6191 	return 0;
6192 fail:
6193 	dev_kfree_skb_any(skb);
6194 
6195 	return ret;
6196 }
6197 
6198 int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
6199 			 const struct rtw89_chan *chan)
6200 {
6201 	struct rtw89_h2c_rf_dack *h2c;
6202 	u32 len = sizeof(*h2c);
6203 	struct sk_buff *skb;
6204 	int ret;
6205 
6206 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
6207 	if (!skb) {
6208 		rtw89_err(rtwdev, "failed to alloc skb for h2c RF DACK\n");
6209 		return -ENOMEM;
6210 	}
6211 	skb_put(skb, len);
6212 	h2c = (struct rtw89_h2c_rf_dack *)skb->data;
6213 
6214 	h2c->len = cpu_to_le32(len);
6215 	h2c->phy = cpu_to_le32(phy_idx);
6216 	h2c->type = cpu_to_le32(0);
6217 
6218 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
6219 			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
6220 			      H2C_FUNC_RFK_DACK_OFFLOAD, 0, 0, len);
6221 
6222 	ret = rtw89_h2c_tx(rtwdev, skb, false);
6223 	if (ret) {
6224 		rtw89_err(rtwdev, "failed to send h2c\n");
6225 		goto fail;
6226 	}
6227 
6228 	return 0;
6229 fail:
6230 	dev_kfree_skb_any(skb);
6231 
6232 	return ret;
6233 }
6234 
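/*
 * Trigger the RX DC offset calibration (RX DCK) offload. Firmware with the
 * RFK_RXDCK_V0 feature only takes the v0 fields; newer firmware additionally
 * carries the is_chl_k flag.
 */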
6235 int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
6236 			  const struct rtw89_chan *chan, bool is_chl_k)
6237 {
6238 	struct rtw89_h2c_rf_rxdck_v0 *v0;
6239 	struct rtw89_h2c_rf_rxdck *h2c;
6240 	u32 len = sizeof(*h2c);
6241 	struct sk_buff *skb;
6242 	int ver = -1;
6243 	int ret;
6244 
6245 	if (RTW89_CHK_FW_FEATURE(RFK_RXDCK_V0, &rtwdev->fw)) {
6246 		len = sizeof(*v0);
6247 		ver = 0;
6248 	}
6249 
6250 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
6251 	if (!skb) {
6252 		rtw89_err(rtwdev, "failed to alloc skb for h2c RF RXDCK\n");
6253 		return -ENOMEM;
6254 	}
6255 	skb_put(skb, len);
6256 	v0 = (struct rtw89_h2c_rf_rxdck_v0 *)skb->data;
6257 
6258 	v0->len = len;
6259 	v0->phy = phy_idx;
6260 	v0->is_afe = false;
6261 	v0->kpath = RF_AB;
6262 	v0->cur_band = chan->band_type;
6263 	v0->cur_bw = chan->band_width;
6264 	v0->cur_ch = chan->channel;
6265 	v0->rxdck_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK);
6266 
6267 	if (ver == 0)
6268 		goto hdr;
6269 
6270 	h2c = (struct rtw89_h2c_rf_rxdck *)skb->data;
6271 	h2c->is_chl_k = is_chl_k;
6272 
6273 hdr:
6274 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
6275 			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
6276 			      H2C_FUNC_RFK_RXDCK_OFFLOAD, 0, 0, len);
6277 
6278 	ret = rtw89_h2c_tx(rtwdev, skb, false);
6279 	if (ret) {
6280 		rtw89_err(rtwdev, "failed to send h2c\n");
6281 		goto fail;
6282 	}
6283 
6284 	return 0;
6285 fail:
6286 	dev_kfree_skb_any(skb);
6287 
6288 	return ret;
6289 }
6290 
6291 int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
6292 			      u8 h2c_class, u8 h2c_func, u8 *buf, u16 len,
6293 			      bool rack, bool dack)
6294 {
6295 	struct sk_buff *skb;
6296 	int ret;
6297 
6298 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
6299 	if (!skb) {
6300 		rtw89_err(rtwdev, "failed to alloc skb for raw with hdr\n");
6301 		return -ENOMEM;
6302 	}
6303 	skb_put_data(skb, buf, len);
6304 
6305 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
6306 			      H2C_CAT_OUTSRC, h2c_class, h2c_func, rack, dack,
6307 			      len);
6308 
6309 	ret = rtw89_h2c_tx(rtwdev, skb, false);
6310 	if (ret) {
6311 		rtw89_err(rtwdev, "failed to send h2c\n");
6312 		goto fail;
6313 	}
6314 
6315 	return 0;
6316 fail:
6317 	dev_kfree_skb_any(skb);
6318 
6319 	return ret;
6320 }
6321 
6322 int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len)
6323 {
6324 	struct sk_buff *skb;
6325 	int ret;
6326 
6327 	skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, len);
6328 	if (!skb) {
6329 		rtw89_err(rtwdev, "failed to alloc skb for h2c raw\n");
6330 		return -ENOMEM;
6331 	}
6332 	skb_put_data(skb, buf, len);
6333 
6334 	ret = rtw89_h2c_tx(rtwdev, skb, false);
6335 	if (ret) {
6336 		rtw89_err(rtwdev, "failed to send h2c\n");
6337 		goto fail;
6338 	}
6339 
6340 	return 0;
6341 fail:
6342 	dev_kfree_skb_any(skb);
6343 
6344 	return ret;
6345 }
6346 
6347 void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev)
6348 {
6349 	struct rtw89_early_h2c *early_h2c;
6350 
6351 	lockdep_assert_wiphy(rtwdev->hw->wiphy);
6352 
6353 	list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) {
6354 		rtw89_fw_h2c_raw(rtwdev, early_h2c->h2c, early_h2c->h2c_len);
6355 	}
6356 }
6357 
6358 void __rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev)
6359 {
6360 	struct rtw89_early_h2c *early_h2c, *tmp;
6361 
6362 	list_for_each_entry_safe(early_h2c, tmp, &rtwdev->early_h2c_list, list) {
6363 		list_del(&early_h2c->list);
6364 		kfree(early_h2c->h2c);
6365 		kfree(early_h2c);
6366 	}
6367 }
6368 
6369 void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev)
6370 {
6371 	lockdep_assert_wiphy(rtwdev->hw->wiphy);
6372 
6373 	__rtw89_fw_free_all_early_h2c(rtwdev);
6374 }
6375 
6376 static void rtw89_fw_c2h_parse_attr(struct sk_buff *c2h)
6377 {
6378 	const struct rtw89_c2h_hdr *hdr = (const struct rtw89_c2h_hdr *)c2h->data;
6379 	struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h);
6380 
6381 	attr->category = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CATEGORY);
6382 	attr->class = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CLASS);
6383 	attr->func = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_FUNC);
6384 	attr->len = le32_get_bits(hdr->w1, RTW89_C2H_HDR_W1_LEN);
6385 }
6386 
6387 static bool rtw89_fw_c2h_chk_atomic(struct rtw89_dev *rtwdev,
6388 				    struct sk_buff *c2h)
6389 {
6390 	struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h);
6391 	u8 category = attr->category;
6392 	u8 class = attr->class;
6393 	u8 func = attr->func;
6394 
6395 	switch (category) {
6396 	default:
6397 		return false;
6398 	case RTW89_C2H_CAT_MAC:
6399 		return rtw89_mac_c2h_chk_atomic(rtwdev, c2h, class, func);
6400 	case RTW89_C2H_CAT_OUTSRC:
6401 		return rtw89_phy_c2h_chk_atomic(rtwdev, class, func);
6402 	}
6403 }
6404 
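/*
 * Entry point for C2H events received in atomic context: events classified
 * as atomic are handled immediately, everything else is queued and deferred
 * to c2h_work.
 */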
6405 void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h)
6406 {
6407 	rtw89_fw_c2h_parse_attr(c2h);
6408 	if (!rtw89_fw_c2h_chk_atomic(rtwdev, c2h))
6409 		goto enqueue;
6410 
6411 	rtw89_fw_c2h_cmd_handle(rtwdev, c2h);
6412 	dev_kfree_skb_any(c2h);
6413 	return;
6414 
6415 enqueue:
6416 	skb_queue_tail(&rtwdev->c2h_queue, c2h);
6417 	wiphy_work_queue(rtwdev->hw->wiphy, &rtwdev->c2h_work);
6418 }
6419 
6420 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
6421 				    struct sk_buff *skb)
6422 {
6423 	struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(skb);
6424 	u8 category = attr->category;
6425 	u8 class = attr->class;
6426 	u8 func = attr->func;
6427 	u16 len = attr->len;
6428 	bool dump = true;
6429 
6430 	if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags))
6431 		return;
6432 
6433 	switch (category) {
6434 	case RTW89_C2H_CAT_TEST:
6435 		break;
6436 	case RTW89_C2H_CAT_MAC:
6437 		rtw89_mac_c2h_handle(rtwdev, skb, len, class, func);
6438 		if (class == RTW89_MAC_C2H_CLASS_INFO &&
6439 		    func == RTW89_MAC_C2H_FUNC_C2H_LOG)
6440 			dump = false;
6441 		break;
6442 	case RTW89_C2H_CAT_OUTSRC:
6443 		if (class >= RTW89_PHY_C2H_CLASS_BTC_MIN &&
6444 		    class <= RTW89_PHY_C2H_CLASS_BTC_MAX)
6445 			rtw89_btc_c2h_handle(rtwdev, skb, len, class, func);
6446 		else
6447 			rtw89_phy_c2h_handle(rtwdev, skb, len, class, func);
6448 		break;
6449 	}
6450 
6451 	if (dump)
6452 		rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "C2H: ", skb->data, skb->len);
6453 }
6454 
6455 void rtw89_fw_c2h_work(struct wiphy *wiphy, struct wiphy_work *work)
6456 {
6457 	struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
6458 						c2h_work);
6459 	struct sk_buff *skb, *tmp;
6460 
6461 	lockdep_assert_wiphy(rtwdev->hw->wiphy);
6462 
6463 	skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) {
6464 		skb_unlink(skb, &rtwdev->c2h_queue);
6465 		rtw89_fw_c2h_cmd_handle(rtwdev, skb);
6466 		dev_kfree_skb_any(skb);
6467 	}
6468 }
6469 
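/*
 * Register-based H2C path: poll until firmware has consumed the previous
 * command, write the header and payload into the H2C registers, bump the
 * H2C counter and kick the trigger bit.
 */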
6470 static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev,
6471 				  struct rtw89_mac_h2c_info *info)
6472 {
6473 	const struct rtw89_chip_info *chip = rtwdev->chip;
6474 	struct rtw89_fw_info *fw_info = &rtwdev->fw;
6475 	const u32 *h2c_reg = chip->h2c_regs;
6476 	u8 i, val, len;
6477 	int ret;
6478 
6479 	ret = read_poll_timeout(rtw89_read8, val, val == 0, 1000, 5000, false,
6480 				rtwdev, chip->h2c_ctrl_reg);
6481 	if (ret) {
6482 		rtw89_warn(rtwdev, "FW does not process h2c registers\n");
6483 		return ret;
6484 	}
6485 
6486 	len = DIV_ROUND_UP(info->content_len + RTW89_H2CREG_HDR_LEN,
6487 			   sizeof(info->u.h2creg[0]));
6488 
6489 	u32p_replace_bits(&info->u.hdr.w0, info->id, RTW89_H2CREG_HDR_FUNC_MASK);
6490 	u32p_replace_bits(&info->u.hdr.w0, len, RTW89_H2CREG_HDR_LEN_MASK);
6491 
6492 	for (i = 0; i < RTW89_H2CREG_MAX; i++)
6493 		rtw89_write32(rtwdev, h2c_reg[i], info->u.h2creg[i]);
6494 
6495 	fw_info->h2c_counter++;
6496 	rtw89_write8_mask(rtwdev, chip->h2c_counter_reg.addr,
6497 			  chip->h2c_counter_reg.mask, fw_info->h2c_counter);
6498 	rtw89_write8(rtwdev, chip->h2c_ctrl_reg, B_AX_H2CREG_TRIGGER);
6499 
6500 	return 0;
6501 }
6502 
6503 static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev,
6504 				 struct rtw89_mac_c2h_info *info)
6505 {
6506 	const struct rtw89_chip_info *chip = rtwdev->chip;
6507 	struct rtw89_fw_info *fw_info = &rtwdev->fw;
6508 	const u32 *c2h_reg = chip->c2h_regs;
6509 	u32 ret;
6510 	u8 i, val;
6511 
6512 	info->id = RTW89_FWCMD_C2HREG_FUNC_NULL;
6513 
6514 	ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1,
6515 				       RTW89_C2H_TIMEOUT, false, rtwdev,
6516 				       chip->c2h_ctrl_reg);
6517 	if (ret) {
6518 		rtw89_warn(rtwdev, "c2h reg timeout\n");
6519 		return ret;
6520 	}
6521 
6522 	for (i = 0; i < RTW89_C2HREG_MAX; i++)
6523 		info->u.c2hreg[i] = rtw89_read32(rtwdev, c2h_reg[i]);
6524 
6525 	rtw89_write8(rtwdev, chip->c2h_ctrl_reg, 0);
6526 
6527 	info->id = u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_FUNC_MASK);
6528 	info->content_len =
6529 		(u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_LEN_MASK) << 2) -
6530 		RTW89_C2HREG_HDR_LEN;
6531 
6532 	fw_info->c2h_counter++;
6533 	rtw89_write8_mask(rtwdev, chip->c2h_counter_reg.addr,
6534 			  chip->c2h_counter_reg.mask, fw_info->c2h_counter);
6535 
6536 	return 0;
6537 }
6538 
6539 int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev,
6540 		     struct rtw89_mac_h2c_info *h2c_info,
6541 		     struct rtw89_mac_c2h_info *c2h_info)
6542 {
6543 	u32 ret;
6544 
6545 	if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE)
6546 		lockdep_assert_wiphy(rtwdev->hw->wiphy);
6547 
6548 	if (!h2c_info && !c2h_info)
6549 		return -EINVAL;
6550 
6551 	if (!h2c_info)
6552 		goto recv_c2h;
6553 
6554 	ret = rtw89_fw_write_h2c_reg(rtwdev, h2c_info);
6555 	if (ret)
6556 		return ret;
6557 
6558 recv_c2h:
6559 	if (!c2h_info)
6560 		return 0;
6561 
6562 	ret = rtw89_fw_read_c2h_reg(rtwdev, c2h_info);
6563 	if (ret)
6564 		return ret;
6565 
6566 	return 0;
6567 }
6568 
6569 void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev)
6570 {
6571 	if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) {
6572 		rtw89_err(rtwdev, "[ERR]pwr is off\n");
6573 		return;
6574 	}
6575 
6576 	rtw89_info(rtwdev, "FW status = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM0));
6577 	rtw89_info(rtwdev, "FW BADADDR = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM1));
6578 	rtw89_info(rtwdev, "FW EPC/RA = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM2));
6579 	rtw89_info(rtwdev, "FW MISC = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM3));
6580 	rtw89_info(rtwdev, "R_AX_HALT_C2H = 0x%x\n",
6581 		   rtw89_read32(rtwdev, R_AX_HALT_C2H));
6582 	rtw89_info(rtwdev, "R_AX_SER_DBG_INFO = 0x%x\n",
6583 		   rtw89_read32(rtwdev, R_AX_SER_DBG_INFO));
6584 
6585 	rtw89_fw_prog_cnt_dump(rtwdev);
6586 }
6587 
6588 static void rtw89_hw_scan_release_pkt_list(struct rtw89_dev *rtwdev)
6589 {
6590 	struct list_head *pkt_list = rtwdev->scan_info.pkt_list;
6591 	struct rtw89_pktofld_info *info, *tmp;
6592 	u8 idx;
6593 
6594 	for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) {
6595 		if (!(rtwdev->chip->support_bands & BIT(idx)))
6596 			continue;
6597 
6598 		list_for_each_entry_safe(info, tmp, &pkt_list[idx], list) {
6599 			if (test_bit(info->id, rtwdev->pkt_offload))
6600 				rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
6601 			list_del(&info->list);
6602 			kfree(info);
6603 		}
6604 	}
6605 }
6606 
6607 static void rtw89_hw_scan_cleanup(struct rtw89_dev *rtwdev,
6608 				  struct rtw89_vif_link *rtwvif_link)
6609 {
6610 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
6611 	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
6612 	struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
6613 
6614 	mac->free_chan_list(rtwdev);
6615 	rtw89_hw_scan_release_pkt_list(rtwdev);
6616 
6617 	rtwvif->scan_req = NULL;
6618 	rtwvif->scan_ies = NULL;
6619 	scan_info->scanning_vif = NULL;
6620 	scan_info->abort = false;
6621 	scan_info->connected = false;
6622 }
6623 
6624 static bool rtw89_is_6ghz_wildcard_probe_req(struct rtw89_dev *rtwdev,
6625 					     struct cfg80211_scan_request *req,
6626 					     struct rtw89_pktofld_info *info,
6627 					     enum nl80211_band band, u8 ssid_idx)
6628 {
6629 	if (band != NL80211_BAND_6GHZ)
6630 		return false;
6631 
6632 	if (req->ssids[ssid_idx].ssid_len) {
6633 		memcpy(info->ssid, req->ssids[ssid_idx].ssid,
6634 		       req->ssids[ssid_idx].ssid_len);
6635 		info->ssid_len = req->ssids[ssid_idx].ssid_len;
6636 		return false;
6637 	} else {
6638 		info->wildcard_6ghz = true;
6639 		return true;
6640 	}
6641 }
6642 
6643 static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev,
6644 				     struct rtw89_vif_link *rtwvif_link,
6645 				     struct sk_buff *skb, u8 ssid_idx)
6646 {
6647 	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
6648 	struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
6649 	struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
6650 	struct cfg80211_scan_request *req = rtwvif->scan_req;
6651 	struct rtw89_pktofld_info *info;
6652 	struct sk_buff *new;
6653 	int ret = 0;
6654 	u8 band;
6655 
6656 	for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
6657 		if (!(rtwdev->chip->support_bands & BIT(band)))
6658 			continue;
6659 
6660 		new = skb_copy(skb, GFP_KERNEL);
6661 		if (!new) {
6662 			ret = -ENOMEM;
6663 			goto out;
6664 		}
6665 		skb_put_data(new, ies->ies[band], ies->len[band]);
6666 		skb_put_data(new, ies->common_ies, ies->common_ie_len);
6667 
6668 		info = kzalloc(sizeof(*info), GFP_KERNEL);
6669 		if (!info) {
6670 			ret = -ENOMEM;
6671 			kfree_skb(new);
6672 			goto out;
6673 		}
6674 
6675 		rtw89_is_6ghz_wildcard_probe_req(rtwdev, req, info, band, ssid_idx);
6676 
6677 		ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new);
6678 		if (ret) {
6679 			kfree_skb(new);
6680 			kfree(info);
6681 			goto out;
6682 		}
6683 
6684 		list_add_tail(&info->list, &scan_info->pkt_list[band]);
6685 		kfree_skb(new);
6686 	}
6687 out:
6688 	return ret;
6689 }
6690 
6691 static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev,
6692 					  struct rtw89_vif_link *rtwvif_link,
6693 					  const u8 *mac_addr)
6694 {
6695 	struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
6696 	struct cfg80211_scan_request *req = rtwvif->scan_req;
6697 	struct sk_buff *skb;
6698 	u8 num = req->n_ssids, i;
6699 	int ret;
6700 
6701 	for (i = 0; i < num; i++) {
6702 		skb = ieee80211_probereq_get(rtwdev->hw, mac_addr,
6703 					     req->ssids[i].ssid,
6704 					     req->ssids[i].ssid_len,
6705 					     req->ie_len);
6706 		if (!skb)
6707 			return -ENOMEM;
6708 
6709 		ret = rtw89_append_probe_req_ie(rtwdev, rtwvif_link, skb, i);
6710 		kfree_skb(skb);
6711 
6712 		if (ret)
6713 			return ret;
6714 	}
6715 
6716 	return 0;
6717 }
6718 
6719 static int rtw89_update_6ghz_rnr_chan_ax(struct rtw89_dev *rtwdev,
6720 					 struct ieee80211_scan_ies *ies,
6721 					 struct cfg80211_scan_request *req,
6722 					 struct rtw89_mac_chinfo_ax *ch_info)
6723 {
6724 	struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif;
6725 	struct list_head *pkt_list = rtwdev->scan_info.pkt_list;
6726 	struct cfg80211_scan_6ghz_params *params;
6727 	struct rtw89_pktofld_info *info, *tmp;
6728 	struct ieee80211_hdr *hdr;
6729 	struct sk_buff *skb;
6730 	bool found;
6731 	int ret = 0;
6732 	u8 i;
6733 
6734 	if (!req->n_6ghz_params)
6735 		return 0;
6736 
6737 	for (i = 0; i < req->n_6ghz_params; i++) {
6738 		params = &req->scan_6ghz_params[i];
6739 
6740 		if (req->channels[params->channel_idx]->hw_value !=
6741 		    ch_info->pri_ch)
6742 			continue;
6743 
6744 		found = false;
6745 		list_for_each_entry(tmp, &pkt_list[NL80211_BAND_6GHZ], list) {
6746 			if (ether_addr_equal(tmp->bssid, params->bssid)) {
6747 				found = true;
6748 				break;
6749 			}
6750 		}
6751 		if (found)
6752 			continue;
6753 
6754 		skb = ieee80211_probereq_get(rtwdev->hw, rtwvif_link->mac_addr,
6755 					     NULL, 0, req->ie_len);
6756 		if (!skb)
6757 			return -ENOMEM;
6758 
6759 		skb_put_data(skb, ies->ies[NL80211_BAND_6GHZ], ies->len[NL80211_BAND_6GHZ]);
6760 		skb_put_data(skb, ies->common_ies, ies->common_ie_len);
6761 		hdr = (struct ieee80211_hdr *)skb->data;
6762 		ether_addr_copy(hdr->addr3, params->bssid);
6763 
6764 		info = kzalloc(sizeof(*info), GFP_KERNEL);
6765 		if (!info) {
6766 			ret = -ENOMEM;
6767 			kfree_skb(skb);
6768 			goto out;
6769 		}
6770 
6771 		ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb);
6772 		if (ret) {
6773 			kfree_skb(skb);
6774 			kfree(info);
6775 			goto out;
6776 		}
6777 
6778 		ether_addr_copy(info->bssid, params->bssid);
6779 		info->channel_6ghz = req->channels[params->channel_idx]->hw_value;
6780 		list_add_tail(&info->list, &rtwdev->scan_info.pkt_list[NL80211_BAND_6GHZ]);
6781 
6782 		ch_info->tx_pkt = true;
6783 		ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G;
6784 
6785 		kfree_skb(skb);
6786 	}
6787 
6788 out:
6789 	return ret;
6790 }
6791 
6792 static void rtw89_pno_scan_add_chan_ax(struct rtw89_dev *rtwdev,
6793 				       int chan_type, int ssid_num,
6794 				       struct rtw89_mac_chinfo_ax *ch_info)
6795 {
6796 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
6797 	struct rtw89_pktofld_info *info;
6798 	u8 probe_count = 0;
6799 
6800 	ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
6801 	ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
6802 	ch_info->bw = RTW89_SCAN_WIDTH;
6803 	ch_info->tx_pkt = true;
6804 	ch_info->cfg_tx_pwr = false;
6805 	ch_info->tx_pwr_idx = 0;
6806 	ch_info->tx_null = false;
6807 	ch_info->pause_data = false;
6808 	ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
6809 
6810 	if (ssid_num) {
6811 		list_for_each_entry(info, &rtw_wow->pno_pkt_list, list) {
6812 			if (info->channel_6ghz &&
6813 			    ch_info->pri_ch != info->channel_6ghz)
6814 				continue;
6815 			else if (info->channel_6ghz && probe_count != 0)
6816 				ch_info->period += RTW89_CHANNEL_TIME_6G;
6817 
6818 			if (info->wildcard_6ghz)
6819 				continue;
6820 
6821 			ch_info->pkt_id[probe_count++] = info->id;
6822 			if (probe_count >= RTW89_SCANOFLD_MAX_SSID)
6823 				break;
6824 		}
6825 		ch_info->num_pkt = probe_count;
6826 	}
6827 
6828 	switch (chan_type) {
6829 	case RTW89_CHAN_DFS:
6830 		if (ch_info->ch_band != RTW89_BAND_6G)
6831 			ch_info->period = max_t(u8, ch_info->period,
6832 						RTW89_DFS_CHAN_TIME);
6833 		ch_info->dwell_time = RTW89_DWELL_TIME;
6834 		break;
6835 	case RTW89_CHAN_ACTIVE:
6836 		break;
6837 	default:
6838 		rtw89_err(rtwdev, "Channel type out of bounds\n");
6839 	}
6840 }
6841 
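/* Fill one channel entry for hw_scan on AX chips: set the common defaults,
 * skip active probing (and trim the dwell) on 6 GHz channels that are non-PSC
 * or only wildcard-scanned, add RNR-directed probes, attach up to
 * RTW89_SCANOFLD_MAX_SSID offloaded probe IDs, then apply per-type overrides
 * (operating channel, DFS, active).
 */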
6842 static void rtw89_hw_scan_add_chan_ax(struct rtw89_dev *rtwdev, int chan_type,
6843 				      int ssid_num,
6844 				      struct rtw89_mac_chinfo_ax *ch_info)
6845 {
6846 	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
6847 	struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif;
6848 	struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
6849 	struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
6850 	struct cfg80211_scan_request *req = rtwvif->scan_req;
6851 	struct rtw89_chan *op = &rtwdev->scan_info.op_chan;
6852 	struct rtw89_pktofld_info *info;
6853 	u8 band, probe_count = 0;
6854 	int ret;
6855 
6856 	ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
6857 	ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
6858 	ch_info->bw = RTW89_SCAN_WIDTH;
6859 	ch_info->tx_pkt = true;
6860 	ch_info->cfg_tx_pwr = false;
6861 	ch_info->tx_pwr_idx = 0;
6862 	ch_info->tx_null = false;
6863 	ch_info->pause_data = false;
6864 	ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
6865 
6866 	if (ch_info->ch_band == RTW89_BAND_6G) {
6867 		if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) ||
6868 		    !ch_info->is_psc) {
6869 			ch_info->tx_pkt = false;
6870 			if (!req->duration_mandatory)
6871 				ch_info->period -= RTW89_DWELL_TIME_6G;
6872 		}
6873 	}
6874 
6875 	ret = rtw89_update_6ghz_rnr_chan_ax(rtwdev, ies, req, ch_info);
6876 	if (ret)
6877 		rtw89_warn(rtwdev, "RNR update failed: %d\n", ret);
6878 
6879 	if (ssid_num) {
6880 		band = rtw89_hw_to_nl80211_band(ch_info->ch_band);
6881 
6882 		list_for_each_entry(info, &scan_info->pkt_list[band], list) {
6883 			if (info->channel_6ghz &&
6884 			    ch_info->pri_ch != info->channel_6ghz)
6885 				continue;
6886 			else if (info->channel_6ghz && probe_count != 0)
6887 				ch_info->period += RTW89_CHANNEL_TIME_6G;
6888 
6889 			if (info->wildcard_6ghz)
6890 				continue;
6891 
6892 			ch_info->pkt_id[probe_count++] = info->id;
6893 			if (probe_count >= RTW89_SCANOFLD_MAX_SSID)
6894 				break;
6895 		}
6896 		ch_info->num_pkt = probe_count;
6897 	}
6898 
6899 	switch (chan_type) {
6900 	case RTW89_CHAN_OPERATE:
6901 		ch_info->central_ch = op->channel;
6902 		ch_info->pri_ch = op->primary_channel;
6903 		ch_info->ch_band = op->band_type;
6904 		ch_info->bw = op->band_width;
6905 		ch_info->tx_null = true;
6906 		ch_info->num_pkt = 0;
6907 		break;
6908 	case RTW89_CHAN_DFS:
6909 		if (ch_info->ch_band != RTW89_BAND_6G)
6910 			ch_info->period = max_t(u8, ch_info->period,
6911 						RTW89_DFS_CHAN_TIME);
6912 		ch_info->dwell_time = RTW89_DWELL_TIME;
6913 		ch_info->pause_data = true;
6914 		break;
6915 	case RTW89_CHAN_ACTIVE:
6916 		ch_info->pause_data = true;
6917 		break;
6918 	default:
6919 		rtw89_err(rtwdev, "Channel type out of bounds\n");
6920 	}
6921 }
6922 
6923 static void rtw89_pno_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type,
6924 				       int ssid_num,
6925 				       struct rtw89_mac_chinfo_be *ch_info)
6926 {
6927 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
6928 	struct rtw89_pktofld_info *info;
6929 	u8 probe_count = 0, i;
6930 
6931 	ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
6932 	ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
6933 	ch_info->bw = RTW89_SCAN_WIDTH;
6934 	ch_info->tx_null = false;
6935 	ch_info->pause_data = false;
6936 	ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
6937 
6938 	if (ssid_num) {
6939 		list_for_each_entry(info, &rtw_wow->pno_pkt_list, list) {
6940 			ch_info->pkt_id[probe_count++] = info->id;
6941 			if (probe_count >= RTW89_SCANOFLD_MAX_SSID)
6942 				break;
6943 		}
6944 	}
6945 
6946 	for (i = probe_count; i < RTW89_SCANOFLD_MAX_SSID; i++)
6947 		ch_info->pkt_id[i] = RTW89_SCANOFLD_PKT_NONE;
6948 
6949 	switch (chan_type) {
6950 	case RTW89_CHAN_DFS:
6951 		ch_info->period = max_t(u8, ch_info->period, RTW89_DFS_CHAN_TIME);
6952 		ch_info->dwell_time = RTW89_DWELL_TIME;
6953 		break;
6954 	case RTW89_CHAN_ACTIVE:
6955 		break;
6956 	default:
6957 		rtw89_warn(rtwdev, "Channel type out of bounds\n");
6958 		break;
6959 	}
6960 }
6961 
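/* BE counterpart of rtw89_hw_scan_add_chan_ax(). Unused probe slots are
 * explicitly set to RTW89_SCANOFLD_PKT_NONE, and no RTW89_CHAN_OPERATE
 * entries are produced here; returning to the operating channel appears to be
 * handled through the op-channel fields of the scan option instead.
 */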
6962 static void rtw89_hw_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type,
6963 				      int ssid_num,
6964 				      struct rtw89_mac_chinfo_be *ch_info)
6965 {
6966 	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
6967 	struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif;
6968 	struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
6969 	struct cfg80211_scan_request *req = rtwvif->scan_req;
6970 	struct rtw89_pktofld_info *info;
6971 	u8 band, probe_count = 0, i;
6972 
6973 	ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
6974 	ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
6975 	ch_info->bw = RTW89_SCAN_WIDTH;
6976 	ch_info->tx_null = false;
6977 	ch_info->pause_data = false;
6978 	ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
6979 
6980 	if (ssid_num) {
6981 		band = rtw89_hw_to_nl80211_band(ch_info->ch_band);
6982 
6983 		list_for_each_entry(info, &scan_info->pkt_list[band], list) {
6984 			if (info->channel_6ghz &&
6985 			    ch_info->pri_ch != info->channel_6ghz)
6986 				continue;
6987 
6988 			if (info->wildcard_6ghz)
6989 				continue;
6990 
6991 			ch_info->pkt_id[probe_count++] = info->id;
6992 			if (probe_count >= RTW89_SCANOFLD_MAX_SSID)
6993 				break;
6994 		}
6995 	}
6996 
6997 	if (ch_info->ch_band == RTW89_BAND_6G) {
6998 		if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) ||
6999 		    !ch_info->is_psc) {
7000 			ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
7001 			if (!req->duration_mandatory)
7002 				ch_info->period -= RTW89_DWELL_TIME_6G;
7003 		}
7004 	}
7005 
7006 	for (i = probe_count; i < RTW89_SCANOFLD_MAX_SSID; i++)
7007 		ch_info->pkt_id[i] = RTW89_SCANOFLD_PKT_NONE;
7008 
7009 	switch (chan_type) {
7010 	case RTW89_CHAN_DFS:
7011 		if (ch_info->ch_band != RTW89_BAND_6G)
7012 			ch_info->period =
7013 				max_t(u8, ch_info->period, RTW89_DFS_CHAN_TIME);
7014 		ch_info->dwell_time = RTW89_DWELL_TIME;
7015 		ch_info->pause_data = true;
7016 		break;
7017 	case RTW89_CHAN_ACTIVE:
7018 		ch_info->pause_data = true;
7019 		break;
7020 	default:
7021 		rtw89_warn(rtwdev, "Channel type out of bounds\n");
7022 		break;
7023 	}
7024 }
7025 
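/* Build the PNO channel list from nd_config (at most RTW89_SCAN_LIST_LIMIT_AX
 * entries) and push it with the scan-list-offload H2C; the temporary entries
 * are always freed before returning.
 */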
7026 int rtw89_pno_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
7027 				    struct rtw89_vif_link *rtwvif_link)
7028 {
7029 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
7030 	struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config;
7031 	struct rtw89_mac_chinfo_ax *ch_info, *tmp;
7032 	struct ieee80211_channel *channel;
7033 	struct list_head chan_list;
7034 	int list_len;
7035 	enum rtw89_chan_type type;
7036 	int ret = 0;
7037 	u32 idx;
7038 
7039 	INIT_LIST_HEAD(&chan_list);
7040 	for (idx = 0, list_len = 0;
7041 	     idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_AX;
7042 	     idx++, list_len++) {
7043 		channel = nd_config->channels[idx];
7044 		ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
7045 		if (!ch_info) {
7046 			ret = -ENOMEM;
7047 			goto out;
7048 		}
7049 
7050 		ch_info->period = RTW89_CHANNEL_TIME;
7051 		ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band);
7052 		ch_info->central_ch = channel->hw_value;
7053 		ch_info->pri_ch = channel->hw_value;
7054 		ch_info->is_psc = cfg80211_channel_is_psc(channel);
7055 
7056 		if (channel->flags &
7057 		    (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
7058 			type = RTW89_CHAN_DFS;
7059 		else
7060 			type = RTW89_CHAN_ACTIVE;
7061 
7062 		rtw89_pno_scan_add_chan_ax(rtwdev, type, nd_config->n_match_sets, ch_info);
7063 		list_add_tail(&ch_info->list, &chan_list);
7064 	}
7065 	ret = rtw89_fw_h2c_scan_list_offload_ax(rtwdev, list_len, &chan_list);
7066 
7067 out:
7068 	list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
7069 		list_del(&ch_info->list);
7070 		kfree(ch_info);
7071 	}
7072 
7073 	return ret;
7074 }
7075 
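/* Prepare the hw_scan channel list: one entry per requested channel. While
 * connected, an extra RTW89_CHAN_OPERATE entry is inserted whenever the
 * accumulated off-channel time would exceed RTW89_OFF_CHAN_TIME, so the
 * firmware periodically returns to the operating channel. The result is
 * spliced onto scan_info->chan_list for later consumption by
 * rtw89_hw_scan_add_chan_list_ax().
 */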
7076 int rtw89_hw_scan_prep_chan_list_ax(struct rtw89_dev *rtwdev,
7077 				    struct rtw89_vif_link *rtwvif_link)
7078 {
7079 	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
7080 	struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
7081 	struct cfg80211_scan_request *req = rtwvif->scan_req;
7082 	struct rtw89_mac_chinfo_ax *ch_info, *tmp;
7083 	struct ieee80211_channel *channel;
7084 	struct list_head chan_list;
7085 	bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN;
7086 	enum rtw89_chan_type type;
7087 	int off_chan_time = 0;
7088 	int ret;
7089 	u32 idx;
7090 
7091 	INIT_LIST_HEAD(&chan_list);
7092 
7093 	for (idx = 0; idx < req->n_channels; idx++) {
7094 		channel = req->channels[idx];
7095 		ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
7096 		if (!ch_info) {
7097 			ret = -ENOMEM;
7098 			goto out;
7099 		}
7100 
7101 		if (req->duration)
7102 			ch_info->period = req->duration;
7103 		else if (channel->band == NL80211_BAND_6GHZ)
7104 			ch_info->period = RTW89_CHANNEL_TIME_6G +
7105 					  RTW89_DWELL_TIME_6G;
7106 		else
7107 			ch_info->period = RTW89_CHANNEL_TIME;
7108 
7109 		ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band);
7110 		ch_info->central_ch = channel->hw_value;
7111 		ch_info->pri_ch = channel->hw_value;
7112 		ch_info->rand_seq_num = random_seq;
7113 		ch_info->is_psc = cfg80211_channel_is_psc(channel);
7114 
7115 		if (channel->flags &
7116 		    (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
7117 			type = RTW89_CHAN_DFS;
7118 		else
7119 			type = RTW89_CHAN_ACTIVE;
7120 		rtw89_hw_scan_add_chan_ax(rtwdev, type, req->n_ssids, ch_info);
7121 
7122 		if (scan_info->connected &&
7123 		    off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME) {
7124 			tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
7125 			if (!tmp) {
7126 				ret = -ENOMEM;
7127 				kfree(ch_info);
7128 				goto out;
7129 			}
7130 
7131 			type = RTW89_CHAN_OPERATE;
7132 			tmp->period = req->duration_mandatory ?
7133 				      req->duration : RTW89_CHANNEL_TIME;
7134 			rtw89_hw_scan_add_chan_ax(rtwdev, type, 0, tmp);
7135 			list_add_tail(&tmp->list, &chan_list);
7136 			off_chan_time = 0;
7137 		}
7138 		list_add_tail(&ch_info->list, &chan_list);
7139 		off_chan_time += ch_info->period;
7140 	}
7141 
7142 	list_splice_tail(&chan_list, &scan_info->chan_list);
7143 	return 0;
7144 
7145 out:
7146 	list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
7147 		list_del(&ch_info->list);
7148 		kfree(ch_info);
7149 	}
7150 
7151 	return ret;
7152 }
7153 
7154 void rtw89_hw_scan_free_chan_list_ax(struct rtw89_dev *rtwdev)
7155 {
7156 	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
7157 	struct rtw89_mac_chinfo_ax *ch_info, *tmp;
7158 
7159 	list_for_each_entry_safe(ch_info, tmp, &scan_info->chan_list, list) {
7160 		list_del(&ch_info->list);
7161 		kfree(ch_info);
7162 	}
7163 }
7164 
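/* Detach up to RTW89_SCAN_LIST_LIMIT_AX prepared entries from
 * scan_info->chan_list and send them with the scan-list-offload H2C. The
 * consumed entries are freed; any remainder stays queued for a later call.
 */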
7165 int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
7166 				   struct rtw89_vif_link *rtwvif_link)
7167 {
7168 	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
7169 	struct rtw89_mac_chinfo_ax *ch_info, *tmp;
7170 	unsigned int list_len = 0;
7171 	struct list_head list;
7172 	int ret;
7173 
7174 	INIT_LIST_HEAD(&list);
7175 
7176 	list_for_each_entry_safe(ch_info, tmp, &scan_info->chan_list, list) {
7177 		list_move_tail(&ch_info->list, &list);
7178 
7179 		list_len++;
7180 		if (list_len == RTW89_SCAN_LIST_LIMIT_AX)
7181 			break;
7182 	}
7183 
7184 	ret = rtw89_fw_h2c_scan_list_offload_ax(rtwdev, list_len, &list);
7185 
7186 	list_for_each_entry_safe(ch_info, tmp, &list, list) {
7187 		list_del(&ch_info->list);
7188 		kfree(ch_info);
7189 	}
7190 
7191 	return ret;
7192 }
7193 
7194 int rtw89_pno_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
7195 				    struct rtw89_vif_link *rtwvif_link)
7196 {
7197 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
7198 	struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config;
7199 	struct rtw89_mac_chinfo_be *ch_info, *tmp;
7200 	struct ieee80211_channel *channel;
7201 	struct list_head chan_list;
7202 	enum rtw89_chan_type type;
7203 	int list_len, ret;
7204 	u32 idx;
7205 
7206 	INIT_LIST_HEAD(&chan_list);
7207 
7208 	for (idx = 0, list_len = 0;
7209 	     idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_BE;
7210 	     idx++, list_len++) {
7211 		channel = nd_config->channels[idx];
7212 		ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
7213 		if (!ch_info) {
7214 			ret = -ENOMEM;
7215 			goto out;
7216 		}
7217 
7218 		ch_info->period = RTW89_CHANNEL_TIME;
7219 		ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band);
7220 		ch_info->central_ch = channel->hw_value;
7221 		ch_info->pri_ch = channel->hw_value;
7222 		ch_info->is_psc = cfg80211_channel_is_psc(channel);
7223 
7224 		if (channel->flags &
7225 		    (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
7226 			type = RTW89_CHAN_DFS;
7227 		else
7228 			type = RTW89_CHAN_ACTIVE;
7229 
7230 		rtw89_pno_scan_add_chan_be(rtwdev, type,
7231 					   nd_config->n_match_sets, ch_info);
7232 		list_add_tail(&ch_info->list, &chan_list);
7233 	}
7234 
7235 	ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &chan_list,
7236 						rtwvif_link);
7237 
7238 out:
7239 	list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
7240 		list_del(&ch_info->list);
7241 		kfree(ch_info);
7242 	}
7243 
7244 	return ret;
7245 }
7246 
7247 int rtw89_hw_scan_prep_chan_list_be(struct rtw89_dev *rtwdev,
7248 				    struct rtw89_vif_link *rtwvif_link)
7249 {
7250 	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
7251 	struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
7252 	struct cfg80211_scan_request *req = rtwvif->scan_req;
7253 	struct rtw89_mac_chinfo_be *ch_info, *tmp;
7254 	struct ieee80211_channel *channel;
7255 	struct list_head chan_list;
7256 	enum rtw89_chan_type type;
7257 	bool random_seq;
7258 	int ret;
7259 	u32 idx;
7260 
7261 	random_seq = !!(req->flags & NL80211_SCAN_FLAG_RANDOM_SN);
7262 	INIT_LIST_HEAD(&chan_list);
7263 
7264 	for (idx = 0; idx < req->n_channels; idx++) {
7265 		channel = req->channels[idx];
7266 		ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
7267 		if (!ch_info) {
7268 			ret = -ENOMEM;
7269 			goto out;
7270 		}
7271 
7272 		if (req->duration)
7273 			ch_info->period = req->duration;
7274 		else if (channel->band == NL80211_BAND_6GHZ)
7275 			ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G;
7276 		else
7277 			ch_info->period = RTW89_CHANNEL_TIME;
7278 
7279 		ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band);
7280 		ch_info->central_ch = channel->hw_value;
7281 		ch_info->pri_ch = channel->hw_value;
7282 		ch_info->rand_seq_num = random_seq;
7283 		ch_info->is_psc = cfg80211_channel_is_psc(channel);
7284 
7285 		if (channel->flags & (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
7286 			type = RTW89_CHAN_DFS;
7287 		else
7288 			type = RTW89_CHAN_ACTIVE;
7289 		rtw89_hw_scan_add_chan_be(rtwdev, type, req->n_ssids, ch_info);
7290 
7291 		list_add_tail(&ch_info->list, &chan_list);
7292 	}
7293 
7294 	list_splice_tail(&chan_list, &scan_info->chan_list);
7295 	return 0;
7296 
7297 out:
7298 	list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
7299 		list_del(&ch_info->list);
7300 		kfree(ch_info);
7301 	}
7302 
7303 	return ret;
7304 }
7305 
7306 void rtw89_hw_scan_free_chan_list_be(struct rtw89_dev *rtwdev)
7307 {
7308 	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
7309 	struct rtw89_mac_chinfo_be *ch_info, *tmp;
7310 
7311 	list_for_each_entry_safe(ch_info, tmp, &scan_info->chan_list, list) {
7312 		list_del(&ch_info->list);
7313 		kfree(ch_info);
7314 	}
7315 }
7316 
7317 int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
7318 				   struct rtw89_vif_link *rtwvif_link)
7319 {
7320 	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
7321 	struct rtw89_mac_chinfo_be *ch_info, *tmp;
7322 	unsigned int list_len = 0;
7323 	struct list_head list;
7324 	int ret;
7325 
7326 	INIT_LIST_HEAD(&list);
7327 
7328 	list_for_each_entry_safe(ch_info, tmp, &scan_info->chan_list, list) {
7329 		list_move_tail(&ch_info->list, &list);
7330 
7331 		list_len++;
7332 		if (list_len == RTW89_SCAN_LIST_LIMIT_BE)
7333 			break;
7334 	}
7335 
7336 	ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &list,
7337 						rtwvif_link);
7338 
7339 	list_for_each_entry_safe(ch_info, tmp, &list, list) {
7340 		list_del(&ch_info->list);
7341 		kfree(ch_info);
7342 	}
7343 
7344 	return ret;
7345 }
7346 
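/* Offload the probe request templates and prepare the channel list before
 * the scan offload is actually enabled.
 */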
7347 static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev,
7348 				   struct rtw89_vif_link *rtwvif_link,
7349 				   const u8 *mac_addr)
7350 {
7351 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
7352 	int ret;
7353 
7354 	ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif_link, mac_addr);
7355 	if (ret) {
7356 		rtw89_err(rtwdev, "Update probe request failed\n");
7357 		goto out;
7358 	}
7359 	ret = mac->prep_chan_list(rtwdev, rtwvif_link);
7360 out:
7361 	return ret;
7362 }
7363 
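/* Advertise a one-shot P2P notice of absence starting at the current TSF and
 * lasting @tu time units, so clients of the P2P GO expect it to be off
 * channel for the duration of the scan.
 */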
7364 static void rtw89_hw_scan_update_link_beacon_noa(struct rtw89_dev *rtwdev,
7365 						 struct rtw89_vif_link *rtwvif_link,
7366 						 u16 tu)
7367 {
7368 	struct ieee80211_p2p_noa_desc noa_desc = {};
7369 	u64 tsf;
7370 	int ret;
7371 
7372 	ret = rtw89_mac_port_get_tsf(rtwdev, rtwvif_link, &tsf);
7373 	if (ret) {
7374 		rtw89_warn(rtwdev, "%s: failed to get tsf\n", __func__);
7375 		return;
7376 	}
7377 
7378 	noa_desc.start_time = cpu_to_le32(tsf);
7379 	noa_desc.interval = cpu_to_le32(ieee80211_tu_to_usec(tu));
7380 	noa_desc.duration = cpu_to_le32(ieee80211_tu_to_usec(tu));
7381 	noa_desc.count = 1;
7382 
7383 	rtw89_p2p_noa_renew(rtwvif_link);
7384 	rtw89_p2p_noa_append(rtwvif_link, &noa_desc);
7385 	rtw89_chip_h2c_update_beacon(rtwdev, rtwvif_link);
7386 }
7387 
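/* Estimate the total scan time by summing the dwell period of every prepared
 * channel entry (AX and BE entries have different layouts), then refresh the
 * beacon NoA on each active P2P GO link to cover that time.
 */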
7388 static void rtw89_hw_scan_update_beacon_noa(struct rtw89_dev *rtwdev,
7389 					    const struct cfg80211_scan_request *req)
7390 {
7391 	const struct rtw89_entity_mgnt *mgnt = &rtwdev->hal.entity_mgnt;
7392 	const struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
7393 	const struct rtw89_chip_info *chip = rtwdev->chip;
7394 	struct rtw89_mac_chinfo_ax *chinfo_ax;
7395 	struct rtw89_mac_chinfo_be *chinfo_be;
7396 	struct rtw89_vif_link *rtwvif_link;
7397 	struct list_head *pos, *tmp;
7398 	struct ieee80211_vif *vif;
7399 	struct rtw89_vif *rtwvif;
7400 	u16 tu = 0;
7401 
7402 	lockdep_assert_wiphy(rtwdev->hw->wiphy);
7403 
7404 	list_for_each_safe(pos, tmp, &scan_info->chan_list) {
7405 		switch (chip->chip_gen) {
7406 		case RTW89_CHIP_AX:
7407 			chinfo_ax = list_entry(pos, typeof(*chinfo_ax), list);
7408 			tu += chinfo_ax->period;
7409 			break;
7410 		case RTW89_CHIP_BE:
7411 			chinfo_be = list_entry(pos, typeof(*chinfo_be), list);
7412 			tu += chinfo_be->period;
7413 			break;
7414 		default:
7415 			rtw89_warn(rtwdev, "%s: invalid chip gen %d\n",
7416 				   __func__, chip->chip_gen);
7417 			return;
7418 		}
7419 	}
7420 
7421 	if (unlikely(tu == 0)) {
7422 		rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
7423 			    "%s: cannot estimate needed TU\n", __func__);
7424 		return;
7425 	}
7426 
7427 	list_for_each_entry(rtwvif, &mgnt->active_list, mgnt_entry) {
7428 		unsigned int link_id;
7429 
7430 		vif = rtwvif_to_vif(rtwvif);
7431 		if (vif->type != NL80211_IFTYPE_AP || !vif->p2p)
7432 			continue;
7433 
7434 		rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
7435 			rtw89_hw_scan_update_link_beacon_noa(rtwdev, rtwvif_link, tu);
7436 	}
7437 }
7438 
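/* Start a hw_scan: snapshot the operating channel, record the scan state,
 * build the probe and channel offloads, stop TX queues, relax the RX filter
 * so off-channel frames are accepted, and pause the chanctx. Under MCC,
 * beacon NoA is also updated so peers tolerate the absence.
 */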
7439 int rtw89_hw_scan_start(struct rtw89_dev *rtwdev,
7440 			struct rtw89_vif_link *rtwvif_link,
7441 			struct ieee80211_scan_request *scan_req)
7442 {
7443 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
7444 	enum rtw89_entity_mode mode = rtw89_get_entity_mode(rtwdev);
7445 	struct cfg80211_scan_request *req = &scan_req->req;
7446 	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
7447 						       rtwvif_link->chanctx_idx);
7448 	struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
7449 	struct rtw89_chanctx_pause_parm pause_parm = {
7450 		.rsn = RTW89_CHANCTX_PAUSE_REASON_HW_SCAN,
7451 		.trigger = rtwvif_link,
7452 	};
7453 	u32 rx_fltr = rtwdev->hal.rx_fltr;
7454 	u8 mac_addr[ETH_ALEN];
7455 	u32 reg;
7456 	int ret;
7457 
7458 	/* clone op and keep it during scan */
7459 	rtwdev->scan_info.op_chan = *chan;
7460 
7461 	rtwdev->scan_info.connected = rtw89_is_any_vif_connected_or_connecting(rtwdev);
7462 	rtwdev->scan_info.scanning_vif = rtwvif_link;
7463 	rtwdev->scan_info.abort = false;
7464 	rtwvif->scan_ies = &scan_req->ies;
7465 	rtwvif->scan_req = req;
7466 
7467 	if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
7468 		get_random_mask_addr(mac_addr, req->mac_addr,
7469 				     req->mac_addr_mask);
7470 	else
7471 		ether_addr_copy(mac_addr, rtwvif_link->mac_addr);
7472 
7473 	ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif_link, mac_addr);
7474 	if (ret) {
7475 		rtw89_hw_scan_cleanup(rtwdev, rtwvif_link);
7476 		return ret;
7477 	}
7478 
7479 	ieee80211_stop_queues(rtwdev->hw);
7480 	rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif_link, false);
7481 
7482 	rtw89_core_scan_start(rtwdev, rtwvif_link, mac_addr, true);
7483 
7484 	rx_fltr &= ~B_AX_A_BCN_CHK_EN;
7485 	rx_fltr &= ~B_AX_A_BC;
7486 	rx_fltr &= ~B_AX_A_A1_MATCH;
7487 
7488 	reg = rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, rtwvif_link->mac_idx);
7489 	rtw89_write32_mask(rtwdev, reg, B_AX_RX_FLTR_CFG_MASK, rx_fltr);
7490 
7491 	rtw89_chanctx_pause(rtwdev, &pause_parm);
7492 
7493 	if (mode == RTW89_ENTITY_MODE_MCC)
7494 		rtw89_hw_scan_update_beacon_noa(rtwdev, req);
7495 
7496 	return 0;
7497 }
7498 
7499 struct rtw89_hw_scan_complete_cb_data {
7500 	struct rtw89_vif_link *rtwvif_link;
7501 	bool aborted;
7502 };
7503 
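/* Chanctx-proceed callback run when the scan ends: restore the RX filter,
 * report completion to mac80211, wake the TX queues, re-enable AP beacons,
 * and clean up the scan state.
 */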
7504 static int rtw89_hw_scan_complete_cb(struct rtw89_dev *rtwdev, void *data)
7505 {
7506 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
7507 	struct rtw89_hw_scan_complete_cb_data *cb_data = data;
7508 	struct rtw89_vif_link *rtwvif_link = cb_data->rtwvif_link;
7509 	struct cfg80211_scan_info info = {
7510 		.aborted = cb_data->aborted,
7511 	};
7512 	u32 reg;
7513 
7514 	if (!rtwvif_link)
7515 		return -EINVAL;
7516 
7517 	reg = rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, rtwvif_link->mac_idx);
7518 	rtw89_write32_mask(rtwdev, reg, B_AX_RX_FLTR_CFG_MASK, rtwdev->hal.rx_fltr);
7519 
7520 	rtw89_core_scan_complete(rtwdev, rtwvif_link, true);
7521 	ieee80211_scan_completed(rtwdev->hw, &info);
7522 	ieee80211_wake_queues(rtwdev->hw);
7523 	rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif_link, true);
7524 	rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, true);
7525 
7526 	rtw89_hw_scan_cleanup(rtwdev, rtwvif_link);
7527 
7528 	return 0;
7529 }
7530 
7531 void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev,
7532 			    struct rtw89_vif_link *rtwvif_link,
7533 			    bool aborted)
7534 {
7535 	struct rtw89_hw_scan_complete_cb_data cb_data = {
7536 		.rtwvif_link = rtwvif_link,
7537 		.aborted = aborted,
7538 	};
7539 	const struct rtw89_chanctx_cb_parm cb_parm = {
7540 		.cb = rtw89_hw_scan_complete_cb,
7541 		.data = &cb_data,
7542 		.caller = __func__,
7543 	};
7544 
7545 	/* The things here need to be done after setting the channel (for coex)
7546 	 * and before proceeding with entity mode (for MCC). So, pass them as a
7547 	 * callback to enforce the right sequence rather than doing them directly.
7548 	 */
7549 	rtw89_chanctx_proceed(rtwdev, &cb_parm);
7550 }
7551 
7552 void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev,
7553 			 struct rtw89_vif_link *rtwvif_link)
7554 {
7555 	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
7556 	int ret;
7557 
7558 	scan_info->abort = true;
7559 
7560 	ret = rtw89_hw_scan_offload(rtwdev, rtwvif_link, false);
7561 	if (ret)
7562 		rtw89_warn(rtwdev, "rtw89_hw_scan_offload failed ret %d\n", ret);
7563 
7564 	/* Indicate ieee80211_scan_completed() before returning, which is safe
7565 	 * because the scan abort command always waits for completion of
7566 	 * RTW89_SCAN_END_SCAN_NOTIFY, so that ieee80211_stop() can flush the
7567 	 * scan work properly.
7568 	 */
7569 	rtw89_hw_scan_complete(rtwdev, rtwvif_link, true);
7570 }
7571 
7572 static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev)
7573 {
7574 	struct rtw89_vif_link *rtwvif_link;
7575 	struct rtw89_vif *rtwvif;
7576 	unsigned int link_id;
7577 
7578 	rtw89_for_each_rtwvif(rtwdev, rtwvif) {
7579 		rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
7580 			/* This variable implies connected or during attempt to connect */
7581 			/* A non-zero BSSID implies the link is connected or attempting to connect */
7582 				return true;
7583 		}
7584 	}
7585 
7586 	return false;
7587 }
7588 
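/* Enable or disable the firmware scan offload. When enabling, the prepared
 * channel list is pushed first; BE chips additionally need the extended scan
 * option fields (operation, scan mode, MLO mode and op-channel handling).
 */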
7589 int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev,
7590 			  struct rtw89_vif_link *rtwvif_link,
7591 			  bool enable)
7592 {
7593 	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
7594 	struct rtw89_scan_option opt = {0};
7595 	bool connected;
7596 	int ret = 0;
7597 
7598 	if (!rtwvif_link)
7599 		return -EINVAL;
7600 
7601 	connected = rtwdev->scan_info.connected;
7602 	opt.enable = enable;
7603 	opt.target_ch_mode = connected;
7604 	if (enable) {
7605 		ret = mac->add_chan_list(rtwdev, rtwvif_link);
7606 		if (ret)
7607 			goto out;
7608 	}
7609 
7610 	if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) {
7611 		opt.operation = enable ? RTW89_SCAN_OP_START : RTW89_SCAN_OP_STOP;
7612 		opt.scan_mode = RTW89_SCAN_MODE_SA;
7613 		opt.band = rtwvif_link->mac_idx;
7614 		opt.num_macc_role = 0;
7615 		opt.mlo_mode = rtwdev->mlo_dbcc_mode;
7616 		opt.num_opch = connected ? 1 : 0;
7617 		opt.opch_end = connected ? 0 : RTW89_CHAN_INVALID;
7618 	}
7619 
7620 	ret = mac->scan_offload(rtwdev, &opt, rtwvif_link, false);
7621 out:
7622 	return ret;
7623 }
7624 
7625 #define H2C_FW_CPU_EXCEPTION_LEN 4
7626 #define H2C_FW_CPU_EXCEPTION_TYPE_DEF 0x5566
7627 int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev)
7628 {
7629 	struct sk_buff *skb;
7630 	int ret;
7631 
7632 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_FW_CPU_EXCEPTION_LEN);
7633 	if (!skb) {
7634 		rtw89_err(rtwdev,
7635 			  "failed to alloc skb for fw cpu exception\n");
7636 		return -ENOMEM;
7637 	}
7638 
7639 	skb_put(skb, H2C_FW_CPU_EXCEPTION_LEN);
7640 	RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(skb->data,
7641 					   H2C_FW_CPU_EXCEPTION_TYPE_DEF);
7642 
7643 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7644 			      H2C_CAT_TEST,
7645 			      H2C_CL_FW_STATUS_TEST,
7646 			      H2C_FUNC_CPU_EXCEPTION, 0, 0,
7647 			      H2C_FW_CPU_EXCEPTION_LEN);
7648 
7649 	ret = rtw89_h2c_tx(rtwdev, skb, false);
7650 	if (ret) {
7651 		rtw89_err(rtwdev, "failed to send h2c\n");
7652 		goto fail;
7653 	}
7654 
7655 	return 0;
7656 
7657 fail:
7658 	dev_kfree_skb_any(skb);
7659 	return ret;
7660 }
7661 
7662 #define H2C_PKT_DROP_LEN 24
7663 int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev,
7664 			  const struct rtw89_pkt_drop_params *params)
7665 {
7666 	struct sk_buff *skb;
7667 	int ret;
7668 
7669 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_PKT_DROP_LEN);
7670 	if (!skb) {
7671 		rtw89_err(rtwdev,
7672 			  "failed to alloc skb for packet drop\n");
7673 		return -ENOMEM;
7674 	}
7675 
7676 	switch (params->sel) {
7677 	case RTW89_PKT_DROP_SEL_MACID_BE_ONCE:
7678 	case RTW89_PKT_DROP_SEL_MACID_BK_ONCE:
7679 	case RTW89_PKT_DROP_SEL_MACID_VI_ONCE:
7680 	case RTW89_PKT_DROP_SEL_MACID_VO_ONCE:
7681 	case RTW89_PKT_DROP_SEL_BAND_ONCE:
7682 		break;
7683 	default:
7684 		rtw89_debug(rtwdev, RTW89_DBG_FW,
7685 			    "H2C of pkt drop might not fully support sel: %d yet\n",
7686 			    params->sel);
7687 		break;
7688 	}
7689 
7690 	skb_put(skb, H2C_PKT_DROP_LEN);
7691 	RTW89_SET_FWCMD_PKT_DROP_SEL(skb->data, params->sel);
7692 	RTW89_SET_FWCMD_PKT_DROP_MACID(skb->data, params->macid);
7693 	RTW89_SET_FWCMD_PKT_DROP_BAND(skb->data, params->mac_band);
7694 	RTW89_SET_FWCMD_PKT_DROP_PORT(skb->data, params->port);
7695 	RTW89_SET_FWCMD_PKT_DROP_MBSSID(skb->data, params->mbssid);
7696 	RTW89_SET_FWCMD_PKT_DROP_ROLE_A_INFO_TF_TRS(skb->data, params->tf_trs);
7697 	RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_0(skb->data,
7698 						  params->macid_band_sel[0]);
7699 	RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_1(skb->data,
7700 						  params->macid_band_sel[1]);
7701 	RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_2(skb->data,
7702 						  params->macid_band_sel[2]);
7703 	RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_3(skb->data,
7704 						  params->macid_band_sel[3]);
7705 
7706 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7707 			      H2C_CAT_MAC,
7708 			      H2C_CL_MAC_FW_OFLD,
7709 			      H2C_FUNC_PKT_DROP, 0, 0,
7710 			      H2C_PKT_DROP_LEN);
7711 
7712 	ret = rtw89_h2c_tx(rtwdev, skb, false);
7713 	if (ret) {
7714 		rtw89_err(rtwdev, "failed to send h2c\n");
7715 		goto fail;
7716 	}
7717 
7718 	return 0;
7719 
7720 fail:
7721 	dev_kfree_skb_any(skb);
7722 	return ret;
7723 }
7724 
7725 #define H2C_KEEP_ALIVE_LEN 4
7726 int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
7727 			    bool enable)
7728 {
7729 	struct sk_buff *skb;
7730 	u8 pkt_id = 0;
7731 	int ret;
7732 
7733 	if (enable) {
7734 		ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
7735 						   RTW89_PKT_OFLD_TYPE_NULL_DATA,
7736 						   &pkt_id);
7737 		if (ret)
7738 			return -EPERM;
7739 	}
7740 
7741 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_KEEP_ALIVE_LEN);
7742 	if (!skb) {
7743 		rtw89_err(rtwdev, "failed to alloc skb for keep alive\n");
7744 		return -ENOMEM;
7745 	}
7746 
7747 	skb_put(skb, H2C_KEEP_ALIVE_LEN);
7748 
7749 	RTW89_SET_KEEP_ALIVE_ENABLE(skb->data, enable);
7750 	RTW89_SET_KEEP_ALIVE_PKT_NULL_ID(skb->data, pkt_id);
7751 	RTW89_SET_KEEP_ALIVE_PERIOD(skb->data, 5);
7752 	RTW89_SET_KEEP_ALIVE_MACID(skb->data, rtwvif_link->mac_id);
7753 
7754 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7755 			      H2C_CAT_MAC,
7756 			      H2C_CL_MAC_WOW,
7757 			      H2C_FUNC_KEEP_ALIVE, 0, 1,
7758 			      H2C_KEEP_ALIVE_LEN);
7759 
7760 	ret = rtw89_h2c_tx(rtwdev, skb, false);
7761 	if (ret) {
7762 		rtw89_err(rtwdev, "failed to send h2c\n");
7763 		goto fail;
7764 	}
7765 
7766 	return 0;
7767 
7768 fail:
7769 	dev_kfree_skb_any(skb);
7770 
7771 	return ret;
7772 }
7773 
7774 int rtw89_fw_h2c_arp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
7775 			     bool enable)
7776 {
7777 	struct rtw89_h2c_arp_offload *h2c;
7778 	u32 len = sizeof(*h2c);
7779 	struct sk_buff *skb;
7780 	u8 pkt_id = 0;
7781 	int ret;
7782 
7783 	if (enable) {
7784 		ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
7785 						   RTW89_PKT_OFLD_TYPE_ARP_RSP,
7786 						   &pkt_id);
7787 		if (ret)
7788 			return ret;
7789 	}
7790 
7791 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
7792 	if (!skb) {
7793 		rtw89_err(rtwdev, "failed to alloc skb for arp offload\n");
7794 		return -ENOMEM;
7795 	}
7796 
7797 	skb_put(skb, len);
7798 	h2c = (struct rtw89_h2c_arp_offload *)skb->data;
7799 
7800 	h2c->w0 = le32_encode_bits(enable, RTW89_H2C_ARP_OFFLOAD_W0_ENABLE) |
7801 		  le32_encode_bits(0, RTW89_H2C_ARP_OFFLOAD_W0_ACTION) |
7802 		  le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_ARP_OFFLOAD_W0_MACID) |
7803 		  le32_encode_bits(pkt_id, RTW89_H2C_ARP_OFFLOAD_W0_PKT_ID);
7804 
7805 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7806 			      H2C_CAT_MAC,
7807 			      H2C_CL_MAC_WOW,
7808 			      H2C_FUNC_ARP_OFLD, 0, 1,
7809 			      len);
7810 
7811 	ret = rtw89_h2c_tx(rtwdev, skb, false);
7812 	if (ret) {
7813 		rtw89_err(rtwdev, "failed to send h2c\n");
7814 		goto fail;
7815 	}
7816 
7817 	return 0;
7818 
7819 fail:
7820 	dev_kfree_skb_any(skb);
7821 
7822 	return ret;
7823 }
7824 
7825 #define H2C_DISCONNECT_DETECT_LEN 8
7826 int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev,
7827 				   struct rtw89_vif_link *rtwvif_link, bool enable)
7828 {
7829 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
7830 	struct sk_buff *skb;
7831 	u8 macid = rtwvif_link->mac_id;
7832 	int ret;
7833 
7834 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DISCONNECT_DETECT_LEN);
7835 	if (!skb) {
7836 		rtw89_err(rtwdev, "failed to alloc skb for disconnect detect\n");
7837 		return -ENOMEM;
7838 	}
7839 
7840 	skb_put(skb, H2C_DISCONNECT_DETECT_LEN);
7841 
7842 	if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) {
7843 		RTW89_SET_DISCONNECT_DETECT_ENABLE(skb->data, enable);
7844 		RTW89_SET_DISCONNECT_DETECT_DISCONNECT(skb->data, !enable);
7845 		RTW89_SET_DISCONNECT_DETECT_MAC_ID(skb->data, macid);
7846 		RTW89_SET_DISCONNECT_DETECT_CHECK_PERIOD(skb->data, 100);
7847 		RTW89_SET_DISCONNECT_DETECT_TRY_PKT_COUNT(skb->data, 5);
7848 	}
7849 
7850 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7851 			      H2C_CAT_MAC,
7852 			      H2C_CL_MAC_WOW,
7853 			      H2C_FUNC_DISCONNECT_DETECT, 0, 1,
7854 			      H2C_DISCONNECT_DETECT_LEN);
7855 
7856 	ret = rtw89_h2c_tx(rtwdev, skb, false);
7857 	if (ret) {
7858 		rtw89_err(rtwdev, "failed to send h2c\n");
7859 		goto fail;
7860 	}
7861 
7862 	return 0;
7863 
7864 fail:
7865 	dev_kfree_skb_any(skb);
7866 
7867 	return ret;
7868 }
7869 
7870 int rtw89_fw_h2c_cfg_pno(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
7871 			 bool enable)
7872 {
7873 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
7874 	struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config;
7875 	struct rtw89_h2c_cfg_nlo *h2c;
7876 	u32 len = sizeof(*h2c);
7877 	struct sk_buff *skb;
7878 	int ret, i;
7879 
7880 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
7881 	if (!skb) {
7882 		rtw89_err(rtwdev, "failed to alloc skb for nlo\n");
7883 		return -ENOMEM;
7884 	}
7885 
7886 	skb_put(skb, len);
7887 	h2c = (struct rtw89_h2c_cfg_nlo *)skb->data;
7888 
7889 	h2c->w0 = le32_encode_bits(enable, RTW89_H2C_NLO_W0_ENABLE) |
7890 		  le32_encode_bits(enable, RTW89_H2C_NLO_W0_IGNORE_CIPHER) |
7891 		  le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_NLO_W0_MACID);
7892 
7893 	if (enable) {
7894 		h2c->nlo_cnt = nd_config->n_match_sets;
7895 		for (i = 0 ; i < nd_config->n_match_sets; i++) {
7896 			h2c->ssid_len[i] = nd_config->match_sets[i].ssid.ssid_len;
7897 			memcpy(h2c->ssid[i], nd_config->match_sets[i].ssid.ssid,
7898 			       nd_config->match_sets[i].ssid.ssid_len);
7899 		}
7900 	}
7901 
7902 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7903 			      H2C_CAT_MAC,
7904 			      H2C_CL_MAC_WOW,
7905 			      H2C_FUNC_NLO, 0, 1,
7906 			      len);
7907 
7908 	ret = rtw89_h2c_tx(rtwdev, skb, false);
7909 	if (ret) {
7910 		rtw89_err(rtwdev, "failed to send h2c\n");
7911 		goto fail;
7912 	}
7913 
7914 	return 0;
7915 
7916 fail:
7917 	dev_kfree_skb_any(skb);
7918 	return ret;
7919 }
7920 
7921 int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
7922 			    bool enable)
7923 {
7924 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
7925 	struct rtw89_h2c_wow_global *h2c;
7926 	u8 macid = rtwvif_link->mac_id;
7927 	u32 len = sizeof(*h2c);
7928 	struct sk_buff *skb;
7929 	int ret;
7930 
7931 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
7932 	if (!skb) {
7933 		rtw89_err(rtwdev, "failed to alloc skb for wow global\n");
7934 		return -ENOMEM;
7935 	}
7936 
7937 	skb_put(skb, len);
7938 	h2c = (struct rtw89_h2c_wow_global *)skb->data;
7939 
7940 	h2c->w0 = le32_encode_bits(enable, RTW89_H2C_WOW_GLOBAL_W0_ENABLE) |
7941 		  le32_encode_bits(macid, RTW89_H2C_WOW_GLOBAL_W0_MAC_ID) |
7942 		  le32_encode_bits(rtw_wow->ptk_alg,
7943 				   RTW89_H2C_WOW_GLOBAL_W0_PAIRWISE_SEC_ALGO) |
7944 		  le32_encode_bits(rtw_wow->gtk_alg,
7945 				   RTW89_H2C_WOW_GLOBAL_W0_GROUP_SEC_ALGO);
7946 	h2c->key_info = rtw_wow->key_info;
7947 
7948 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7949 			      H2C_CAT_MAC,
7950 			      H2C_CL_MAC_WOW,
7951 			      H2C_FUNC_WOW_GLOBAL, 0, 1,
7952 			      len);
7953 
7954 	ret = rtw89_h2c_tx(rtwdev, skb, false);
7955 	if (ret) {
7956 		rtw89_err(rtwdev, "failed to send h2c\n");
7957 		goto fail;
7958 	}
7959 
7960 	return 0;
7961 
7962 fail:
7963 	dev_kfree_skb_any(skb);
7964 
7965 	return ret;
7966 }
7967 
7968 #define H2C_WAKEUP_CTRL_LEN 4
7969 int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev,
7970 				 struct rtw89_vif_link *rtwvif_link,
7971 				 bool enable)
7972 {
7973 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
7974 	struct sk_buff *skb;
7975 	u8 macid = rtwvif_link->mac_id;
7976 	int ret;
7977 
7978 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WAKEUP_CTRL_LEN);
7979 	if (!skb) {
7980 		rtw89_err(rtwdev, "failed to alloc skb for wakeup ctrl\n");
7981 		return -ENOMEM;
7982 	}
7983 
7984 	skb_put(skb, H2C_WAKEUP_CTRL_LEN);
7985 
7986 	if (rtw_wow->pattern_cnt)
7987 		RTW89_SET_WOW_WAKEUP_CTRL_PATTERN_MATCH_ENABLE(skb->data, enable);
7988 	if (test_bit(RTW89_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags))
7989 		RTW89_SET_WOW_WAKEUP_CTRL_MAGIC_ENABLE(skb->data, enable);
7990 	if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags))
7991 		RTW89_SET_WOW_WAKEUP_CTRL_DEAUTH_ENABLE(skb->data, enable);
7992 
7993 	RTW89_SET_WOW_WAKEUP_CTRL_MAC_ID(skb->data, macid);
7994 
7995 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7996 			      H2C_CAT_MAC,
7997 			      H2C_CL_MAC_WOW,
7998 			      H2C_FUNC_WAKEUP_CTRL, 0, 1,
7999 			      H2C_WAKEUP_CTRL_LEN);
8000 
8001 	ret = rtw89_h2c_tx(rtwdev, skb, false);
8002 	if (ret) {
8003 		rtw89_err(rtwdev, "failed to send h2c\n");
8004 		goto fail;
8005 	}
8006 
8007 	return 0;
8008 
8009 fail:
8010 	dev_kfree_skb_any(skb);
8011 
8012 	return ret;
8013 }
8014 
8015 #define H2C_WOW_CAM_UPD_LEN 24
8016 int rtw89_fw_wow_cam_update(struct rtw89_dev *rtwdev,
8017 			    struct rtw89_wow_cam_info *cam_info)
8018 {
8019 	struct sk_buff *skb;
8020 	int ret;
8021 
8022 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_CAM_UPD_LEN);
8023 	if (!skb) {
8024 		rtw89_err(rtwdev, "failed to alloc skb for wow cam update\n");
8025 		return -ENOMEM;
8026 	}
8027 
8028 	skb_put(skb, H2C_WOW_CAM_UPD_LEN);
8029 
8030 	RTW89_SET_WOW_CAM_UPD_R_W(skb->data, cam_info->r_w);
8031 	RTW89_SET_WOW_CAM_UPD_IDX(skb->data, cam_info->idx);
8032 	if (cam_info->valid) {
8033 		RTW89_SET_WOW_CAM_UPD_WKFM1(skb->data, cam_info->mask[0]);
8034 		RTW89_SET_WOW_CAM_UPD_WKFM2(skb->data, cam_info->mask[1]);
8035 		RTW89_SET_WOW_CAM_UPD_WKFM3(skb->data, cam_info->mask[2]);
8036 		RTW89_SET_WOW_CAM_UPD_WKFM4(skb->data, cam_info->mask[3]);
8037 		RTW89_SET_WOW_CAM_UPD_CRC(skb->data, cam_info->crc);
8038 		RTW89_SET_WOW_CAM_UPD_NEGATIVE_PATTERN_MATCH(skb->data,
8039 							     cam_info->negative_pattern_match);
8040 		RTW89_SET_WOW_CAM_UPD_SKIP_MAC_HDR(skb->data,
8041 						   cam_info->skip_mac_hdr);
8042 		RTW89_SET_WOW_CAM_UPD_UC(skb->data, cam_info->uc);
8043 		RTW89_SET_WOW_CAM_UPD_MC(skb->data, cam_info->mc);
8044 		RTW89_SET_WOW_CAM_UPD_BC(skb->data, cam_info->bc);
8045 	}
8046 	RTW89_SET_WOW_CAM_UPD_VALID(skb->data, cam_info->valid);
8047 
8048 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8049 			      H2C_CAT_MAC,
8050 			      H2C_CL_MAC_WOW,
8051 			      H2C_FUNC_WOW_CAM_UPD, 0, 1,
8052 			      H2C_WOW_CAM_UPD_LEN);
8053 
8054 	ret = rtw89_h2c_tx(rtwdev, skb, false);
8055 	if (ret) {
8056 		rtw89_err(rtwdev, "failed to send h2c\n");
8057 		goto fail;
8058 	}
8059 
8060 	return 0;
8061 fail:
8062 	dev_kfree_skb_any(skb);
8063 
8064 	return ret;
8065 }
8066 
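/* Configure GTK rekey offload for WoWLAN: offload an EAPOL msg 2/2 template
 * (and an SA Query template when an IGTK is configured) so the firmware can
 * answer group rekeys on its own while the host sleeps. TKIP is left disabled
 * in the command.
 */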
8067 int rtw89_fw_h2c_wow_gtk_ofld(struct rtw89_dev *rtwdev,
8068 			      struct rtw89_vif_link *rtwvif_link,
8069 			      bool enable)
8070 {
8071 	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
8072 	struct rtw89_wow_gtk_info *gtk_info = &rtw_wow->gtk_info;
8073 	struct rtw89_h2c_wow_gtk_ofld *h2c;
8074 	u8 macid = rtwvif_link->mac_id;
8075 	u32 len = sizeof(*h2c);
8076 	u8 pkt_id_sa_query = 0;
8077 	struct sk_buff *skb;
8078 	u8 pkt_id_eapol = 0;
8079 	int ret;
8080 
8081 	if (!rtw_wow->gtk_alg)
8082 		return 0;
8083 
8084 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
8085 	if (!skb) {
8086 		rtw89_err(rtwdev, "failed to alloc skb for gtk ofld\n");
8087 		return -ENOMEM;
8088 	}
8089 
8090 	skb_put(skb, len);
8091 	h2c = (struct rtw89_h2c_wow_gtk_ofld *)skb->data;
8092 
8093 	if (!enable)
8094 		goto hdr;
8095 
8096 	ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
8097 					   RTW89_PKT_OFLD_TYPE_EAPOL_KEY,
8098 					   &pkt_id_eapol);
8099 	if (ret)
8100 		goto fail;
8101 
8102 	if (gtk_info->igtk_keyid) {
8103 		ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
8104 						   RTW89_PKT_OFLD_TYPE_SA_QUERY,
8105 						   &pkt_id_sa_query);
8106 		if (ret)
8107 			goto fail;
8108 	}
8109 
8110 	/* TKIP is not supported yet */
8111 	h2c->w0 = le32_encode_bits(enable, RTW89_H2C_WOW_GTK_OFLD_W0_EN) |
8112 		  le32_encode_bits(0, RTW89_H2C_WOW_GTK_OFLD_W0_TKIP_EN) |
8113 		  le32_encode_bits(gtk_info->igtk_keyid ? 1 : 0,
8114 				   RTW89_H2C_WOW_GTK_OFLD_W0_IEEE80211W_EN) |
8115 		  le32_encode_bits(macid, RTW89_H2C_WOW_GTK_OFLD_W0_MAC_ID) |
8116 		  le32_encode_bits(pkt_id_eapol, RTW89_H2C_WOW_GTK_OFLD_W0_GTK_RSP_ID);
8117 	h2c->w1 = le32_encode_bits(gtk_info->igtk_keyid ? pkt_id_sa_query : 0,
8118 				   RTW89_H2C_WOW_GTK_OFLD_W1_PMF_SA_QUERY_ID) |
8119 		  le32_encode_bits(rtw_wow->akm, RTW89_H2C_WOW_GTK_OFLD_W1_ALGO_AKM_SUIT);
8120 	h2c->gtk_info = rtw_wow->gtk_info;
8121 
8122 hdr:
8123 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8124 			      H2C_CAT_MAC,
8125 			      H2C_CL_MAC_WOW,
8126 			      H2C_FUNC_GTK_OFLD, 0, 1,
8127 			      len);
8128 
8129 	ret = rtw89_h2c_tx(rtwdev, skb, false);
8130 	if (ret) {
8131 		rtw89_err(rtwdev, "failed to send h2c\n");
8132 		goto fail;
8133 	}
8134 	return 0;
8135 fail:
8136 	dev_kfree_skb_any(skb);
8137 
8138 	return ret;
8139 }
8140 
8141 int rtw89_fw_h2c_fwips(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
8142 		       bool enable)
8143 {
8144 	struct rtw89_wait_info *wait = &rtwdev->mac.ps_wait;
8145 	struct rtw89_h2c_fwips *h2c;
8146 	u32 len = sizeof(*h2c);
8147 	struct sk_buff *skb;
8148 
8149 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
8150 	if (!skb) {
8151 		rtw89_err(rtwdev, "failed to alloc skb for fw ips\n");
8152 		return -ENOMEM;
8153 	}
8154 	skb_put(skb, len);
8155 	h2c = (struct rtw89_h2c_fwips *)skb->data;
8156 
8157 	h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_FW_IPS_W0_MACID) |
8158 		  le32_encode_bits(enable, RTW89_H2C_FW_IPS_W0_ENABLE);
8159 
8160 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8161 			      H2C_CAT_MAC,
8162 			      H2C_CL_MAC_PS,
8163 			      H2C_FUNC_IPS_CFG, 0, 1,
8164 			      len);
8165 
8166 	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_PS_WAIT_COND_IPS_CFG);
8167 }
8168 
8169 int rtw89_fw_h2c_wow_request_aoac(struct rtw89_dev *rtwdev)
8170 {
8171 	struct rtw89_wait_info *wait = &rtwdev->wow.wait;
8172 	struct rtw89_h2c_wow_aoac *h2c;
8173 	u32 len = sizeof(*h2c);
8174 	struct sk_buff *skb;
8175 
8176 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
8177 	if (!skb) {
8178 		rtw89_err(rtwdev, "failed to alloc skb for aoac\n");
8179 		return -ENOMEM;
8180 	}
8181 
8182 	skb_put(skb, len);
8183 
8184 	/* This H2C only notifies the firmware to generate the AOAC report C2H;
8185 	 * no parameters are needed.
8186 	 */
8187 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8188 			      H2C_CAT_MAC,
8189 			      H2C_CL_MAC_WOW,
8190 			      H2C_FUNC_AOAC_REPORT_REQ, 1, 0,
8191 			      len);
8192 
8193 	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_WOW_WAIT_COND_AOAC);
8194 }
8195 
8196 /* Return < 0, if failures happen during waiting for the condition.
8197  * Return 0, when waiting for the condition succeeds.
8198  * Return > 0, if the wait is considered unreachable due to driver/FW design,
8199  * where 1 means during SER.
8200  */
8201 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
8202 				 struct rtw89_wait_info *wait, unsigned int cond)
8203 {
8204 	int ret;
8205 
8206 	ret = rtw89_h2c_tx(rtwdev, skb, false);
8207 	if (ret) {
8208 		rtw89_err(rtwdev, "failed to send h2c\n");
8209 		dev_kfree_skb_any(skb);
8210 		return -EBUSY;
8211 	}
8212 
8213 	if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags))
8214 		return 1;
8215 
8216 	return rtw89_wait_for_cond(wait, cond);
8217 }
8218 
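/* The MCC H2Cs below share one pattern: fill a fixed-size command, transmit
 * it, and block on the matching RTW89_MCC_WAIT_COND() entry until the
 * firmware's completion for that group/function arrives via C2H.
 */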
8219 #define H2C_ADD_MCC_LEN 16
8220 int rtw89_fw_h2c_add_mcc(struct rtw89_dev *rtwdev,
8221 			 const struct rtw89_fw_mcc_add_req *p)
8222 {
8223 	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
8224 	struct sk_buff *skb;
8225 	unsigned int cond;
8226 
8227 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ADD_MCC_LEN);
8228 	if (!skb) {
8229 		rtw89_err(rtwdev,
8230 			  "failed to alloc skb for add mcc\n");
8231 		return -ENOMEM;
8232 	}
8233 
8234 	skb_put(skb, H2C_ADD_MCC_LEN);
8235 	RTW89_SET_FWCMD_ADD_MCC_MACID(skb->data, p->macid);
8236 	RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG0(skb->data, p->central_ch_seg0);
8237 	RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG1(skb->data, p->central_ch_seg1);
8238 	RTW89_SET_FWCMD_ADD_MCC_PRIMARY_CH(skb->data, p->primary_ch);
8239 	RTW89_SET_FWCMD_ADD_MCC_BANDWIDTH(skb->data, p->bandwidth);
8240 	RTW89_SET_FWCMD_ADD_MCC_GROUP(skb->data, p->group);
8241 	RTW89_SET_FWCMD_ADD_MCC_C2H_RPT(skb->data, p->c2h_rpt);
8242 	RTW89_SET_FWCMD_ADD_MCC_DIS_TX_NULL(skb->data, p->dis_tx_null);
8243 	RTW89_SET_FWCMD_ADD_MCC_DIS_SW_RETRY(skb->data, p->dis_sw_retry);
8244 	RTW89_SET_FWCMD_ADD_MCC_IN_CURR_CH(skb->data, p->in_curr_ch);
8245 	RTW89_SET_FWCMD_ADD_MCC_SW_RETRY_COUNT(skb->data, p->sw_retry_count);
8246 	RTW89_SET_FWCMD_ADD_MCC_TX_NULL_EARLY(skb->data, p->tx_null_early);
8247 	RTW89_SET_FWCMD_ADD_MCC_BTC_IN_2G(skb->data, p->btc_in_2g);
8248 	RTW89_SET_FWCMD_ADD_MCC_PTA_EN(skb->data, p->pta_en);
8249 	RTW89_SET_FWCMD_ADD_MCC_RFK_BY_PASS(skb->data, p->rfk_by_pass);
8250 	RTW89_SET_FWCMD_ADD_MCC_CH_BAND_TYPE(skb->data, p->ch_band_type);
8251 	RTW89_SET_FWCMD_ADD_MCC_DURATION(skb->data, p->duration);
8252 	RTW89_SET_FWCMD_ADD_MCC_COURTESY_EN(skb->data, p->courtesy_en);
8253 	RTW89_SET_FWCMD_ADD_MCC_COURTESY_NUM(skb->data, p->courtesy_num);
8254 	RTW89_SET_FWCMD_ADD_MCC_COURTESY_TARGET(skb->data, p->courtesy_target);
8255 
8256 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8257 			      H2C_CAT_MAC,
8258 			      H2C_CL_MCC,
8259 			      H2C_FUNC_ADD_MCC, 0, 0,
8260 			      H2C_ADD_MCC_LEN);
8261 
8262 	cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_ADD_MCC);
8263 	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
8264 }
8265 
8266 #define H2C_START_MCC_LEN 12
8267 int rtw89_fw_h2c_start_mcc(struct rtw89_dev *rtwdev,
8268 			   const struct rtw89_fw_mcc_start_req *p)
8269 {
8270 	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
8271 	struct sk_buff *skb;
8272 	unsigned int cond;
8273 
8274 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_START_MCC_LEN);
8275 	if (!skb) {
8276 		rtw89_err(rtwdev,
8277 			  "failed to alloc skb for start mcc\n");
8278 		return -ENOMEM;
8279 	}
8280 
8281 	skb_put(skb, H2C_START_MCC_LEN);
8282 	RTW89_SET_FWCMD_START_MCC_GROUP(skb->data, p->group);
8283 	RTW89_SET_FWCMD_START_MCC_BTC_IN_GROUP(skb->data, p->btc_in_group);
8284 	RTW89_SET_FWCMD_START_MCC_OLD_GROUP_ACTION(skb->data, p->old_group_action);
8285 	RTW89_SET_FWCMD_START_MCC_OLD_GROUP(skb->data, p->old_group);
8286 	RTW89_SET_FWCMD_START_MCC_NOTIFY_CNT(skb->data, p->notify_cnt);
8287 	RTW89_SET_FWCMD_START_MCC_NOTIFY_RXDBG_EN(skb->data, p->notify_rxdbg_en);
8288 	RTW89_SET_FWCMD_START_MCC_MACID(skb->data, p->macid);
8289 	RTW89_SET_FWCMD_START_MCC_TSF_LOW(skb->data, p->tsf_low);
8290 	RTW89_SET_FWCMD_START_MCC_TSF_HIGH(skb->data, p->tsf_high);
8291 
8292 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8293 			      H2C_CAT_MAC,
8294 			      H2C_CL_MCC,
8295 			      H2C_FUNC_START_MCC, 0, 0,
8296 			      H2C_START_MCC_LEN);
8297 
8298 	cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_START_MCC);
8299 	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
8300 }
8301 
8302 #define H2C_STOP_MCC_LEN 4
8303 int rtw89_fw_h2c_stop_mcc(struct rtw89_dev *rtwdev, u8 group, u8 macid,
8304 			  bool prev_groups)
8305 {
8306 	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
8307 	struct sk_buff *skb;
8308 	unsigned int cond;
8309 
8310 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_STOP_MCC_LEN);
8311 	if (!skb) {
8312 		rtw89_err(rtwdev,
8313 			  "failed to alloc skb for stop mcc\n");
8314 		return -ENOMEM;
8315 	}
8316 
8317 	skb_put(skb, H2C_STOP_MCC_LEN);
8318 	RTW89_SET_FWCMD_STOP_MCC_MACID(skb->data, macid);
8319 	RTW89_SET_FWCMD_STOP_MCC_GROUP(skb->data, group);
8320 	RTW89_SET_FWCMD_STOP_MCC_PREV_GROUPS(skb->data, prev_groups);
8321 
8322 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8323 			      H2C_CAT_MAC,
8324 			      H2C_CL_MCC,
8325 			      H2C_FUNC_STOP_MCC, 0, 0,
8326 			      H2C_STOP_MCC_LEN);
8327 
8328 	cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_STOP_MCC);
8329 	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
8330 }
8331 
8332 #define H2C_DEL_MCC_GROUP_LEN 4
8333 int rtw89_fw_h2c_del_mcc_group(struct rtw89_dev *rtwdev, u8 group,
8334 			       bool prev_groups)
8335 {
8336 	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
8337 	struct sk_buff *skb;
8338 	unsigned int cond;
8339 
8340 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DEL_MCC_GROUP_LEN);
8341 	if (!skb) {
8342 		rtw89_err(rtwdev,
8343 			  "failed to alloc skb for del mcc group\n");
8344 		return -ENOMEM;
8345 	}
8346 
8347 	skb_put(skb, H2C_DEL_MCC_GROUP_LEN);
8348 	RTW89_SET_FWCMD_DEL_MCC_GROUP_GROUP(skb->data, group);
8349 	RTW89_SET_FWCMD_DEL_MCC_GROUP_PREV_GROUPS(skb->data, prev_groups);
8350 
8351 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8352 			      H2C_CAT_MAC,
8353 			      H2C_CL_MCC,
8354 			      H2C_FUNC_DEL_MCC_GROUP, 0, 0,
8355 			      H2C_DEL_MCC_GROUP_LEN);
8356 
8357 	cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_DEL_MCC_GROUP);
8358 	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
8359 }
8360 
8361 #define H2C_RESET_MCC_GROUP_LEN 4
8362 int rtw89_fw_h2c_reset_mcc_group(struct rtw89_dev *rtwdev, u8 group)
8363 {
8364 	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
8365 	struct sk_buff *skb;
8366 	unsigned int cond;
8367 
8368 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RESET_MCC_GROUP_LEN);
8369 	if (!skb) {
8370 		rtw89_err(rtwdev,
8371 			  "failed to alloc skb for reset mcc group\n");
8372 		return -ENOMEM;
8373 	}
8374 
8375 	skb_put(skb, H2C_RESET_MCC_GROUP_LEN);
8376 	RTW89_SET_FWCMD_RESET_MCC_GROUP_GROUP(skb->data, group);
8377 
8378 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8379 			      H2C_CAT_MAC,
8380 			      H2C_CL_MCC,
8381 			      H2C_FUNC_RESET_MCC_GROUP, 0, 0,
8382 			      H2C_RESET_MCC_GROUP_LEN);
8383 
8384 	cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_RESET_MCC_GROUP);
8385 	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
8386 }
8387 
8388 #define H2C_MCC_REQ_TSF_LEN 4
8389 int rtw89_fw_h2c_mcc_req_tsf(struct rtw89_dev *rtwdev,
8390 			     const struct rtw89_fw_mcc_tsf_req *req,
8391 			     struct rtw89_mac_mcc_tsf_rpt *rpt)
8392 {
8393 	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
8394 	struct rtw89_mac_mcc_tsf_rpt *tmp;
8395 	struct sk_buff *skb;
8396 	unsigned int cond;
8397 	int ret;
8398 
8399 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_REQ_TSF_LEN);
8400 	if (!skb) {
8401 		rtw89_err(rtwdev,
8402 			  "failed to alloc skb for mcc req tsf\n");
8403 		return -ENOMEM;
8404 	}
8405 
8406 	skb_put(skb, H2C_MCC_REQ_TSF_LEN);
8407 	RTW89_SET_FWCMD_MCC_REQ_TSF_GROUP(skb->data, req->group);
8408 	RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_X(skb->data, req->macid_x);
8409 	RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_Y(skb->data, req->macid_y);
8410 
8411 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8412 			      H2C_CAT_MAC,
8413 			      H2C_CL_MCC,
8414 			      H2C_FUNC_MCC_REQ_TSF, 0, 0,
8415 			      H2C_MCC_REQ_TSF_LEN);
8416 
8417 	cond = RTW89_MCC_WAIT_COND(req->group, H2C_FUNC_MCC_REQ_TSF);
8418 	ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
8419 	if (ret)
8420 		return ret;
8421 
8422 	tmp = (struct rtw89_mac_mcc_tsf_rpt *)wait->data.buf;
8423 	*rpt = *tmp;
8424 
8425 	return 0;
8426 }
8427 
8428 #define H2C_MCC_MACID_BITMAP_DSC_LEN 4
8429 int rtw89_fw_h2c_mcc_macid_bitmap(struct rtw89_dev *rtwdev, u8 group, u8 macid,
8430 				  u8 *bitmap)
8431 {
8432 	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
8433 	struct sk_buff *skb;
8434 	unsigned int cond;
8435 	u8 map_len;
8436 	u8 h2c_len;
8437 
8438 	BUILD_BUG_ON(RTW89_MAX_MAC_ID_NUM % 8);
8439 	map_len = RTW89_MAX_MAC_ID_NUM / 8;
8440 	h2c_len = H2C_MCC_MACID_BITMAP_DSC_LEN + map_len;
8441 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, h2c_len);
8442 	if (!skb) {
8443 		rtw89_err(rtwdev,
8444 			  "failed to alloc skb for mcc macid bitmap\n");
8445 		return -ENOMEM;
8446 	}
8447 
8448 	skb_put(skb, h2c_len);
8449 	RTW89_SET_FWCMD_MCC_MACID_BITMAP_GROUP(skb->data, group);
8450 	RTW89_SET_FWCMD_MCC_MACID_BITMAP_MACID(skb->data, macid);
8451 	RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP_LENGTH(skb->data, map_len);
8452 	RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP(skb->data, bitmap, map_len);
8453 
8454 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8455 			      H2C_CAT_MAC,
8456 			      H2C_CL_MCC,
8457 			      H2C_FUNC_MCC_MACID_BITMAP, 0, 0,
8458 			      h2c_len);
8459 
8460 	cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_MACID_BITMAP);
8461 	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
8462 }
8463 
8464 #define H2C_MCC_SYNC_LEN 4
8465 int rtw89_fw_h2c_mcc_sync(struct rtw89_dev *rtwdev, u8 group, u8 source,
8466 			  u8 target, u8 offset)
8467 {
8468 	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
8469 	struct sk_buff *skb;
8470 	unsigned int cond;
8471 
8472 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SYNC_LEN);
8473 	if (!skb) {
8474 		rtw89_err(rtwdev,
8475 			  "failed to alloc skb for mcc sync\n");
8476 		return -ENOMEM;
8477 	}
8478 
8479 	skb_put(skb, H2C_MCC_SYNC_LEN);
8480 	RTW89_SET_FWCMD_MCC_SYNC_GROUP(skb->data, group);
8481 	RTW89_SET_FWCMD_MCC_SYNC_MACID_SOURCE(skb->data, source);
8482 	RTW89_SET_FWCMD_MCC_SYNC_MACID_TARGET(skb->data, target);
8483 	RTW89_SET_FWCMD_MCC_SYNC_SYNC_OFFSET(skb->data, offset);
8484 
8485 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8486 			      H2C_CAT_MAC,
8487 			      H2C_CL_MCC,
8488 			      H2C_FUNC_MCC_SYNC, 0, 0,
8489 			      H2C_MCC_SYNC_LEN);
8490 
8491 	cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_SYNC);
8492 	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
8493 }
8494 
8495 #define H2C_MCC_SET_DURATION_LEN 20
8496 int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev,
8497 				  const struct rtw89_fw_mcc_duration *p)
8498 {
8499 	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
8500 	struct sk_buff *skb;
8501 	unsigned int cond;
8502 
8503 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SET_DURATION_LEN);
8504 	if (!skb) {
8505 		rtw89_err(rtwdev,
8506 			  "failed to alloc skb for mcc set duration\n");
8507 		return -ENOMEM;
8508 	}
8509 
8510 	skb_put(skb, H2C_MCC_SET_DURATION_LEN);
8511 	RTW89_SET_FWCMD_MCC_SET_DURATION_GROUP(skb->data, p->group);
8512 	RTW89_SET_FWCMD_MCC_SET_DURATION_BTC_IN_GROUP(skb->data, p->btc_in_group);
8513 	RTW89_SET_FWCMD_MCC_SET_DURATION_START_MACID(skb->data, p->start_macid);
8514 	RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_X(skb->data, p->macid_x);
8515 	RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_Y(skb->data, p->macid_y);
8516 	RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_LOW(skb->data,
8517 						       p->start_tsf_low);
8518 	RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_HIGH(skb->data,
8519 							p->start_tsf_high);
8520 	RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_X(skb->data, p->duration_x);
8521 	RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_Y(skb->data, p->duration_y);
8522 
8523 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8524 			      H2C_CAT_MAC,
8525 			      H2C_CL_MCC,
8526 			      H2C_FUNC_MCC_SET_DURATION, 0, 0,
8527 			      H2C_MCC_SET_DURATION_LEN);
8528 
8529 	cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_MCC_SET_DURATION);
8530 	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
8531 }
8532 
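/*
 * Two-pass helper for rtw89_fw_h2c_mrc_add(): with @slot_h2c == NULL it only
 * returns the variable length of one slot descriptor (header plus one role
 * entry per slot_arg->role_num); with a non-NULL @slot_h2c it also fills the
 * descriptor and its role array. The returned length is used both to size
 * the skb and to advance the write cursor while filling it.
 */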
8533 static
8534 u32 rtw89_fw_h2c_mrc_add_slot(struct rtw89_dev *rtwdev,
8535 			      const struct rtw89_fw_mrc_add_slot_arg *slot_arg,
8536 			      struct rtw89_h2c_mrc_add_slot *slot_h2c)
8537 {
8538 	bool fill_h2c = !!slot_h2c;
8539 	unsigned int i;
8540 
8541 	if (!fill_h2c)
8542 		goto calc_len;
8543 
8544 	slot_h2c->w0 = le32_encode_bits(slot_arg->duration,
8545 					RTW89_H2C_MRC_ADD_SLOT_W0_DURATION) |
8546 		       le32_encode_bits(slot_arg->courtesy_en,
8547 					RTW89_H2C_MRC_ADD_SLOT_W0_COURTESY_EN) |
8548 		       le32_encode_bits(slot_arg->role_num,
8549 					RTW89_H2C_MRC_ADD_SLOT_W0_ROLE_NUM);
8550 	slot_h2c->w1 = le32_encode_bits(slot_arg->courtesy_period,
8551 					RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_PERIOD) |
8552 		       le32_encode_bits(slot_arg->courtesy_target,
8553 					RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_TARGET);
8554 
8555 	for (i = 0; i < slot_arg->role_num; i++) {
8556 		slot_h2c->roles[i].w0 =
8557 			le32_encode_bits(slot_arg->roles[i].macid,
8558 					 RTW89_H2C_MRC_ADD_ROLE_W0_MACID) |
8559 			le32_encode_bits(slot_arg->roles[i].role_type,
8560 					 RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_TYPE) |
8561 			le32_encode_bits(slot_arg->roles[i].is_master,
8562 					 RTW89_H2C_MRC_ADD_ROLE_W0_IS_MASTER) |
8563 			le32_encode_bits(slot_arg->roles[i].en_tx_null,
8564 					 RTW89_H2C_MRC_ADD_ROLE_W0_TX_NULL_EN) |
8565 			le32_encode_bits(false,
8566 					 RTW89_H2C_MRC_ADD_ROLE_W0_IS_ALT_ROLE) |
8567 			le32_encode_bits(false,
8568 					 RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_ALT_EN);
8569 		slot_h2c->roles[i].w1 =
8570 			le32_encode_bits(slot_arg->roles[i].central_ch,
8571 					 RTW89_H2C_MRC_ADD_ROLE_W1_CENTRAL_CH_SEG) |
8572 			le32_encode_bits(slot_arg->roles[i].primary_ch,
8573 					 RTW89_H2C_MRC_ADD_ROLE_W1_PRI_CH) |
8574 			le32_encode_bits(slot_arg->roles[i].bw,
8575 					 RTW89_H2C_MRC_ADD_ROLE_W1_BW) |
8576 			le32_encode_bits(slot_arg->roles[i].band,
8577 					 RTW89_H2C_MRC_ADD_ROLE_W1_CH_BAND_TYPE) |
8578 			le32_encode_bits(slot_arg->roles[i].null_early,
8579 					 RTW89_H2C_MRC_ADD_ROLE_W1_NULL_EARLY) |
8580 			le32_encode_bits(false,
8581 					 RTW89_H2C_MRC_ADD_ROLE_W1_RFK_BY_PASS) |
8582 			le32_encode_bits(true,
8583 					 RTW89_H2C_MRC_ADD_ROLE_W1_CAN_BTC);
8584 		slot_h2c->roles[i].macid_main_bitmap =
8585 			cpu_to_le32(slot_arg->roles[i].macid_main_bitmap);
8586 		slot_h2c->roles[i].macid_paired_bitmap =
8587 			cpu_to_le32(slot_arg->roles[i].macid_paired_bitmap);
8588 	}
8589 
8590 calc_len:
8591 	return struct_size(slot_h2c, roles, slot_arg->role_num);
8592 }
8593 
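/*
 * Build and send the MRC "add schedule" H2C. The payload is the fixed header
 * followed by all variable-sized slots, so the length is computed first by a
 * dry run of rtw89_fw_h2c_mrc_add_slot() per slot. Unlike most MCC/MRC
 * commands here, this one is sent without waiting for a C2H completion.
 */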
8594 int rtw89_fw_h2c_mrc_add(struct rtw89_dev *rtwdev,
8595 			 const struct rtw89_fw_mrc_add_arg *arg)
8596 {
8597 	struct rtw89_h2c_mrc_add *h2c_head;
8598 	struct sk_buff *skb;
8599 	unsigned int i;
8600 	void *tmp;
8601 	u32 len;
8602 	int ret;
8603 
8604 	len = sizeof(*h2c_head);
8605 	for (i = 0; i < arg->slot_num; i++)
8606 		len += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], NULL);
8607 
8608 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
8609 	if (!skb) {
8610 		rtw89_err(rtwdev, "failed to alloc skb for mrc add\n");
8611 		return -ENOMEM;
8612 	}
8613 
8614 	skb_put(skb, len);
8615 	tmp = skb->data;
8616 
8617 	h2c_head = tmp;
8618 	h2c_head->w0 = le32_encode_bits(arg->sch_idx,
8619 					RTW89_H2C_MRC_ADD_W0_SCH_IDX) |
8620 		       le32_encode_bits(arg->sch_type,
8621 					RTW89_H2C_MRC_ADD_W0_SCH_TYPE) |
8622 		       le32_encode_bits(arg->slot_num,
8623 					RTW89_H2C_MRC_ADD_W0_SLOT_NUM) |
8624 		       le32_encode_bits(arg->btc_in_sch,
8625 					RTW89_H2C_MRC_ADD_W0_BTC_IN_SCH);
8626 
8627 	tmp += sizeof(*h2c_head);
8628 	for (i = 0; i < arg->slot_num; i++)
8629 		tmp += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], tmp);
8630 
8631 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8632 			      H2C_CAT_MAC,
8633 			      H2C_CL_MRC,
8634 			      H2C_FUNC_ADD_MRC, 0, 0,
8635 			      len);
8636 
8637 	ret = rtw89_h2c_tx(rtwdev, skb, false);
8638 	if (ret) {
8639 		rtw89_err(rtwdev, "failed to send h2c\n");
8640 		dev_kfree_skb_any(skb);
8641 		return -EBUSY;
8642 	}
8643 
8644 	return 0;
8645 }
8646 
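/*
 * Start an MRC schedule at the given 64-bit @arg->start_tsf (split into
 * start_tsf_high/low); @arg->action and @arg->old_sch_idx presumably select
 * between a plain start and a switch from a previous schedule. Waits for the
 * START_MRC completion event of @arg->sch_idx.
 */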
8647 int rtw89_fw_h2c_mrc_start(struct rtw89_dev *rtwdev,
8648 			   const struct rtw89_fw_mrc_start_arg *arg)
8649 {
8650 	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
8651 	struct rtw89_h2c_mrc_start *h2c;
8652 	u32 len = sizeof(*h2c);
8653 	struct sk_buff *skb;
8654 	unsigned int cond;
8655 
8656 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
8657 	if (!skb) {
8658 		rtw89_err(rtwdev, "failed to alloc skb for mrc start\n");
8659 		return -ENOMEM;
8660 	}
8661 
8662 	skb_put(skb, len);
8663 	h2c = (struct rtw89_h2c_mrc_start *)skb->data;
8664 
8665 	h2c->w0 = le32_encode_bits(arg->sch_idx,
8666 				   RTW89_H2C_MRC_START_W0_SCH_IDX) |
8667 		  le32_encode_bits(arg->old_sch_idx,
8668 				   RTW89_H2C_MRC_START_W0_OLD_SCH_IDX) |
8669 		  le32_encode_bits(arg->action,
8670 				   RTW89_H2C_MRC_START_W0_ACTION);
8671 
8672 	h2c->start_tsf_high = cpu_to_le32(arg->start_tsf >> 32);
8673 	h2c->start_tsf_low = cpu_to_le32(arg->start_tsf);
8674 
8675 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8676 			      H2C_CAT_MAC,
8677 			      H2C_CL_MRC,
8678 			      H2C_FUNC_START_MRC, 0, 0,
8679 			      len);
8680 
8681 	cond = RTW89_MRC_WAIT_COND(arg->sch_idx, H2C_FUNC_START_MRC);
8682 	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
8683 }
8684 
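/*
 * Delete MRC schedule @sch_idx and wait for the DEL_MRC completion event.
 * @slot_idx is carried in the STOP_SLOT_IDX field, so it presumably tells
 * the firmware at which slot to stop before tearing the schedule down.
 */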
8685 int rtw89_fw_h2c_mrc_del(struct rtw89_dev *rtwdev, u8 sch_idx, u8 slot_idx)
8686 {
8687 	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
8688 	struct rtw89_h2c_mrc_del *h2c;
8689 	u32 len = sizeof(*h2c);
8690 	struct sk_buff *skb;
8691 	unsigned int cond;
8692 
8693 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
8694 	if (!skb) {
8695 		rtw89_err(rtwdev, "failed to alloc skb for mrc del\n");
8696 		return -ENOMEM;
8697 	}
8698 
8699 	skb_put(skb, len);
8700 	h2c = (struct rtw89_h2c_mrc_del *)skb->data;
8701 
8702 	h2c->w0 = le32_encode_bits(sch_idx, RTW89_H2C_MRC_DEL_W0_SCH_IDX) |
8703 		  le32_encode_bits(slot_idx, RTW89_H2C_MRC_DEL_W0_STOP_SLOT_IDX);
8704 
8705 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8706 			      H2C_CAT_MAC,
8707 			      H2C_CL_MRC,
8708 			      H2C_FUNC_DEL_MRC, 0, 0,
8709 			      len);
8710 
8711 	cond = RTW89_MRC_WAIT_COND(sch_idx, H2C_FUNC_DEL_MRC);
8712 	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
8713 }
8714 
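/*
 * Query the firmware for the current TSF of up to @arg->num (band, port)
 * pairs. The C2H handler is expected to have parsed the reply into
 * wait->data.buf by the time the wait completes, from where the report is
 * copied back into @rpt.
 */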
8715 int rtw89_fw_h2c_mrc_req_tsf(struct rtw89_dev *rtwdev,
8716 			     const struct rtw89_fw_mrc_req_tsf_arg *arg,
8717 			     struct rtw89_mac_mrc_tsf_rpt *rpt)
8718 {
8719 	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
8720 	struct rtw89_h2c_mrc_req_tsf *h2c;
8721 	struct rtw89_mac_mrc_tsf_rpt *tmp;
8722 	struct sk_buff *skb;
8723 	unsigned int i;
8724 	u32 len;
8725 	int ret;
8726 
8727 	len = struct_size(h2c, infos, arg->num);
8728 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
8729 	if (!skb) {
8730 		rtw89_err(rtwdev, "failed to alloc skb for mrc req tsf\n");
8731 		return -ENOMEM;
8732 	}
8733 
8734 	skb_put(skb, len);
8735 	h2c = (struct rtw89_h2c_mrc_req_tsf *)skb->data;
8736 
8737 	h2c->req_tsf_num = arg->num;
8738 	for (i = 0; i < arg->num; i++)
8739 		h2c->infos[i] =
8740 			u8_encode_bits(arg->infos[i].band,
8741 				       RTW89_H2C_MRC_REQ_TSF_INFO_BAND) |
8742 			u8_encode_bits(arg->infos[i].port,
8743 				       RTW89_H2C_MRC_REQ_TSF_INFO_PORT);
8744 
8745 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8746 			      H2C_CAT_MAC,
8747 			      H2C_CL_MRC,
8748 			      H2C_FUNC_MRC_REQ_TSF, 0, 0,
8749 			      len);
8750 
8751 	ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_MRC_WAIT_COND_REQ_TSF);
8752 	if (ret)
8753 		return ret;
8754 
8755 	tmp = (struct rtw89_mac_mrc_tsf_rpt *)wait->data.buf;
8756 	*rpt = *tmp;
8757 
8758 	return 0;
8759 }
8760 
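/*
 * Update (per @arg->action, presumably add or delete) @arg->client_macid in
 * the MAC ID bitmap the firmware keeps for @arg->macid within schedule
 * @arg->sch_idx. Sent without waiting for a completion event.
 */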
8761 int rtw89_fw_h2c_mrc_upd_bitmap(struct rtw89_dev *rtwdev,
8762 				const struct rtw89_fw_mrc_upd_bitmap_arg *arg)
8763 {
8764 	struct rtw89_h2c_mrc_upd_bitmap *h2c;
8765 	u32 len = sizeof(*h2c);
8766 	struct sk_buff *skb;
8767 	int ret;
8768 
8769 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
8770 	if (!skb) {
8771 		rtw89_err(rtwdev, "failed to alloc skb for mrc upd bitmap\n");
8772 		return -ENOMEM;
8773 	}
8774 
8775 	skb_put(skb, len);
8776 	h2c = (struct rtw89_h2c_mrc_upd_bitmap *)skb->data;
8777 
8778 	h2c->w0 = le32_encode_bits(arg->sch_idx,
8779 				   RTW89_H2C_MRC_UPD_BITMAP_W0_SCH_IDX) |
8780 		  le32_encode_bits(arg->action,
8781 				   RTW89_H2C_MRC_UPD_BITMAP_W0_ACTION) |
8782 		  le32_encode_bits(arg->macid,
8783 				   RTW89_H2C_MRC_UPD_BITMAP_W0_MACID);
8784 	h2c->w1 = le32_encode_bits(arg->client_macid,
8785 				   RTW89_H2C_MRC_UPD_BITMAP_W1_CLIENT_MACID);
8786 
8787 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8788 			      H2C_CAT_MAC,
8789 			      H2C_CL_MRC,
8790 			      H2C_FUNC_MRC_UPD_BITMAP, 0, 0,
8791 			      len);
8792 
8793 	ret = rtw89_h2c_tx(rtwdev, skb, false);
8794 	if (ret) {
8795 		rtw89_err(rtwdev, "failed to send h2c\n");
8796 		dev_kfree_skb_any(skb);
8797 		return -EBUSY;
8798 	}
8799 
8800 	return 0;
8801 }
8802 
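/*
 * Enable TSF synchronization from the (band, port) pair in @arg->src to the
 * one in @arg->dest with the given @arg->offset. Sent without waiting for a
 * completion event.
 */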
8803 int rtw89_fw_h2c_mrc_sync(struct rtw89_dev *rtwdev,
8804 			  const struct rtw89_fw_mrc_sync_arg *arg)
8805 {
8806 	struct rtw89_h2c_mrc_sync *h2c;
8807 	u32 len = sizeof(*h2c);
8808 	struct sk_buff *skb;
8809 	int ret;
8810 
8811 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
8812 	if (!skb) {
8813 		rtw89_err(rtwdev, "failed to alloc skb for mrc sync\n");
8814 		return -ENOMEM;
8815 	}
8816 
8817 	skb_put(skb, len);
8818 	h2c = (struct rtw89_h2c_mrc_sync *)skb->data;
8819 
8820 	h2c->w0 = le32_encode_bits(true, RTW89_H2C_MRC_SYNC_W0_SYNC_EN) |
8821 		  le32_encode_bits(arg->src.port,
8822 				   RTW89_H2C_MRC_SYNC_W0_SRC_PORT) |
8823 		  le32_encode_bits(arg->src.band,
8824 				   RTW89_H2C_MRC_SYNC_W0_SRC_BAND) |
8825 		  le32_encode_bits(arg->dest.port,
8826 				   RTW89_H2C_MRC_SYNC_W0_DEST_PORT) |
8827 		  le32_encode_bits(arg->dest.band,
8828 				   RTW89_H2C_MRC_SYNC_W0_DEST_BAND);
8829 	h2c->w1 = le32_encode_bits(arg->offset, RTW89_H2C_MRC_SYNC_W1_OFFSET);
8830 
8831 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8832 			      H2C_CAT_MAC,
8833 			      H2C_CL_MRC,
8834 			      H2C_FUNC_MRC_SYNC, 0, 0,
8835 			      len);
8836 
8837 	ret = rtw89_h2c_tx(rtwdev, skb, false);
8838 	if (ret) {
8839 		rtw89_err(rtwdev, "failed to send h2c\n");
8840 		dev_kfree_skb_any(skb);
8841 		return -EBUSY;
8842 	}
8843 
8844 	return 0;
8845 }
8846 
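/*
 * Update the duration of @arg->slot_num slots of a running MRC schedule,
 * anchored at the new 64-bit @arg->start_tsf. Sent without waiting for a
 * completion event.
 */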
8847 int rtw89_fw_h2c_mrc_upd_duration(struct rtw89_dev *rtwdev,
8848 				  const struct rtw89_fw_mrc_upd_duration_arg *arg)
8849 {
8850 	struct rtw89_h2c_mrc_upd_duration *h2c;
8851 	struct sk_buff *skb;
8852 	unsigned int i;
8853 	u32 len;
8854 	int ret;
8855 
8856 	len = struct_size(h2c, slots, arg->slot_num);
8857 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
8858 	if (!skb) {
8859 		rtw89_err(rtwdev, "failed to alloc skb for mrc upd duration\n");
8860 		return -ENOMEM;
8861 	}
8862 
8863 	skb_put(skb, len);
8864 	h2c = (struct rtw89_h2c_mrc_upd_duration *)skb->data;
8865 
8866 	h2c->w0 = le32_encode_bits(arg->sch_idx,
8867 				   RTW89_H2C_MRC_UPD_DURATION_W0_SCH_IDX) |
8868 		  le32_encode_bits(arg->slot_num,
8869 				   RTW89_H2C_MRC_UPD_DURATION_W0_SLOT_NUM) |
8870 		  le32_encode_bits(false,
8871 				   RTW89_H2C_MRC_UPD_DURATION_W0_BTC_IN_SCH);
8872 
8873 	h2c->start_tsf_high = cpu_to_le32(arg->start_tsf >> 32);
8874 	h2c->start_tsf_low = cpu_to_le32(arg->start_tsf);
8875 
8876 	for (i = 0; i < arg->slot_num; i++) {
8877 		h2c->slots[i] =
8878 			le32_encode_bits(arg->slots[i].slot_idx,
8879 					 RTW89_H2C_MRC_UPD_DURATION_SLOT_SLOT_IDX) |
8880 			le32_encode_bits(arg->slots[i].duration,
8881 					 RTW89_H2C_MRC_UPD_DURATION_SLOT_DURATION);
8882 	}
8883 
8884 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8885 			      H2C_CAT_MAC,
8886 			      H2C_CL_MRC,
8887 			      H2C_FUNC_MRC_UPD_DURATION, 0, 0,
8888 			      len);
8889 
8890 	ret = rtw89_h2c_tx(rtwdev, skb, false);
8891 	if (ret) {
8892 		rtw89_err(rtwdev, "failed to send h2c\n");
8893 		dev_kfree_skb_any(skb);
8894 		return -EBUSY;
8895 	}
8896 
8897 	return 0;
8898 }
8899 
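/*
 * Enable or disable the firmware's AP power interrupt reporting
 * (RTW89_H2C_AP_INFO_W0_PWR_INT_EN). Callers go through
 * rtw89_fw_h2c_ap_info_refcount() below so that multiple users can share
 * the setting.
 */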
8900 static int rtw89_fw_h2c_ap_info(struct rtw89_dev *rtwdev, bool en)
8901 {
8902 	struct rtw89_h2c_ap_info *h2c;
8903 	u32 len = sizeof(*h2c);
8904 	struct sk_buff *skb;
8905 	int ret;
8906 
8907 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
8908 	if (!skb) {
8909 		rtw89_err(rtwdev, "failed to alloc skb for ap info\n");
8910 		return -ENOMEM;
8911 	}
8912 
8913 	skb_put(skb, len);
8914 	h2c = (struct rtw89_h2c_ap_info *)skb->data;
8915 
8916 	h2c->w0 = le32_encode_bits(en, RTW89_H2C_AP_INFO_W0_PWR_INT_EN);
8917 
8918 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8919 			      H2C_CAT_MAC,
8920 			      H2C_CL_AP,
8921 			      H2C_FUNC_AP_INFO, 0, 0,
8922 			      len);
8923 
8924 	ret = rtw89_h2c_tx(rtwdev, skb, false);
8925 	if (ret) {
8926 		rtw89_err(rtwdev, "failed to send h2c\n");
8927 		dev_kfree_skb_any(skb);
8928 		return -EBUSY;
8929 	}
8930 
8931 	return 0;
8932 }
8933 
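/*
 * Reference-counted wrapper around rtw89_fw_h2c_ap_info(): only the first
 * enable and the last disable are actually forwarded to the firmware. The
 * 0 -> 1 transition uses refcount_set() because refcount_inc() may not be
 * used on a zero refcount. Presumably called once per user, e.g.
 * rtw89_fw_h2c_ap_info_refcount(rtwdev, true) when a user appears and the
 * false counterpart when it goes away.
 */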
8934 int rtw89_fw_h2c_ap_info_refcount(struct rtw89_dev *rtwdev, bool en)
8935 {
8936 	int ret;
8937 
8938 	if (en) {
8939 		if (refcount_inc_not_zero(&rtwdev->refcount_ap_info))
8940 			return 0;
8941 	} else {
8942 		if (!refcount_dec_and_test(&rtwdev->refcount_ap_info))
8943 			return 0;
8944 	}
8945 
8946 	ret = rtw89_fw_h2c_ap_info(rtwdev, en);
8947 	if (ret) {
8948 		if (!test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags))
8949 			return ret;
8950 
8951 		/* During recovery, neither the driver nor the stack has full
8952 		 * error handling, so only warn and return 0 with the refcount
8953 		 * bumped as usual. This avoids a refcount underflow when this
8954 		 * function is later called with @en == false.
8955 		 */
8956 		rtw89_warn(rtwdev, "h2c ap_info failed during SER\n");
8957 	}
8958 
8959 	if (en)
8960 		refcount_set(&rtwdev->refcount_ap_info, 1);
8961 
8962 	return 0;
8963 }
8964 
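/*
 * Enable or disable the MLO link identified by @rtwvif_link->mac_id (the
 * enable flag travels in the OPTION field) and wait for the matching
 * MLO_LINK_CFG completion event on rtwdev->mlo.wait.
 */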
8965 int rtw89_fw_h2c_mlo_link_cfg(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
8966 			      bool enable)
8967 {
8968 	struct rtw89_wait_info *wait = &rtwdev->mlo.wait;
8969 	struct rtw89_h2c_mlo_link_cfg *h2c;
8970 	u8 mac_id = rtwvif_link->mac_id;
8971 	u32 len = sizeof(*h2c);
8972 	struct sk_buff *skb;
8973 	unsigned int cond;
8974 	int ret;
8975 
8976 	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
8977 	if (!skb) {
8978 		rtw89_err(rtwdev, "failed to alloc skb for mlo link cfg\n");
8979 		return -ENOMEM;
8980 	}
8981 
8982 	skb_put(skb, len);
8983 	h2c = (struct rtw89_h2c_mlo_link_cfg *)skb->data;
8984 
8985 	h2c->w0 = le32_encode_bits(mac_id, RTW89_H2C_MLO_LINK_CFG_W0_MACID) |
8986 		  le32_encode_bits(enable, RTW89_H2C_MLO_LINK_CFG_W0_OPTION);
8987 
8988 	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8989 			      H2C_CAT_MAC,
8990 			      H2C_CL_MLO,
8991 			      H2C_FUNC_MLO_LINK_CFG, 0, 0,
8992 			      len);
8993 
8994 	cond = RTW89_MLO_WAIT_COND(mac_id, H2C_FUNC_MLO_LINK_CFG);
8995 
8996 	ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
8997 	if (ret) {
8998 		rtw89_err(rtwdev, "mlo link cfg (%s link id %u) failed: %d\n",
8999 			  str_enable_disable(enable), rtwvif_link->link_id, ret);
9000 		return ret;
9001 	}
9002 
9003 	return 0;
9004 }
9005 
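/*
 * Firmware TX power tables may carry entries that are larger than the
 * structures this driver knows. __fw_txpwr_entry_acceptable() accepts such an
 * entry only if every byte beyond the driver's structure is zero, e.g. with
 * conf->ent_sz == 8 and a 6-byte driver struct, bytes 6 and 7 of the entry
 * must be zero; otherwise the entry presumably carries data this driver would
 * silently ignore, so it is skipped.
 */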
9006 static bool __fw_txpwr_entry_zero_ext(const void *ext_ptr, u8 ext_len)
9007 {
9008 	static const u8 zeros[U8_MAX] = {};
9009 
9010 	return memcmp(ext_ptr, zeros, ext_len) == 0;
9011 }
9012 
9013 #define __fw_txpwr_entry_acceptable(e, cursor, ent_sz)	\
9014 ({							\
9015 	u8 __var_sz = sizeof(*(e));			\
9016 	bool __accept;					\
9017 	if (__var_sz >= (ent_sz))			\
9018 		__accept = true;			\
9019 	else						\
9020 		__accept = __fw_txpwr_entry_zero_ext((cursor) + __var_sz,\
9021 						     (ent_sz) - __var_sz);\
9022 	__accept;					\
9023 })
9024 
9025 static bool
9026 fw_txpwr_byrate_entry_valid(const struct rtw89_fw_txpwr_byrate_entry *e,
9027 			    const void *cursor,
9028 			    const struct rtw89_txpwr_conf *conf)
9029 {
9030 	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
9031 		return false;
9032 
9033 	if (e->band >= RTW89_BAND_NUM || e->bw >= RTW89_BYR_BW_NUM)
9034 		return false;
9035 
9036 	switch (e->rs) {
9037 	case RTW89_RS_CCK:
9038 		if (e->shf + e->len > RTW89_RATE_CCK_NUM)
9039 			return false;
9040 		break;
9041 	case RTW89_RS_OFDM:
9042 		if (e->shf + e->len > RTW89_RATE_OFDM_NUM)
9043 			return false;
9044 		break;
9045 	case RTW89_RS_MCS:
9046 		if (e->shf + e->len > __RTW89_RATE_MCS_NUM ||
9047 		    e->nss >= RTW89_NSS_NUM ||
9048 		    e->ofdma >= RTW89_OFDMA_NUM)
9049 			return false;
9050 		break;
9051 	case RTW89_RS_HEDCM:
9052 		if (e->shf + e->len > RTW89_RATE_HEDCM_NUM ||
9053 		    e->nss >= RTW89_NSS_HEDCM_NUM ||
9054 		    e->ofdma >= RTW89_OFDMA_NUM)
9055 			return false;
9056 		break;
9057 	case RTW89_RS_OFFSET:
9058 		if (e->shf + e->len > __RTW89_RATE_OFFSET_NUM)
9059 			return false;
9060 		break;
9061 	default:
9062 		return false;
9063 	}
9064 
9065 	return true;
9066 }
9067 
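/*
 * Walk the by-rate TX power entries of the firmware file and scatter their
 * values into rtwdev->byr[band][bw]: each entry packs one 8-bit power value
 * per rate in its 32-bit data word, starting at rate index entry.shf.
 */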
9068 static
9069 void rtw89_fw_load_txpwr_byrate(struct rtw89_dev *rtwdev,
9070 				const struct rtw89_txpwr_table *tbl)
9071 {
9072 	const struct rtw89_txpwr_conf *conf = tbl->data;
9073 	struct rtw89_fw_txpwr_byrate_entry entry = {};
9074 	struct rtw89_txpwr_byrate *byr_head;
9075 	struct rtw89_rate_desc desc = {};
9076 	const void *cursor;
9077 	u32 data;
9078 	s8 *byr;
9079 	int i;
9080 
9081 	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
9082 		if (!fw_txpwr_byrate_entry_valid(&entry, cursor, conf))
9083 			continue;
9084 
9085 		byr_head = &rtwdev->byr[entry.band][entry.bw];
9086 		data = le32_to_cpu(entry.data);
9087 		desc.ofdma = entry.ofdma;
9088 		desc.nss = entry.nss;
9089 		desc.rs = entry.rs;
9090 
9091 		for (i = 0; i < entry.len; i++, data >>= 8) {
9092 			desc.idx = entry.shf + i;
9093 			byr = rtw89_phy_raw_byr_seek(rtwdev, byr_head, &desc);
9094 			*byr = data & 0xff;
9095 		}
9096 	}
9097 }
9098 
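/*
 * The *_entry_valid()/rtw89_fw_load_*() pairs below all follow the same
 * pattern: bounds-check every index field of a firmware table entry against
 * the driver's array dimensions, then copy the value into the matching slot
 * of the in-driver lookup table. Out-of-range entries are skipped rather
 * than treated as fatal.
 */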
9099 static bool
9100 fw_txpwr_lmt_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_2ghz_entry *e,
9101 			      const void *cursor,
9102 			      const struct rtw89_txpwr_conf *conf)
9103 {
9104 	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
9105 		return false;
9106 
9107 	if (e->bw >= RTW89_2G_BW_NUM)
9108 		return false;
9109 	if (e->nt >= RTW89_NTX_NUM)
9110 		return false;
9111 	if (e->rs >= RTW89_RS_LMT_NUM)
9112 		return false;
9113 	if (e->bf >= RTW89_BF_NUM)
9114 		return false;
9115 	if (e->regd >= RTW89_REGD_NUM)
9116 		return false;
9117 	if (e->ch_idx >= RTW89_2G_CH_NUM)
9118 		return false;
9119 
9120 	return true;
9121 }
9122 
9123 static
9124 void rtw89_fw_load_txpwr_lmt_2ghz(struct rtw89_txpwr_lmt_2ghz_data *data)
9125 {
9126 	const struct rtw89_txpwr_conf *conf = &data->conf;
9127 	struct rtw89_fw_txpwr_lmt_2ghz_entry entry = {};
9128 	const void *cursor;
9129 
9130 	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
9131 		if (!fw_txpwr_lmt_2ghz_entry_valid(&entry, cursor, conf))
9132 			continue;
9133 
9134 		data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd]
9135 		       [entry.ch_idx] = entry.v;
9136 	}
9137 }
9138 
9139 static bool
9140 fw_txpwr_lmt_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_5ghz_entry *e,
9141 			      const void *cursor,
9142 			      const struct rtw89_txpwr_conf *conf)
9143 {
9144 	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
9145 		return false;
9146 
9147 	if (e->bw >= RTW89_5G_BW_NUM)
9148 		return false;
9149 	if (e->nt >= RTW89_NTX_NUM)
9150 		return false;
9151 	if (e->rs >= RTW89_RS_LMT_NUM)
9152 		return false;
9153 	if (e->bf >= RTW89_BF_NUM)
9154 		return false;
9155 	if (e->regd >= RTW89_REGD_NUM)
9156 		return false;
9157 	if (e->ch_idx >= RTW89_5G_CH_NUM)
9158 		return false;
9159 
9160 	return true;
9161 }
9162 
9163 static
9164 void rtw89_fw_load_txpwr_lmt_5ghz(struct rtw89_txpwr_lmt_5ghz_data *data)
9165 {
9166 	const struct rtw89_txpwr_conf *conf = &data->conf;
9167 	struct rtw89_fw_txpwr_lmt_5ghz_entry entry = {};
9168 	const void *cursor;
9169 
9170 	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
9171 		if (!fw_txpwr_lmt_5ghz_entry_valid(&entry, cursor, conf))
9172 			continue;
9173 
9174 		data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd]
9175 		       [entry.ch_idx] = entry.v;
9176 	}
9177 }
9178 
9179 static bool
9180 fw_txpwr_lmt_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_6ghz_entry *e,
9181 			      const void *cursor,
9182 			      const struct rtw89_txpwr_conf *conf)
9183 {
9184 	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
9185 		return false;
9186 
9187 	if (e->bw >= RTW89_6G_BW_NUM)
9188 		return false;
9189 	if (e->nt >= RTW89_NTX_NUM)
9190 		return false;
9191 	if (e->rs >= RTW89_RS_LMT_NUM)
9192 		return false;
9193 	if (e->bf >= RTW89_BF_NUM)
9194 		return false;
9195 	if (e->regd >= RTW89_REGD_NUM)
9196 		return false;
9197 	if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER)
9198 		return false;
9199 	if (e->ch_idx >= RTW89_6G_CH_NUM)
9200 		return false;
9201 
9202 	return true;
9203 }
9204 
9205 static
9206 void rtw89_fw_load_txpwr_lmt_6ghz(struct rtw89_txpwr_lmt_6ghz_data *data)
9207 {
9208 	const struct rtw89_txpwr_conf *conf = &data->conf;
9209 	struct rtw89_fw_txpwr_lmt_6ghz_entry entry = {};
9210 	const void *cursor;
9211 
9212 	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
9213 		if (!fw_txpwr_lmt_6ghz_entry_valid(&entry, cursor, conf))
9214 			continue;
9215 
9216 		data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd]
9217 		       [entry.reg_6ghz_power][entry.ch_idx] = entry.v;
9218 	}
9219 }
9220 
9221 static bool
9222 fw_txpwr_lmt_ru_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_2ghz_entry *e,
9223 				 const void *cursor,
9224 				 const struct rtw89_txpwr_conf *conf)
9225 {
9226 	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
9227 		return false;
9228 
9229 	if (e->ru >= RTW89_RU_NUM)
9230 		return false;
9231 	if (e->nt >= RTW89_NTX_NUM)
9232 		return false;
9233 	if (e->regd >= RTW89_REGD_NUM)
9234 		return false;
9235 	if (e->ch_idx >= RTW89_2G_CH_NUM)
9236 		return false;
9237 
9238 	return true;
9239 }
9240 
9241 static
9242 void rtw89_fw_load_txpwr_lmt_ru_2ghz(struct rtw89_txpwr_lmt_ru_2ghz_data *data)
9243 {
9244 	const struct rtw89_txpwr_conf *conf = &data->conf;
9245 	struct rtw89_fw_txpwr_lmt_ru_2ghz_entry entry = {};
9246 	const void *cursor;
9247 
9248 	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
9249 		if (!fw_txpwr_lmt_ru_2ghz_entry_valid(&entry, cursor, conf))
9250 			continue;
9251 
9252 		data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v;
9253 	}
9254 }
9255 
9256 static bool
9257 fw_txpwr_lmt_ru_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_5ghz_entry *e,
9258 				 const void *cursor,
9259 				 const struct rtw89_txpwr_conf *conf)
9260 {
9261 	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
9262 		return false;
9263 
9264 	if (e->ru >= RTW89_RU_NUM)
9265 		return false;
9266 	if (e->nt >= RTW89_NTX_NUM)
9267 		return false;
9268 	if (e->regd >= RTW89_REGD_NUM)
9269 		return false;
9270 	if (e->ch_idx >= RTW89_5G_CH_NUM)
9271 		return false;
9272 
9273 	return true;
9274 }
9275 
9276 static
9277 void rtw89_fw_load_txpwr_lmt_ru_5ghz(struct rtw89_txpwr_lmt_ru_5ghz_data *data)
9278 {
9279 	const struct rtw89_txpwr_conf *conf = &data->conf;
9280 	struct rtw89_fw_txpwr_lmt_ru_5ghz_entry entry = {};
9281 	const void *cursor;
9282 
9283 	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
9284 		if (!fw_txpwr_lmt_ru_5ghz_entry_valid(&entry, cursor, conf))
9285 			continue;
9286 
9287 		data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v;
9288 	}
9289 }
9290 
9291 static bool
9292 fw_txpwr_lmt_ru_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_6ghz_entry *e,
9293 				 const void *cursor,
9294 				 const struct rtw89_txpwr_conf *conf)
9295 {
9296 	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
9297 		return false;
9298 
9299 	if (e->ru >= RTW89_RU_NUM)
9300 		return false;
9301 	if (e->nt >= RTW89_NTX_NUM)
9302 		return false;
9303 	if (e->regd >= RTW89_REGD_NUM)
9304 		return false;
9305 	if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER)
9306 		return false;
9307 	if (e->ch_idx >= RTW89_6G_CH_NUM)
9308 		return false;
9309 
9310 	return true;
9311 }
9312 
9313 static
9314 void rtw89_fw_load_txpwr_lmt_ru_6ghz(struct rtw89_txpwr_lmt_ru_6ghz_data *data)
9315 {
9316 	const struct rtw89_txpwr_conf *conf = &data->conf;
9317 	struct rtw89_fw_txpwr_lmt_ru_6ghz_entry entry = {};
9318 	const void *cursor;
9319 
9320 	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
9321 		if (!fw_txpwr_lmt_ru_6ghz_entry_valid(&entry, cursor, conf))
9322 			continue;
9323 
9324 		data->v[entry.ru][entry.nt][entry.regd][entry.reg_6ghz_power]
9325 		       [entry.ch_idx] = entry.v;
9326 	}
9327 }
9328 
9329 static bool
9330 fw_tx_shape_lmt_entry_valid(const struct rtw89_fw_tx_shape_lmt_entry *e,
9331 			    const void *cursor,
9332 			    const struct rtw89_txpwr_conf *conf)
9333 {
9334 	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
9335 		return false;
9336 
9337 	if (e->band >= RTW89_BAND_NUM)
9338 		return false;
9339 	if (e->tx_shape_rs >= RTW89_RS_TX_SHAPE_NUM)
9340 		return false;
9341 	if (e->regd >= RTW89_REGD_NUM)
9342 		return false;
9343 
9344 	return true;
9345 }
9346 
9347 static
9348 void rtw89_fw_load_tx_shape_lmt(struct rtw89_tx_shape_lmt_data *data)
9349 {
9350 	const struct rtw89_txpwr_conf *conf = &data->conf;
9351 	struct rtw89_fw_tx_shape_lmt_entry entry = {};
9352 	const void *cursor;
9353 
9354 	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
9355 		if (!fw_tx_shape_lmt_entry_valid(&entry, cursor, conf))
9356 			continue;
9357 
9358 		data->v[entry.band][entry.tx_shape_rs][entry.regd] = entry.v;
9359 	}
9360 }
9361 
9362 static bool
9363 fw_tx_shape_lmt_ru_entry_valid(const struct rtw89_fw_tx_shape_lmt_ru_entry *e,
9364 			       const void *cursor,
9365 			       const struct rtw89_txpwr_conf *conf)
9366 {
9367 	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
9368 		return false;
9369 
9370 	if (e->band >= RTW89_BAND_NUM)
9371 		return false;
9372 	if (e->regd >= RTW89_REGD_NUM)
9373 		return false;
9374 
9375 	return true;
9376 }
9377 
9378 static
9379 void rtw89_fw_load_tx_shape_lmt_ru(struct rtw89_tx_shape_lmt_ru_data *data)
9380 {
9381 	const struct rtw89_txpwr_conf *conf = &data->conf;
9382 	struct rtw89_fw_tx_shape_lmt_ru_entry entry = {};
9383 	const void *cursor;
9384 
9385 	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
9386 		if (!fw_tx_shape_lmt_ru_entry_valid(&entry, cursor, conf))
9387 			continue;
9388 
9389 		data->v[entry.band][entry.regd] = entry.v;
9390 	}
9391 }
9392 
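/*
 * The "da" rule set is only considered present when, for every band the chip
 * supports, both its limit and RU-limit tables were provided by the firmware
 * file; a partial set is treated as absent.
 */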
9393 static bool rtw89_fw_has_da_txpwr_table(struct rtw89_dev *rtwdev,
9394 					const struct rtw89_rfe_parms *parms)
9395 {
9396 	const struct rtw89_chip_info *chip = rtwdev->chip;
9397 
9398 	if (chip->support_bands & BIT(NL80211_BAND_2GHZ) &&
9399 	    !(parms->rule_da_2ghz.lmt && parms->rule_da_2ghz.lmt_ru))
9400 		return false;
9401 
9402 	if (chip->support_bands & BIT(NL80211_BAND_5GHZ) &&
9403 	    !(parms->rule_da_5ghz.lmt && parms->rule_da_5ghz.lmt_ru))
9404 		return false;
9405 
9406 	if (chip->support_bands & BIT(NL80211_BAND_6GHZ) &&
9407 	    !(parms->rule_da_6ghz.lmt && parms->rule_da_6ghz.lmt_ru))
9408 		return false;
9409 
9410 	return true;
9411 }
9412 
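/*
 * Merge TX power tables parsed from the firmware file into the RFE
 * parameters: start from @init (the built-in defaults, if any), override
 * every table for which the firmware file carried a valid configuration,
 * and finally record whether a complete "da" set is available.
 */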
9413 const struct rtw89_rfe_parms *
9414 rtw89_load_rfe_data_from_fw(struct rtw89_dev *rtwdev,
9415 			    const struct rtw89_rfe_parms *init)
9416 {
9417 	struct rtw89_rfe_data *rfe_data = rtwdev->rfe_data;
9418 	struct rtw89_rfe_parms *parms;
9419 
9420 	if (!rfe_data)
9421 		return init;
9422 
9423 	parms = &rfe_data->rfe_parms;
9424 	if (init)
9425 		*parms = *init;
9426 
9427 	if (rtw89_txpwr_conf_valid(&rfe_data->byrate.conf)) {
9428 		rfe_data->byrate.tbl.data = &rfe_data->byrate.conf;
9429 		rfe_data->byrate.tbl.size = 0; /* don't care here */
9430 		rfe_data->byrate.tbl.load = rtw89_fw_load_txpwr_byrate;
9431 		parms->byr_tbl = &rfe_data->byrate.tbl;
9432 	}
9433 
9434 	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_2ghz.conf)) {
9435 		rtw89_fw_load_txpwr_lmt_2ghz(&rfe_data->lmt_2ghz);
9436 		parms->rule_2ghz.lmt = &rfe_data->lmt_2ghz.v;
9437 	}
9438 
9439 	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_5ghz.conf)) {
9440 		rtw89_fw_load_txpwr_lmt_5ghz(&rfe_data->lmt_5ghz);
9441 		parms->rule_5ghz.lmt = &rfe_data->lmt_5ghz.v;
9442 	}
9443 
9444 	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_6ghz.conf)) {
9445 		rtw89_fw_load_txpwr_lmt_6ghz(&rfe_data->lmt_6ghz);
9446 		parms->rule_6ghz.lmt = &rfe_data->lmt_6ghz.v;
9447 	}
9448 
9449 	if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_2ghz.conf)) {
9450 		rtw89_fw_load_txpwr_lmt_2ghz(&rfe_data->da_lmt_2ghz);
9451 		parms->rule_da_2ghz.lmt = &rfe_data->da_lmt_2ghz.v;
9452 	}
9453 
9454 	if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_5ghz.conf)) {
9455 		rtw89_fw_load_txpwr_lmt_5ghz(&rfe_data->da_lmt_5ghz);
9456 		parms->rule_da_5ghz.lmt = &rfe_data->da_lmt_5ghz.v;
9457 	}
9458 
9459 	if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_6ghz.conf)) {
9460 		rtw89_fw_load_txpwr_lmt_6ghz(&rfe_data->da_lmt_6ghz);
9461 		parms->rule_da_6ghz.lmt = &rfe_data->da_lmt_6ghz.v;
9462 	}
9463 
9464 	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_2ghz.conf)) {
9465 		rtw89_fw_load_txpwr_lmt_ru_2ghz(&rfe_data->lmt_ru_2ghz);
9466 		parms->rule_2ghz.lmt_ru = &rfe_data->lmt_ru_2ghz.v;
9467 	}
9468 
9469 	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_5ghz.conf)) {
9470 		rtw89_fw_load_txpwr_lmt_ru_5ghz(&rfe_data->lmt_ru_5ghz);
9471 		parms->rule_5ghz.lmt_ru = &rfe_data->lmt_ru_5ghz.v;
9472 	}
9473 
9474 	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_6ghz.conf)) {
9475 		rtw89_fw_load_txpwr_lmt_ru_6ghz(&rfe_data->lmt_ru_6ghz);
9476 		parms->rule_6ghz.lmt_ru = &rfe_data->lmt_ru_6ghz.v;
9477 	}
9478 
9479 	if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_ru_2ghz.conf)) {
9480 		rtw89_fw_load_txpwr_lmt_ru_2ghz(&rfe_data->da_lmt_ru_2ghz);
9481 		parms->rule_da_2ghz.lmt_ru = &rfe_data->da_lmt_ru_2ghz.v;
9482 	}
9483 
9484 	if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_ru_5ghz.conf)) {
9485 		rtw89_fw_load_txpwr_lmt_ru_5ghz(&rfe_data->da_lmt_ru_5ghz);
9486 		parms->rule_da_5ghz.lmt_ru = &rfe_data->da_lmt_ru_5ghz.v;
9487 	}
9488 
9489 	if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_ru_6ghz.conf)) {
9490 		rtw89_fw_load_txpwr_lmt_ru_6ghz(&rfe_data->da_lmt_ru_6ghz);
9491 		parms->rule_da_6ghz.lmt_ru = &rfe_data->da_lmt_ru_6ghz.v;
9492 	}
9493 
9494 	if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt.conf)) {
9495 		rtw89_fw_load_tx_shape_lmt(&rfe_data->tx_shape_lmt);
9496 		parms->tx_shape.lmt = &rfe_data->tx_shape_lmt.v;
9497 	}
9498 
9499 	if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt_ru.conf)) {
9500 		rtw89_fw_load_tx_shape_lmt_ru(&rfe_data->tx_shape_lmt_ru);
9501 		parms->tx_shape.lmt_ru = &rfe_data->tx_shape_lmt_ru.v;
9502 	}
9503 
9504 	parms->has_da = rtw89_fw_has_da_txpwr_table(rtwdev, parms);
9505 
9506 	return parms;
9507 }
9508