1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright(c) 2019-2020 Realtek Corporation
3 */
4
5 #include <linux/if_arp.h>
6 #include "cam.h"
7 #include "chan.h"
8 #include "coex.h"
9 #include "debug.h"
10 #include "fw.h"
11 #include "mac.h"
12 #include "phy.h"
13 #include "ps.h"
14 #include "reg.h"
15 #include "util.h"
16 #include "wow.h"
17
18 struct rtw89_eapol_2_of_2 {
19 u8 gtkbody[14];
20 u8 key_des_ver;
21 u8 rsvd[92];
22 } __packed;
23
24 struct rtw89_sa_query {
25 u8 category;
26 u8 action;
27 } __packed;
28
29 struct rtw89_arp_rsp {
30 u8 llc_hdr[sizeof(rfc1042_header)];
31 __be16 llc_type;
32 struct arphdr arp_hdr;
33 u8 sender_hw[ETH_ALEN];
34 __be32 sender_ip;
35 u8 target_hw[ETH_ALEN];
36 __be32 target_ip;
37 } __packed;
38
39 static const u8 mss_signature[] = {0x4D, 0x53, 0x53, 0x4B, 0x50, 0x4F, 0x4F, 0x4C};
40
41 const struct rtw89_fw_blacklist rtw89_fw_blacklist_default = {
42 .ver = 0x00,
43 .list = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
44 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
45 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
46 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
47 },
48 };
49 EXPORT_SYMBOL(rtw89_fw_blacklist_default);
50
51 union rtw89_fw_element_arg {
52 size_t offset;
53 enum rtw89_rf_path rf_path;
54 enum rtw89_fw_type fw_type;
55 };
56
57 struct rtw89_fw_element_handler {
58 int (*fn)(struct rtw89_dev *rtwdev,
59 const struct rtw89_fw_element_hdr *elm,
60 const union rtw89_fw_element_arg arg);
61 const union rtw89_fw_element_arg arg;
62 const char *name;
63 };
64
65 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
66 struct sk_buff *skb);
67 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
68 struct rtw89_wait_info *wait, unsigned int cond);
69 static int __parse_security_section(struct rtw89_dev *rtwdev,
70 struct rtw89_fw_bin_info *info,
71 struct rtw89_fw_hdr_section_info *section_info,
72 const void *content,
73 u32 *mssc_len);
74
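/* Allocate an skb for an H2C command. Headroom is reserved for the bus
 * H2C descriptor and, when requested, the H2C command header, so the
 * headers can later be pushed in front of the payload without copying.
 */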
75 static struct sk_buff *rtw89_fw_h2c_alloc_skb(struct rtw89_dev *rtwdev, u32 len,
76 bool header)
77 {
78 struct sk_buff *skb;
79 u32 header_len = 0;
80 u32 h2c_desc_size = rtwdev->chip->h2c_desc_size;
81
82 if (header)
83 header_len = H2C_HEADER_LEN;
84
85 skb = dev_alloc_skb(len + header_len + h2c_desc_size);
86 if (!skb)
87 return NULL;
88 skb_reserve(skb, header_len + h2c_desc_size);
89 memset(skb->data, 0, len);
90
91 return skb;
92 }
93
94 struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len)
95 {
96 return rtw89_fw_h2c_alloc_skb(rtwdev, len, true);
97 }
98
99 struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len)
100 {
101 return rtw89_fw_h2c_alloc_skb(rtwdev, len, false);
102 }
103
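/* Poll the firmware-download status until the WCPU reports that its
 * initialization is done, then mark the firmware as ready. Any other
 * terminal status (checksum, security, CV mismatch) is treated as fatal.
 */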
104 int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev, enum rtw89_fwdl_check_type type)
105 {
106 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
107 u8 val;
108 int ret;
109
110 ret = read_poll_timeout_atomic(mac->fwdl_get_status, val,
111 val == RTW89_FWDL_WCPU_FW_INIT_RDY,
112 1, FWDL_WAIT_CNT, false, rtwdev, type);
113 if (ret) {
114 switch (val) {
115 case RTW89_FWDL_CHECKSUM_FAIL:
116 rtw89_err(rtwdev, "fw checksum fail\n");
117 return -EINVAL;
118
119 case RTW89_FWDL_SECURITY_FAIL:
120 rtw89_err(rtwdev, "fw security fail\n");
121 return -EINVAL;
122
123 case RTW89_FWDL_CV_NOT_MATCH:
124 rtw89_err(rtwdev, "fw cv not match\n");
125 return -EINVAL;
126
127 default:
128 rtw89_err(rtwdev, "fw unexpected status %d\n", val);
129 return -EBUSY;
130 }
131 }
132
133 set_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);
134
135 return 0;
136 }
137
138 static int rtw89_fw_hdr_parser_v0(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
139 struct rtw89_fw_bin_info *info)
140 {
141 const struct rtw89_fw_hdr *fw_hdr = (const struct rtw89_fw_hdr *)fw;
142 const struct rtw89_chip_info *chip = rtwdev->chip;
143 struct rtw89_fw_hdr_section_info *section_info;
144 struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
145 const struct rtw89_fw_dynhdr_hdr *fwdynhdr;
146 const struct rtw89_fw_hdr_section *section;
147 const u8 *fw_end = fw + len;
148 const u8 *bin;
149 u32 base_hdr_len;
150 u32 mssc_len;
151 int ret;
152 u32 i;
153
154 if (!info)
155 return -EINVAL;
156
157 info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_W6_SEC_NUM);
158 base_hdr_len = struct_size(fw_hdr, sections, info->section_num);
159 info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_W7_DYN_HDR);
160 info->idmem_share_mode = le32_get_bits(fw_hdr->w7, FW_HDR_W7_IDMEM_SHARE_MODE);
161
162 if (info->dynamic_hdr_en) {
163 info->hdr_len = le32_get_bits(fw_hdr->w3, FW_HDR_W3_LEN);
164 info->dynamic_hdr_len = info->hdr_len - base_hdr_len;
165 fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len);
166 if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) {
167 rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n");
168 return -EINVAL;
169 }
170 } else {
171 info->hdr_len = base_hdr_len;
172 info->dynamic_hdr_len = 0;
173 }
174
175 bin = fw + info->hdr_len;
176
177 /* jump to section header */
178 section_info = info->section_info;
179 for (i = 0; i < info->section_num; i++) {
180 section = &fw_hdr->sections[i];
181 section_info->type =
182 le32_get_bits(section->w1, FWSECTION_HDR_W1_SECTIONTYPE);
183 section_info->len = le32_get_bits(section->w1, FWSECTION_HDR_W1_SEC_SIZE);
184
185 if (le32_get_bits(section->w1, FWSECTION_HDR_W1_CHECKSUM))
186 section_info->len += FWDL_SECTION_CHKSUM_LEN;
187 section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_W1_REDL);
188 section_info->dladdr =
189 le32_get_bits(section->w0, FWSECTION_HDR_W0_DL_ADDR) & 0x1fffffff;
190 section_info->addr = bin;
191
192 if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
193 section_info->mssc =
194 le32_get_bits(section->w2, FWSECTION_HDR_W2_MSSC);
195
196 ret = __parse_security_section(rtwdev, info, section_info,
197 bin, &mssc_len);
198 if (ret)
199 return ret;
200
201 if (sec->secure_boot && chip->chip_id == RTL8852B)
202 section_info->len_override = 960;
203 } else {
204 section_info->mssc = 0;
205 mssc_len = 0;
206 }
207
208 rtw89_debug(rtwdev, RTW89_DBG_FW,
209 "section[%d] type=%d len=0x%-6x mssc=%d mssc_len=%d addr=%tx\n",
210 i, section_info->type, section_info->len,
211 section_info->mssc, mssc_len, bin - fw);
212 rtw89_debug(rtwdev, RTW89_DBG_FW,
213 " ignore=%d key_addr=%p (0x%tx) key_len=%d key_idx=%d\n",
214 section_info->ignore, section_info->key_addr,
215 section_info->key_addr ?
216 section_info->key_addr - section_info->addr : 0,
217 section_info->key_len, section_info->key_idx);
218
219 bin += section_info->len + mssc_len;
220 section_info++;
221 }
222
223 if (fw_end != bin) {
224 rtw89_err(rtwdev, "[ERR]fw bin size\n");
225 return -EINVAL;
226 }
227
228 return 0;
229 }
230
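/* Translate the (mss_dev_type, mss_cust_idx, mss_key_num) selection from
 * efuse into a physical key index inside the MSS key pool. The selection
 * is first mapped to a bit position in the remap table; that bit must be
 * set for the key to exist, and the physical index is the count of set
 * bits preceding it. Illustrative example (values are made up): with
 * mss_sel_idx = 10, bit 2 of remap byte 1 is tested and the key index is
 * the number of set bits in byte 0 plus those in bits 0-1 of byte 1.
 */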
231 static int __get_mssc_key_idx(struct rtw89_dev *rtwdev,
232 const struct rtw89_fw_mss_pool_hdr *mss_hdr,
233 u32 rmp_tbl_size, u32 *key_idx)
234 {
235 struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
236 u32 sel_byte_idx;
237 u32 mss_sel_idx;
238 u8 sel_bit_idx;
239 int i;
240
241 if (sec->mss_dev_type == RTW89_FW_MSS_DEV_TYPE_FWSEC_DEF) {
242 if (!mss_hdr->defen)
243 return -ENOENT;
244
245 mss_sel_idx = sec->mss_cust_idx * le16_to_cpu(mss_hdr->msskey_num_max) +
246 sec->mss_key_num;
247 } else {
248 if (mss_hdr->defen)
249 mss_sel_idx = FWDL_MSS_POOL_DEFKEYSETS_SIZE << 3;
250 else
251 mss_sel_idx = 0;
252 mss_sel_idx += sec->mss_dev_type * le16_to_cpu(mss_hdr->msskey_num_max) *
253 le16_to_cpu(mss_hdr->msscust_max) +
254 sec->mss_cust_idx * le16_to_cpu(mss_hdr->msskey_num_max) +
255 sec->mss_key_num;
256 }
257
258 sel_byte_idx = mss_sel_idx >> 3;
259 sel_bit_idx = mss_sel_idx & 0x7;
260
261 if (sel_byte_idx >= rmp_tbl_size)
262 return -EFAULT;
263
264 if (!(mss_hdr->rmp_tbl[sel_byte_idx] & BIT(sel_bit_idx)))
265 return -ENOENT;
266
267 *key_idx = hweight8(mss_hdr->rmp_tbl[sel_byte_idx] & (BIT(sel_bit_idx) - 1));
268
269 for (i = 0; i < sel_byte_idx; i++)
270 *key_idx += hweight8(mss_hdr->rmp_tbl[i]);
271
272 return 0;
273 }
274
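/* Parse a "formatted" MSS key pool that follows a security section:
 * verify the pool signature, work out the remap-table size and the
 * per-key signature length, report the total pool size through mssc_len,
 * and, under secure boot, record where the key matching this device sits
 * so it can be substituted during download.
 */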
275 static int __parse_formatted_mssc(struct rtw89_dev *rtwdev,
276 struct rtw89_fw_bin_info *info,
277 struct rtw89_fw_hdr_section_info *section_info,
278 const void *content,
279 u32 *mssc_len)
280 {
281 const struct rtw89_fw_mss_pool_hdr *mss_hdr = content + section_info->len;
282 const union rtw89_fw_section_mssc_content *section_content = content;
283 struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
284 u32 rmp_tbl_size;
285 u32 key_sign_len;
286 u32 real_key_idx;
287 u32 sb_sel_ver;
288 int ret;
289
290 if (memcmp(mss_signature, mss_hdr->signature, sizeof(mss_signature)) != 0) {
291 rtw89_err(rtwdev, "[ERR] wrong MSS signature\n");
292 return -ENOENT;
293 }
294
295 if (mss_hdr->rmpfmt == MSS_POOL_RMP_TBL_BITMASK) {
296 rmp_tbl_size = (le16_to_cpu(mss_hdr->msskey_num_max) *
297 le16_to_cpu(mss_hdr->msscust_max) *
298 mss_hdr->mssdev_max) >> 3;
299 if (mss_hdr->defen)
300 rmp_tbl_size += FWDL_MSS_POOL_DEFKEYSETS_SIZE;
301 } else {
302 rtw89_err(rtwdev, "[ERR] MSS Key Pool Remap Table Format Unsupport:%X\n",
303 mss_hdr->rmpfmt);
304 return -EINVAL;
305 }
306
307 if (rmp_tbl_size + sizeof(*mss_hdr) != le32_to_cpu(mss_hdr->key_raw_offset)) {
308 rtw89_err(rtwdev, "[ERR] MSS Key Pool Format Error:0x%X + 0x%X != 0x%X\n",
309 rmp_tbl_size, (int)sizeof(*mss_hdr),
310 le32_to_cpu(mss_hdr->key_raw_offset));
311 return -EINVAL;
312 }
313
314 key_sign_len = le16_to_cpu(section_content->key_sign_len.v) >> 2;
315 if (!key_sign_len)
316 key_sign_len = 512;
317
318 if (info->dsp_checksum)
319 key_sign_len += FWDL_SECURITY_CHKSUM_LEN;
320
321 *mssc_len = sizeof(*mss_hdr) + rmp_tbl_size +
322 le16_to_cpu(mss_hdr->keypair_num) * key_sign_len;
323
324 if (!sec->secure_boot)
325 goto out;
326
327 sb_sel_ver = get_unaligned_le32(&section_content->sb_sel_ver.v);
328 if (sb_sel_ver && sb_sel_ver != sec->sb_sel_mgn)
329 goto ignore;
330
331 ret = __get_mssc_key_idx(rtwdev, mss_hdr, rmp_tbl_size, &real_key_idx);
332 if (ret)
333 goto ignore;
334
335 section_info->key_addr = content + section_info->len +
336 le32_to_cpu(mss_hdr->key_raw_offset) +
337 key_sign_len * real_key_idx;
338 section_info->key_len = key_sign_len;
339 section_info->key_idx = real_key_idx;
340
341 out:
342 if (info->secure_section_exist) {
343 section_info->ignore = true;
344 return 0;
345 }
346
347 info->secure_section_exist = true;
348
349 return 0;
350
351 ignore:
352 section_info->ignore = true;
353
354 return 0;
355 }
356
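/* Refuse secure firmware whose bit is set in the chip's blacklist table.
 * Only meaningful when secure boot is enabled and a secure section was
 * actually taken; a chip blacklist older than the firmware's blacklist
 * version is reported as an error as well.
 */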
357 static int __check_secure_blacklist(struct rtw89_dev *rtwdev,
358 struct rtw89_fw_bin_info *info,
359 struct rtw89_fw_hdr_section_info *section_info,
360 const void *content)
361 {
362 const struct rtw89_fw_blacklist *chip_blacklist = rtwdev->chip->fw_blacklist;
363 const union rtw89_fw_section_mssc_content *section_content = content;
364 struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
365 u8 byte_idx;
366 u8 bit_mask;
367
368 if (!sec->secure_boot)
369 return 0;
370
371 if (!info->secure_section_exist || section_info->ignore)
372 return 0;
373
374 if (!chip_blacklist) {
375 rtw89_warn(rtwdev, "chip no blacklist for secure firmware\n");
376 return -ENOENT;
377 }
378
379 byte_idx = section_content->blacklist.bit_in_chip_list >> 3;
380 bit_mask = BIT(section_content->blacklist.bit_in_chip_list & 0x7);
381
382 if (section_content->blacklist.ver > chip_blacklist->ver) {
383 rtw89_warn(rtwdev, "chip blacklist out of date (%u, %u)\n",
384 section_content->blacklist.ver, chip_blacklist->ver);
385 return -EINVAL;
386 }
387
388 if (chip_blacklist->list[byte_idx] & bit_mask) {
389 rtw89_warn(rtwdev, "firmware %u in chip blacklist\n",
390 section_content->blacklist.ver);
391 return -EPERM;
392 }
393
394 return 0;
395 }
396
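/* Work out how much security data (mssc_len) trails a security section
 * and, under secure boot, pick the key that belongs to this device.
 * Two layouts exist: the formatted MSS key pool and a plain array of
 * per-MSS signatures.
 */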
397 static int __parse_security_section(struct rtw89_dev *rtwdev,
398 struct rtw89_fw_bin_info *info,
399 struct rtw89_fw_hdr_section_info *section_info,
400 const void *content,
401 u32 *mssc_len)
402 {
403 struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
404 int ret;
405
406 if ((section_info->mssc & FORMATTED_MSSC_MASK) == FORMATTED_MSSC) {
407 ret = __parse_formatted_mssc(rtwdev, info, section_info,
408 content, mssc_len);
409 if (ret)
410 return -EINVAL;
411 } else {
412 *mssc_len = section_info->mssc * FWDL_SECURITY_SIGLEN;
413 if (info->dsp_checksum)
414 *mssc_len += section_info->mssc * FWDL_SECURITY_CHKSUM_LEN;
415
416 if (sec->secure_boot) {
417 if (sec->mss_idx >= section_info->mssc) {
418 rtw89_err(rtwdev, "unexpected MSS %d >= %d\n",
419 sec->mss_idx, section_info->mssc);
420 return -EFAULT;
421 }
422 section_info->key_addr = content + section_info->len +
423 sec->mss_idx * FWDL_SECURITY_SIGLEN;
424 section_info->key_len = FWDL_SECURITY_SIGLEN;
425 }
426
427 info->secure_section_exist = true;
428 }
429
430 ret = __check_secure_blacklist(rtwdev, info, section_info, content);
431 WARN_ONCE(ret, "Current firmware in blacklist. Please update firmware.\n");
432
433 return 0;
434 }
435
436 static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
437 struct rtw89_fw_bin_info *info)
438 {
439 const struct rtw89_fw_hdr_v1 *fw_hdr = (const struct rtw89_fw_hdr_v1 *)fw;
440 struct rtw89_fw_hdr_section_info *section_info;
441 const struct rtw89_fw_dynhdr_hdr *fwdynhdr;
442 const struct rtw89_fw_hdr_section_v1 *section;
443 const u8 *fw_end = fw + len;
444 const u8 *bin;
445 u32 base_hdr_len;
446 u32 mssc_len;
447 int ret;
448 u32 i;
449
450 info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_SEC_NUM);
451 info->dsp_checksum = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_DSP_CHKSUM);
452 base_hdr_len = struct_size(fw_hdr, sections, info->section_num);
453 info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_DYN_HDR);
454 info->idmem_share_mode = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_IDMEM_SHARE_MODE);
455
456 if (info->dynamic_hdr_en) {
457 info->hdr_len = le32_get_bits(fw_hdr->w5, FW_HDR_V1_W5_HDR_SIZE);
458 info->dynamic_hdr_len = info->hdr_len - base_hdr_len;
459 fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len);
460 if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) {
461 rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n");
462 return -EINVAL;
463 }
464 } else {
465 info->hdr_len = base_hdr_len;
466 info->dynamic_hdr_len = 0;
467 }
468
469 bin = fw + info->hdr_len;
470
471 /* jump to section header */
472 section_info = info->section_info;
473 for (i = 0; i < info->section_num; i++) {
474 section = &fw_hdr->sections[i];
475
476 section_info->type =
477 le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SECTIONTYPE);
478 section_info->len =
479 le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SEC_SIZE);
480 if (le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_CHECKSUM))
481 section_info->len += FWDL_SECTION_CHKSUM_LEN;
482 section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_REDL);
483 section_info->dladdr =
484 le32_get_bits(section->w0, FWSECTION_HDR_V1_W0_DL_ADDR);
485 section_info->addr = bin;
486
487 if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
488 section_info->mssc =
489 le32_get_bits(section->w2, FWSECTION_HDR_V1_W2_MSSC);
490
491 ret = __parse_security_section(rtwdev, info, section_info,
492 bin, &mssc_len);
493 if (ret)
494 return ret;
495 } else {
496 section_info->mssc = 0;
497 mssc_len = 0;
498 }
499
500 rtw89_debug(rtwdev, RTW89_DBG_FW,
501 "section[%d] type=%d len=0x%-6x mssc=%d mssc_len=%d addr=%tx\n",
502 i, section_info->type, section_info->len,
503 section_info->mssc, mssc_len, bin - fw);
504 rtw89_debug(rtwdev, RTW89_DBG_FW,
505 " ignore=%d key_addr=%p (0x%tx) key_len=%d key_idx=%d\n",
506 section_info->ignore, section_info->key_addr,
507 section_info->key_addr ?
508 section_info->key_addr - section_info->addr : 0,
509 section_info->key_len, section_info->key_idx);
510
511 bin += section_info->len + mssc_len;
512 section_info++;
513 }
514
515 if (fw_end != bin) {
516 rtw89_err(rtwdev, "[ERR]fw bin size\n");
517 return -EINVAL;
518 }
519
520 if (!info->secure_section_exist)
521 rtw89_warn(rtwdev, "no firmware secure section\n");
522
523 return 0;
524 }
525
526 static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev,
527 const struct rtw89_fw_suit *fw_suit,
528 struct rtw89_fw_bin_info *info)
529 {
530 const u8 *fw = fw_suit->data;
531 u32 len = fw_suit->size;
532
533 if (!fw || !len) {
534 rtw89_err(rtwdev, "fw type %d isn't recognized\n", fw_suit->type);
535 return -ENOENT;
536 }
537
538 switch (fw_suit->hdr_ver) {
539 case 0:
540 return rtw89_fw_hdr_parser_v0(rtwdev, fw, len, info);
541 case 1:
542 return rtw89_fw_hdr_parser_v1(rtwdev, fw, len, info);
543 default:
544 return -ENOENT;
545 }
546 }
547
548 static
549 const struct rtw89_mfw_hdr *rtw89_mfw_get_hdr_ptr(struct rtw89_dev *rtwdev,
550 const struct firmware *firmware)
551 {
552 const struct rtw89_mfw_hdr *mfw_hdr;
553
554 if (sizeof(*mfw_hdr) > firmware->size)
555 return NULL;
556
557 mfw_hdr = (const struct rtw89_mfw_hdr *)firmware->data;
558
559 if (mfw_hdr->sig != RTW89_MFW_SIG)
560 return NULL;
561
562 return mfw_hdr;
563 }
564
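/* Sanity-check a multi-firmware (mfw) header: it must describe at least
 * one firmware entry and its entry table must fit within the file.
 */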
565 static int rtw89_mfw_validate_hdr(struct rtw89_dev *rtwdev,
566 const struct firmware *firmware,
567 const struct rtw89_mfw_hdr *mfw_hdr)
568 {
569 const void *mfw = firmware->data;
570 u32 mfw_len = firmware->size;
571 u8 fw_nr = mfw_hdr->fw_nr;
572 const void *ptr;
573
574 if (fw_nr == 0) {
575 rtw89_err(rtwdev, "mfw header has no fw entry\n");
576 return -ENOENT;
577 }
578
579 ptr = &mfw_hdr->info[fw_nr];
580
581 if (ptr > mfw + mfw_len) {
582 rtw89_err(rtwdev, "mfw header out of address\n");
583 return -EFAULT;
584 }
585
586 return 0;
587 }
588
589 static
590 int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
591 struct rtw89_fw_suit *fw_suit, bool nowarn)
592 {
593 struct rtw89_fw_info *fw_info = &rtwdev->fw;
594 const struct firmware *firmware = fw_info->req.firmware;
595 const struct rtw89_mfw_info *mfw_info = NULL, *tmp;
596 const struct rtw89_mfw_hdr *mfw_hdr;
597 const u8 *mfw = firmware->data;
598 u32 mfw_len = firmware->size;
599 int ret;
600 int i;
601
602 mfw_hdr = rtw89_mfw_get_hdr_ptr(rtwdev, firmware);
603 if (!mfw_hdr) {
604 rtw89_debug(rtwdev, RTW89_DBG_FW, "use legacy firmware\n");
605 /* legacy firmware supports the normal type only */
606 if (type != RTW89_FW_NORMAL)
607 return -EINVAL;
608 fw_suit->data = mfw;
609 fw_suit->size = mfw_len;
610 return 0;
611 }
612
613 ret = rtw89_mfw_validate_hdr(rtwdev, firmware, mfw_hdr);
614 if (ret)
615 return ret;
616
617 for (i = 0; i < mfw_hdr->fw_nr; i++) {
618 tmp = &mfw_hdr->info[i];
619 if (tmp->type != type)
620 continue;
621
622 if (type == RTW89_FW_LOGFMT) {
623 mfw_info = tmp;
624 goto found;
625 }
626
627 /* WiFi firmware versions in the firmware file are not sorted, so walk
628 * all entries to find the closest version not newer than the chip CV.
629 */
630 if (tmp->cv <= rtwdev->hal.cv && !tmp->mp) {
631 if (!mfw_info || mfw_info->cv < tmp->cv)
632 mfw_info = tmp;
633 }
634 }
635
636 if (mfw_info)
637 goto found;
638
639 if (!nowarn)
640 rtw89_err(rtwdev, "no suitable firmware found\n");
641 return -ENOENT;
642
643 found:
644 fw_suit->data = mfw + le32_to_cpu(mfw_info->shift);
645 fw_suit->size = le32_to_cpu(mfw_info->size);
646
647 if (fw_suit->data + fw_suit->size > mfw + mfw_len) {
648 rtw89_err(rtwdev, "fw_suit %d out of address\n", type);
649 return -EFAULT;
650 }
651
652 return 0;
653 }
654
655 static u32 rtw89_mfw_get_size(struct rtw89_dev *rtwdev)
656 {
657 struct rtw89_fw_info *fw_info = &rtwdev->fw;
658 const struct firmware *firmware = fw_info->req.firmware;
659 const struct rtw89_mfw_info *mfw_info;
660 const struct rtw89_mfw_hdr *mfw_hdr;
661 u32 size;
662 int ret;
663
664 mfw_hdr = rtw89_mfw_get_hdr_ptr(rtwdev, firmware);
665 if (!mfw_hdr) {
666 rtw89_warn(rtwdev, "not mfw format\n");
667 return 0;
668 }
669
670 ret = rtw89_mfw_validate_hdr(rtwdev, firmware, mfw_hdr);
671 if (ret)
672 return ret;
673
674 mfw_info = &mfw_hdr->info[mfw_hdr->fw_nr - 1];
675 size = le32_to_cpu(mfw_info->shift) + le32_to_cpu(mfw_info->size);
676
677 return size;
678 }
679
680 static void rtw89_fw_update_ver_v0(struct rtw89_dev *rtwdev,
681 struct rtw89_fw_suit *fw_suit,
682 const struct rtw89_fw_hdr *hdr)
683 {
684 fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MAJOR_VERSION);
685 fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MINOR_VERSION);
686 fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_W1_SUBVERSION);
687 fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_W1_SUBINDEX);
688 fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_W2_COMMITID);
689 fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_W5_YEAR);
690 fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_W4_MONTH);
691 fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_W4_DATE);
692 fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_W4_HOUR);
693 fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_W4_MIN);
694 fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_W7_CMD_VERSERION);
695 }
696
697 static void rtw89_fw_update_ver_v1(struct rtw89_dev *rtwdev,
698 struct rtw89_fw_suit *fw_suit,
699 const struct rtw89_fw_hdr_v1 *hdr)
700 {
701 fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MAJOR_VERSION);
702 fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MINOR_VERSION);
703 fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBVERSION);
704 fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBINDEX);
705 fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_V1_W2_COMMITID);
706 fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_V1_W5_YEAR);
707 fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MONTH);
708 fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_V1_W4_DATE);
709 fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_V1_W4_HOUR);
710 fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MIN);
711 fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_V1_W3_CMD_VERSERION);
712 }
713
714 static int rtw89_fw_update_ver(struct rtw89_dev *rtwdev,
715 enum rtw89_fw_type type,
716 struct rtw89_fw_suit *fw_suit)
717 {
718 const struct rtw89_fw_hdr *v0 = (const struct rtw89_fw_hdr *)fw_suit->data;
719 const struct rtw89_fw_hdr_v1 *v1 = (const struct rtw89_fw_hdr_v1 *)fw_suit->data;
720
721 if (type == RTW89_FW_LOGFMT)
722 return 0;
723
724 fw_suit->type = type;
725 fw_suit->hdr_ver = le32_get_bits(v0->w3, FW_HDR_W3_HDR_VER);
726
727 switch (fw_suit->hdr_ver) {
728 case 0:
729 rtw89_fw_update_ver_v0(rtwdev, fw_suit, v0);
730 break;
731 case 1:
732 rtw89_fw_update_ver_v1(rtwdev, fw_suit, v1);
733 break;
734 default:
735 rtw89_err(rtwdev, "Unknown firmware header version %u\n",
736 fw_suit->hdr_ver);
737 return -ENOENT;
738 }
739
740 rtw89_info(rtwdev,
741 "Firmware version %u.%u.%u.%u (%08x), cmd version %u, type %u\n",
742 fw_suit->major_ver, fw_suit->minor_ver, fw_suit->sub_ver,
743 fw_suit->sub_idex, fw_suit->commitid, fw_suit->cmd_ver, type);
744
745 return 0;
746 }
747
748 static
749 int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
750 bool nowarn)
751 {
752 struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
753 int ret;
754
755 ret = rtw89_mfw_recognize(rtwdev, type, fw_suit, nowarn);
756 if (ret)
757 return ret;
758
759 return rtw89_fw_update_ver(rtwdev, type, fw_suit);
760 }
761
762 static
763 int __rtw89_fw_recognize_from_elm(struct rtw89_dev *rtwdev,
764 const struct rtw89_fw_element_hdr *elm,
765 const union rtw89_fw_element_arg arg)
766 {
767 enum rtw89_fw_type type = arg.fw_type;
768 struct rtw89_hal *hal = &rtwdev->hal;
769 struct rtw89_fw_suit *fw_suit;
770
771 /* BB MCU versions appear in decreasing order in the firmware file, so
772 * take the first version not newer than the chip CV, i.e. the closest match.
773 */
774 if (hal->cv < elm->u.bbmcu.cv)
775 return 1; /* ignore this element */
776
777 fw_suit = rtw89_fw_suit_get(rtwdev, type);
778 if (fw_suit->data)
779 return 1; /* ignore this element (a firmware is taken already) */
780
781 fw_suit->data = elm->u.bbmcu.contents;
782 fw_suit->size = le32_to_cpu(elm->size);
783
784 return rtw89_fw_update_ver(rtwdev, type, fw_suit);
785 }
786
787 #define __DEF_FW_FEAT_COND(__cond, __op) \
788 static bool __fw_feat_cond_ ## __cond(u32 suit_ver_code, u32 comp_ver_code) \
789 { \
790 return suit_ver_code __op comp_ver_code; \
791 }
792
793 __DEF_FW_FEAT_COND(ge, >=); /* greater or equal */
794 __DEF_FW_FEAT_COND(le, <=); /* less or equal */
795 __DEF_FW_FEAT_COND(lt, <); /* less than */
796
797 struct __fw_feat_cfg {
798 enum rtw89_core_chip_id chip_id;
799 enum rtw89_fw_feature feature;
800 u32 ver_code;
801 bool (*cond)(u32 suit_ver_code, u32 comp_ver_code);
802 };
803
804 #define __CFG_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat) \
805 { \
806 .chip_id = _chip, \
807 .feature = RTW89_FW_FEATURE_ ## _feat, \
808 .ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \
809 .cond = __fw_feat_cond_ ## _cond, \
810 }
811
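/* Per-chip firmware feature table: each entry turns on a driver feature
 * when the loaded firmware version satisfies the listed comparison
 * against the given version code.
 */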
812 static const struct __fw_feat_cfg fw_feat_tbl[] = {
813 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, TX_WAKE),
814 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, SCAN_OFFLOAD),
815 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 41, 0, CRASH_TRIGGER),
816 __CFG_FW_FEAT(RTL8852A, le, 0, 13, 29, 0, OLD_HT_RA_FORMAT),
817 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD),
818 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE),
819 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER),
820 __CFG_FW_FEAT(RTL8852A, lt, 0, 13, 37, 0, NO_WOW_CPU_IO_RX),
821 __CFG_FW_FEAT(RTL8852A, lt, 0, 13, 38, 0, NO_PACKET_DROP),
822 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, NO_LPS_PG),
823 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, TX_WAKE),
824 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, CRASH_TRIGGER),
825 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, SCAN_OFFLOAD),
826 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 7, BEACON_FILTER),
827 __CFG_FW_FEAT(RTL8852B, lt, 0, 29, 30, 0, NO_WOW_CPU_IO_RX),
828 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, NO_LPS_PG),
829 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, TX_WAKE),
830 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 90, 0, CRASH_TRIGGER),
831 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 91, 0, SCAN_OFFLOAD),
832 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 110, 0, BEACON_FILTER),
833 __CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS),
834 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE),
835 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD),
836 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER),
837 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 56, 10, BEACON_FILTER),
838 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 80, 0, WOW_REASON_V1),
839 __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 30, 0, CRASH_TRIGGER),
840 __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 11, 0, MACID_PAUSE_SLEEP),
841 __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 35, 0, SCAN_OFFLOAD),
842 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 21, 0, SCAN_OFFLOAD_BE_V0),
843 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 12, 0, BEACON_FILTER),
844 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 22, 0, WOW_REASON_V1),
845 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 31, 0, RFK_PRE_NOTIFY_V0),
846 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 31, 0, LPS_CH_INFO),
847 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 42, 0, RFK_RXDCK_V0),
848 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 46, 0, NOTIFY_AP_INFO),
849 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 47, 0, CH_INFO_BE_V0),
850 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 49, 0, RFK_PRE_NOTIFY_V1),
851 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 51, 0, NO_PHYCAP_P1),
852 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 64, 0, NO_POWER_DIFFERENCE),
853 };
854
855 static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw,
856 const struct rtw89_chip_info *chip,
857 u32 ver_code)
858 {
859 int i;
860
861 for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) {
862 const struct __fw_feat_cfg *ent = &fw_feat_tbl[i];
863
864 if (chip->chip_id != ent->chip_id)
865 continue;
866
867 if (ent->cond(ver_code, ent->ver_code))
868 RTW89_SET_FW_FEATURE(ent->feature, fw);
869 }
870 }
871
872 static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev)
873 {
874 const struct rtw89_chip_info *chip = rtwdev->chip;
875 const struct rtw89_fw_suit *fw_suit;
876 u32 suit_ver_code;
877
878 fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL);
879 suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit);
880
881 rtw89_fw_iterate_feature_cfg(&rtwdev->fw, chip, suit_ver_code);
882 }
883
884 const struct firmware *
885 rtw89_early_fw_feature_recognize(struct device *device,
886 const struct rtw89_chip_info *chip,
887 struct rtw89_fw_info *early_fw,
888 int *used_fw_format)
889 {
890 const struct firmware *firmware;
891 char fw_name[64];
892 int fw_format;
893 u32 ver_code;
894 int ret;
895
896 for (fw_format = chip->fw_format_max; fw_format >= 0; fw_format--) {
897 rtw89_fw_get_filename(fw_name, sizeof(fw_name),
898 chip->fw_basename, fw_format);
899
900 ret = request_firmware(&firmware, fw_name, device);
901 if (!ret) {
902 dev_info(device, "loaded firmware %s\n", fw_name);
903 *used_fw_format = fw_format;
904 break;
905 }
906 }
907
908 if (ret) {
909 dev_err(device, "failed to early request firmware: %d\n", ret);
910 return NULL;
911 }
912
913 ver_code = rtw89_compat_fw_hdr_ver_code(firmware->data);
914
915 if (!ver_code)
916 goto out;
917
918 rtw89_fw_iterate_feature_cfg(early_fw, chip, ver_code);
919
920 out:
921 return firmware;
922 }
923
924 static int rtw89_fw_validate_ver_required(struct rtw89_dev *rtwdev)
925 {
926 const struct rtw89_chip_variant *variant = rtwdev->variant;
927 const struct rtw89_fw_suit *fw_suit;
928 u32 suit_ver_code;
929
930 if (!variant)
931 return 0;
932
933 fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL);
934 suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit);
935
936 if (variant->fw_min_ver_code > suit_ver_code) {
937 rtw89_err(rtwdev, "minimum required firmware version is 0x%x\n",
938 variant->fw_min_ver_code);
939 return -ENOENT;
940 }
941
942 return 0;
943 }
944
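/* Select the firmware images from the loaded file: try the CE variant
 * first when the chip allows it, fall back to the normal image, then
 * optionally pick up the wowlan and log-format images and derive the
 * firmware feature set.
 */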
945 int rtw89_fw_recognize(struct rtw89_dev *rtwdev)
946 {
947 const struct rtw89_chip_info *chip = rtwdev->chip;
948 int ret;
949
950 if (chip->try_ce_fw) {
951 ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL_CE, true);
952 if (!ret)
953 goto normal_done;
954 }
955
956 ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL, false);
957 if (ret)
958 return ret;
959
960 normal_done:
961 ret = rtw89_fw_validate_ver_required(rtwdev);
962 if (ret)
963 return ret;
964
965 /* The driver still works if the wowlan firmware is missing. */
966 __rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN, false);
967
968 /* The driver still works if the log format file is missing. */
969 __rtw89_fw_recognize(rtwdev, RTW89_FW_LOGFMT, true);
970
971 rtw89_fw_recognize_features(rtwdev);
972
973 rtw89_coex_recognize_ver(rtwdev);
974
975 return 0;
976 }
977
978 static
979 int rtw89_build_phy_tbl_from_elm(struct rtw89_dev *rtwdev,
980 const struct rtw89_fw_element_hdr *elm,
981 const union rtw89_fw_element_arg arg)
982 {
983 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
984 struct rtw89_phy_table *tbl;
985 struct rtw89_reg2_def *regs;
986 enum rtw89_rf_path rf_path;
987 u32 n_regs, i;
988 u8 idx;
989
990 tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
991 if (!tbl)
992 return -ENOMEM;
993
994 switch (le32_to_cpu(elm->id)) {
995 case RTW89_FW_ELEMENT_ID_BB_REG:
996 elm_info->bb_tbl = tbl;
997 break;
998 case RTW89_FW_ELEMENT_ID_BB_GAIN:
999 elm_info->bb_gain = tbl;
1000 break;
1001 case RTW89_FW_ELEMENT_ID_RADIO_A:
1002 case RTW89_FW_ELEMENT_ID_RADIO_B:
1003 case RTW89_FW_ELEMENT_ID_RADIO_C:
1004 case RTW89_FW_ELEMENT_ID_RADIO_D:
1005 rf_path = arg.rf_path;
1006 idx = elm->u.reg2.idx;
1007
1008 elm_info->rf_radio[idx] = tbl;
1009 tbl->rf_path = rf_path;
1010 tbl->config = rtw89_phy_config_rf_reg_v1;
1011 break;
1012 case RTW89_FW_ELEMENT_ID_RF_NCTL:
1013 elm_info->rf_nctl = tbl;
1014 break;
1015 default:
1016 kfree(tbl);
1017 return -ENOENT;
1018 }
1019
1020 n_regs = le32_to_cpu(elm->size) / sizeof(tbl->regs[0]);
1021 regs = kcalloc(n_regs, sizeof(tbl->regs[0]), GFP_KERNEL);
1022 if (!regs)
1023 goto out;
1024
1025 for (i = 0; i < n_regs; i++) {
1026 regs[i].addr = le32_to_cpu(elm->u.reg2.regs[i].addr);
1027 regs[i].data = le32_to_cpu(elm->u.reg2.regs[i].data);
1028 }
1029
1030 tbl->n_regs = n_regs;
1031 tbl->regs = regs;
1032
1033 return 0;
1034
1035 out:
1036 kfree(tbl);
1037 return -ENOMEM;
1038 }
1039
1040 static
1041 int rtw89_fw_recognize_txpwr_from_elm(struct rtw89_dev *rtwdev,
1042 const struct rtw89_fw_element_hdr *elm,
1043 const union rtw89_fw_element_arg arg)
1044 {
1045 const struct __rtw89_fw_txpwr_element *txpwr_elm = &elm->u.txpwr;
1046 const unsigned long offset = arg.offset;
1047 struct rtw89_efuse *efuse = &rtwdev->efuse;
1048 struct rtw89_txpwr_conf *conf;
1049
1050 if (!rtwdev->rfe_data) {
1051 rtwdev->rfe_data = kzalloc(sizeof(*rtwdev->rfe_data), GFP_KERNEL);
1052 if (!rtwdev->rfe_data)
1053 return -ENOMEM;
1054 }
1055
1056 conf = (void *)rtwdev->rfe_data + offset;
1057
1058 /* if multiple entries match, the last one takes effect */
1059 if (txpwr_elm->rfe_type == efuse->rfe_type)
1060 goto setup;
1061
1062 /* if no entry matched, accept the default */
1063 if (txpwr_elm->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE &&
1064 (!rtw89_txpwr_conf_valid(conf) ||
1065 conf->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE))
1066 goto setup;
1067
1068 rtw89_debug(rtwdev, RTW89_DBG_FW, "skip txpwr element ID %u RFE %u\n",
1069 elm->id, txpwr_elm->rfe_type);
1070 return 0;
1071
1072 setup:
1073 rtw89_debug(rtwdev, RTW89_DBG_FW, "take txpwr element ID %u RFE %u\n",
1074 elm->id, txpwr_elm->rfe_type);
1075
1076 conf->rfe_type = txpwr_elm->rfe_type;
1077 conf->ent_sz = txpwr_elm->ent_sz;
1078 conf->num_ents = le32_to_cpu(txpwr_elm->num_ents);
1079 conf->data = txpwr_elm->content;
1080 return 0;
1081 }
1082
1083 static
1084 int rtw89_build_txpwr_trk_tbl_from_elm(struct rtw89_dev *rtwdev,
1085 const struct rtw89_fw_element_hdr *elm,
1086 const union rtw89_fw_element_arg arg)
1087 {
1088 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
1089 const struct rtw89_chip_info *chip = rtwdev->chip;
1090 u32 needed_bitmap = 0;
1091 u32 offset = 0;
1092 int subband;
1093 u32 bitmap;
1094 int type;
1095
1096 if (chip->support_bands & BIT(NL80211_BAND_6GHZ))
1097 needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_6GHZ;
1098 if (chip->support_bands & BIT(NL80211_BAND_5GHZ))
1099 needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_5GHZ;
1100 if (chip->support_bands & BIT(NL80211_BAND_2GHZ))
1101 needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_2GHZ;
1102
1103 bitmap = le32_to_cpu(elm->u.txpwr_trk.bitmap);
1104
1105 if ((bitmap & needed_bitmap) != needed_bitmap) {
1106 rtw89_warn(rtwdev, "needed txpwr trk bitmap %08x but %08x\n",
1107 needed_bitmap, bitmap);
1108 return -ENOENT;
1109 }
1110
1111 elm_info->txpwr_trk = kzalloc(sizeof(*elm_info->txpwr_trk), GFP_KERNEL);
1112 if (!elm_info->txpwr_trk)
1113 return -ENOMEM;
1114
1115 for (type = 0; bitmap; type++, bitmap >>= 1) {
1116 if (!(bitmap & BIT(0)))
1117 continue;
1118
1119 if (type >= __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_START &&
1120 type <= __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_MAX)
1121 subband = 4;
1122 else if (type >= __RTW89_FW_TXPWR_TRK_TYPE_5GHZ_START &&
1123 type <= __RTW89_FW_TXPWR_TRK_TYPE_5GHZ_MAX)
1124 subband = 3;
1125 else if (type >= __RTW89_FW_TXPWR_TRK_TYPE_2GHZ_START &&
1126 type <= __RTW89_FW_TXPWR_TRK_TYPE_2GHZ_MAX)
1127 subband = 1;
1128 else
1129 break;
1130
1131 elm_info->txpwr_trk->delta[type] = &elm->u.txpwr_trk.contents[offset];
1132
1133 offset += subband;
1134 if (offset * DELTA_SWINGIDX_SIZE > le32_to_cpu(elm->size))
1135 goto err;
1136 }
1137
1138 return 0;
1139
1140 err:
1141 rtw89_warn(rtwdev, "unexpected txpwr trk offset %d over size %d\n",
1142 offset, le32_to_cpu(elm->size));
1143 kfree(elm_info->txpwr_trk);
1144 elm_info->txpwr_trk = NULL;
1145
1146 return -EFAULT;
1147 }
1148
1149 static
1150 int rtw89_build_rfk_log_fmt_from_elm(struct rtw89_dev *rtwdev,
1151 const struct rtw89_fw_element_hdr *elm,
1152 const union rtw89_fw_element_arg arg)
1153 {
1154 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
1155 u8 rfk_id;
1156
1157 if (elm_info->rfk_log_fmt)
1158 goto allocated;
1159
1160 elm_info->rfk_log_fmt = kzalloc(sizeof(*elm_info->rfk_log_fmt), GFP_KERNEL);
1161 if (!elm_info->rfk_log_fmt)
1162 return 1; /* this is an optional element, so just ignore this */
1163
1164 allocated:
1165 rfk_id = elm->u.rfk_log_fmt.rfk_id;
1166 if (rfk_id >= RTW89_PHY_C2H_RFK_LOG_FUNC_NUM)
1167 return 1;
1168
1169 elm_info->rfk_log_fmt->elm[rfk_id] = elm;
1170
1171 return 0;
1172 }
1173
1174 static bool rtw89_regd_entcpy(struct rtw89_regd *regd, const void *cursor,
1175 u8 cursor_size)
1176 {
1177 /* fill default values if needed for backward compatibility */
1178 struct rtw89_fw_regd_entry entry = {
1179 .rule_2ghz = RTW89_NA,
1180 .rule_5ghz = RTW89_NA,
1181 .rule_6ghz = RTW89_NA,
1182 .fmap = cpu_to_le32(0x0),
1183 };
1184 u8 valid_size = min_t(u8, sizeof(entry), cursor_size);
1185 unsigned int i;
1186 u32 fmap;
1187
1188 memcpy(&entry, cursor, valid_size);
1189 memset(regd, 0, sizeof(*regd));
1190
1191 regd->alpha2[0] = entry.alpha2_0;
1192 regd->alpha2[1] = entry.alpha2_1;
1193 regd->alpha2[2] = '\0';
1194
1195 /* also need to consider forward compatibility */
1196 regd->txpwr_regd[RTW89_BAND_2G] = entry.rule_2ghz < RTW89_REGD_NUM ?
1197 entry.rule_2ghz : RTW89_NA;
1198 regd->txpwr_regd[RTW89_BAND_5G] = entry.rule_5ghz < RTW89_REGD_NUM ?
1199 entry.rule_5ghz : RTW89_NA;
1200 regd->txpwr_regd[RTW89_BAND_6G] = entry.rule_6ghz < RTW89_REGD_NUM ?
1201 entry.rule_6ghz : RTW89_NA;
1202
1203 BUILD_BUG_ON(sizeof(fmap) != sizeof(entry.fmap));
1204 BUILD_BUG_ON(sizeof(fmap) * 8 < NUM_OF_RTW89_REGD_FUNC);
1205
1206 fmap = le32_to_cpu(entry.fmap);
1207 for (i = 0; i < NUM_OF_RTW89_REGD_FUNC; i++) {
1208 if (fmap & BIT(i))
1209 set_bit(i, regd->func_bitmap);
1210 }
1211
1212 return true;
1213 }
1214
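/* Iterate over the entries of a REGD firmware element. Each entry is
 * ent_sz bytes; rtw89_regd_entcpy() copies it into a struct rtw89_regd,
 * filling defaults for fields that older entry layouts do not carry.
 */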
1215 #define rtw89_for_each_in_regd_element(regd, element) \
1216 for (const void *cursor = (element)->content, \
1217 *end = (element)->content + \
1218 le32_to_cpu((element)->num_ents) * (element)->ent_sz; \
1219 cursor < end; cursor += (element)->ent_sz) \
1220 if (rtw89_regd_entcpy(regd, cursor, (element)->ent_sz))
1221
1222 static
1223 int rtw89_recognize_regd_from_elm(struct rtw89_dev *rtwdev,
1224 const struct rtw89_fw_element_hdr *elm,
1225 const union rtw89_fw_element_arg arg)
1226 {
1227 const struct __rtw89_fw_regd_element *regd_elm = &elm->u.regd;
1228 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
1229 u32 num_ents = le32_to_cpu(regd_elm->num_ents);
1230 struct rtw89_regd_data *p;
1231 struct rtw89_regd regd;
1232 u32 i = 0;
1233
1234 if (num_ents > RTW89_REGD_MAX_COUNTRY_NUM) {
1235 rtw89_warn(rtwdev,
1236 "regd element ents (%d) are over max num (%d)\n",
1237 num_ents, RTW89_REGD_MAX_COUNTRY_NUM);
1238 rtw89_warn(rtwdev,
1239 "regd element ignore and take another/common\n");
1240 return 1;
1241 }
1242
1243 if (elm_info->regd) {
1244 rtw89_debug(rtwdev, RTW89_DBG_REGD,
1245 "regd element take the latter\n");
1246 devm_kfree(rtwdev->dev, elm_info->regd);
1247 elm_info->regd = NULL;
1248 }
1249
1250 p = devm_kzalloc(rtwdev->dev, struct_size(p, map, num_ents), GFP_KERNEL);
1251 if (!p)
1252 return -ENOMEM;
1253
1254 p->nr = num_ents;
1255 rtw89_for_each_in_regd_element(&regd, regd_elm)
1256 p->map[i++] = regd;
1257
1258 if (i != num_ents) {
1259 rtw89_err(rtwdev, "regd element has %d invalid ents\n",
1260 num_ents - i);
1261 devm_kfree(rtwdev->dev, p);
1262 return -EINVAL;
1263 }
1264
1265 elm_info->regd = p;
1266 return 0;
1267 }
1268
1269 static const struct rtw89_fw_element_handler __fw_element_handlers[] = {
1270 [RTW89_FW_ELEMENT_ID_BBMCU0] = {__rtw89_fw_recognize_from_elm,
1271 { .fw_type = RTW89_FW_BBMCU0 }, NULL},
1272 [RTW89_FW_ELEMENT_ID_BBMCU1] = {__rtw89_fw_recognize_from_elm,
1273 { .fw_type = RTW89_FW_BBMCU1 }, NULL},
1274 [RTW89_FW_ELEMENT_ID_BB_REG] = {rtw89_build_phy_tbl_from_elm, {}, "BB"},
1275 [RTW89_FW_ELEMENT_ID_BB_GAIN] = {rtw89_build_phy_tbl_from_elm, {}, NULL},
1276 [RTW89_FW_ELEMENT_ID_RADIO_A] = {rtw89_build_phy_tbl_from_elm,
1277 { .rf_path = RF_PATH_A }, "radio A"},
1278 [RTW89_FW_ELEMENT_ID_RADIO_B] = {rtw89_build_phy_tbl_from_elm,
1279 { .rf_path = RF_PATH_B }, NULL},
1280 [RTW89_FW_ELEMENT_ID_RADIO_C] = {rtw89_build_phy_tbl_from_elm,
1281 { .rf_path = RF_PATH_C }, NULL},
1282 [RTW89_FW_ELEMENT_ID_RADIO_D] = {rtw89_build_phy_tbl_from_elm,
1283 { .rf_path = RF_PATH_D }, NULL},
1284 [RTW89_FW_ELEMENT_ID_RF_NCTL] = {rtw89_build_phy_tbl_from_elm, {}, "NCTL"},
1285 [RTW89_FW_ELEMENT_ID_TXPWR_BYRATE] = {
1286 rtw89_fw_recognize_txpwr_from_elm,
1287 { .offset = offsetof(struct rtw89_rfe_data, byrate.conf) }, "TXPWR",
1288 },
1289 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_2GHZ] = {
1290 rtw89_fw_recognize_txpwr_from_elm,
1291 { .offset = offsetof(struct rtw89_rfe_data, lmt_2ghz.conf) }, NULL,
1292 },
1293 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_5GHZ] = {
1294 rtw89_fw_recognize_txpwr_from_elm,
1295 { .offset = offsetof(struct rtw89_rfe_data, lmt_5ghz.conf) }, NULL,
1296 },
1297 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_6GHZ] = {
1298 rtw89_fw_recognize_txpwr_from_elm,
1299 { .offset = offsetof(struct rtw89_rfe_data, lmt_6ghz.conf) }, NULL,
1300 },
1301 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_2GHZ] = {
1302 rtw89_fw_recognize_txpwr_from_elm,
1303 { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_2ghz.conf) }, NULL,
1304 },
1305 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_5GHZ] = {
1306 rtw89_fw_recognize_txpwr_from_elm,
1307 { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_5ghz.conf) }, NULL,
1308 },
1309 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_6GHZ] = {
1310 rtw89_fw_recognize_txpwr_from_elm,
1311 { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_6ghz.conf) }, NULL,
1312 },
1313 [RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT] = {
1314 rtw89_fw_recognize_txpwr_from_elm,
1315 { .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt.conf) }, NULL,
1316 },
1317 [RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT_RU] = {
1318 rtw89_fw_recognize_txpwr_from_elm,
1319 { .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt_ru.conf) }, NULL,
1320 },
1321 [RTW89_FW_ELEMENT_ID_TXPWR_TRK] = {
1322 rtw89_build_txpwr_trk_tbl_from_elm, {}, "PWR_TRK",
1323 },
1324 [RTW89_FW_ELEMENT_ID_RFKLOG_FMT] = {
1325 rtw89_build_rfk_log_fmt_from_elm, {}, NULL,
1326 },
1327 [RTW89_FW_ELEMENT_ID_REGD] = {
1328 rtw89_recognize_regd_from_elm, {}, "REGD",
1329 },
1330 };
1331
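/* Walk the firmware elements appended after the mfw blob. Elements are
 * RTW89_FW_ELEMENT_ALIGN aligned; every recognized element clears its
 * bit in the chip's needed_fw_elms mask, and any bit left set afterwards
 * means a required element was missing.
 */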
1332 int rtw89_fw_recognize_elements(struct rtw89_dev *rtwdev)
1333 {
1334 struct rtw89_fw_info *fw_info = &rtwdev->fw;
1335 const struct firmware *firmware = fw_info->req.firmware;
1336 const struct rtw89_chip_info *chip = rtwdev->chip;
1337 u32 unrecognized_elements = chip->needed_fw_elms;
1338 const struct rtw89_fw_element_handler *handler;
1339 const struct rtw89_fw_element_hdr *hdr;
1340 u32 elm_size;
1341 u32 elem_id;
1342 u32 offset;
1343 int ret;
1344
1345 BUILD_BUG_ON(sizeof(chip->needed_fw_elms) * 8 < RTW89_FW_ELEMENT_ID_NUM);
1346
1347 offset = rtw89_mfw_get_size(rtwdev);
1348 offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN);
1349 if (offset == 0)
1350 return -EINVAL;
1351
1352 while (offset + sizeof(*hdr) < firmware->size) {
1353 hdr = (const struct rtw89_fw_element_hdr *)(firmware->data + offset);
1354
1355 elm_size = le32_to_cpu(hdr->size);
1356 if (offset + elm_size >= firmware->size) {
1357 rtw89_warn(rtwdev, "firmware element size exceeds\n");
1358 break;
1359 }
1360
1361 elem_id = le32_to_cpu(hdr->id);
1362 if (elem_id >= ARRAY_SIZE(__fw_element_handlers))
1363 goto next;
1364
1365 handler = &__fw_element_handlers[elem_id];
1366 if (!handler->fn)
1367 goto next;
1368
1369 ret = handler->fn(rtwdev, hdr, handler->arg);
1370 if (ret == 1) /* ignore this element */
1371 goto next;
1372 if (ret)
1373 return ret;
1374
1375 if (handler->name)
1376 rtw89_info(rtwdev, "Firmware element %s version: %4ph\n",
1377 handler->name, hdr->ver);
1378
1379 unrecognized_elements &= ~BIT(elem_id);
1380 next:
1381 offset += sizeof(*hdr) + elm_size;
1382 offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN);
1383 }
1384
1385 if (unrecognized_elements) {
1386 rtw89_err(rtwdev, "Firmware elements 0x%08x are unrecognized\n",
1387 unrecognized_elements);
1388 return -ENOENT;
1389 }
1390
1391 return 0;
1392 }
1393
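/* Prepend the 8-byte H2C command header. A receive-ack is requested on
 * every fourth H2C sequence number regardless of the caller's rack flag.
 */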
1394 void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb,
1395 u8 type, u8 cat, u8 class, u8 func,
1396 bool rack, bool dack, u32 len)
1397 {
1398 struct fwcmd_hdr *hdr;
1399
1400 hdr = (struct fwcmd_hdr *)skb_push(skb, 8);
1401
1402 if (!(rtwdev->fw.h2c_seq % 4))
1403 rack = true;
1404 hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
1405 FIELD_PREP(H2C_HDR_CAT, cat) |
1406 FIELD_PREP(H2C_HDR_CLASS, class) |
1407 FIELD_PREP(H2C_HDR_FUNC, func) |
1408 FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));
1409
1410 hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
1411 len + H2C_HEADER_LEN) |
1412 (rack ? H2C_HDR_REC_ACK : 0) |
1413 (dack ? H2C_HDR_DONE_ACK : 0));
1414
1415 rtwdev->fw.h2c_seq++;
1416 }
1417
1418 static void rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev *rtwdev,
1419 struct sk_buff *skb,
1420 u8 type, u8 cat, u8 class, u8 func,
1421 u32 len)
1422 {
1423 struct fwcmd_hdr *hdr;
1424
1425 hdr = (struct fwcmd_hdr *)skb_push(skb, 8);
1426
1427 hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
1428 FIELD_PREP(H2C_HDR_CAT, cat) |
1429 FIELD_PREP(H2C_HDR_CLASS, class) |
1430 FIELD_PREP(H2C_HDR_FUNC, func) |
1431 FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));
1432
1433 hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
1434 len + H2C_HEADER_LEN));
1435 }
1436
1437 static u32 __rtw89_fw_download_tweak_hdr_v0(struct rtw89_dev *rtwdev,
1438 struct rtw89_fw_bin_info *info,
1439 struct rtw89_fw_hdr *fw_hdr)
1440 {
1441 struct rtw89_fw_hdr_section_info *section_info;
1442 struct rtw89_fw_hdr_section *section;
1443 int i;
1444
1445 le32p_replace_bits(&fw_hdr->w7, FWDL_SECTION_PER_PKT_LEN,
1446 FW_HDR_W7_PART_SIZE);
1447
1448 for (i = 0; i < info->section_num; i++) {
1449 section_info = &info->section_info[i];
1450
1451 if (!section_info->len_override)
1452 continue;
1453
1454 section = &fw_hdr->sections[i];
1455 le32p_replace_bits(§ion->w1, section_info->len_override,
1456 FWSECTION_HDR_W1_SEC_SIZE);
1457 }
1458
1459 return 0;
1460 }
1461
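/* Tweak the v1 header before download: program the per-packet part size
 * and compact the section table by dropping sections marked "ignore"
 * (security sections whose keys do not apply to this device). Returns
 * the number of header bytes removed so the caller can trim the skb.
 */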
1462 static u32 __rtw89_fw_download_tweak_hdr_v1(struct rtw89_dev *rtwdev,
1463 struct rtw89_fw_bin_info *info,
1464 struct rtw89_fw_hdr_v1 *fw_hdr)
1465 {
1466 struct rtw89_fw_hdr_section_info *section_info;
1467 struct rtw89_fw_hdr_section_v1 *section;
1468 u8 dst_sec_idx = 0;
1469 u8 sec_idx;
1470
1471 le32p_replace_bits(&fw_hdr->w7, FWDL_SECTION_PER_PKT_LEN,
1472 FW_HDR_V1_W7_PART_SIZE);
1473
1474 for (sec_idx = 0; sec_idx < info->section_num; sec_idx++) {
1475 section_info = &info->section_info[sec_idx];
1476 section = &fw_hdr->sections[sec_idx];
1477
1478 if (section_info->ignore)
1479 continue;
1480
1481 if (dst_sec_idx != sec_idx)
1482 fw_hdr->sections[dst_sec_idx] = *section;
1483
1484 dst_sec_idx++;
1485 }
1486
1487 le32p_replace_bits(&fw_hdr->w6, dst_sec_idx, FW_HDR_V1_W6_SEC_NUM);
1488
1489 return (info->section_num - dst_sec_idx) * sizeof(*section);
1490 }
1491
1492 static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev,
1493 const struct rtw89_fw_suit *fw_suit,
1494 struct rtw89_fw_bin_info *info)
1495 {
1496 u32 len = info->hdr_len - info->dynamic_hdr_len;
1497 struct rtw89_fw_hdr_v1 *fw_hdr_v1;
1498 const u8 *fw = fw_suit->data;
1499 struct rtw89_fw_hdr *fw_hdr;
1500 struct sk_buff *skb;
1501 u32 truncated;
1502 u32 ret = 0;
1503
1504 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
1505 if (!skb) {
1506 rtw89_err(rtwdev, "failed to alloc skb for fw hdr dl\n");
1507 return -ENOMEM;
1508 }
1509
1510 skb_put_data(skb, fw, len);
1511
1512 switch (fw_suit->hdr_ver) {
1513 case 0:
1514 fw_hdr = (struct rtw89_fw_hdr *)skb->data;
1515 truncated = __rtw89_fw_download_tweak_hdr_v0(rtwdev, info, fw_hdr);
1516 break;
1517 case 1:
1518 fw_hdr_v1 = (struct rtw89_fw_hdr_v1 *)skb->data;
1519 truncated = __rtw89_fw_download_tweak_hdr_v1(rtwdev, info, fw_hdr_v1);
1520 break;
1521 default:
1522 ret = -EOPNOTSUPP;
1523 goto fail;
1524 }
1525
1526 if (truncated) {
1527 len -= truncated;
1528 skb_trim(skb, len);
1529 }
1530
1531 rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C,
1532 H2C_CAT_MAC, H2C_CL_MAC_FWDL,
1533 H2C_FUNC_MAC_FWHDR_DL, len);
1534
1535 ret = rtw89_h2c_tx(rtwdev, skb, false);
1536 if (ret) {
1537 rtw89_err(rtwdev, "failed to send h2c\n");
1538 goto fail;
1539 }
1540
1541 return 0;
1542 fail:
1543 dev_kfree_skb_any(skb);
1544
1545 return ret;
1546 }
1547
1548 static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev,
1549 const struct rtw89_fw_suit *fw_suit,
1550 struct rtw89_fw_bin_info *info)
1551 {
1552 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
1553 int ret;
1554
1555 ret = __rtw89_fw_download_hdr(rtwdev, fw_suit, info);
1556 if (ret) {
1557 rtw89_err(rtwdev, "[ERR]FW header download\n");
1558 return ret;
1559 }
1560
1561 ret = mac->fwdl_check_path_ready(rtwdev, false);
1562 if (ret) {
1563 rtw89_err(rtwdev, "[ERR]FWDL path ready\n");
1564 return ret;
1565 }
1566
1567 rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0);
1568 rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0);
1569
1570 return 0;
1571 }
1572
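/* Download one firmware section, split into FWDL_SECTION_PER_PKT_LEN
 * sized H2C packets. Sections marked "ignore" are skipped. For secure
 * firmware that fits in a single packet, the selected MSS key is copied
 * over the packet tail so only that key is sent to the chip.
 */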
1573 static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev,
1574 struct rtw89_fw_hdr_section_info *info)
1575 {
1576 struct sk_buff *skb;
1577 const u8 *section = info->addr;
1578 u32 residue_len = info->len;
1579 bool copy_key = false;
1580 u32 pkt_len;
1581 int ret;
1582
1583 if (info->ignore)
1584 return 0;
1585
1586 if (info->len_override) {
1587 if (info->len_override > info->len)
1588 rtw89_warn(rtwdev, "override length %u larger than original %u\n",
1589 info->len_override, info->len);
1590 else
1591 residue_len = info->len_override;
1592 }
1593
1594 if (info->key_addr && info->key_len) {
1595 if (residue_len > FWDL_SECTION_PER_PKT_LEN || info->len < info->key_len)
1596 rtw89_warn(rtwdev,
1597 "ignore to copy key data because of len %d, %d, %d, %d\n",
1598 info->len, FWDL_SECTION_PER_PKT_LEN,
1599 info->key_len, residue_len);
1600 else
1601 copy_key = true;
1602 }
1603
1604 while (residue_len) {
1605 if (residue_len >= FWDL_SECTION_PER_PKT_LEN)
1606 pkt_len = FWDL_SECTION_PER_PKT_LEN;
1607 else
1608 pkt_len = residue_len;
1609
1610 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, pkt_len);
1611 if (!skb) {
1612 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
1613 return -ENOMEM;
1614 }
1615 skb_put_data(skb, section, pkt_len);
1616
1617 if (copy_key)
1618 memcpy(skb->data + pkt_len - info->key_len,
1619 info->key_addr, info->key_len);
1620
1621 ret = rtw89_h2c_tx(rtwdev, skb, true);
1622 if (ret) {
1623 rtw89_err(rtwdev, "failed to send h2c\n");
1624 goto fail;
1625 }
1626
1627 section += pkt_len;
1628 residue_len -= pkt_len;
1629 }
1630
1631 return 0;
1632 fail:
1633 dev_kfree_skb_any(skb);
1634
1635 return ret;
1636 }
1637
1638 static enum rtw89_fwdl_check_type
1639 rtw89_fw_get_fwdl_chk_type_from_suit(struct rtw89_dev *rtwdev,
1640 const struct rtw89_fw_suit *fw_suit)
1641 {
1642 switch (fw_suit->type) {
1643 case RTW89_FW_BBMCU0:
1644 return RTW89_FWDL_CHECK_BB0_FWDL_DONE;
1645 case RTW89_FW_BBMCU1:
1646 return RTW89_FWDL_CHECK_BB1_FWDL_DONE;
1647 default:
1648 return RTW89_FWDL_CHECK_WCPU_FWDL_DONE;
1649 }
1650 }
1651
1652 static int rtw89_fw_download_main(struct rtw89_dev *rtwdev,
1653 const struct rtw89_fw_suit *fw_suit,
1654 struct rtw89_fw_bin_info *info)
1655 {
1656 struct rtw89_fw_hdr_section_info *section_info = info->section_info;
1657 const struct rtw89_chip_info *chip = rtwdev->chip;
1658 enum rtw89_fwdl_check_type chk_type;
1659 u8 section_num = info->section_num;
1660 int ret;
1661
1662 while (section_num--) {
1663 ret = __rtw89_fw_download_main(rtwdev, section_info);
1664 if (ret)
1665 return ret;
1666 section_info++;
1667 }
1668
1669 if (chip->chip_gen == RTW89_CHIP_AX)
1670 return 0;
1671
1672 chk_type = rtw89_fw_get_fwdl_chk_type_from_suit(rtwdev, fw_suit);
1673 ret = rtw89_fw_check_rdy(rtwdev, chk_type);
1674 if (ret) {
1675 rtw89_warn(rtwdev, "failed to download firmware type %u\n",
1676 fw_suit->type);
1677 return ret;
1678 }
1679
1680 return 0;
1681 }
1682
1683 static void rtw89_fw_prog_cnt_dump(struct rtw89_dev *rtwdev)
1684 {
1685 enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
1686 u32 addr = R_AX_DBG_PORT_SEL;
1687 u32 val32;
1688 u16 index;
1689
1690 if (chip_gen == RTW89_CHIP_BE) {
1691 addr = R_BE_WLCPU_PORT_PC;
1692 goto dump;
1693 }
1694
1695 rtw89_write32(rtwdev, R_AX_DBG_CTRL,
1696 FIELD_PREP(B_AX_DBG_SEL0, FW_PROG_CNTR_DBG_SEL) |
1697 FIELD_PREP(B_AX_DBG_SEL1, FW_PROG_CNTR_DBG_SEL));
1698 rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_SEL_0XC0_MASK, MAC_DBG_SEL);
1699
1700 dump:
1701 for (index = 0; index < 15; index++) {
1702 val32 = rtw89_read32(rtwdev, addr);
1703 rtw89_err(rtwdev, "[ERR]fw PC = 0x%x\n", val32);
1704 fsleep(10);
1705 }
1706 }
1707
1708 static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev)
1709 {
1710 u32 val32;
1711
1712 val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL);
1713 rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32);
1714
1715 val32 = rtw89_read32(rtwdev, R_AX_BOOT_DBG);
1716 rtw89_err(rtwdev, "[ERR]fwdl 0x83F0 = 0x%x\n", val32);
1717
1718 rtw89_fw_prog_cnt_dump(rtwdev);
1719 }
1720
1721 static int rtw89_fw_download_suit(struct rtw89_dev *rtwdev,
1722 struct rtw89_fw_suit *fw_suit)
1723 {
1724 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
1725 struct rtw89_fw_bin_info info = {};
1726 int ret;
1727
1728 ret = rtw89_fw_hdr_parser(rtwdev, fw_suit, &info);
1729 if (ret) {
1730 rtw89_err(rtwdev, "parse fw header fail\n");
1731 return ret;
1732 }
1733
1734 rtw89_fwdl_secure_idmem_share_mode(rtwdev, info.idmem_share_mode);
1735
1736 if (rtwdev->chip->chip_id == RTL8922A &&
1737 (fw_suit->type == RTW89_FW_NORMAL || fw_suit->type == RTW89_FW_WOWLAN))
1738 rtw89_write32(rtwdev, R_BE_SECURE_BOOT_MALLOC_INFO, 0x20248000);
1739
1740 ret = mac->fwdl_check_path_ready(rtwdev, true);
1741 if (ret) {
1742 rtw89_err(rtwdev, "[ERR]H2C path ready\n");
1743 return ret;
1744 }
1745
1746 ret = rtw89_fw_download_hdr(rtwdev, fw_suit, &info);
1747 if (ret)
1748 return ret;
1749
1750 ret = rtw89_fw_download_main(rtwdev, fw_suit, &info);
1751 if (ret)
1752 return ret;
1753
1754 return 0;
1755 }
1756
1757 static
1758 int __rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
1759 bool include_bb)
1760 {
1761 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
1762 struct rtw89_fw_info *fw_info = &rtwdev->fw;
1763 struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
1764 u8 bbmcu_nr = rtwdev->chip->bbmcu_nr;
1765 int ret;
1766 int i;
1767
1768 mac->disable_cpu(rtwdev);
1769 ret = mac->fwdl_enable_wcpu(rtwdev, 0, true, include_bb);
1770 if (ret)
1771 return ret;
1772
1773 ret = rtw89_fw_download_suit(rtwdev, fw_suit);
1774 if (ret)
1775 goto fwdl_err;
1776
1777 for (i = 0; i < bbmcu_nr && include_bb; i++) {
1778 fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_BBMCU0 + i);
1779
1780 ret = rtw89_fw_download_suit(rtwdev, fw_suit);
1781 if (ret)
1782 goto fwdl_err;
1783 }
1784
1785 fw_info->h2c_seq = 0;
1786 fw_info->rec_seq = 0;
1787 fw_info->h2c_counter = 0;
1788 fw_info->c2h_counter = 0;
1789 rtwdev->mac.rpwm_seq_num = RPWM_SEQ_NUM_MAX;
1790 rtwdev->mac.cpwm_seq_num = CPWM_SEQ_NUM_MAX;
1791
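/* Give the freshly downloaded firmware a short moment to boot before
 * polling for the FreeRTOS-done flag.
 */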
1792 mdelay(5);
1793
1794 ret = rtw89_fw_check_rdy(rtwdev, RTW89_FWDL_CHECK_FREERTOS_DONE);
1795 if (ret) {
1796 rtw89_warn(rtwdev, "download firmware fail\n");
1797 goto fwdl_err;
1798 }
1799
1800 return ret;
1801
1802 fwdl_err:
1803 rtw89_fw_dl_fail_dump(rtwdev);
1804 return ret;
1805 }
1806
1807 int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
1808 bool include_bb)
1809 {
1810 int retry;
1811 int ret;
1812
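/* Retry the whole download sequence a few times; each attempt resets
 * and re-enables the WCPU in __rtw89_fw_download().
 */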
1813 for (retry = 0; retry < 5; retry++) {
1814 ret = __rtw89_fw_download(rtwdev, type, include_bb);
1815 if (!ret)
1816 return 0;
1817 }
1818
1819 return ret;
1820 }
1821
1822 int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev)
1823 {
1824 struct rtw89_fw_info *fw = &rtwdev->fw;
1825
1826 wait_for_completion(&fw->req.completion);
1827 if (!fw->req.firmware)
1828 return -EINVAL;
1829
1830 return 0;
1831 }
1832
1833 static int rtw89_load_firmware_req(struct rtw89_dev *rtwdev,
1834 struct rtw89_fw_req_info *req,
1835 const char *fw_name, bool nowarn)
1836 {
1837 int ret;
1838
1839 if (req->firmware) {
1840 rtw89_debug(rtwdev, RTW89_DBG_FW,
1841 "full firmware has been early requested\n");
1842 complete_all(&req->completion);
1843 return 0;
1844 }
1845
1846 if (nowarn)
1847 ret = firmware_request_nowarn(&req->firmware, fw_name, rtwdev->dev);
1848 else
1849 ret = request_firmware(&req->firmware, fw_name, rtwdev->dev);
1850
1851 complete_all(&req->completion);
1852
1853 return ret;
1854 }
1855
1856 void rtw89_load_firmware_work(struct work_struct *work)
1857 {
1858 struct rtw89_dev *rtwdev =
1859 container_of(work, struct rtw89_dev, load_firmware_work);
1860 const struct rtw89_chip_info *chip = rtwdev->chip;
1861 char fw_name[64];
1862
1863 rtw89_fw_get_filename(fw_name, sizeof(fw_name),
1864 chip->fw_basename, rtwdev->fw.fw_format);
1865
1866 rtw89_load_firmware_req(rtwdev, &rtwdev->fw.req, fw_name, false);
1867 }
1868
1869 static void rtw89_free_phy_tbl_from_elm(struct rtw89_phy_table *tbl)
1870 {
1871 if (!tbl)
1872 return;
1873
1874 kfree(tbl->regs);
1875 kfree(tbl);
1876 }
1877
1878 static void rtw89_unload_firmware_elements(struct rtw89_dev *rtwdev)
1879 {
1880 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
1881 int i;
1882
1883 rtw89_free_phy_tbl_from_elm(elm_info->bb_tbl);
1884 rtw89_free_phy_tbl_from_elm(elm_info->bb_gain);
1885 for (i = 0; i < ARRAY_SIZE(elm_info->rf_radio); i++)
1886 rtw89_free_phy_tbl_from_elm(elm_info->rf_radio[i]);
1887 rtw89_free_phy_tbl_from_elm(elm_info->rf_nctl);
1888
1889 kfree(elm_info->txpwr_trk);
1890 kfree(elm_info->rfk_log_fmt);
1891 }
1892
1893 void rtw89_unload_firmware(struct rtw89_dev *rtwdev)
1894 {
1895 struct rtw89_fw_info *fw = &rtwdev->fw;
1896
1897 cancel_work_sync(&rtwdev->load_firmware_work);
1898
1899 if (fw->req.firmware) {
1900 release_firmware(fw->req.firmware);
1901
1902 /* assign NULL back in case rtw89_free_ieee80211_hw()
1903 * tries to release the same one again.
1904 */
1905 fw->req.firmware = NULL;
1906 }
1907
1908 kfree(fw->log.fmts);
1909 rtw89_unload_firmware_elements(rtwdev);
1910 }
1911
1912 static u32 rtw89_fw_log_get_fmt_idx(struct rtw89_dev *rtwdev, u32 fmt_id)
1913 {
1914 struct rtw89_fw_log *fw_log = &rtwdev->fw.log;
1915 u32 i;
1916
1917 if (fmt_id > fw_log->last_fmt_id)
1918 return 0;
1919
1920 for (i = 0; i < fw_log->fmt_count; i++) {
1921 if (le32_to_cpu(fw_log->fmt_ids[i]) == fmt_id)
1922 return i;
1923 }
1924 return 0;
1925 }
1926
1927 static int rtw89_fw_log_create_fmts_dict(struct rtw89_dev *rtwdev)
1928 {
1929 struct rtw89_fw_log *log = &rtwdev->fw.log;
1930 const struct rtw89_fw_logsuit_hdr *suit_hdr;
1931 struct rtw89_fw_suit *suit = &log->suit;
1932 const void *fmts_ptr, *fmts_end_ptr;
1933 u32 fmt_count;
1934 int i;
1935
1936 suit_hdr = (const struct rtw89_fw_logsuit_hdr *)suit->data;
1937 fmt_count = le32_to_cpu(suit_hdr->count);
1938 log->fmt_ids = suit_hdr->ids;
1939 fmts_ptr = &suit_hdr->ids[fmt_count];
1940 fmts_end_ptr = suit->data + suit->size;
1941 log->fmts = kcalloc(fmt_count, sizeof(char *), GFP_KERNEL);
1942 if (!log->fmts)
1943 return -ENOMEM;
1944
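/* The format strings follow the ID table and are NUL-separated;
 * memchr_inv() skips the padding NUL bytes to locate the start of
 * each string.
 */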
1945 for (i = 0; i < fmt_count; i++) {
1946 fmts_ptr = memchr_inv(fmts_ptr, 0, fmts_end_ptr - fmts_ptr);
1947 if (!fmts_ptr)
1948 break;
1949
1950 (*log->fmts)[i] = fmts_ptr;
1951 log->last_fmt_id = le32_to_cpu(log->fmt_ids[i]);
1952 log->fmt_count++;
1953 fmts_ptr += strlen(fmts_ptr);
1954 }
1955
1956 return 0;
1957 }
1958
1959 int rtw89_fw_log_prepare(struct rtw89_dev *rtwdev)
1960 {
1961 struct rtw89_fw_log *log = &rtwdev->fw.log;
1962 struct rtw89_fw_suit *suit = &log->suit;
1963
1964 if (!suit || !suit->data) {
1965 rtw89_debug(rtwdev, RTW89_DBG_FW, "no log format file\n");
1966 return -EINVAL;
1967 }
1968 if (log->fmts)
1969 return 0;
1970
1971 return rtw89_fw_log_create_fmts_dict(rtwdev);
1972 }
1973
1974 static void rtw89_fw_log_dump_data(struct rtw89_dev *rtwdev,
1975 const struct rtw89_fw_c2h_log_fmt *log_fmt,
1976 u32 fmt_idx, u8 para_int, bool raw_data)
1977 {
1978 const char *(*fmts)[] = rtwdev->fw.log.fmts;
1979 char str_buf[RTW89_C2H_FW_LOG_STR_BUF_SIZE];
1980 u32 args[RTW89_C2H_FW_LOG_MAX_PARA_NUM] = {0};
1981 int i;
1982
1983 if (log_fmt->argc > RTW89_C2H_FW_LOG_MAX_PARA_NUM) {
1984 rtw89_warn(rtwdev, "C2H log: Arg count is unexpected %d\n",
1985 log_fmt->argc);
1986 return;
1987 }
1988
1989 if (para_int)
1990 for (i = 0 ; i < log_fmt->argc; i++)
1991 args[i] = le32_to_cpu(log_fmt->u.argv[i]);
1992
1993 if (raw_data) {
1994 if (para_int)
1995 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE,
1996 "fw_enc(%d, %d, %d) %*ph", le32_to_cpu(log_fmt->fmt_id),
1997 para_int, log_fmt->argc, (int)sizeof(args), args);
1998 else
1999 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE,
2000 "fw_enc(%d, %d, %d, %s)", le32_to_cpu(log_fmt->fmt_id),
2001 para_int, log_fmt->argc, log_fmt->u.raw);
2002 } else {
2003 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, (*fmts)[fmt_idx],
2004 args[0x0], args[0x1], args[0x2], args[0x3], args[0x4],
2005 args[0x5], args[0x6], args[0x7], args[0x8], args[0x9],
2006 args[0xa], args[0xb], args[0xc], args[0xd], args[0xe],
2007 args[0xf]);
2008 }
2009
2010 rtw89_info(rtwdev, "C2H log: %s", str_buf);
2011 }
2012
2013 void rtw89_fw_log_dump(struct rtw89_dev *rtwdev, u8 *buf, u32 len)
2014 {
2015 const struct rtw89_fw_c2h_log_fmt *log_fmt;
2016 u8 para_int;
2017 u32 fmt_idx;
2018
2019 if (len < RTW89_C2H_HEADER_LEN) {
2020 rtw89_err(rtwdev, "c2h log length is wrong!\n");
2021 return;
2022 }
2023
2024 buf += RTW89_C2H_HEADER_LEN;
2025 len -= RTW89_C2H_HEADER_LEN;
2026 log_fmt = (const struct rtw89_fw_c2h_log_fmt *)buf;
2027
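/* Logs that are too short to carry the formatted-log header, or that
 * lack its signature, are treated as plain text and printed verbatim.
 */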
2028 if (len < RTW89_C2H_FW_FORMATTED_LOG_MIN_LEN)
2029 goto plain_log;
2030
2031 if (log_fmt->signature != cpu_to_le16(RTW89_C2H_FW_LOG_SIGNATURE))
2032 goto plain_log;
2033
2034 if (!rtwdev->fw.log.fmts)
2035 return;
2036
2037 para_int = u8_get_bits(log_fmt->feature, RTW89_C2H_FW_LOG_FEATURE_PARA_INT);
2038 fmt_idx = rtw89_fw_log_get_fmt_idx(rtwdev, le32_to_cpu(log_fmt->fmt_id));
2039
2040 if (!para_int && log_fmt->argc != 0 && fmt_idx != 0)
2041 rtw89_info(rtwdev, "C2H log: %s%s",
2042 (*rtwdev->fw.log.fmts)[fmt_idx], log_fmt->u.raw);
2043 else if (fmt_idx != 0 && para_int)
2044 rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, false);
2045 else
2046 rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, true);
2047 return;
2048
2049 plain_log:
2050 rtw89_info(rtwdev, "C2H log: %.*s", len, buf);
2051
2052 }
2053
2054 #define H2C_CAM_LEN 60
2055 int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
2056 struct rtw89_sta_link *rtwsta_link, const u8 *scan_mac_addr)
2057 {
2058 struct sk_buff *skb;
2059 int ret;
2060
2061 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CAM_LEN);
2062 if (!skb) {
2063 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
2064 return -ENOMEM;
2065 }
2066 skb_put(skb, H2C_CAM_LEN);
2067 rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif_link, rtwsta_link, scan_mac_addr,
2068 skb->data);
2069 rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif_link, rtwsta_link, skb->data);
2070
2071 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2072 H2C_CAT_MAC,
2073 H2C_CL_MAC_ADDR_CAM_UPDATE,
2074 H2C_FUNC_MAC_ADDR_CAM_UPD, 0, 1,
2075 H2C_CAM_LEN);
2076
2077 ret = rtw89_h2c_tx(rtwdev, skb, false);
2078 if (ret) {
2079 rtw89_err(rtwdev, "failed to send h2c\n");
2080 goto fail;
2081 }
2082
2083 return 0;
2084 fail:
2085 dev_kfree_skb_any(skb);
2086
2087 return ret;
2088 }
2089
2090 int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev,
2091 struct rtw89_vif_link *rtwvif_link,
2092 struct rtw89_sta_link *rtwsta_link)
2093 {
2094 struct rtw89_h2c_dctlinfo_ud_v1 *h2c;
2095 u32 len = sizeof(*h2c);
2096 struct sk_buff *skb;
2097 int ret;
2098
2099 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2100 if (!skb) {
2101 rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n");
2102 return -ENOMEM;
2103 }
2104 skb_put(skb, len);
2105 h2c = (struct rtw89_h2c_dctlinfo_ud_v1 *)skb->data;
2106
2107 rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif_link, rtwsta_link, h2c);
2108
2109 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2110 H2C_CAT_MAC,
2111 H2C_CL_MAC_FR_EXCHG,
2112 H2C_FUNC_MAC_DCTLINFO_UD_V1, 0, 0,
2113 len);
2114
2115 ret = rtw89_h2c_tx(rtwdev, skb, false);
2116 if (ret) {
2117 rtw89_err(rtwdev, "failed to send h2c\n");
2118 goto fail;
2119 }
2120
2121 return 0;
2122 fail:
2123 dev_kfree_skb_any(skb);
2124
2125 return ret;
2126 }
2127 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1);
2128
2129 int rtw89_fw_h2c_dctl_sec_cam_v2(struct rtw89_dev *rtwdev,
2130 struct rtw89_vif_link *rtwvif_link,
2131 struct rtw89_sta_link *rtwsta_link)
2132 {
2133 struct rtw89_h2c_dctlinfo_ud_v2 *h2c;
2134 u32 len = sizeof(*h2c);
2135 struct sk_buff *skb;
2136 int ret;
2137
2138 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2139 if (!skb) {
2140 rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n");
2141 return -ENOMEM;
2142 }
2143 skb_put(skb, len);
2144 h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data;
2145
2146 rtw89_cam_fill_dctl_sec_cam_info_v2(rtwdev, rtwvif_link, rtwsta_link, h2c);
2147
2148 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2149 H2C_CAT_MAC,
2150 H2C_CL_MAC_FR_EXCHG,
2151 H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0,
2152 len);
2153
2154 ret = rtw89_h2c_tx(rtwdev, skb, false);
2155 if (ret) {
2156 rtw89_err(rtwdev, "failed to send h2c\n");
2157 goto fail;
2158 }
2159
2160 return 0;
2161 fail:
2162 dev_kfree_skb_any(skb);
2163
2164 return ret;
2165 }
2166 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v2);
2167
2168 int rtw89_fw_h2c_default_dmac_tbl_v2(struct rtw89_dev *rtwdev,
2169 struct rtw89_vif_link *rtwvif_link,
2170 struct rtw89_sta_link *rtwsta_link)
2171 {
2172 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
2173 struct rtw89_h2c_dctlinfo_ud_v2 *h2c;
2174 u32 len = sizeof(*h2c);
2175 struct sk_buff *skb;
2176 int ret;
2177
2178 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2179 if (!skb) {
2180 rtw89_err(rtwdev, "failed to alloc skb for dctl v2\n");
2181 return -ENOMEM;
2182 }
2183 skb_put(skb, len);
2184 h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data;
2185
2186 h2c->c0 = le32_encode_bits(mac_id, DCTLINFO_V2_C0_MACID) |
2187 le32_encode_bits(1, DCTLINFO_V2_C0_OP);
2188
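/* Set every field mask so the firmware resets the whole DMAC control
 * entry for this macid to its defaults.
 */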
2189 h2c->m0 = cpu_to_le32(DCTLINFO_V2_W0_ALL);
2190 h2c->m1 = cpu_to_le32(DCTLINFO_V2_W1_ALL);
2191 h2c->m2 = cpu_to_le32(DCTLINFO_V2_W2_ALL);
2192 h2c->m3 = cpu_to_le32(DCTLINFO_V2_W3_ALL);
2193 h2c->m4 = cpu_to_le32(DCTLINFO_V2_W4_ALL);
2194 h2c->m5 = cpu_to_le32(DCTLINFO_V2_W5_ALL);
2195 h2c->m6 = cpu_to_le32(DCTLINFO_V2_W6_ALL);
2196 h2c->m7 = cpu_to_le32(DCTLINFO_V2_W7_ALL);
2197 h2c->m8 = cpu_to_le32(DCTLINFO_V2_W8_ALL);
2198 h2c->m9 = cpu_to_le32(DCTLINFO_V2_W9_ALL);
2199 h2c->m10 = cpu_to_le32(DCTLINFO_V2_W10_ALL);
2200 h2c->m11 = cpu_to_le32(DCTLINFO_V2_W11_ALL);
2201 h2c->m12 = cpu_to_le32(DCTLINFO_V2_W12_ALL);
2202
2203 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2204 H2C_CAT_MAC,
2205 H2C_CL_MAC_FR_EXCHG,
2206 H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0,
2207 len);
2208
2209 ret = rtw89_h2c_tx(rtwdev, skb, false);
2210 if (ret) {
2211 rtw89_err(rtwdev, "failed to send h2c\n");
2212 goto fail;
2213 }
2214
2215 return 0;
2216 fail:
2217 dev_kfree_skb_any(skb);
2218
2219 return ret;
2220 }
2221 EXPORT_SYMBOL(rtw89_fw_h2c_default_dmac_tbl_v2);
2222
2223 int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev,
2224 struct rtw89_vif_link *rtwvif_link,
2225 struct rtw89_sta_link *rtwsta_link,
2226 bool valid, struct ieee80211_ampdu_params *params)
2227 {
2228 const struct rtw89_chip_info *chip = rtwdev->chip;
2229 struct rtw89_h2c_ba_cam *h2c;
2230 u8 macid = rtwsta_link->mac_id;
2231 u32 len = sizeof(*h2c);
2232 struct sk_buff *skb;
2233 u8 entry_idx;
2234 int ret;
2235
2236 ret = valid ?
2237 rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta_link, params->tid,
2238 &entry_idx) :
2239 rtw89_core_release_sta_ba_entry(rtwdev, rtwsta_link, params->tid,
2240 &entry_idx);
2241 if (ret) {
2242 /* it still works even if we don't have static BA CAM, because
2243 * hardware can create dynamic BA CAM automatically.
2244 */
2245 rtw89_debug(rtwdev, RTW89_DBG_TXRX,
2246 "failed to %s entry tid=%d for h2c ba cam\n",
2247 valid ? "alloc" : "free", params->tid);
2248 return 0;
2249 }
2250
2251 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2252 if (!skb) {
2253 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n");
2254 return -ENOMEM;
2255 }
2256 skb_put(skb, len);
2257 h2c = (struct rtw89_h2c_ba_cam *)skb->data;
2258
2259 h2c->w0 = le32_encode_bits(macid, RTW89_H2C_BA_CAM_W0_MACID);
2260 if (chip->bacam_ver == RTW89_BACAM_V0_EXT)
2261 h2c->w1 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1);
2262 else
2263 h2c->w0 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W0_ENTRY_IDX);
2264 if (!valid)
2265 goto end;
2266 h2c->w0 |= le32_encode_bits(valid, RTW89_H2C_BA_CAM_W0_VALID) |
2267 le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_W0_TID);
2268 if (params->buf_size > 64)
2269 h2c->w0 |= le32_encode_bits(4, RTW89_H2C_BA_CAM_W0_BMAP_SIZE);
2270 else
2271 h2c->w0 |= le32_encode_bits(0, RTW89_H2C_BA_CAM_W0_BMAP_SIZE);
2272 /* If init req is set, hw will set the ssn */
2273 h2c->w0 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_INIT_REQ) |
2274 le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_W0_SSN);
2275
2276 if (chip->bacam_ver == RTW89_BACAM_V0_EXT) {
2277 h2c->w1 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W1_STD_EN) |
2278 le32_encode_bits(rtwvif_link->mac_idx,
2279 RTW89_H2C_BA_CAM_W1_BAND);
2280 }
2281
2282 end:
2283 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2284 H2C_CAT_MAC,
2285 H2C_CL_BA_CAM,
2286 H2C_FUNC_MAC_BA_CAM, 0, 1,
2287 len);
2288
2289 ret = rtw89_h2c_tx(rtwdev, skb, false);
2290 if (ret) {
2291 rtw89_err(rtwdev, "failed to send h2c\n");
2292 goto fail;
2293 }
2294
2295 return 0;
2296 fail:
2297 dev_kfree_skb_any(skb);
2298
2299 return ret;
2300 }
2301 EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam);
2302
2303 static int rtw89_fw_h2c_init_ba_cam_v0_ext(struct rtw89_dev *rtwdev,
2304 u8 entry_idx, u8 uid)
2305 {
2306 struct rtw89_h2c_ba_cam *h2c;
2307 u32 len = sizeof(*h2c);
2308 struct sk_buff *skb;
2309 int ret;
2310
2311 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2312 if (!skb) {
2313 rtw89_err(rtwdev, "failed to alloc skb for dynamic h2c ba cam\n");
2314 return -ENOMEM;
2315 }
2316 skb_put(skb, len);
2317 h2c = (struct rtw89_h2c_ba_cam *)skb->data;
2318
2319 h2c->w0 = le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_VALID);
2320 h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1) |
2321 le32_encode_bits(uid, RTW89_H2C_BA_CAM_W1_UID) |
2322 le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_BAND) |
2323 le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_STD_EN);
2324
2325 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2326 H2C_CAT_MAC,
2327 H2C_CL_BA_CAM,
2328 H2C_FUNC_MAC_BA_CAM, 0, 1,
2329 len);
2330
2331 ret = rtw89_h2c_tx(rtwdev, skb, false);
2332 if (ret) {
2333 rtw89_err(rtwdev, "failed to send h2c\n");
2334 goto fail;
2335 }
2336
2337 return 0;
2338 fail:
2339 dev_kfree_skb_any(skb);
2340
2341 return ret;
2342 }
2343
2344 void rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(struct rtw89_dev *rtwdev)
2345 {
2346 const struct rtw89_chip_info *chip = rtwdev->chip;
2347 u8 entry_idx = chip->bacam_num;
2348 u8 uid = 0;
2349 int i;
2350
2351 for (i = 0; i < chip->bacam_dynamic_num; i++) {
2352 rtw89_fw_h2c_init_ba_cam_v0_ext(rtwdev, entry_idx, uid);
2353 entry_idx++;
2354 uid++;
2355 }
2356 }
2357
2358 int rtw89_fw_h2c_ba_cam_v1(struct rtw89_dev *rtwdev,
2359 struct rtw89_vif_link *rtwvif_link,
2360 struct rtw89_sta_link *rtwsta_link,
2361 bool valid, struct ieee80211_ampdu_params *params)
2362 {
2363 const struct rtw89_chip_info *chip = rtwdev->chip;
2364 struct rtw89_h2c_ba_cam_v1 *h2c;
2365 u8 macid = rtwsta_link->mac_id;
2366 u32 len = sizeof(*h2c);
2367 struct sk_buff *skb;
2368 u8 entry_idx;
2369 u8 bmap_size;
2370 int ret;
2371
2372 ret = valid ?
2373 rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta_link, params->tid,
2374 &entry_idx) :
2375 rtw89_core_release_sta_ba_entry(rtwdev, rtwsta_link, params->tid,
2376 &entry_idx);
2377 if (ret) {
2378 /* it still works even if we don't have static BA CAM, because
2379 * hardware can create dynamic BA CAM automatically.
2380 */
2381 rtw89_debug(rtwdev, RTW89_DBG_TXRX,
2382 "failed to %s entry tid=%d for h2c ba cam\n",
2383 valid ? "alloc" : "free", params->tid);
2384 return 0;
2385 }
2386
2387 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2388 if (!skb) {
2389 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n");
2390 return -ENOMEM;
2391 }
2392 skb_put(skb, len);
2393 h2c = (struct rtw89_h2c_ba_cam_v1 *)skb->data;
2394
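/* Pick the firmware bitmap-size code from the negotiated BA buffer
 * size; larger reorder buffers need a wider block-ack bitmap.
 */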
2395 if (params->buf_size > 512)
2396 bmap_size = 10;
2397 else if (params->buf_size > 256)
2398 bmap_size = 8;
2399 else if (params->buf_size > 64)
2400 bmap_size = 4;
2401 else
2402 bmap_size = 0;
2403
2404 h2c->w0 = le32_encode_bits(valid, RTW89_H2C_BA_CAM_V1_W0_VALID) |
2405 le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W0_INIT_REQ) |
2406 le32_encode_bits(macid, RTW89_H2C_BA_CAM_V1_W0_MACID_MASK) |
2407 le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_V1_W0_TID_MASK) |
2408 le32_encode_bits(bmap_size, RTW89_H2C_BA_CAM_V1_W0_BMAP_SIZE_MASK) |
2409 le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_V1_W0_SSN_MASK);
2410
2411 entry_idx += chip->bacam_dynamic_num; /* std entry right after dynamic ones */
2412 h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_V1_W1_ENTRY_IDX_MASK) |
2413 le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W1_STD_ENTRY_EN) |
2414 le32_encode_bits(!!rtwvif_link->mac_idx,
2415 RTW89_H2C_BA_CAM_V1_W1_BAND_SEL);
2416
2417 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2418 H2C_CAT_MAC,
2419 H2C_CL_BA_CAM,
2420 H2C_FUNC_MAC_BA_CAM_V1, 0, 1,
2421 len);
2422
2423 ret = rtw89_h2c_tx(rtwdev, skb, false);
2424 if (ret) {
2425 rtw89_err(rtwdev, "failed to send h2c\n");
2426 goto fail;
2427 }
2428
2429 return 0;
2430 fail:
2431 dev_kfree_skb_any(skb);
2432
2433 return ret;
2434 }
2435 EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam_v1);
2436
2437 int rtw89_fw_h2c_init_ba_cam_users(struct rtw89_dev *rtwdev, u8 users,
2438 u8 offset, u8 mac_idx)
2439 {
2440 struct rtw89_h2c_ba_cam_init *h2c;
2441 u32 len = sizeof(*h2c);
2442 struct sk_buff *skb;
2443 int ret;
2444
2445 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2446 if (!skb) {
2447 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam init\n");
2448 return -ENOMEM;
2449 }
2450 skb_put(skb, len);
2451 h2c = (struct rtw89_h2c_ba_cam_init *)skb->data;
2452
2453 h2c->w0 = le32_encode_bits(users, RTW89_H2C_BA_CAM_INIT_USERS_MASK) |
2454 le32_encode_bits(offset, RTW89_H2C_BA_CAM_INIT_OFFSET_MASK) |
2455 le32_encode_bits(mac_idx, RTW89_H2C_BA_CAM_INIT_BAND_SEL);
2456
2457 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2458 H2C_CAT_MAC,
2459 H2C_CL_BA_CAM,
2460 H2C_FUNC_MAC_BA_CAM_INIT, 0, 1,
2461 len);
2462
2463 ret = rtw89_h2c_tx(rtwdev, skb, false);
2464 if (ret) {
2465 rtw89_err(rtwdev, "failed to send h2c\n");
2466 goto fail;
2467 }
2468
2469 return 0;
2470 fail:
2471 dev_kfree_skb_any(skb);
2472
2473 return ret;
2474 }
2475
2476 #define H2C_LOG_CFG_LEN 12
2477 int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable)
2478 {
2479 struct sk_buff *skb;
2480 u32 comp = 0;
2481 int ret;
2482
2483 if (enable)
2484 comp = BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) |
2485 BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) |
2486 BIT(RTW89_FW_LOG_COMP_SCAN);
2487
2488 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN);
2489 if (!skb) {
2490 rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n");
2491 return -ENOMEM;
2492 }
2493
2494 skb_put(skb, H2C_LOG_CFG_LEN);
2495 SET_LOG_CFG_LEVEL(skb->data, RTW89_FW_LOG_LEVEL_LOUD);
2496 SET_LOG_CFG_PATH(skb->data, BIT(RTW89_FW_LOG_LEVEL_C2H));
2497 SET_LOG_CFG_COMP(skb->data, comp);
2498 SET_LOG_CFG_COMP_EXT(skb->data, 0);
2499
2500 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2501 H2C_CAT_MAC,
2502 H2C_CL_FW_INFO,
2503 H2C_FUNC_LOG_CFG, 0, 0,
2504 H2C_LOG_CFG_LEN);
2505
2506 ret = rtw89_h2c_tx(rtwdev, skb, false);
2507 if (ret) {
2508 rtw89_err(rtwdev, "failed to send h2c\n");
2509 goto fail;
2510 }
2511
2512 return 0;
2513 fail:
2514 dev_kfree_skb_any(skb);
2515
2516 return ret;
2517 }
2518
2519 static struct sk_buff *rtw89_eapol_get(struct rtw89_dev *rtwdev,
2520 struct rtw89_vif_link *rtwvif_link)
2521 {
2522 static const u8 gtkbody[] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00, 0x88,
2523 0x8E, 0x01, 0x03, 0x00, 0x5F, 0x02, 0x03};
2524 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev);
2525 struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
2526 struct rtw89_eapol_2_of_2 *eapol_pkt;
2527 struct ieee80211_bss_conf *bss_conf;
2528 struct ieee80211_hdr_3addr *hdr;
2529 struct sk_buff *skb;
2530 u8 key_des_ver;
2531
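/* Derive the EAPOL-Key descriptor version from the cipher/AKM recorded
 * at WoWLAN suspend time (the ptk_alg/akm codes here are
 * driver-internal).
 */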
2532 if (rtw_wow->ptk_alg == 3)
2533 key_des_ver = 1;
2534 else if (rtw_wow->akm == 1 || rtw_wow->akm == 2)
2535 key_des_ver = 2;
2536 else if (rtw_wow->akm > 2 && rtw_wow->akm < 7)
2537 key_des_ver = 3;
2538 else
2539 key_des_ver = 0;
2540
2541 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*eapol_pkt));
2542 if (!skb)
2543 return NULL;
2544
2545 hdr = skb_put_zero(skb, sizeof(*hdr));
2546 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
2547 IEEE80211_FCTL_TODS |
2548 IEEE80211_FCTL_PROTECTED);
2549
2550 rcu_read_lock();
2551
2552 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
2553
2554 ether_addr_copy(hdr->addr1, bss_conf->bssid);
2555 ether_addr_copy(hdr->addr2, bss_conf->addr);
2556 ether_addr_copy(hdr->addr3, bss_conf->bssid);
2557
2558 rcu_read_unlock();
2559
2560 skb_put_zero(skb, sec_hdr_len);
2561
2562 eapol_pkt = skb_put_zero(skb, sizeof(*eapol_pkt));
2563 memcpy(eapol_pkt->gtkbody, gtkbody, sizeof(gtkbody));
2564 eapol_pkt->key_des_ver = key_des_ver;
2565
2566 return skb;
2567 }
2568
2569 static struct sk_buff *rtw89_sa_query_get(struct rtw89_dev *rtwdev,
2570 struct rtw89_vif_link *rtwvif_link)
2571 {
2572 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev);
2573 struct ieee80211_bss_conf *bss_conf;
2574 struct ieee80211_hdr_3addr *hdr;
2575 struct rtw89_sa_query *sa_query;
2576 struct sk_buff *skb;
2577
2578 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*sa_query));
2579 if (!skb)
2580 return NULL;
2581
2582 hdr = skb_put_zero(skb, sizeof(*hdr));
2583 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
2584 IEEE80211_STYPE_ACTION |
2585 IEEE80211_FCTL_PROTECTED);
2586
2587 rcu_read_lock();
2588
2589 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
2590
2591 ether_addr_copy(hdr->addr1, bss_conf->bssid);
2592 ether_addr_copy(hdr->addr2, bss_conf->addr);
2593 ether_addr_copy(hdr->addr3, bss_conf->bssid);
2594
2595 rcu_read_unlock();
2596
2597 skb_put_zero(skb, sec_hdr_len);
2598
2599 sa_query = skb_put_zero(skb, sizeof(*sa_query));
2600 sa_query->category = WLAN_CATEGORY_SA_QUERY;
2601 sa_query->action = WLAN_ACTION_SA_QUERY_RESPONSE;
2602
2603 return skb;
2604 }
2605
2606 static struct sk_buff *rtw89_arp_response_get(struct rtw89_dev *rtwdev,
2607 struct rtw89_vif_link *rtwvif_link)
2608 {
2609 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
2610 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev);
2611 struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
2612 struct ieee80211_hdr_3addr *hdr;
2613 struct rtw89_arp_rsp *arp_skb;
2614 struct arphdr *arp_hdr;
2615 struct sk_buff *skb;
2616 __le16 fc;
2617
2618 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*arp_skb));
2619 if (!skb)
2620 return NULL;
2621
2622 hdr = skb_put_zero(skb, sizeof(*hdr));
2623
2624 if (rtw_wow->ptk_alg)
2625 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS |
2626 IEEE80211_FCTL_PROTECTED);
2627 else
2628 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS);
2629
2630 hdr->frame_control = fc;
2631 ether_addr_copy(hdr->addr1, rtwvif_link->bssid);
2632 ether_addr_copy(hdr->addr2, rtwvif_link->mac_addr);
2633 ether_addr_copy(hdr->addr3, rtwvif_link->bssid);
2634
2635 skb_put_zero(skb, sec_hdr_len);
2636
2637 arp_skb = skb_put_zero(skb, sizeof(*arp_skb));
2638 memcpy(arp_skb->llc_hdr, rfc1042_header, sizeof(rfc1042_header));
2639 arp_skb->llc_type = htons(ETH_P_ARP);
2640
2641 arp_hdr = &arp_skb->arp_hdr;
2642 arp_hdr->ar_hrd = htons(ARPHRD_ETHER);
2643 arp_hdr->ar_pro = htons(ETH_P_IP);
2644 arp_hdr->ar_hln = ETH_ALEN;
2645 arp_hdr->ar_pln = 4;
2646 arp_hdr->ar_op = htons(ARPOP_REPLY);
2647
2648 ether_addr_copy(arp_skb->sender_hw, rtwvif_link->mac_addr);
2649 arp_skb->sender_ip = rtwvif->ip_addr;
2650
2651 return skb;
2652 }
2653
2654 static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev,
2655 struct rtw89_vif_link *rtwvif_link,
2656 enum rtw89_fw_pkt_ofld_type type,
2657 u8 *id)
2658 {
2659 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
2660 int link_id = ieee80211_vif_is_mld(vif) ? rtwvif_link->link_id : -1;
2661 struct rtw89_pktofld_info *info;
2662 struct sk_buff *skb;
2663 int ret;
2664
2665 info = kzalloc(sizeof(*info), GFP_KERNEL);
2666 if (!info)
2667 return -ENOMEM;
2668
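/* Build the template frame to offload: standard frames come from
 * mac80211 helpers, while the WoWLAN-specific ones (EAPoL 2/2,
 * SA Query response, ARP response) are built locally.
 */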
2669 switch (type) {
2670 case RTW89_PKT_OFLD_TYPE_PS_POLL:
2671 skb = ieee80211_pspoll_get(rtwdev->hw, vif);
2672 break;
2673 case RTW89_PKT_OFLD_TYPE_PROBE_RSP:
2674 skb = ieee80211_proberesp_get(rtwdev->hw, vif);
2675 break;
2676 case RTW89_PKT_OFLD_TYPE_NULL_DATA:
2677 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, false);
2678 break;
2679 case RTW89_PKT_OFLD_TYPE_QOS_NULL:
2680 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, true);
2681 break;
2682 case RTW89_PKT_OFLD_TYPE_EAPOL_KEY:
2683 skb = rtw89_eapol_get(rtwdev, rtwvif_link);
2684 break;
2685 case RTW89_PKT_OFLD_TYPE_SA_QUERY:
2686 skb = rtw89_sa_query_get(rtwdev, rtwvif_link);
2687 break;
2688 case RTW89_PKT_OFLD_TYPE_ARP_RSP:
2689 skb = rtw89_arp_response_get(rtwdev, rtwvif_link);
2690 break;
2691 default:
2692 goto err;
2693 }
2694
2695 if (!skb)
2696 goto err;
2697
2698 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb);
2699 kfree_skb(skb);
2700
2701 if (ret)
2702 goto err;
2703
2704 list_add_tail(&info->list, &rtwvif_link->general_pkt_list);
2705 *id = info->id;
2706 return 0;
2707
2708 err:
2709 kfree(info);
2710 return -ENOMEM;
2711 }
2712
2713 void rtw89_fw_release_general_pkt_list_vif(struct rtw89_dev *rtwdev,
2714 struct rtw89_vif_link *rtwvif_link,
2715 bool notify_fw)
2716 {
2717 struct list_head *pkt_list = &rtwvif_link->general_pkt_list;
2718 struct rtw89_pktofld_info *info, *tmp;
2719
2720 list_for_each_entry_safe(info, tmp, pkt_list, list) {
2721 if (notify_fw)
2722 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
2723 else
2724 rtw89_core_release_bit_map(rtwdev->pkt_offload, info->id);
2725 list_del(&info->list);
2726 kfree(info);
2727 }
2728 }
2729
2730 void rtw89_fw_release_general_pkt_list(struct rtw89_dev *rtwdev, bool notify_fw)
2731 {
2732 struct rtw89_vif_link *rtwvif_link;
2733 struct rtw89_vif *rtwvif;
2734 unsigned int link_id;
2735
2736 rtw89_for_each_rtwvif(rtwdev, rtwvif)
2737 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
2738 rtw89_fw_release_general_pkt_list_vif(rtwdev, rtwvif_link,
2739 notify_fw);
2740 }
2741
2742 #define H2C_GENERAL_PKT_LEN 6
2743 #define H2C_GENERAL_PKT_ID_UND 0xff
2744 int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev,
2745 struct rtw89_vif_link *rtwvif_link, u8 macid)
2746 {
2747 u8 pkt_id_ps_poll = H2C_GENERAL_PKT_ID_UND;
2748 u8 pkt_id_null = H2C_GENERAL_PKT_ID_UND;
2749 u8 pkt_id_qos_null = H2C_GENERAL_PKT_ID_UND;
2750 struct sk_buff *skb;
2751 int ret;
2752
2753 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
2754 RTW89_PKT_OFLD_TYPE_PS_POLL, &pkt_id_ps_poll);
2755 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
2756 RTW89_PKT_OFLD_TYPE_NULL_DATA, &pkt_id_null);
2757 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
2758 RTW89_PKT_OFLD_TYPE_QOS_NULL, &pkt_id_qos_null);
2759
2760 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_GENERAL_PKT_LEN);
2761 if (!skb) {
2762 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
2763 return -ENOMEM;
2764 }
2765 skb_put(skb, H2C_GENERAL_PKT_LEN);
2766 SET_GENERAL_PKT_MACID(skb->data, macid);
2767 SET_GENERAL_PKT_PROBRSP_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
2768 SET_GENERAL_PKT_PSPOLL_ID(skb->data, pkt_id_ps_poll);
2769 SET_GENERAL_PKT_NULL_ID(skb->data, pkt_id_null);
2770 SET_GENERAL_PKT_QOS_NULL_ID(skb->data, pkt_id_qos_null);
2771 SET_GENERAL_PKT_CTS2SELF_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
2772
2773 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2774 H2C_CAT_MAC,
2775 H2C_CL_FW_INFO,
2776 H2C_FUNC_MAC_GENERAL_PKT, 0, 1,
2777 H2C_GENERAL_PKT_LEN);
2778
2779 ret = rtw89_h2c_tx(rtwdev, skb, false);
2780 if (ret) {
2781 rtw89_err(rtwdev, "failed to send h2c\n");
2782 goto fail;
2783 }
2784
2785 return 0;
2786 fail:
2787 dev_kfree_skb_any(skb);
2788
2789 return ret;
2790 }
2791
2792 #define H2C_LPS_PARM_LEN 8
2793 int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
2794 struct rtw89_lps_parm *lps_param)
2795 {
2796 struct sk_buff *skb;
2797 int ret;
2798
2799 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LPS_PARM_LEN);
2800 if (!skb) {
2801 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
2802 return -ENOMEM;
2803 }
2804 skb_put(skb, H2C_LPS_PARM_LEN);
2805
2806 SET_LPS_PARM_MACID(skb->data, lps_param->macid);
2807 SET_LPS_PARM_PSMODE(skb->data, lps_param->psmode);
2808 SET_LPS_PARM_LASTRPWM(skb->data, lps_param->lastrpwm);
2809 SET_LPS_PARM_RLBM(skb->data, 1);
2810 SET_LPS_PARM_SMARTPS(skb->data, 1);
2811 SET_LPS_PARM_AWAKEINTERVAL(skb->data, 1);
2812 SET_LPS_PARM_VOUAPSD(skb->data, 0);
2813 SET_LPS_PARM_VIUAPSD(skb->data, 0);
2814 SET_LPS_PARM_BEUAPSD(skb->data, 0);
2815 SET_LPS_PARM_BKUAPSD(skb->data, 0);
2816
2817 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2818 H2C_CAT_MAC,
2819 H2C_CL_MAC_PS,
2820 H2C_FUNC_MAC_LPS_PARM, 0, !lps_param->psmode,
2821 H2C_LPS_PARM_LEN);
2822
2823 ret = rtw89_h2c_tx(rtwdev, skb, false);
2824 if (ret) {
2825 rtw89_err(rtwdev, "failed to send h2c\n");
2826 goto fail;
2827 }
2828
2829 return 0;
2830 fail:
2831 dev_kfree_skb_any(skb);
2832
2833 return ret;
2834 }
2835
2836 int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
2837 {
2838 const struct rtw89_chip_info *chip = rtwdev->chip;
2839 const struct rtw89_chan *chan;
2840 struct rtw89_vif_link *rtwvif_link;
2841 struct rtw89_h2c_lps_ch_info *h2c;
2842 u32 len = sizeof(*h2c);
2843 unsigned int link_id;
2844 struct sk_buff *skb;
2845 bool no_chan = true;
2846 u8 phy_idx;
2847 u32 done;
2848 int ret;
2849
2850 if (chip->chip_gen != RTW89_CHIP_BE)
2851 return 0;
2852
2853 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2854 if (!skb) {
2855 rtw89_err(rtwdev, "failed to alloc skb for h2c lps_ch_info\n");
2856 return -ENOMEM;
2857 }
2858 skb_put(skb, len);
2859 h2c = (struct rtw89_h2c_lps_ch_info *)skb->data;
2860
2861 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
2862 phy_idx = rtwvif_link->phy_idx;
2863 if (phy_idx >= ARRAY_SIZE(h2c->info))
2864 continue;
2865
2866 chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
2867 no_chan = false;
2868
2869 h2c->info[phy_idx].central_ch = chan->channel;
2870 h2c->info[phy_idx].pri_ch = chan->primary_channel;
2871 h2c->info[phy_idx].band = chan->band_type;
2872 h2c->info[phy_idx].bw = chan->band_width;
2873 }
2874
2875 if (no_chan) {
2876 rtw89_err(rtwdev, "no chan for h2c lps_ch_info\n");
2877 ret = -ENOENT;
2878 goto fail;
2879 }
2880
2881 h2c->mlo_dbcc_mode_lps = cpu_to_le32(rtwdev->mlo_dbcc_mode);
2882
2883 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2884 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM,
2885 H2C_FUNC_FW_LPS_CH_INFO, 0, 0, len);
2886
2887 rtw89_phy_write32_mask(rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT, 0);
2888 ret = rtw89_h2c_tx(rtwdev, skb, false);
2889 if (ret) {
2890 rtw89_err(rtwdev, "failed to send h2c\n");
2891 goto fail;
2892 }
2893
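/* The firmware acknowledges the LPS channel info by setting
 * B_CHK_LPS_STAT, which was cleared right before sending the H2C.
 */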
2894 ret = read_poll_timeout(rtw89_phy_read32_mask, done, done, 50, 5000,
2895 true, rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT);
2896 if (ret)
2897 rtw89_warn(rtwdev, "h2c_lps_ch_info done polling timeout\n");
2898
2899 return 0;
2900 fail:
2901 dev_kfree_skb_any(skb);
2902
2903 return ret;
2904 }
2905
2906 int rtw89_fw_h2c_lps_ml_cmn_info(struct rtw89_dev *rtwdev,
2907 struct rtw89_vif *rtwvif)
2908 {
2909 const struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be;
2910 struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.cur_pkt_stat;
2911 static const u8 bcn_bw_ofst[] = {0, 0, 0, 3, 6, 9, 0, 12};
2912 const struct rtw89_chip_info *chip = rtwdev->chip;
2913 struct rtw89_efuse *efuse = &rtwdev->efuse;
2914 struct rtw89_h2c_lps_ml_cmn_info *h2c;
2915 struct rtw89_vif_link *rtwvif_link;
2916 const struct rtw89_chan *chan;
2917 u8 bw_idx = RTW89_BB_BW_20_40;
2918 u32 len = sizeof(*h2c);
2919 unsigned int link_id;
2920 struct sk_buff *skb;
2921 u8 beacon_bw_ofst;
2922 u8 gain_band;
2923 u32 done;
2924 u8 path;
2925 int ret;
2926 int i;
2927
2928 if (chip->chip_gen != RTW89_CHIP_BE)
2929 return 0;
2930
2931 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2932 if (!skb) {
2933 rtw89_err(rtwdev, "failed to alloc skb for h2c lps_ml_cmn_info\n");
2934 return -ENOMEM;
2935 }
2936 skb_put(skb, len);
2937 h2c = (struct rtw89_h2c_lps_ml_cmn_info *)skb->data;
2938
2939 h2c->fmt_id = 0x3;
2940
2941 h2c->mlo_dbcc_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);
2942 h2c->rfe_type = efuse->rfe_type;
2943
2944 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
2945 path = rtwvif_link->phy_idx == RTW89_PHY_1 ? RF_PATH_B : RF_PATH_A;
2946 chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
2947 gain_band = rtw89_subband_to_gain_band_be(chan->subband_type);
2948
2949 h2c->central_ch[rtwvif_link->phy_idx] = chan->channel;
2950 h2c->pri_ch[rtwvif_link->phy_idx] = chan->primary_channel;
2951 h2c->band[rtwvif_link->phy_idx] = chan->band_type;
2952 h2c->bw[rtwvif_link->phy_idx] = chan->band_width;
2953 if (pkt_stat->beacon_rate < RTW89_HW_RATE_OFDM6)
2954 h2c->bcn_rate_type[rtwvif_link->phy_idx] = 0x1;
2955 else
2956 h2c->bcn_rate_type[rtwvif_link->phy_idx] = 0x2;
2957
2958 /* Fill BW20 RX gain table for beacon mode */
2959 for (i = 0; i < TIA_GAIN_NUM; i++) {
2960 h2c->tia_gain[rtwvif_link->phy_idx][i] =
2961 cpu_to_le16(gain->tia_gain[gain_band][bw_idx][path][i]);
2962 }
2963
2964 if (rtwvif_link->bcn_bw_idx < ARRAY_SIZE(bcn_bw_ofst)) {
2965 beacon_bw_ofst = bcn_bw_ofst[rtwvif_link->bcn_bw_idx];
2966 h2c->dup_bcn_ofst[rtwvif_link->phy_idx] = beacon_bw_ofst;
2967 }
2968
2969 memcpy(h2c->lna_gain[rtwvif_link->phy_idx],
2970 gain->lna_gain[gain_band][bw_idx][path],
2971 LNA_GAIN_NUM);
2972 memcpy(h2c->tia_lna_op1db[rtwvif_link->phy_idx],
2973 gain->tia_lna_op1db[gain_band][bw_idx][path],
2974 LNA_GAIN_NUM + 1);
2975 memcpy(h2c->lna_op1db[rtwvif_link->phy_idx],
2976 gain->lna_op1db[gain_band][bw_idx][path],
2977 LNA_GAIN_NUM);
2978 }
2979
2980 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2981 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM,
2982 H2C_FUNC_FW_LPS_ML_CMN_INFO, 0, 0, len);
2983
2984 rtw89_phy_write32_mask(rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT, 0);
2985 ret = rtw89_h2c_tx(rtwdev, skb, false);
2986 if (ret) {
2987 rtw89_err(rtwdev, "failed to send h2c\n");
2988 goto fail;
2989 }
2990
2991 ret = read_poll_timeout(rtw89_phy_read32_mask, done, done, 50, 5000,
2992 true, rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT);
2993 if (ret)
2994 rtw89_warn(rtwdev, "h2c_lps_ml_cmn_info done polling timeout\n");
2995
2996 return 0;
2997 fail:
2998 dev_kfree_skb_any(skb);
2999
3000 return ret;
3001 }
3002
3003 #define H2C_P2P_ACT_LEN 20
3004 int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev,
3005 struct rtw89_vif_link *rtwvif_link,
3006 struct ieee80211_bss_conf *bss_conf,
3007 struct ieee80211_p2p_noa_desc *desc,
3008 u8 act, u8 noa_id)
3009 {
3010 bool p2p_type_gc = rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT;
3011 u8 ctwindow_oppps = bss_conf->p2p_noa_attr.oppps_ctwindow;
3012 struct sk_buff *skb;
3013 u8 *cmd;
3014 int ret;
3015
3016 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_P2P_ACT_LEN);
3017 if (!skb) {
3018 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n");
3019 return -ENOMEM;
3020 }
3021 skb_put(skb, H2C_P2P_ACT_LEN);
3022 cmd = skb->data;
3023
3024 RTW89_SET_FWCMD_P2P_MACID(cmd, rtwvif_link->mac_id);
3025 RTW89_SET_FWCMD_P2P_P2PID(cmd, 0);
3026 RTW89_SET_FWCMD_P2P_NOAID(cmd, noa_id);
3027 RTW89_SET_FWCMD_P2P_ACT(cmd, act);
3028 RTW89_SET_FWCMD_P2P_TYPE(cmd, p2p_type_gc);
3029 RTW89_SET_FWCMD_P2P_ALL_SLEP(cmd, 0);
3030 if (desc) {
3031 RTW89_SET_FWCMD_NOA_START_TIME(cmd, desc->start_time);
3032 RTW89_SET_FWCMD_NOA_INTERVAL(cmd, desc->interval);
3033 RTW89_SET_FWCMD_NOA_DURATION(cmd, desc->duration);
3034 RTW89_SET_FWCMD_NOA_COUNT(cmd, desc->count);
3035 RTW89_SET_FWCMD_NOA_CTWINDOW(cmd, ctwindow_oppps);
3036 }
3037
3038 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3039 H2C_CAT_MAC, H2C_CL_MAC_PS,
3040 H2C_FUNC_P2P_ACT, 0, 0,
3041 H2C_P2P_ACT_LEN);
3042
3043 ret = rtw89_h2c_tx(rtwdev, skb, false);
3044 if (ret) {
3045 rtw89_err(rtwdev, "failed to send h2c\n");
3046 goto fail;
3047 }
3048
3049 return 0;
3050 fail:
3051 dev_kfree_skb_any(skb);
3052
3053 return ret;
3054 }
3055
3056 static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev,
3057 struct sk_buff *skb)
3058 {
3059 const struct rtw89_chip_info *chip = rtwdev->chip;
3060 struct rtw89_hal *hal = &rtwdev->hal;
3061 u8 ntx_path;
3062 u8 map_b;
3063
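/* Single RF path chips always transmit on path A. Otherwise use the
 * configured TX antenna and enable the path-B map only when both
 * antennas (RF_AB) are selected.
 */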
3064 if (chip->rf_path_num == 1) {
3065 ntx_path = RF_A;
3066 map_b = 0;
3067 } else {
3068 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B;
3069 map_b = hal->antenna_tx == RF_AB ? 1 : 0;
3070 }
3071
3072 SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path);
3073 SET_CMC_TBL_PATH_MAP_A(skb->data, 0);
3074 SET_CMC_TBL_PATH_MAP_B(skb->data, map_b);
3075 SET_CMC_TBL_PATH_MAP_C(skb->data, 0);
3076 SET_CMC_TBL_PATH_MAP_D(skb->data, 0);
3077 }
3078
3079 #define H2C_CMC_TBL_LEN 68
3080 int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
3081 struct rtw89_vif_link *rtwvif_link,
3082 struct rtw89_sta_link *rtwsta_link)
3083 {
3084 const struct rtw89_chip_info *chip = rtwdev->chip;
3085 u8 macid = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
3086 struct sk_buff *skb;
3087 int ret;
3088
3089 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
3090 if (!skb) {
3091 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
3092 return -ENOMEM;
3093 }
3094 skb_put(skb, H2C_CMC_TBL_LEN);
3095 SET_CTRL_INFO_MACID(skb->data, macid);
3096 SET_CTRL_INFO_OPERATION(skb->data, 1);
3097 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) {
3098 SET_CMC_TBL_TXPWR_MODE(skb->data, 0);
3099 __rtw89_fw_h2c_set_tx_path(rtwdev, skb);
3100 SET_CMC_TBL_ANTSEL_A(skb->data, 0);
3101 SET_CMC_TBL_ANTSEL_B(skb->data, 0);
3102 SET_CMC_TBL_ANTSEL_C(skb->data, 0);
3103 SET_CMC_TBL_ANTSEL_D(skb->data, 0);
3104 }
3105 SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0);
3106 SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0);
3107 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE)
3108 SET_CMC_TBL_DATA_DCM(skb->data, 0);
3109
3110 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3111 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
3112 chip->h2c_cctl_func_id, 0, 1,
3113 H2C_CMC_TBL_LEN);
3114
3115 ret = rtw89_h2c_tx(rtwdev, skb, false);
3116 if (ret) {
3117 rtw89_err(rtwdev, "failed to send h2c\n");
3118 goto fail;
3119 }
3120
3121 return 0;
3122 fail:
3123 dev_kfree_skb_any(skb);
3124
3125 return ret;
3126 }
3127 EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl);
3128
3129 int rtw89_fw_h2c_default_cmac_tbl_g7(struct rtw89_dev *rtwdev,
3130 struct rtw89_vif_link *rtwvif_link,
3131 struct rtw89_sta_link *rtwsta_link)
3132 {
3133 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
3134 struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
3135 u32 len = sizeof(*h2c);
3136 struct sk_buff *skb;
3137 int ret;
3138
3139 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
3140 if (!skb) {
3141 rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n");
3142 return -ENOMEM;
3143 }
3144 skb_put(skb, len);
3145 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data;
3146
3147 h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) |
3148 le32_encode_bits(1, CCTLINFO_G7_C0_OP);
3149
3150 h2c->w0 = le32_encode_bits(4, CCTLINFO_G7_W0_DATARATE);
3151 h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_ALL);
3152
3153 h2c->w1 = le32_encode_bits(4, CCTLINFO_G7_W1_DATA_RTY_LOWEST_RATE) |
3154 le32_encode_bits(0xa, CCTLINFO_G7_W1_RTSRATE) |
3155 le32_encode_bits(4, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE);
3156 h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_ALL);
3157
3158 h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_ALL);
3159
3160 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_ALL);
3161
3162 h2c->w4 = le32_encode_bits(0xFFFF, CCTLINFO_G7_W4_ACT_SUBCH_CBW);
3163 h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_ALL);
3164
3165 h2c->w5 = le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) |
3166 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) |
3167 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) |
3168 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) |
3169 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4);
3170 h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_ALL);
3171
3172 h2c->w6 = le32_encode_bits(0xb, CCTLINFO_G7_W6_RESP_REF_RATE);
3173 h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ALL);
3174
3175 h2c->w7 = le32_encode_bits(1, CCTLINFO_G7_W7_NC) |
3176 le32_encode_bits(1, CCTLINFO_G7_W7_NR) |
3177 le32_encode_bits(1, CCTLINFO_G7_W7_CB) |
3178 le32_encode_bits(0x1, CCTLINFO_G7_W7_CSI_PARA_EN) |
3179 le32_encode_bits(0xb, CCTLINFO_G7_W7_CSI_FIX_RATE);
3180 h2c->m7 = cpu_to_le32(CCTLINFO_G7_W7_ALL);
3181
3182 h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_ALL);
3183
3184 h2c->w14 = le32_encode_bits(0, CCTLINFO_G7_W14_VO_CURR_RATE) |
3185 le32_encode_bits(0, CCTLINFO_G7_W14_VI_CURR_RATE) |
3186 le32_encode_bits(0, CCTLINFO_G7_W14_BE_CURR_RATE_L);
3187 h2c->m14 = cpu_to_le32(CCTLINFO_G7_W14_ALL);
3188
3189 h2c->w15 = le32_encode_bits(0, CCTLINFO_G7_W15_BE_CURR_RATE_H) |
3190 le32_encode_bits(0, CCTLINFO_G7_W15_BK_CURR_RATE) |
3191 le32_encode_bits(0, CCTLINFO_G7_W15_MGNT_CURR_RATE);
3192 h2c->m15 = cpu_to_le32(CCTLINFO_G7_W15_ALL);
3193
3194 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3195 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
3196 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1,
3197 len);
3198
3199 ret = rtw89_h2c_tx(rtwdev, skb, false);
3200 if (ret) {
3201 rtw89_err(rtwdev, "failed to send h2c\n");
3202 goto fail;
3203 }
3204
3205 return 0;
3206 fail:
3207 dev_kfree_skb_any(skb);
3208
3209 return ret;
3210 }
3211 EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl_g7);
3212
3213 static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev,
3214 struct ieee80211_link_sta *link_sta,
3215 u8 *pads)
3216 {
3217 bool ppe_th;
3218 u8 ppe16, ppe8;
3219 u8 nss = min(link_sta->rx_nss, rtwdev->hal.tx_nss) - 1;
3220 u8 ppe_thres_hdr = link_sta->he_cap.ppe_thres[0];
3221 u8 ru_bitmap;
3222 u8 n, idx, sh;
3223 u16 ppe;
3224 int i;
3225
3226 ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
3227 link_sta->he_cap.he_cap_elem.phy_cap_info[6]);
3228 if (!ppe_th) {
3229 u8 pad;
3230
3231 pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK,
3232 link_sta->he_cap.he_cap_elem.phy_cap_info[9]);
3233
3234 for (i = 0; i < RTW89_PPE_BW_NUM; i++)
3235 pads[i] = pad;
3236
3237 return;
3238 }
3239
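/* PPE thresholds present: skip the 7-bit header (NSS + RU bitmap) and
 * the PPET16/PPET8 pairs of the lower spatial streams so that n is the
 * bit offset of the entries that match our NSS.
 */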
3240 ru_bitmap = FIELD_GET(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, ppe_thres_hdr);
3241 n = hweight8(ru_bitmap);
3242 n = 7 + (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) * nss;
3243
3244 for (i = 0; i < RTW89_PPE_BW_NUM; i++) {
3245 if (!(ru_bitmap & BIT(i))) {
3246 pads[i] = 1;
3247 continue;
3248 }
3249
3250 idx = n >> 3;
3251 sh = n & 7;
3252 n += IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2;
3253
3254 ppe = le16_to_cpu(*((__le16 *)&link_sta->he_cap.ppe_thres[idx]));
3255 ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
3256 sh += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
3257 ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
3258
3259 if (ppe16 != 7 && ppe8 == 7)
3260 pads[i] = RTW89_PE_DURATION_16;
3261 else if (ppe8 != 7)
3262 pads[i] = RTW89_PE_DURATION_8;
3263 else
3264 pads[i] = RTW89_PE_DURATION_0;
3265 }
3266 }
3267
3268 int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
3269 struct rtw89_vif_link *rtwvif_link,
3270 struct rtw89_sta_link *rtwsta_link)
3271 {
3272 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
3273 const struct rtw89_chip_info *chip = rtwdev->chip;
3274 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
3275 rtwvif_link->chanctx_idx);
3276 struct ieee80211_link_sta *link_sta;
3277 struct sk_buff *skb;
3278 u8 pads[RTW89_PPE_BW_NUM];
3279 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
3280 u16 lowest_rate;
3281 int ret;
3282
3283 memset(pads, 0, sizeof(pads));
3284
3285 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
3286 if (!skb) {
3287 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
3288 return -ENOMEM;
3289 }
3290
3291 rcu_read_lock();
3292
3293 if (rtwsta_link)
3294 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
3295
3296 if (rtwsta_link && link_sta->he_cap.has_he)
3297 __get_sta_he_pkt_padding(rtwdev, link_sta, pads);
3298
3299 if (vif->p2p)
3300 lowest_rate = RTW89_HW_RATE_OFDM6;
3301 else if (chan->band_type == RTW89_BAND_2G)
3302 lowest_rate = RTW89_HW_RATE_CCK1;
3303 else
3304 lowest_rate = RTW89_HW_RATE_OFDM6;
3305
3306 skb_put(skb, H2C_CMC_TBL_LEN);
3307 SET_CTRL_INFO_MACID(skb->data, mac_id);
3308 SET_CTRL_INFO_OPERATION(skb->data, 1);
3309 SET_CMC_TBL_DISRTSFB(skb->data, 1);
3310 SET_CMC_TBL_DISDATAFB(skb->data, 1);
3311 SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, lowest_rate);
3312 SET_CMC_TBL_RTS_TXCNT_LMT_SEL(skb->data, 0);
3313 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 0);
3314 if (vif->type == NL80211_IFTYPE_STATION)
3315 SET_CMC_TBL_ULDL(skb->data, 1);
3316 else
3317 SET_CMC_TBL_ULDL(skb->data, 0);
3318 SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif_link->port);
3319 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD_V1) {
3320 SET_CMC_TBL_NOMINAL_PKT_PADDING_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
3321 SET_CMC_TBL_NOMINAL_PKT_PADDING40_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
3322 SET_CMC_TBL_NOMINAL_PKT_PADDING80_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
3323 SET_CMC_TBL_NOMINAL_PKT_PADDING160_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_160]);
3324 } else if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) {
3325 SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
3326 SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
3327 SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
3328 SET_CMC_TBL_NOMINAL_PKT_PADDING160(skb->data, pads[RTW89_CHANNEL_WIDTH_160]);
3329 }
3330 if (rtwsta_link)
3331 SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data,
3332 link_sta->he_cap.has_he);
3333 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE)
3334 SET_CMC_TBL_DATA_DCM(skb->data, 0);
3335
3336 rcu_read_unlock();
3337
3338 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3339 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
3340 chip->h2c_cctl_func_id, 0, 1,
3341 H2C_CMC_TBL_LEN);
3342
3343 ret = rtw89_h2c_tx(rtwdev, skb, false);
3344 if (ret) {
3345 rtw89_err(rtwdev, "failed to send h2c\n");
3346 goto fail;
3347 }
3348
3349 return 0;
3350 fail:
3351 dev_kfree_skb_any(skb);
3352
3353 return ret;
3354 }
3355 EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl);
3356
3357 static void __get_sta_eht_pkt_padding(struct rtw89_dev *rtwdev,
3358 struct ieee80211_link_sta *link_sta,
3359 u8 *pads)
3360 {
3361 u8 nss = min(link_sta->rx_nss, rtwdev->hal.tx_nss) - 1;
3362 u16 ppe_thres_hdr;
3363 u8 ppe16, ppe8;
3364 u8 n, idx, sh;
3365 u8 ru_bitmap;
3366 bool ppe_th;
3367 u16 ppe;
3368 int i;
3369
3370 ppe_th = !!u8_get_bits(link_sta->eht_cap.eht_cap_elem.phy_cap_info[5],
3371 IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT);
3372 if (!ppe_th) {
3373 u8 pad;
3374
3375 pad = u8_get_bits(link_sta->eht_cap.eht_cap_elem.phy_cap_info[5],
3376 IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK);
3377
3378 for (i = 0; i < RTW89_PPE_BW_NUM; i++)
3379 pads[i] = pad;
3380
3381 return;
3382 }
3383
3384 ppe_thres_hdr = get_unaligned_le16(link_sta->eht_cap.eht_ppe_thres);
3385 ru_bitmap = u16_get_bits(ppe_thres_hdr,
3386 IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK);
3387 n = hweight8(ru_bitmap);
3388 n = IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE +
3389 (n * IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2) * nss;
3390
3391 for (i = 0; i < RTW89_PPE_BW_NUM; i++) {
3392 if (!(ru_bitmap & BIT(i))) {
3393 pads[i] = 1;
3394 continue;
3395 }
3396
3397 idx = n >> 3;
3398 sh = n & 7;
3399 n += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2;
3400
3401 ppe = get_unaligned_le16(link_sta->eht_cap.eht_ppe_thres + idx);
3402 ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
3403 sh += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE;
3404 ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
3405
3406 if (ppe16 != 7 && ppe8 == 7)
3407 pads[i] = RTW89_PE_DURATION_16_20;
3408 else if (ppe8 != 7)
3409 pads[i] = RTW89_PE_DURATION_8;
3410 else
3411 pads[i] = RTW89_PE_DURATION_0;
3412 }
3413 }
3414
3415 int rtw89_fw_h2c_assoc_cmac_tbl_g7(struct rtw89_dev *rtwdev,
3416 struct rtw89_vif_link *rtwvif_link,
3417 struct rtw89_sta_link *rtwsta_link)
3418 {
3419 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
3420 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
3421 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
3422 struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
3423 struct ieee80211_bss_conf *bss_conf;
3424 struct ieee80211_link_sta *link_sta;
3425 u8 pads[RTW89_PPE_BW_NUM];
3426 u32 len = sizeof(*h2c);
3427 struct sk_buff *skb;
3428 u16 lowest_rate;
3429 int ret;
3430
3431 memset(pads, 0, sizeof(pads));
3432
3433 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
3434 if (!skb) {
3435 rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n");
3436 return -ENOMEM;
3437 }
3438
3439 rcu_read_lock();
3440
3441 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
3442
3443 if (rtwsta_link) {
3444 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
3445
3446 if (link_sta->eht_cap.has_eht)
3447 __get_sta_eht_pkt_padding(rtwdev, link_sta, pads);
3448 else if (link_sta->he_cap.has_he)
3449 __get_sta_he_pkt_padding(rtwdev, link_sta, pads);
3450 }
3451
3452 if (vif->p2p)
3453 lowest_rate = RTW89_HW_RATE_OFDM6;
3454 else if (chan->band_type == RTW89_BAND_2G)
3455 lowest_rate = RTW89_HW_RATE_CCK1;
3456 else
3457 lowest_rate = RTW89_HW_RATE_OFDM6;
3458
3459 skb_put(skb, len);
3460 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data;
3461
3462 h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) |
3463 le32_encode_bits(1, CCTLINFO_G7_C0_OP);
3464
3465 h2c->w0 = le32_encode_bits(1, CCTLINFO_G7_W0_DISRTSFB) |
3466 le32_encode_bits(1, CCTLINFO_G7_W0_DISDATAFB);
3467 h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_DISRTSFB |
3468 CCTLINFO_G7_W0_DISDATAFB);
3469
3470 h2c->w1 = le32_encode_bits(lowest_rate, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE);
3471 h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE);
3472
3473 h2c->w2 = le32_encode_bits(0, CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL);
3474 h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL);
3475
3476 h2c->w3 = le32_encode_bits(0, CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL);
3477 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL);
3478
3479 h2c->w4 = le32_encode_bits(rtwvif_link->port, CCTLINFO_G7_W4_MULTI_PORT_ID);
3480 h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_MULTI_PORT_ID);
3481
3482 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) {
3483 h2c->w4 |= le32_encode_bits(0, CCTLINFO_G7_W4_DATA_DCM);
3484 h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_DATA_DCM);
3485 }
3486
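/*
 * The firmware field describes active subchannels, so the EHT
 * puncturing bitmap (1 = punctured) is inverted before being written.
 */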
3487 if (bss_conf->eht_support) {
3488 u16 punct = bss_conf->chanreq.oper.punctured;
3489
3490 h2c->w4 |= le32_encode_bits(~punct,
3491 CCTLINFO_G7_W4_ACT_SUBCH_CBW);
3492 h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_ACT_SUBCH_CBW);
3493 }
3494
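/* Per-bandwidth nominal packet padding: 20/40/80/160/320 MHz. */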
3495 h2c->w5 = le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_20],
3496 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) |
3497 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_40],
3498 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) |
3499 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_80],
3500 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) |
3501 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_160],
3502 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) |
3503 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_320],
3504 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4);
3505 h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0 |
3506 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1 |
3507 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2 |
3508 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3 |
3509 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4);
3510
3511 h2c->w6 = le32_encode_bits(vif->cfg.aid, CCTLINFO_G7_W6_AID12_PAID) |
3512 le32_encode_bits(vif->type == NL80211_IFTYPE_STATION ? 1 : 0,
3513 CCTLINFO_G7_W6_ULDL);
3514 h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_AID12_PAID | CCTLINFO_G7_W6_ULDL);
3515
3516 if (rtwsta_link) {
3517 h2c->w8 = le32_encode_bits(link_sta->he_cap.has_he,
3518 CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT);
3519 h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT);
3520 }
3521
3522 rcu_read_unlock();
3523
3524 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3525 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
3526 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1,
3527 len);
3528
3529 ret = rtw89_h2c_tx(rtwdev, skb, false);
3530 if (ret) {
3531 rtw89_err(rtwdev, "failed to send h2c\n");
3532 goto fail;
3533 }
3534
3535 return 0;
3536 fail:
3537 dev_kfree_skb_any(skb);
3538
3539 return ret;
3540 }
3541 EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl_g7);
3542
3543 int rtw89_fw_h2c_ampdu_cmac_tbl_g7(struct rtw89_dev *rtwdev,
3544 struct rtw89_vif_link *rtwvif_link,
3545 struct rtw89_sta_link *rtwsta_link)
3546 {
3547 struct rtw89_sta *rtwsta = rtwsta_link->rtwsta;
3548 struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
3549 u32 len = sizeof(*h2c);
3550 struct sk_buff *skb;
3551 u16 agg_num = 0;
3552 u8 ba_bmap = 0;
3553 int ret;
3554 u8 tid;
3555
3556 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
3557 if (!skb) {
3558 rtw89_err(rtwdev, "failed to alloc skb for ampdu cmac g7\n");
3559 return -ENOMEM;
3560 }
3561 skb_put(skb, len);
3562 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data;
3563
3564 for_each_set_bit(tid, rtwsta->ampdu_map, IEEE80211_NUM_TIDS) {
3565 if (agg_num == 0)
3566 agg_num = rtwsta->ampdu_params[tid].agg_num;
3567 else
3568 agg_num = min(agg_num, rtwsta->ampdu_params[tid].agg_num);
3569 }
3570
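/*
 * agg_num now holds the smallest A-MPDU aggregation number negotiated
 * across the active TIDs; map it onto the firmware's BA bitmap size
 * code (the codes below are chip-defined and not monotonic).
 */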
3571 if (agg_num <= 0x20)
3572 ba_bmap = 3;
3573 else if (agg_num > 0x20 && agg_num <= 0x40)
3574 ba_bmap = 0;
3575 else if (agg_num > 0x40 && agg_num <= 0x80)
3576 ba_bmap = 1;
3577 else if (agg_num > 0x80 && agg_num <= 0x100)
3578 ba_bmap = 2;
3579 else if (agg_num > 0x100 && agg_num <= 0x200)
3580 ba_bmap = 4;
3581 else if (agg_num > 0x200 && agg_num <= 0x400)
3582 ba_bmap = 5;
3583
3584 h2c->c0 = le32_encode_bits(rtwsta_link->mac_id, CCTLINFO_G7_C0_MACID) |
3585 le32_encode_bits(1, CCTLINFO_G7_C0_OP);
3586
3587 h2c->w3 = le32_encode_bits(ba_bmap, CCTLINFO_G7_W3_BA_BMAP);
3588 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_BA_BMAP);
3589
3590 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3591 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
3592 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 0,
3593 len);
3594
3595 ret = rtw89_h2c_tx(rtwdev, skb, false);
3596 if (ret) {
3597 rtw89_err(rtwdev, "failed to send h2c\n");
3598 goto fail;
3599 }
3600
3601 return 0;
3602 fail:
3603 dev_kfree_skb_any(skb);
3604
3605 return ret;
3606 }
3607 EXPORT_SYMBOL(rtw89_fw_h2c_ampdu_cmac_tbl_g7);
3608
3609 int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
3610 struct rtw89_sta_link *rtwsta_link)
3611 {
3612 const struct rtw89_chip_info *chip = rtwdev->chip;
3613 struct sk_buff *skb;
3614 int ret;
3615
3616 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
3617 if (!skb) {
3618 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
3619 return -ENOMEM;
3620 }
3621 skb_put(skb, H2C_CMC_TBL_LEN);
3622 SET_CTRL_INFO_MACID(skb->data, rtwsta_link->mac_id);
3623 SET_CTRL_INFO_OPERATION(skb->data, 1);
3624 if (rtwsta_link->cctl_tx_time) {
3625 SET_CMC_TBL_AMPDU_TIME_SEL(skb->data, 1);
3626 SET_CMC_TBL_AMPDU_MAX_TIME(skb->data, rtwsta_link->ampdu_max_time);
3627 }
3628 if (rtwsta_link->cctl_tx_retry_limit) {
3629 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 1);
3630 SET_CMC_TBL_DATA_TX_CNT_LMT(skb->data, rtwsta_link->data_tx_cnt_lmt);
3631 }
3632
3633 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3634 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
3635 chip->h2c_cctl_func_id, 0, 1,
3636 H2C_CMC_TBL_LEN);
3637
3638 ret = rtw89_h2c_tx(rtwdev, skb, false);
3639 if (ret) {
3640 rtw89_err(rtwdev, "failed to send h2c\n");
3641 goto fail;
3642 }
3643
3644 return 0;
3645 fail:
3646 dev_kfree_skb_any(skb);
3647
3648 return ret;
3649 }
3650 EXPORT_SYMBOL(rtw89_fw_h2c_txtime_cmac_tbl);
3651
3652 int rtw89_fw_h2c_txtime_cmac_tbl_g7(struct rtw89_dev *rtwdev,
3653 struct rtw89_sta_link *rtwsta_link)
3654 {
3655 struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
3656 u32 len = sizeof(*h2c);
3657 struct sk_buff *skb;
3658 int ret;
3659
3660 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
3661 if (!skb) {
3662 rtw89_err(rtwdev, "failed to alloc skb for txtime_cmac_g7\n");
3663 return -ENOMEM;
3664 }
3665 skb_put(skb, len);
3666 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data;
3667
3668 h2c->c0 = le32_encode_bits(rtwsta_link->mac_id, CCTLINFO_G7_C0_MACID) |
3669 le32_encode_bits(1, CCTLINFO_G7_C0_OP);
3670
3671 if (rtwsta_link->cctl_tx_time) {
3672 h2c->w3 |= le32_encode_bits(1, CCTLINFO_G7_W3_AMPDU_TIME_SEL);
3673 h2c->m3 |= cpu_to_le32(CCTLINFO_G7_W3_AMPDU_TIME_SEL);
3674
3675 h2c->w2 |= le32_encode_bits(rtwsta_link->ampdu_max_time,
3676 CCTLINFO_G7_W2_AMPDU_MAX_TIME);
3677 h2c->m2 |= cpu_to_le32(CCTLINFO_G7_W2_AMPDU_MAX_TIME);
3678 }
3679 if (rtwsta_link->cctl_tx_retry_limit) {
3680 h2c->w2 |= le32_encode_bits(1, CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL) |
3681 le32_encode_bits(rtwsta_link->data_tx_cnt_lmt,
3682 CCTLINFO_G7_W2_DATA_TX_CNT_LMT);
3683 h2c->m2 |= cpu_to_le32(CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL |
3684 CCTLINFO_G7_W2_DATA_TX_CNT_LMT);
3685 }
3686
3687 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3688 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
3689 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1,
3690 len);
3691
3692 ret = rtw89_h2c_tx(rtwdev, skb, false);
3693 if (ret) {
3694 rtw89_err(rtwdev, "failed to send h2c\n");
3695 goto fail;
3696 }
3697
3698 return 0;
3699 fail:
3700 dev_kfree_skb_any(skb);
3701
3702 return ret;
3703 }
3704 EXPORT_SYMBOL(rtw89_fw_h2c_txtime_cmac_tbl_g7);
3705
3706 int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev,
3707 struct rtw89_sta_link *rtwsta_link)
3708 {
3709 const struct rtw89_chip_info *chip = rtwdev->chip;
3710 struct sk_buff *skb;
3711 int ret;
3712
3713 if (chip->h2c_cctl_func_id != H2C_FUNC_MAC_CCTLINFO_UD)
3714 return 0;
3715
3716 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
3717 if (!skb) {
3718 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
3719 return -ENOMEM;
3720 }
3721 skb_put(skb, H2C_CMC_TBL_LEN);
3722 SET_CTRL_INFO_MACID(skb->data, rtwsta_link->mac_id);
3723 SET_CTRL_INFO_OPERATION(skb->data, 1);
3724
3725 __rtw89_fw_h2c_set_tx_path(rtwdev, skb);
3726
3727 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3728 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
3729 H2C_FUNC_MAC_CCTLINFO_UD, 0, 1,
3730 H2C_CMC_TBL_LEN);
3731
3732 ret = rtw89_h2c_tx(rtwdev, skb, false);
3733 if (ret) {
3734 rtw89_err(rtwdev, "failed to send h2c\n");
3735 goto fail;
3736 }
3737
3738 return 0;
3739 fail:
3740 dev_kfree_skb_any(skb);
3741
3742 return ret;
3743 }
3744
3745 int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
3746 struct rtw89_vif_link *rtwvif_link)
3747 {
3748 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
3749 rtwvif_link->chanctx_idx);
3750 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
3751 struct rtw89_h2c_bcn_upd *h2c;
3752 struct sk_buff *skb_beacon;
3753 struct ieee80211_hdr *hdr;
3754 u32 len = sizeof(*h2c);
3755 struct sk_buff *skb;
3756 int bcn_total_len;
3757 u16 beacon_rate;
3758 u16 tim_offset;
3759 void *noa_data;
3760 u8 noa_len;
3761 int ret;
3762
3763 if (vif->p2p)
3764 beacon_rate = RTW89_HW_RATE_OFDM6;
3765 else if (chan->band_type == RTW89_BAND_2G)
3766 beacon_rate = RTW89_HW_RATE_CCK1;
3767 else
3768 beacon_rate = RTW89_HW_RATE_OFDM6;
3769
3770 skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset,
3771 NULL, 0);
3772 if (!skb_beacon) {
3773 rtw89_err(rtwdev, "failed to get beacon skb\n");
3774 return -ENOMEM;
3775 }
3776
3777 noa_len = rtw89_p2p_noa_fetch(rtwvif_link, &noa_data);
3778 if (noa_len &&
3779 (noa_len <= skb_tailroom(skb_beacon) ||
3780 pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) {
3781 skb_put_data(skb_beacon, noa_data, noa_len);
3782 }
3783
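/*
 * ieee80211_beacon_get_tim() reports tim_offset from the start of the
 * frame; the firmware appears to expect it relative to the frame body,
 * so the MAC header length is subtracted.
 */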
3784 hdr = (struct ieee80211_hdr *)skb_beacon->data;
3785 tim_offset -= ieee80211_hdrlen(hdr->frame_control);
3786
3787 bcn_total_len = len + skb_beacon->len;
3788 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len);
3789 if (!skb) {
3790 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
3791 dev_kfree_skb_any(skb_beacon);
3792 return -ENOMEM;
3793 }
3794 skb_put(skb, len);
3795 h2c = (struct rtw89_h2c_bcn_upd *)skb->data;
3796
3797 h2c->w0 = le32_encode_bits(rtwvif_link->port, RTW89_H2C_BCN_UPD_W0_PORT) |
3798 le32_encode_bits(0, RTW89_H2C_BCN_UPD_W0_MBSSID) |
3799 le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_BCN_UPD_W0_BAND) |
3800 le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_W0_GRP_IE_OFST);
3801 h2c->w1 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCN_UPD_W1_MACID) |
3802 le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_W1_SSN_SEL) |
3803 le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_W1_SSN_MODE) |
3804 le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_W1_RATE);
3805
3806 skb_put_data(skb, skb_beacon->data, skb_beacon->len);
3807 dev_kfree_skb_any(skb_beacon);
3808
3809 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3810 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
3811 H2C_FUNC_MAC_BCN_UPD, 0, 1,
3812 bcn_total_len);
3813
3814 ret = rtw89_h2c_tx(rtwdev, skb, false);
3815 if (ret) {
3816 rtw89_err(rtwdev, "failed to send h2c\n");
3817 dev_kfree_skb_any(skb);
3818 return ret;
3819 }
3820
3821 return 0;
3822 }
3823 EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon);
3824
3825 int rtw89_fw_h2c_update_beacon_be(struct rtw89_dev *rtwdev,
3826 struct rtw89_vif_link *rtwvif_link)
3827 {
3828 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
3829 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
3830 struct rtw89_h2c_bcn_upd_be *h2c;
3831 struct sk_buff *skb_beacon;
3832 struct ieee80211_hdr *hdr;
3833 u32 len = sizeof(*h2c);
3834 struct sk_buff *skb;
3835 int bcn_total_len;
3836 u16 beacon_rate;
3837 u16 tim_offset;
3838 void *noa_data;
3839 u8 noa_len;
3840 int ret;
3841
3842 if (vif->p2p)
3843 beacon_rate = RTW89_HW_RATE_OFDM6;
3844 else if (chan->band_type == RTW89_BAND_2G)
3845 beacon_rate = RTW89_HW_RATE_CCK1;
3846 else
3847 beacon_rate = RTW89_HW_RATE_OFDM6;
3848
3849 skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset,
3850 NULL, 0);
3851 if (!skb_beacon) {
3852 rtw89_err(rtwdev, "failed to get beacon skb\n");
3853 return -ENOMEM;
3854 }
3855
3856 noa_len = rtw89_p2p_noa_fetch(rtwvif_link, &noa_data);
3857 if (noa_len &&
3858 (noa_len <= skb_tailroom(skb_beacon) ||
3859 pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) {
3860 skb_put_data(skb_beacon, noa_data, noa_len);
3861 }
3862
3863 hdr = (struct ieee80211_hdr *)skb_beacon->data;
3864 tim_offset -= ieee80211_hdrlen(hdr->frame_control);
3865
3866 bcn_total_len = len + skb_beacon->len;
3867 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len);
3868 if (!skb) {
3869 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
3870 dev_kfree_skb_any(skb_beacon);
3871 return -ENOMEM;
3872 }
3873 skb_put(skb, len);
3874 h2c = (struct rtw89_h2c_bcn_upd_be *)skb->data;
3875
3876 h2c->w0 = le32_encode_bits(rtwvif_link->port, RTW89_H2C_BCN_UPD_BE_W0_PORT) |
3877 le32_encode_bits(0, RTW89_H2C_BCN_UPD_BE_W0_MBSSID) |
3878 le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_BCN_UPD_BE_W0_BAND) |
3879 le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_BE_W0_GRP_IE_OFST);
3880 h2c->w1 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCN_UPD_BE_W1_MACID) |
3881 le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_BE_W1_SSN_SEL) |
3882 le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_BE_W1_SSN_MODE) |
3883 le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_BE_W1_RATE);
3884
3885 skb_put_data(skb, skb_beacon->data, skb_beacon->len);
3886 dev_kfree_skb_any(skb_beacon);
3887
3888 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3889 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
3890 H2C_FUNC_MAC_BCN_UPD_BE, 0, 1,
3891 bcn_total_len);
3892
3893 ret = rtw89_h2c_tx(rtwdev, skb, false);
3894 if (ret) {
3895 rtw89_err(rtwdev, "failed to send h2c\n");
3896 goto fail;
3897 }
3898
3899 return 0;
3900
3901 fail:
3902 dev_kfree_skb_any(skb);
3903
3904 return ret;
3905 }
3906 EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon_be);
3907
3908 int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
3909 struct rtw89_vif_link *rtwvif_link,
3910 struct rtw89_sta_link *rtwsta_link,
3911 enum rtw89_upd_mode upd_mode)
3912 {
3913 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
3914 struct rtw89_h2c_role_maintain *h2c;
3915 u32 len = sizeof(*h2c);
3916 struct sk_buff *skb;
3917 u8 self_role;
3918 int ret;
3919
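/*
 * In AP mode a per-station entry is reported as an AP client; in all
 * other cases the role recorded on the vif link is used as-is.
 */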
3920 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) {
3921 if (rtwsta_link)
3922 self_role = RTW89_SELF_ROLE_AP_CLIENT;
3923 else
3924 self_role = rtwvif_link->self_role;
3925 } else {
3926 self_role = rtwvif_link->self_role;
3927 }
3928
3929 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
3930 if (!skb) {
3931 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
3932 return -ENOMEM;
3933 }
3934 skb_put(skb, len);
3935 h2c = (struct rtw89_h2c_role_maintain *)skb->data;
3936
3937 h2c->w0 = le32_encode_bits(mac_id, RTW89_H2C_ROLE_MAINTAIN_W0_MACID) |
3938 le32_encode_bits(self_role, RTW89_H2C_ROLE_MAINTAIN_W0_SELF_ROLE) |
3939 le32_encode_bits(upd_mode, RTW89_H2C_ROLE_MAINTAIN_W0_UPD_MODE) |
3940 le32_encode_bits(rtwvif_link->wifi_role,
3941 RTW89_H2C_ROLE_MAINTAIN_W0_WIFI_ROLE) |
3942 le32_encode_bits(rtwvif_link->mac_idx,
3943 RTW89_H2C_ROLE_MAINTAIN_W0_BAND) |
3944 le32_encode_bits(rtwvif_link->port, RTW89_H2C_ROLE_MAINTAIN_W0_PORT);
3945
3946 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3947 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
3948 H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1,
3949 len);
3950
3951 ret = rtw89_h2c_tx(rtwdev, skb, false);
3952 if (ret) {
3953 rtw89_err(rtwdev, "failed to send h2c\n");
3954 goto fail;
3955 }
3956
3957 return 0;
3958 fail:
3959 dev_kfree_skb_any(skb);
3960
3961 return ret;
3962 }
3963
3964 static enum rtw89_fw_sta_type
3965 rtw89_fw_get_sta_type(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
3966 struct rtw89_sta_link *rtwsta_link)
3967 {
3968 struct ieee80211_bss_conf *bss_conf;
3969 struct ieee80211_link_sta *link_sta;
3970 enum rtw89_fw_sta_type type;
3971
3972 rcu_read_lock();
3973
3974 if (!rtwsta_link)
3975 goto by_vif;
3976
3977 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
3978
3979 if (link_sta->eht_cap.has_eht)
3980 type = RTW89_FW_BE_STA;
3981 else if (link_sta->he_cap.has_he)
3982 type = RTW89_FW_AX_STA;
3983 else
3984 type = RTW89_FW_N_AC_STA;
3985
3986 goto out;
3987
3988 by_vif:
3989 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
3990
3991 if (bss_conf->eht_support)
3992 type = RTW89_FW_BE_STA;
3993 else if (bss_conf->he_support)
3994 type = RTW89_FW_AX_STA;
3995 else
3996 type = RTW89_FW_N_AC_STA;
3997
3998 out:
3999 rcu_read_unlock();
4000
4001 return type;
4002 }
4003
4004 int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
4005 struct rtw89_sta_link *rtwsta_link, bool dis_conn)
4006 {
4007 struct sk_buff *skb;
4008 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
4009 u8 self_role = rtwvif_link->self_role;
4010 enum rtw89_fw_sta_type sta_type;
4011 u8 net_type = rtwvif_link->net_type;
4012 struct rtw89_h2c_join_v1 *h2c_v1;
4013 struct rtw89_h2c_join *h2c;
4014 u32 len = sizeof(*h2c);
4015 bool format_v1 = false;
4016 int ret;
4017
4018 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) {
4019 len = sizeof(*h2c_v1);
4020 format_v1 = true;
4021 }
4022
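/*
 * A station attached to our AP role is reported as an AP client, and
 * on disconnect its net type is forced to NO_LINK.
 */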
4023 if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta_link) {
4024 self_role = RTW89_SELF_ROLE_AP_CLIENT;
4025 net_type = dis_conn ? RTW89_NET_TYPE_NO_LINK : net_type;
4026 }
4027
4028 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4029 if (!skb) {
4030 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
4031 return -ENOMEM;
4032 }
4033 skb_put(skb, len);
4034 h2c = (struct rtw89_h2c_join *)skb->data;
4035
4036 h2c->w0 = le32_encode_bits(mac_id, RTW89_H2C_JOININFO_W0_MACID) |
4037 le32_encode_bits(dis_conn, RTW89_H2C_JOININFO_W0_OP) |
4038 le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_JOININFO_W0_BAND) |
4039 le32_encode_bits(rtwvif_link->wmm, RTW89_H2C_JOININFO_W0_WMM) |
4040 le32_encode_bits(rtwvif_link->trigger, RTW89_H2C_JOININFO_W0_TGR) |
4041 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_ISHESTA) |
4042 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DLBW) |
4043 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_TF_MAC_PAD) |
4044 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DL_T_PE) |
4045 le32_encode_bits(rtwvif_link->port, RTW89_H2C_JOININFO_W0_PORT_ID) |
4046 le32_encode_bits(net_type, RTW89_H2C_JOININFO_W0_NET_TYPE) |
4047 le32_encode_bits(rtwvif_link->wifi_role,
4048 RTW89_H2C_JOININFO_W0_WIFI_ROLE) |
4049 le32_encode_bits(self_role, RTW89_H2C_JOININFO_W0_SELF_ROLE);
4050
4051 if (!format_v1)
4052 goto done;
4053
4054 h2c_v1 = (struct rtw89_h2c_join_v1 *)skb->data;
4055
4056 sta_type = rtw89_fw_get_sta_type(rtwdev, rtwvif_link, rtwsta_link);
4057
4058 h2c_v1->w1 = le32_encode_bits(sta_type, RTW89_H2C_JOININFO_W1_STA_TYPE);
4059 h2c_v1->w2 = 0;
4060
4061 done:
4062 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4063 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
4064 H2C_FUNC_MAC_JOININFO, 0, 1,
4065 len);
4066
4067 ret = rtw89_h2c_tx(rtwdev, skb, false);
4068 if (ret) {
4069 rtw89_err(rtwdev, "failed to send h2c\n");
4070 goto fail;
4071 }
4072
4073 return 0;
4074 fail:
4075 dev_kfree_skb_any(skb);
4076
4077 return ret;
4078 }
4079
4080 int rtw89_fw_h2c_notify_dbcc(struct rtw89_dev *rtwdev, bool en)
4081 {
4082 struct rtw89_h2c_notify_dbcc *h2c;
4083 u32 len = sizeof(*h2c);
4084 struct sk_buff *skb;
4085 int ret;
4086
4087 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4088 if (!skb) {
4089 rtw89_err(rtwdev, "failed to alloc skb for h2c notify dbcc\n");
4090 return -ENOMEM;
4091 }
4092 skb_put(skb, len);
4093 h2c = (struct rtw89_h2c_notify_dbcc *)skb->data;
4094
4095 h2c->w0 = le32_encode_bits(en, RTW89_H2C_NOTIFY_DBCC_EN);
4096
4097 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4098 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
4099 H2C_FUNC_NOTIFY_DBCC, 0, 1,
4100 len);
4101
4102 ret = rtw89_h2c_tx(rtwdev, skb, false);
4103 if (ret) {
4104 rtw89_err(rtwdev, "failed to send h2c\n");
4105 goto fail;
4106 }
4107
4108 return 0;
4109 fail:
4110 dev_kfree_skb_any(skb);
4111
4112 return ret;
4113 }
4114
4115 int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
4116 bool pause)
4117 {
4118 struct rtw89_fw_macid_pause_sleep_grp *h2c_new;
4119 struct rtw89_fw_macid_pause_grp *h2c;
4120 __le32 set = cpu_to_le32(BIT(sh));
4121 u8 h2c_macid_pause_id;
4122 struct sk_buff *skb;
4123 u32 len;
4124 int ret;
4125
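/*
 * Firmware with the MACID_PAUSE_SLEEP feature takes a larger command
 * carrying separate pause and sleep bitmaps; older firmware only
 * understands the plain pause bitmap layout.
 */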
4126 if (RTW89_CHK_FW_FEATURE(MACID_PAUSE_SLEEP, &rtwdev->fw)) {
4127 h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE_SLEEP;
4128 len = sizeof(*h2c_new);
4129 } else {
4130 h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE;
4131 len = sizeof(*h2c);
4132 }
4133
4134 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4135 if (!skb) {
4136 rtw89_err(rtwdev, "failed to alloc skb for h2c macid pause\n");
4137 return -ENOMEM;
4138 }
4139 skb_put(skb, len);
4140
4141 if (h2c_macid_pause_id == H2C_FUNC_MAC_MACID_PAUSE_SLEEP) {
4142 h2c_new = (struct rtw89_fw_macid_pause_sleep_grp *)skb->data;
4143
4144 h2c_new->n[0].pause_mask_grp[grp] = set;
4145 h2c_new->n[0].sleep_mask_grp[grp] = set;
4146 if (pause) {
4147 h2c_new->n[0].pause_grp[grp] = set;
4148 h2c_new->n[0].sleep_grp[grp] = set;
4149 }
4150 } else {
4151 h2c = (struct rtw89_fw_macid_pause_grp *)skb->data;
4152
4153 h2c->mask_grp[grp] = set;
4154 if (pause)
4155 h2c->pause_grp[grp] = set;
4156 }
4157
4158 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4159 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
4160 h2c_macid_pause_id, 1, 0,
4161 len);
4162
4163 ret = rtw89_h2c_tx(rtwdev, skb, false);
4164 if (ret) {
4165 rtw89_err(rtwdev, "failed to send h2c\n");
4166 goto fail;
4167 }
4168
4169 return 0;
4170 fail:
4171 dev_kfree_skb_any(skb);
4172
4173 return ret;
4174 }
4175
4176 #define H2C_EDCA_LEN 12
4177 int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
4178 u8 ac, u32 val)
4179 {
4180 struct sk_buff *skb;
4181 int ret;
4182
4183 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_EDCA_LEN);
4184 if (!skb) {
4185 rtw89_err(rtwdev, "failed to alloc skb for h2c edca\n");
4186 return -ENOMEM;
4187 }
4188 skb_put(skb, H2C_EDCA_LEN);
4189 RTW89_SET_EDCA_SEL(skb->data, 0);
4190 RTW89_SET_EDCA_BAND(skb->data, rtwvif_link->mac_idx);
4191 RTW89_SET_EDCA_WMM(skb->data, 0);
4192 RTW89_SET_EDCA_AC(skb->data, ac);
4193 RTW89_SET_EDCA_PARAM(skb->data, val);
4194
4195 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4196 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
4197 H2C_FUNC_USR_EDCA, 0, 1,
4198 H2C_EDCA_LEN);
4199
4200 ret = rtw89_h2c_tx(rtwdev, skb, false);
4201 if (ret) {
4202 rtw89_err(rtwdev, "failed to send h2c\n");
4203 goto fail;
4204 }
4205
4206 return 0;
4207 fail:
4208 dev_kfree_skb_any(skb);
4209
4210 return ret;
4211 }
4212
4213 #define H2C_TSF32_TOGL_LEN 4
4214 int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev,
4215 struct rtw89_vif_link *rtwvif_link,
4216 bool en)
4217 {
4218 struct sk_buff *skb;
4219 u16 early_us = en ? 2000 : 0;
4220 u8 *cmd;
4221 int ret;
4222
4223 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_TSF32_TOGL_LEN);
4224 if (!skb) {
4225 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n");
4226 return -ENOMEM;
4227 }
4228 skb_put(skb, H2C_TSF32_TOGL_LEN);
4229 cmd = skb->data;
4230
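/*
 * 'early_us' looks like a lead time in microseconds for the firmware's
 * TSF bit-32 toggle report: 2000 when enabling, 0 when disabling.
 */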
4231 RTW89_SET_FWCMD_TSF32_TOGL_BAND(cmd, rtwvif_link->mac_idx);
4232 RTW89_SET_FWCMD_TSF32_TOGL_EN(cmd, en);
4233 RTW89_SET_FWCMD_TSF32_TOGL_PORT(cmd, rtwvif_link->port);
4234 RTW89_SET_FWCMD_TSF32_TOGL_EARLY(cmd, early_us);
4235
4236 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4237 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
4238 H2C_FUNC_TSF32_TOGL, 0, 0,
4239 H2C_TSF32_TOGL_LEN);
4240
4241 ret = rtw89_h2c_tx(rtwdev, skb, false);
4242 if (ret) {
4243 rtw89_err(rtwdev, "failed to send h2c\n");
4244 goto fail;
4245 }
4246
4247 return 0;
4248 fail:
4249 dev_kfree_skb_any(skb);
4250
4251 return ret;
4252 }
4253
4254 #define H2C_OFLD_CFG_LEN 8
4255 int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev)
4256 {
4257 static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00};
4258 struct sk_buff *skb;
4259 int ret;
4260
4261 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_OFLD_CFG_LEN);
4262 if (!skb) {
4263 rtw89_err(rtwdev, "failed to alloc skb for h2c ofld\n");
4264 return -ENOMEM;
4265 }
4266 skb_put_data(skb, cfg, H2C_OFLD_CFG_LEN);
4267
4268 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4269 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
4270 H2C_FUNC_OFLD_CFG, 0, 1,
4271 H2C_OFLD_CFG_LEN);
4272
4273 ret = rtw89_h2c_tx(rtwdev, skb, false);
4274 if (ret) {
4275 rtw89_err(rtwdev, "failed to send h2c\n");
4276 goto fail;
4277 }
4278
4279 return 0;
4280 fail:
4281 dev_kfree_skb_any(skb);
4282
4283 return ret;
4284 }
4285
4286 int rtw89_fw_h2c_tx_duty(struct rtw89_dev *rtwdev, u8 lv)
4287 {
4288 struct rtw89_h2c_tx_duty *h2c;
4289 u32 len = sizeof(*h2c);
4290 struct sk_buff *skb;
4291 u16 pause, active;
4292 int ret;
4293
4294 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4295 if (!skb) {
4296 rtw89_err(rtwdev, "failed to alloc skb for h2c tx duty\n");
4297 return -ENOMEM;
4298 }
4299
4300 skb_put(skb, len);
4301 h2c = (struct rtw89_h2c_tx_duty *)skb->data;
4302
4303 static_assert(RTW89_THERMAL_PROT_LV_MAX * RTW89_THERMAL_PROT_STEP < 100);
4304
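/*
 * Each thermal protection level trades RTW89_THERMAL_PROT_STEP percent
 * of airtime for pause time; level 0 or an out-of-range level stops the
 * TX duty cycling entirely.
 */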
4305 if (lv == 0 || lv > RTW89_THERMAL_PROT_LV_MAX) {
4306 h2c->w1 = le32_encode_bits(1, RTW89_H2C_TX_DUTY_W1_STOP);
4307 } else {
4308 active = 100 - lv * RTW89_THERMAL_PROT_STEP;
4309 pause = 100 - active;
4310
4311 h2c->w0 = le32_encode_bits(pause, RTW89_H2C_TX_DUTY_W0_PAUSE_INTVL_MASK) |
4312 le32_encode_bits(active, RTW89_H2C_TX_DUTY_W0_TX_INTVL_MASK);
4313 }
4314
4315 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4316 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
4317 H2C_FUNC_TX_DUTY, 0, 0, len);
4318
4319 ret = rtw89_h2c_tx(rtwdev, skb, false);
4320 if (ret) {
4321 rtw89_err(rtwdev, "failed to send h2c\n");
4322 goto fail;
4323 }
4324
4325 return 0;
4326 fail:
4327 dev_kfree_skb_any(skb);
4328
4329 return ret;
4330 }
4331
4332 int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev,
4333 struct rtw89_vif_link *rtwvif_link,
4334 bool connect)
4335 {
4336 struct ieee80211_bss_conf *bss_conf;
4337 s32 thold = RTW89_DEFAULT_CQM_THOLD;
4338 u32 hyst = RTW89_DEFAULT_CQM_HYST;
4339 struct rtw89_h2c_bcnfltr *h2c;
4340 u32 len = sizeof(*h2c);
4341 struct sk_buff *skb;
4342 int ret;
4343
4344 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw))
4345 return -EINVAL;
4346
4347 if (!rtwvif_link || rtwvif_link->net_type != RTW89_NET_TYPE_INFRA)
4348 return -EINVAL;
4349
4350 rcu_read_lock();
4351
4352 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, false);
4353
4354 if (bss_conf->cqm_rssi_hyst)
4355 hyst = bss_conf->cqm_rssi_hyst;
4356 if (bss_conf->cqm_rssi_thold)
4357 thold = bss_conf->cqm_rssi_thold;
4358
4359 rcu_read_unlock();
4360
4361 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4362 if (!skb) {
4363 rtw89_err(rtwdev, "failed to alloc skb for h2c bcn filter\n");
4364 return -ENOMEM;
4365 }
4366
4367 skb_put(skb, len);
4368 h2c = (struct rtw89_h2c_bcnfltr *)skb->data;
4369
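/*
 * The CQM threshold is biased by MAX_RSSI, presumably so the negative
 * dBm value fits the firmware's unsigned RSSI field.
 */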
4370 h2c->w0 = le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_RSSI) |
4371 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_BCN) |
4372 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_EN) |
4373 le32_encode_bits(RTW89_BCN_FLTR_OFFLOAD_MODE_DEFAULT,
4374 RTW89_H2C_BCNFLTR_W0_MODE) |
4375 le32_encode_bits(RTW89_BCN_LOSS_CNT, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT) |
4376 le32_encode_bits(hyst, RTW89_H2C_BCNFLTR_W0_RSSI_HYST) |
4377 le32_encode_bits(thold + MAX_RSSI,
4378 RTW89_H2C_BCNFLTR_W0_RSSI_THRESHOLD) |
4379 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCNFLTR_W0_MAC_ID);
4380
4381 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4382 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
4383 H2C_FUNC_CFG_BCNFLTR, 0, 1, len);
4384
4385 ret = rtw89_h2c_tx(rtwdev, skb, false);
4386 if (ret) {
4387 rtw89_err(rtwdev, "failed to send h2c\n");
4388 goto fail;
4389 }
4390
4391 return 0;
4392 fail:
4393 dev_kfree_skb_any(skb);
4394
4395 return ret;
4396 }
4397
4398 int rtw89_fw_h2c_rssi_offload(struct rtw89_dev *rtwdev,
4399 struct rtw89_rx_phy_ppdu *phy_ppdu)
4400 {
4401 struct rtw89_h2c_ofld_rssi *h2c;
4402 u32 len = sizeof(*h2c);
4403 struct sk_buff *skb;
4404 s8 rssi;
4405 int ret;
4406
4407 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw))
4408 return -EINVAL;
4409
4410 if (!phy_ppdu)
4411 return -EINVAL;
4412
4413 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4414 if (!skb) {
4415 rtw89_err(rtwdev, "failed to alloc skb for h2c rssi\n");
4416 return -ENOMEM;
4417 }
4418
4419 rssi = phy_ppdu->rssi_avg >> RSSI_FACTOR;
4420 skb_put(skb, len);
4421 h2c = (struct rtw89_h2c_ofld_rssi *)skb->data;
4422
4423 h2c->w0 = le32_encode_bits(phy_ppdu->mac_id, RTW89_H2C_OFLD_RSSI_W0_MACID) |
4424 le32_encode_bits(1, RTW89_H2C_OFLD_RSSI_W0_NUM);
4425 h2c->w1 = le32_encode_bits(rssi, RTW89_H2C_OFLD_RSSI_W1_VAL);
4426
4427 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4428 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
4429 H2C_FUNC_OFLD_RSSI, 0, 1, len);
4430
4431 ret = rtw89_h2c_tx(rtwdev, skb, false);
4432 if (ret) {
4433 rtw89_err(rtwdev, "failed to send h2c\n");
4434 goto fail;
4435 }
4436
4437 return 0;
4438 fail:
4439 dev_kfree_skb_any(skb);
4440
4441 return ret;
4442 }
4443
4444 int rtw89_fw_h2c_tp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link)
4445 {
4446 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
4447 struct rtw89_traffic_stats *stats = &rtwvif->stats;
4448 struct rtw89_h2c_ofld *h2c;
4449 u32 len = sizeof(*h2c);
4450 struct sk_buff *skb;
4451 int ret;
4452
4453 if (rtwvif_link->net_type != RTW89_NET_TYPE_INFRA)
4454 return -EINVAL;
4455
4456 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4457 if (!skb) {
4458 rtw89_err(rtwdev, "failed to alloc skb for h2c tp\n");
4459 return -ENOMEM;
4460 }
4461
4462 skb_put(skb, len);
4463 h2c = (struct rtw89_h2c_ofld *)skb->data;
4464
4465 h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_OFLD_W0_MAC_ID) |
4466 le32_encode_bits(stats->tx_throughput, RTW89_H2C_OFLD_W0_TX_TP) |
4467 le32_encode_bits(stats->rx_throughput, RTW89_H2C_OFLD_W0_RX_TP);
4468
4469 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4470 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
4471 H2C_FUNC_OFLD_TP, 0, 1, len);
4472
4473 ret = rtw89_h2c_tx(rtwdev, skb, false);
4474 if (ret) {
4475 rtw89_err(rtwdev, "failed to send h2c\n");
4476 goto fail;
4477 }
4478
4479 return 0;
4480 fail:
4481 dev_kfree_skb_any(skb);
4482
4483 return ret;
4484 }
4485
4486 int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi)
4487 {
4488 const struct rtw89_chip_info *chip = rtwdev->chip;
4489 struct rtw89_h2c_ra_v1 *h2c_v1;
4490 struct rtw89_h2c_ra *h2c;
4491 u32 len = sizeof(*h2c);
4492 bool format_v1 = false;
4493 struct sk_buff *skb;
4494 int ret;
4495
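/* BE-generation chips use the larger v1 layout with EHT mode/BW fields in word 4. */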
4496 if (chip->chip_gen == RTW89_CHIP_BE) {
4497 len = sizeof(*h2c_v1);
4498 format_v1 = true;
4499 }
4500
4501 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4502 if (!skb) {
4503 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
4504 return -ENOMEM;
4505 }
4506 skb_put(skb, len);
4507 h2c = (struct rtw89_h2c_ra *)skb->data;
4508 rtw89_debug(rtwdev, RTW89_DBG_RA,
4509 "ra cmd msk: %llx ", ra->ra_mask);
4510
4511 h2c->w0 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_W0_MODE) |
4512 le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_W0_BW_CAP) |
4513 le32_encode_bits(ra->macid, RTW89_H2C_RA_W0_MACID) |
4514 le32_encode_bits(ra->dcm_cap, RTW89_H2C_RA_W0_DCM) |
4515 le32_encode_bits(ra->er_cap, RTW89_H2C_RA_W0_ER) |
4516 le32_encode_bits(ra->init_rate_lv, RTW89_H2C_RA_W0_INIT_RATE_LV) |
4517 le32_encode_bits(ra->upd_all, RTW89_H2C_RA_W0_UPD_ALL) |
4518 le32_encode_bits(ra->en_sgi, RTW89_H2C_RA_W0_SGI) |
4519 le32_encode_bits(ra->ldpc_cap, RTW89_H2C_RA_W0_LDPC) |
4520 le32_encode_bits(ra->stbc_cap, RTW89_H2C_RA_W0_STBC) |
4521 le32_encode_bits(ra->ss_num, RTW89_H2C_RA_W0_SS_NUM) |
4522 le32_encode_bits(ra->giltf, RTW89_H2C_RA_W0_GILTF) |
4523 le32_encode_bits(ra->upd_bw_nss_mask, RTW89_H2C_RA_W0_UPD_BW_NSS_MASK) |
4524 le32_encode_bits(ra->upd_mask, RTW89_H2C_RA_W0_UPD_MASK);
4525 h2c->w1 = le32_encode_bits(ra->ra_mask, RTW89_H2C_RA_W1_RAMASK_LO32);
4526 h2c->w2 = le32_encode_bits(ra->ra_mask >> 32, RTW89_H2C_RA_W2_RAMASK_HI32);
4527 h2c->w3 = le32_encode_bits(ra->fix_giltf_en, RTW89_H2C_RA_W3_FIX_GILTF_EN) |
4528 le32_encode_bits(ra->fix_giltf, RTW89_H2C_RA_W3_FIX_GILTF);
4529
4530 if (!format_v1)
4531 goto csi;
4532
4533 h2c_v1 = (struct rtw89_h2c_ra_v1 *)h2c;
4534 h2c_v1->w4 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_V1_W4_MODE_EHT) |
4535 le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_V1_W4_BW_EHT);
4536
4537 csi:
4538 if (!csi)
4539 goto done;
4540
4541 h2c->w2 |= le32_encode_bits(1, RTW89_H2C_RA_W2_BFEE_CSI_CTL);
4542 h2c->w3 |= le32_encode_bits(ra->band_num, RTW89_H2C_RA_W3_BAND_NUM) |
4543 le32_encode_bits(ra->cr_tbl_sel, RTW89_H2C_RA_W3_CR_TBL_SEL) |
4544 le32_encode_bits(ra->fixed_csi_rate_en, RTW89_H2C_RA_W3_FIXED_CSI_RATE_EN) |
4545 le32_encode_bits(ra->ra_csi_rate_en, RTW89_H2C_RA_W3_RA_CSI_RATE_EN) |
4546 le32_encode_bits(ra->csi_mcs_ss_idx, RTW89_H2C_RA_W3_FIXED_CSI_MCS_SS_IDX) |
4547 le32_encode_bits(ra->csi_mode, RTW89_H2C_RA_W3_FIXED_CSI_MODE) |
4548 le32_encode_bits(ra->csi_gi_ltf, RTW89_H2C_RA_W3_FIXED_CSI_GI_LTF) |
4549 le32_encode_bits(ra->csi_bw, RTW89_H2C_RA_W3_FIXED_CSI_BW);
4550
4551 done:
4552 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4553 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RA,
4554 H2C_FUNC_OUTSRC_RA_MACIDCFG, 0, 0,
4555 len);
4556
4557 ret = rtw89_h2c_tx(rtwdev, skb, false);
4558 if (ret) {
4559 rtw89_err(rtwdev, "failed to send h2c\n");
4560 goto fail;
4561 }
4562
4563 return 0;
4564 fail:
4565 dev_kfree_skb_any(skb);
4566
4567 return ret;
4568 }
4569
4570 int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev, u8 type)
4571 {
4572 struct rtw89_btc *btc = &rtwdev->btc;
4573 struct rtw89_btc_dm *dm = &btc->dm;
4574 struct rtw89_btc_init_info *init_info = &dm->init_info.init;
4575 struct rtw89_btc_module *module = &init_info->module;
4576 struct rtw89_btc_ant_info *ant = &module->ant;
4577 struct rtw89_h2c_cxinit *h2c;
4578 u32 len = sizeof(*h2c);
4579 struct sk_buff *skb;
4580 int ret;
4581
4582 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4583 if (!skb) {
4584 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init\n");
4585 return -ENOMEM;
4586 }
4587 skb_put(skb, len);
4588 h2c = (struct rtw89_h2c_cxinit *)skb->data;
4589
4590 h2c->hdr.type = type;
4591 h2c->hdr.len = len - H2C_LEN_CXDRVHDR;
4592
4593 h2c->ant_type = ant->type;
4594 h2c->ant_num = ant->num;
4595 h2c->ant_iso = ant->isolation;
4596 h2c->ant_info =
4597 u8_encode_bits(ant->single_pos, RTW89_H2C_CXINIT_ANT_INFO_POS) |
4598 u8_encode_bits(ant->diversity, RTW89_H2C_CXINIT_ANT_INFO_DIVERSITY) |
4599 u8_encode_bits(ant->btg_pos, RTW89_H2C_CXINIT_ANT_INFO_BTG_POS) |
4600 u8_encode_bits(ant->stream_cnt, RTW89_H2C_CXINIT_ANT_INFO_STREAM_CNT);
4601
4602 h2c->mod_rfe = module->rfe_type;
4603 h2c->mod_cv = module->cv;
4604 h2c->mod_info =
4605 u8_encode_bits(module->bt_solo, RTW89_H2C_CXINIT_MOD_INFO_BT_SOLO) |
4606 u8_encode_bits(module->bt_pos, RTW89_H2C_CXINIT_MOD_INFO_BT_POS) |
4607 u8_encode_bits(module->switch_type, RTW89_H2C_CXINIT_MOD_INFO_SW_TYPE) |
4608 u8_encode_bits(module->wa_type, RTW89_H2C_CXINIT_MOD_INFO_WA_TYPE);
4609 h2c->mod_adie_kt = module->kt_ver_adie;
4610 h2c->wl_gch = init_info->wl_guard_ch;
4611
4612 h2c->info =
4613 u8_encode_bits(init_info->wl_only, RTW89_H2C_CXINIT_INFO_WL_ONLY) |
4614 u8_encode_bits(init_info->wl_init_ok, RTW89_H2C_CXINIT_INFO_WL_INITOK) |
4615 u8_encode_bits(init_info->dbcc_en, RTW89_H2C_CXINIT_INFO_DBCC_EN) |
4616 u8_encode_bits(init_info->cx_other, RTW89_H2C_CXINIT_INFO_CX_OTHER) |
4617 u8_encode_bits(init_info->bt_only, RTW89_H2C_CXINIT_INFO_BT_ONLY);
4618
4619 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4620 H2C_CAT_OUTSRC, BTFC_SET,
4621 SET_DRV_INFO, 0, 0,
4622 len);
4623
4624 ret = rtw89_h2c_tx(rtwdev, skb, false);
4625 if (ret) {
4626 rtw89_err(rtwdev, "failed to send h2c\n");
4627 goto fail;
4628 }
4629
4630 return 0;
4631 fail:
4632 dev_kfree_skb_any(skb);
4633
4634 return ret;
4635 }
4636
4637 int rtw89_fw_h2c_cxdrv_init_v7(struct rtw89_dev *rtwdev, u8 type)
4638 {
4639 struct rtw89_btc *btc = &rtwdev->btc;
4640 struct rtw89_btc_dm *dm = &btc->dm;
4641 struct rtw89_btc_init_info_v7 *init_info = &dm->init_info.init_v7;
4642 struct rtw89_h2c_cxinit_v7 *h2c;
4643 u32 len = sizeof(*h2c);
4644 struct sk_buff *skb;
4645 int ret;
4646
4647 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4648 if (!skb) {
4649 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init_v7\n");
4650 return -ENOMEM;
4651 }
4652 skb_put(skb, len);
4653 h2c = (struct rtw89_h2c_cxinit_v7 *)skb->data;
4654
4655 h2c->hdr.type = type;
4656 h2c->hdr.ver = btc->ver->fcxinit;
4657 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7;
4658 h2c->init = *init_info;
4659
4660 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4661 H2C_CAT_OUTSRC, BTFC_SET,
4662 SET_DRV_INFO, 0, 0,
4663 len);
4664
4665 ret = rtw89_h2c_tx(rtwdev, skb, false);
4666 if (ret) {
4667 rtw89_err(rtwdev, "failed to send h2c\n");
4668 goto fail;
4669 }
4670
4671 return 0;
4672 fail:
4673 dev_kfree_skb_any(skb);
4674
4675 return ret;
4676 }
4677
4678 #define PORT_DATA_OFFSET 4
4679 #define H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN 12
4680 #define H2C_LEN_CXDRVINFO_ROLE_SIZE(max_role_num) \
4681 (4 + 12 * (max_role_num) + H2C_LEN_CXDRVHDR)
4682
4683 int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev, u8 type)
4684 {
4685 struct rtw89_btc *btc = &rtwdev->btc;
4686 const struct rtw89_btc_ver *ver = btc->ver;
4687 struct rtw89_btc_wl_info *wl = &btc->cx.wl;
4688 struct rtw89_btc_wl_role_info *role_info = &wl->role_info;
4689 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
4690 struct rtw89_btc_wl_active_role *active = role_info->active_role;
4691 struct sk_buff *skb;
4692 u32 len;
4693 u8 offset = 0;
4694 u8 *cmd;
4695 int ret;
4696 int i;
4697
4698 len = H2C_LEN_CXDRVINFO_ROLE_SIZE(ver->max_role_num);
4699
4700 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4701 if (!skb) {
4702 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
4703 return -ENOMEM;
4704 }
4705 skb_put(skb, len);
4706 cmd = skb->data;
4707
4708 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
4709 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);
4710
4711 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
4712 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);
4713
4714 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none);
4715 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station);
4716 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap);
4717 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap);
4718 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc);
4719 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master);
4720 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh);
4721 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter);
4722 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device);
4723 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc);
4724 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
4725 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);
4726
4727 for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
4728 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset);
4729 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset);
4730 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset);
4731 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset);
4732 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset);
4733 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset);
4734 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset);
4735 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset);
4736 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset);
4737 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset);
4738 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset);
4739 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset);
4740 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset);
4741 }
4742
4743 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4744 H2C_CAT_OUTSRC, BTFC_SET,
4745 SET_DRV_INFO, 0, 0,
4746 len);
4747
4748 ret = rtw89_h2c_tx(rtwdev, skb, false);
4749 if (ret) {
4750 rtw89_err(rtwdev, "failed to send h2c\n");
4751 goto fail;
4752 }
4753
4754 return 0;
4755 fail:
4756 dev_kfree_skb_any(skb);
4757
4758 return ret;
4759 }
4760
4761 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(max_role_num) \
4762 (4 + 16 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR)
4763
4764 int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev, u8 type)
4765 {
4766 struct rtw89_btc *btc = &rtwdev->btc;
4767 const struct rtw89_btc_ver *ver = btc->ver;
4768 struct rtw89_btc_wl_info *wl = &btc->cx.wl;
4769 struct rtw89_btc_wl_role_info_v1 *role_info = &wl->role_info_v1;
4770 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
4771 struct rtw89_btc_wl_active_role_v1 *active = role_info->active_role_v1;
4772 struct sk_buff *skb;
4773 u32 len;
4774 u8 *cmd, offset;
4775 int ret;
4776 int i;
4777
4778 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(ver->max_role_num);
4779
4780 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4781 if (!skb) {
4782 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
4783 return -ENOMEM;
4784 }
4785 skb_put(skb, len);
4786 cmd = skb->data;
4787
4788 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
4789 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);
4790
4791 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
4792 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);
4793
4794 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none);
4795 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station);
4796 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap);
4797 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap);
4798 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc);
4799 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master);
4800 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh);
4801 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter);
4802 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device);
4803 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc);
4804 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
4805 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);
4806
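/*
 * v1 active-role records are PORT_DATA_OFFSET bytes larger than the
 * original layout, so every per-port field is written with this offset.
 */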
4807 offset = PORT_DATA_OFFSET;
4808 for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
4809 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset);
4810 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset);
4811 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset);
4812 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset);
4813 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset);
4814 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset);
4815 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset);
4816 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset);
4817 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset);
4818 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset);
4819 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset);
4820 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset);
4821 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset);
4822 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR(cmd, active->noa_duration, i, offset);
4823 }
4824
4825 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN;
4826 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset);
4827 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset);
4828 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset);
4829 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset);
4830 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset);
4831 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset);
4832
4833 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4834 H2C_CAT_OUTSRC, BTFC_SET,
4835 SET_DRV_INFO, 0, 0,
4836 len);
4837
4838 ret = rtw89_h2c_tx(rtwdev, skb, false);
4839 if (ret) {
4840 rtw89_err(rtwdev, "failed to send h2c\n");
4841 goto fail;
4842 }
4843
4844 return 0;
4845 fail:
4846 dev_kfree_skb_any(skb);
4847
4848 return ret;
4849 }
4850
4851 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(max_role_num) \
4852 (4 + 8 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR)
4853
4854 int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev, u8 type)
4855 {
4856 struct rtw89_btc *btc = &rtwdev->btc;
4857 const struct rtw89_btc_ver *ver = btc->ver;
4858 struct rtw89_btc_wl_info *wl = &btc->cx.wl;
4859 struct rtw89_btc_wl_role_info_v2 *role_info = &wl->role_info_v2;
4860 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
4861 struct rtw89_btc_wl_active_role_v2 *active = role_info->active_role_v2;
4862 struct sk_buff *skb;
4863 u32 len;
4864 u8 *cmd, offset;
4865 int ret;
4866 int i;
4867
4868 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(ver->max_role_num);
4869
4870 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4871 if (!skb) {
4872 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
4873 return -ENOMEM;
4874 }
4875 skb_put(skb, len);
4876 cmd = skb->data;
4877
4878 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
4879 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);
4880
4881 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
4882 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);
4883
4884 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none);
4885 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station);
4886 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap);
4887 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap);
4888 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc);
4889 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master);
4890 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh);
4891 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter);
4892 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device);
4893 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc);
4894 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
4895 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);
4896
4897 offset = PORT_DATA_OFFSET;
4898 for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
4899 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED_V2(cmd, active->connected, i, offset);
4900 RTW89_SET_FWCMD_CXROLE_ACT_PID_V2(cmd, active->pid, i, offset);
4901 RTW89_SET_FWCMD_CXROLE_ACT_PHY_V2(cmd, active->phy, i, offset);
4902 RTW89_SET_FWCMD_CXROLE_ACT_NOA_V2(cmd, active->noa, i, offset);
4903 RTW89_SET_FWCMD_CXROLE_ACT_BAND_V2(cmd, active->band, i, offset);
4904 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS_V2(cmd, active->client_ps, i, offset);
4905 RTW89_SET_FWCMD_CXROLE_ACT_BW_V2(cmd, active->bw, i, offset);
4906 RTW89_SET_FWCMD_CXROLE_ACT_ROLE_V2(cmd, active->role, i, offset);
4907 RTW89_SET_FWCMD_CXROLE_ACT_CH_V2(cmd, active->ch, i, offset);
4908 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR_V2(cmd, active->noa_duration, i, offset);
4909 }
4910
4911 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN;
4912 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset);
4913 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset);
4914 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset);
4915 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset);
4916 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset);
4917 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset);
4918
4919 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4920 H2C_CAT_OUTSRC, BTFC_SET,
4921 SET_DRV_INFO, 0, 0,
4922 len);
4923
4924 ret = rtw89_h2c_tx(rtwdev, skb, false);
4925 if (ret) {
4926 rtw89_err(rtwdev, "failed to send h2c\n");
4927 goto fail;
4928 }
4929
4930 return 0;
4931 fail:
4932 dev_kfree_skb_any(skb);
4933
4934 return ret;
4935 }
4936
4937 int rtw89_fw_h2c_cxdrv_role_v7(struct rtw89_dev *rtwdev, u8 type)
4938 {
4939 struct rtw89_btc *btc = &rtwdev->btc;
4940 struct rtw89_btc_wl_role_info_v7 *role = &btc->cx.wl.role_info_v7;
4941 struct rtw89_h2c_cxrole_v7 *h2c;
4942 u32 len = sizeof(*h2c);
4943 struct sk_buff *skb;
4944 int ret;
4945
4946 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4947 if (!skb) {
4948 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n");
4949 return -ENOMEM;
4950 }
4951 skb_put(skb, len);
4952 h2c = (struct rtw89_h2c_cxrole_v7 *)skb->data;
4953
4954 h2c->hdr.type = type;
4955 h2c->hdr.ver = btc->ver->fwlrole;
4956 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7;
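/*
 * The byte-sized members are copied verbatim; the 32-bit members are
 * endian-converted individually below.
 */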
4957 memcpy(&h2c->_u8, role, sizeof(h2c->_u8));
4958 h2c->_u32.role_map = cpu_to_le32(role->role_map);
4959 h2c->_u32.mrole_type = cpu_to_le32(role->mrole_type);
4960 h2c->_u32.mrole_noa_duration = cpu_to_le32(role->mrole_noa_duration);
4961 h2c->_u32.dbcc_en = cpu_to_le32(role->dbcc_en);
4962 h2c->_u32.dbcc_chg = cpu_to_le32(role->dbcc_chg);
4963 h2c->_u32.dbcc_2g_phy = cpu_to_le32(role->dbcc_2g_phy);
4964
4965 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4966 H2C_CAT_OUTSRC, BTFC_SET,
4967 SET_DRV_INFO, 0, 0,
4968 len);
4969
4970 ret = rtw89_h2c_tx(rtwdev, skb, false);
4971 if (ret) {
4972 rtw89_err(rtwdev, "failed to send h2c\n");
4973 goto fail;
4974 }
4975
4976 return 0;
4977 fail:
4978 dev_kfree_skb_any(skb);
4979
4980 return ret;
4981 }
4982
4983 int rtw89_fw_h2c_cxdrv_role_v8(struct rtw89_dev *rtwdev, u8 type)
4984 {
4985 struct rtw89_btc *btc = &rtwdev->btc;
4986 struct rtw89_btc_wl_role_info_v8 *role = &btc->cx.wl.role_info_v8;
4987 struct rtw89_h2c_cxrole_v8 *h2c;
4988 u32 len = sizeof(*h2c);
4989 struct sk_buff *skb;
4990 int ret;
4991
4992 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4993 if (!skb) {
4994 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n");
4995 return -ENOMEM;
4996 }
4997 skb_put(skb, len);
4998 h2c = (struct rtw89_h2c_cxrole_v8 *)skb->data;
4999
5000 h2c->hdr.type = type;
5001 h2c->hdr.ver = btc->ver->fwlrole;
5002 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7;
5003 memcpy(&h2c->_u8, role, sizeof(h2c->_u8));
5004 h2c->_u32.role_map = cpu_to_le32(role->role_map);
5005 h2c->_u32.mrole_type = cpu_to_le32(role->mrole_type);
5006 h2c->_u32.mrole_noa_duration = cpu_to_le32(role->mrole_noa_duration);
5007
5008 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5009 H2C_CAT_OUTSRC, BTFC_SET,
5010 SET_DRV_INFO, 0, 0,
5011 len);
5012
5013 ret = rtw89_h2c_tx(rtwdev, skb, false);
5014 if (ret) {
5015 rtw89_err(rtwdev, "failed to send h2c\n");
5016 goto fail;
5017 }
5018
5019 return 0;
5020 fail:
5021 dev_kfree_skb_any(skb);
5022
5023 return ret;
5024 }
5025
5026 #define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR)
5027 int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev, u8 type)
5028 {
5029 struct rtw89_btc *btc = &rtwdev->btc;
5030 const struct rtw89_btc_ver *ver = btc->ver;
5031 struct rtw89_btc_ctrl *ctrl = &btc->ctrl.ctrl;
5032 struct sk_buff *skb;
5033 u8 *cmd;
5034 int ret;
5035
5036 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_CTRL);
5037 if (!skb) {
5038 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n");
5039 return -ENOMEM;
5040 }
5041 skb_put(skb, H2C_LEN_CXDRVINFO_CTRL);
5042 cmd = skb->data;
5043
5044 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
5045 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_CTRL - H2C_LEN_CXDRVHDR);
5046
5047 RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual);
5048 RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, ctrl->igno_bt);
5049 RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, ctrl->always_freerun);
5050 if (ver->fcxctrl == 0)
5051 RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step);
5052
5053 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5054 H2C_CAT_OUTSRC, BTFC_SET,
5055 SET_DRV_INFO, 0, 0,
5056 H2C_LEN_CXDRVINFO_CTRL);
5057
5058 ret = rtw89_h2c_tx(rtwdev, skb, false);
5059 if (ret) {
5060 rtw89_err(rtwdev, "failed to send h2c\n");
5061 goto fail;
5062 }
5063
5064 return 0;
5065 fail:
5066 dev_kfree_skb_any(skb);
5067
5068 return ret;
5069 }
5070
5071 int rtw89_fw_h2c_cxdrv_ctrl_v7(struct rtw89_dev *rtwdev, u8 type)
5072 {
5073 struct rtw89_btc *btc = &rtwdev->btc;
5074 struct rtw89_btc_ctrl_v7 *ctrl = &btc->ctrl.ctrl_v7;
5075 struct rtw89_h2c_cxctrl_v7 *h2c;
5076 u32 len = sizeof(*h2c);
5077 struct sk_buff *skb;
5078 int ret;
5079
5080 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
5081 if (!skb) {
5082 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl_v7\n");
5083 return -ENOMEM;
5084 }
5085 skb_put(skb, len);
5086 h2c = (struct rtw89_h2c_cxctrl_v7 *)skb->data;
5087
5088 h2c->hdr.type = type;
5089 h2c->hdr.ver = btc->ver->fcxctrl;
5090 h2c->hdr.len = sizeof(*h2c) - H2C_LEN_CXDRVHDR_V7;
5091 h2c->ctrl = *ctrl;
5092
5093 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5094 H2C_CAT_OUTSRC, BTFC_SET,
5095 SET_DRV_INFO, 0, 0, len);
5096
5097 ret = rtw89_h2c_tx(rtwdev, skb, false);
5098 if (ret) {
5099 rtw89_err(rtwdev, "failed to send h2c\n");
5100 goto fail;
5101 }
5102
5103 return 0;
5104 fail:
5105 dev_kfree_skb_any(skb);
5106
5107 return ret;
5108 }
5109
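/* BT-coex TRX info H2C: reports current WL/BT TX/RX levels, RSSI,
 * TX power, RX gain, rates, throughput and RX error ratio so the
 * firmware coexistence mechanism can track ongoing traffic.
 */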
5110 #define H2C_LEN_CXDRVINFO_TRX (28 + H2C_LEN_CXDRVHDR)
5111 int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev, u8 type)
5112 {
5113 struct rtw89_btc *btc = &rtwdev->btc;
5114 struct rtw89_btc_trx_info *trx = &btc->dm.trx_info;
5115 struct sk_buff *skb;
5116 u8 *cmd;
5117 int ret;
5118
5119 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_TRX);
5120 if (!skb) {
5121 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_trx\n");
5122 return -ENOMEM;
5123 }
5124 skb_put(skb, H2C_LEN_CXDRVINFO_TRX);
5125 cmd = skb->data;
5126
5127 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
5128 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_TRX - H2C_LEN_CXDRVHDR);
5129
5130 RTW89_SET_FWCMD_CXTRX_TXLV(cmd, trx->tx_lvl);
5131 RTW89_SET_FWCMD_CXTRX_RXLV(cmd, trx->rx_lvl);
5132 RTW89_SET_FWCMD_CXTRX_WLRSSI(cmd, trx->wl_rssi);
5133 RTW89_SET_FWCMD_CXTRX_BTRSSI(cmd, trx->bt_rssi);
5134 RTW89_SET_FWCMD_CXTRX_TXPWR(cmd, trx->tx_power);
5135 RTW89_SET_FWCMD_CXTRX_RXGAIN(cmd, trx->rx_gain);
5136 RTW89_SET_FWCMD_CXTRX_BTTXPWR(cmd, trx->bt_tx_power);
5137 RTW89_SET_FWCMD_CXTRX_BTRXGAIN(cmd, trx->bt_rx_gain);
5138 RTW89_SET_FWCMD_CXTRX_CN(cmd, trx->cn);
5139 RTW89_SET_FWCMD_CXTRX_NHM(cmd, trx->nhm);
5140 RTW89_SET_FWCMD_CXTRX_BTPROFILE(cmd, trx->bt_profile);
5141 RTW89_SET_FWCMD_CXTRX_RSVD2(cmd, trx->rsvd2);
5142 RTW89_SET_FWCMD_CXTRX_TXRATE(cmd, trx->tx_rate);
5143 RTW89_SET_FWCMD_CXTRX_RXRATE(cmd, trx->rx_rate);
5144 RTW89_SET_FWCMD_CXTRX_TXTP(cmd, trx->tx_tp);
5145 RTW89_SET_FWCMD_CXTRX_RXTP(cmd, trx->rx_tp);
5146 RTW89_SET_FWCMD_CXTRX_RXERRRA(cmd, trx->rx_err_ratio);
5147
5148 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5149 H2C_CAT_OUTSRC, BTFC_SET,
5150 SET_DRV_INFO, 0, 0,
5151 H2C_LEN_CXDRVINFO_TRX);
5152
5153 ret = rtw89_h2c_tx(rtwdev, skb, false);
5154 if (ret) {
5155 rtw89_err(rtwdev, "failed to send h2c\n");
5156 goto fail;
5157 }
5158
5159 return 0;
5160 fail:
5161 dev_kfree_skb_any(skb);
5162
5163 return ret;
5164 }
5165
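/* BT-coex RFK info H2C: tells firmware which RF calibration is running
 * (state, RF path map, PHY map, band and calibration type), presumably so
 * coexistence can yield the medium while the calibration is in flight.
 */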
5166 #define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR)
5167 int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev, u8 type)
5168 {
5169 struct rtw89_btc *btc = &rtwdev->btc;
5170 struct rtw89_btc_wl_info *wl = &btc->cx.wl;
5171 struct rtw89_btc_wl_rfk_info *rfk_info = &wl->rfk_info;
5172 struct sk_buff *skb;
5173 u8 *cmd;
5174 int ret;
5175
5176 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_RFK);
5177 if (!skb) {
5178 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_rfk\n");
5179 return -ENOMEM;
5180 }
5181 skb_put(skb, H2C_LEN_CXDRVINFO_RFK);
5182 cmd = skb->data;
5183
5184 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
5185 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_RFK - H2C_LEN_CXDRVHDR);
5186
5187 RTW89_SET_FWCMD_CXRFK_STATE(cmd, rfk_info->state);
5188 RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, rfk_info->path_map);
5189 RTW89_SET_FWCMD_CXRFK_PHY_MAP(cmd, rfk_info->phy_map);
5190 RTW89_SET_FWCMD_CXRFK_BAND(cmd, rfk_info->band);
5191 RTW89_SET_FWCMD_CXRFK_TYPE(cmd, rfk_info->type);
5192
5193 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5194 H2C_CAT_OUTSRC, BTFC_SET,
5195 SET_DRV_INFO, 0, 0,
5196 H2C_LEN_CXDRVINFO_RFK);
5197
5198 ret = rtw89_h2c_tx(rtwdev, skb, false);
5199 if (ret) {
5200 rtw89_err(rtwdev, "failed to send h2c\n");
5201 goto fail;
5202 }
5203
5204 return 0;
5205 fail:
5206 dev_kfree_skb_any(skb);
5207
5208 return ret;
5209 }
5210
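/* Ask firmware to delete a previously offloaded packet. The wait condition
 * is keyed on both the packet id and the DEL opcode, and the driver-side
 * pkt_offload bitmap bit is only released after firmware acknowledges.
 */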
5211 #define H2C_LEN_PKT_OFLD 4
5212 int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id)
5213 {
5214 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
5215 struct sk_buff *skb;
5216 unsigned int cond;
5217 u8 *cmd;
5218 int ret;
5219
5220 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD);
5221 if (!skb) {
5222 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n");
5223 return -ENOMEM;
5224 }
5225 skb_put(skb, H2C_LEN_PKT_OFLD);
5226 cmd = skb->data;
5227
5228 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, id);
5229 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_DEL);
5230
5231 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5232 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
5233 H2C_FUNC_PACKET_OFLD, 1, 1,
5234 H2C_LEN_PKT_OFLD);
5235
5236 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(id, RTW89_PKT_OFLD_OP_DEL);
5237
5238 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
5239 if (ret < 0) {
5240 rtw89_debug(rtwdev, RTW89_DBG_FW,
5241 "failed to del pkt ofld: id %d, ret %d\n",
5242 id, ret);
5243 return ret;
5244 }
5245
5246 rtw89_core_release_bit_map(rtwdev->pkt_offload, id);
5247 return 0;
5248 }
5249
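/* Offload a packet template to firmware. An id is reserved from the
 * pkt_offload bitmap up front and released again on any failure
 * (allocation or firmware timeout), so callers only ever keep ids that
 * firmware actually accepted.
 */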
5250 int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
5251 struct sk_buff *skb_ofld)
5252 {
5253 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
5254 struct sk_buff *skb;
5255 unsigned int cond;
5256 u8 *cmd;
5257 u8 alloc_id;
5258 int ret;
5259
5260 alloc_id = rtw89_core_acquire_bit_map(rtwdev->pkt_offload,
5261 RTW89_MAX_PKT_OFLD_NUM);
5262 if (alloc_id == RTW89_MAX_PKT_OFLD_NUM)
5263 return -ENOSPC;
5264
5265 *id = alloc_id;
5266
5267 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD + skb_ofld->len);
5268 if (!skb) {
5269 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n");
5270 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id);
5271 return -ENOMEM;
5272 }
5273 skb_put(skb, H2C_LEN_PKT_OFLD);
5274 cmd = skb->data;
5275
5276 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, alloc_id);
5277 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_ADD);
5278 RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(cmd, skb_ofld->len);
5279 skb_put_data(skb, skb_ofld->data, skb_ofld->len);
5280
5281 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5282 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
5283 H2C_FUNC_PACKET_OFLD, 1, 1,
5284 H2C_LEN_PKT_OFLD + skb_ofld->len);
5285
5286 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(alloc_id, RTW89_PKT_OFLD_OP_ADD);
5287
5288 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
5289 if (ret < 0) {
5290 rtw89_debug(rtwdev, RTW89_DBG_FW,
5291 "failed to add pkt ofld: id %d, ret %d\n",
5292 alloc_id, ret);
5293 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id);
5294 return ret;
5295 }
5296
5297 return 0;
5298 }
5299
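/* Download the scan channel list in the AX format. Every entry of
 * chan_list is packed into one fixed-size little-endian
 * rtw89_h2c_chinfo_elem; the static_assert keeps the element size in sync
 * with RTW89_MAC_CHINFO_SIZE. The call waits for the ADD_CH acknowledgment.
 */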
5300 static
5301 int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int ch_num,
5302 struct list_head *chan_list)
5303 {
5304 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
5305 struct rtw89_h2c_chinfo_elem *elem;
5306 struct rtw89_mac_chinfo *ch_info;
5307 struct rtw89_h2c_chinfo *h2c;
5308 struct sk_buff *skb;
5309 unsigned int cond;
5310 int skb_len;
5311 int ret;
5312
5313 static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE);
5314
5315 skb_len = struct_size(h2c, elem, ch_num);
5316 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len);
5317 if (!skb) {
5318 rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n");
5319 return -ENOMEM;
5320 }
5321 skb_put(skb, sizeof(*h2c));
5322 h2c = (struct rtw89_h2c_chinfo *)skb->data;
5323
5324 h2c->ch_num = ch_num;
5325 h2c->elem_size = sizeof(*elem) / 4; /* in unit of 4 bytes */
5326
5327 list_for_each_entry(ch_info, chan_list, list) {
5328 elem = (struct rtw89_h2c_chinfo_elem *)skb_put(skb, sizeof(*elem));
5329
5330 elem->w0 = le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_W0_PERIOD) |
5331 le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_W0_DWELL) |
5332 le32_encode_bits(ch_info->central_ch, RTW89_H2C_CHINFO_W0_CENTER_CH) |
5333 le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_W0_PRI_CH);
5334
5335 elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_W1_BW) |
5336 le32_encode_bits(ch_info->notify_action, RTW89_H2C_CHINFO_W1_ACTION) |
5337 le32_encode_bits(ch_info->num_pkt, RTW89_H2C_CHINFO_W1_NUM_PKT) |
5338 le32_encode_bits(ch_info->tx_pkt, RTW89_H2C_CHINFO_W1_TX) |
5339 le32_encode_bits(ch_info->pause_data, RTW89_H2C_CHINFO_W1_PAUSE_DATA) |
5340 le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_W1_BAND) |
5341 le32_encode_bits(ch_info->probe_id, RTW89_H2C_CHINFO_W1_PKT_ID) |
5342 le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_W1_DFS) |
5343 le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_W1_TX_NULL) |
5344 le32_encode_bits(ch_info->rand_seq_num, RTW89_H2C_CHINFO_W1_RANDOM);
5345
5346 elem->w2 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_W2_PKT0) |
5347 le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_W2_PKT1) |
5348 le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_W2_PKT2) |
5349 le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_W2_PKT3);
5350
5351 elem->w3 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_W3_PKT4) |
5352 le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_W3_PKT5) |
5353 le32_encode_bits(ch_info->pkt_id[6], RTW89_H2C_CHINFO_W3_PKT6) |
5354 le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_W3_PKT7);
5355 }
5356
5357 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5358 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
5359 H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len);
5360
5361 cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH;
5362
5363 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
5364 if (ret) {
5365 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n");
5366 return ret;
5367 }
5368
5369 return 0;
5370 }
5371
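/* BE-generation variant of the scan channel list download. The layout is
 * feature dependent: with CH_INFO_BE_V0 the scan period is encoded in W0,
 * otherwise in W7 (PERIOD_V1), as handled at the end of the per-channel loop.
 */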
5372 static
5373 int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num,
5374 struct list_head *chan_list,
5375 struct rtw89_vif_link *rtwvif_link)
5376 {
5377 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
5378 struct rtw89_h2c_chinfo_elem_be *elem;
5379 struct rtw89_mac_chinfo_be *ch_info;
5380 struct rtw89_h2c_chinfo_be *h2c;
5381 struct sk_buff *skb;
5382 unsigned int cond;
5383 u8 ver = U8_MAX;
5384 int skb_len;
5385 int ret;
5386
5387 static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE_BE);
5388
5389 skb_len = struct_size(h2c, elem, ch_num);
5390 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len);
5391 if (!skb) {
5392 rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n");
5393 return -ENOMEM;
5394 }
5395
5396 if (RTW89_CHK_FW_FEATURE(CH_INFO_BE_V0, &rtwdev->fw))
5397 ver = 0;
5398
5399 skb_put(skb, sizeof(*h2c));
5400 h2c = (struct rtw89_h2c_chinfo_be *)skb->data;
5401
5402 h2c->ch_num = ch_num;
5403 h2c->elem_size = sizeof(*elem) / 4; /* in unit of 4 bytes */
5404 h2c->arg = u8_encode_bits(rtwvif_link->mac_idx,
5405 RTW89_H2C_CHINFO_ARG_MAC_IDX_MASK);
5406
5407 list_for_each_entry(ch_info, chan_list, list) {
5408 elem = (struct rtw89_h2c_chinfo_elem_be *)skb_put(skb, sizeof(*elem));
5409
5410 elem->w0 = le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_BE_W0_DWELL) |
5411 le32_encode_bits(ch_info->central_ch,
5412 RTW89_H2C_CHINFO_BE_W0_CENTER_CH) |
5413 le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_BE_W0_PRI_CH);
5414
5415 elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_BE_W1_BW) |
5416 le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_BE_W1_CH_BAND) |
5417 le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_BE_W1_DFS) |
5418 le32_encode_bits(ch_info->pause_data,
5419 RTW89_H2C_CHINFO_BE_W1_PAUSE_DATA) |
5420 le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_BE_W1_TX_NULL) |
5421 le32_encode_bits(ch_info->rand_seq_num,
5422 RTW89_H2C_CHINFO_BE_W1_RANDOM) |
5423 le32_encode_bits(ch_info->notify_action,
5424 RTW89_H2C_CHINFO_BE_W1_NOTIFY) |
5425 le32_encode_bits(ch_info->probe_id != 0xff ? 1 : 0,
5426 RTW89_H2C_CHINFO_BE_W1_PROBE) |
5427 le32_encode_bits(ch_info->leave_crit,
5428 RTW89_H2C_CHINFO_BE_W1_EARLY_LEAVE_CRIT) |
5429 le32_encode_bits(ch_info->chkpt_timer,
5430 RTW89_H2C_CHINFO_BE_W1_CHKPT_TIMER);
5431
5432 elem->w2 = le32_encode_bits(ch_info->leave_time,
5433 RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TIME) |
5434 le32_encode_bits(ch_info->leave_th,
5435 RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TH) |
5436 le32_encode_bits(ch_info->tx_pkt_ctrl,
5437 RTW89_H2C_CHINFO_BE_W2_TX_PKT_CTRL);
5438
5439 elem->w3 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_BE_W3_PKT0) |
5440 le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_BE_W3_PKT1) |
5441 le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_BE_W3_PKT2) |
5442 le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_BE_W3_PKT3);
5443
5444 elem->w4 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_BE_W4_PKT4) |
5445 le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_BE_W4_PKT5) |
5446 le32_encode_bits(ch_info->pkt_id[6], RTW89_H2C_CHINFO_BE_W4_PKT6) |
5447 le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_BE_W4_PKT7);
5448
5449 elem->w5 = le32_encode_bits(ch_info->sw_def, RTW89_H2C_CHINFO_BE_W5_SW_DEF) |
5450 le32_encode_bits(ch_info->fw_probe0_ssids,
5451 RTW89_H2C_CHINFO_BE_W5_FW_PROBE0_SSIDS);
5452
5453 elem->w6 = le32_encode_bits(ch_info->fw_probe0_shortssids,
5454 RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_SHORTSSIDS) |
5455 le32_encode_bits(ch_info->fw_probe0_bssids,
5456 RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_BSSIDS);
5457 if (ver == 0)
5458 elem->w0 |=
5459 le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_BE_W0_PERIOD);
5460 else
5461 elem->w7 = le32_encode_bits(ch_info->period,
5462 RTW89_H2C_CHINFO_BE_W7_PERIOD_V1);
5463 }
5464
5465 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5466 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
5467 H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len);
5468
5469 cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH;
5470
5471 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
5472 if (ret) {
5473 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n");
5474 return ret;
5475 }
5476
5477 return 0;
5478 }
5479
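/* Start or stop scan offload on AX chips. If option->delay is set, the
 * current port TSF is read and the start time is deferred by
 * delay * RTW89_SCAN_DELAY_TSF_UNIT; when reading the TSF fails, the scan
 * falls back to starting immediately.
 */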
5480 #define RTW89_SCAN_DELAY_TSF_UNIT 104800
5481 int rtw89_fw_h2c_scan_offload_ax(struct rtw89_dev *rtwdev,
5482 struct rtw89_scan_option *option,
5483 struct rtw89_vif_link *rtwvif_link,
5484 bool wowlan)
5485 {
5486 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
5487 struct rtw89_chan *op = &rtwdev->scan_info.op_chan;
5488 enum rtw89_scan_mode scan_mode = RTW89_SCAN_IMMEDIATE;
5489 struct rtw89_h2c_scanofld *h2c;
5490 u32 len = sizeof(*h2c);
5491 struct sk_buff *skb;
5492 unsigned int cond;
5493 u64 tsf = 0;
5494 int ret;
5495
5496 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
5497 if (!skb) {
5498 rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n");
5499 return -ENOMEM;
5500 }
5501 skb_put(skb, len);
5502 h2c = (struct rtw89_h2c_scanofld *)skb->data;
5503
5504 if (option->delay) {
5505 ret = rtw89_mac_port_get_tsf(rtwdev, rtwvif_link, &tsf);
5506 if (ret) {
5507 rtw89_warn(rtwdev, "NLO failed to get port tsf: %d\n", ret);
5508 scan_mode = RTW89_SCAN_IMMEDIATE;
5509 } else {
5510 scan_mode = RTW89_SCAN_DELAY;
5511 tsf += (u64)option->delay * RTW89_SCAN_DELAY_TSF_UNIT;
5512 }
5513 }
5514
5515 h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_SCANOFLD_W0_MACID) |
5516 le32_encode_bits(rtwvif_link->port, RTW89_H2C_SCANOFLD_W0_PORT_ID) |
5517 le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_SCANOFLD_W0_BAND) |
5518 le32_encode_bits(option->enable, RTW89_H2C_SCANOFLD_W0_OPERATION);
5519
5520 h2c->w1 = le32_encode_bits(true, RTW89_H2C_SCANOFLD_W1_NOTIFY_END) |
5521 le32_encode_bits(option->target_ch_mode,
5522 RTW89_H2C_SCANOFLD_W1_TARGET_CH_MODE) |
5523 le32_encode_bits(scan_mode, RTW89_H2C_SCANOFLD_W1_START_MODE) |
5524 le32_encode_bits(option->repeat, RTW89_H2C_SCANOFLD_W1_SCAN_TYPE);
5525
5526 h2c->w2 = le32_encode_bits(option->norm_pd, RTW89_H2C_SCANOFLD_W2_NORM_PD) |
5527 le32_encode_bits(option->slow_pd, RTW89_H2C_SCANOFLD_W2_SLOW_PD);
5528
5529 if (option->target_ch_mode) {
5530 h2c->w1 |= le32_encode_bits(op->band_width,
5531 RTW89_H2C_SCANOFLD_W1_TARGET_CH_BW) |
5532 le32_encode_bits(op->primary_channel,
5533 RTW89_H2C_SCANOFLD_W1_TARGET_PRI_CH) |
5534 le32_encode_bits(op->channel,
5535 RTW89_H2C_SCANOFLD_W1_TARGET_CENTRAL_CH);
5536 h2c->w0 |= le32_encode_bits(op->band_type,
5537 RTW89_H2C_SCANOFLD_W0_TARGET_CH_BAND);
5538 }
5539
5540 h2c->tsf_high = le32_encode_bits(upper_32_bits(tsf),
5541 RTW89_H2C_SCANOFLD_W3_TSF_HIGH);
5542 h2c->tsf_low = le32_encode_bits(lower_32_bits(tsf),
5543 RTW89_H2C_SCANOFLD_W4_TSF_LOW);
5544
5545 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5546 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
5547 H2C_FUNC_SCANOFLD, 1, 1,
5548 len);
5549
5550 if (option->enable)
5551 cond = RTW89_SCANOFLD_WAIT_COND_START;
5552 else
5553 cond = RTW89_SCANOFLD_WAIT_COND_STOP;
5554
5555 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
5556 if (ret) {
5557 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan ofld\n");
5558 return ret;
5559 }
5560
5561 return 0;
5562 }
5563
5564 static void rtw89_scan_get_6g_disabled_chan(struct rtw89_dev *rtwdev,
5565 struct rtw89_scan_option *option)
5566 {
5567 struct ieee80211_supported_band *sband;
5568 struct ieee80211_channel *chan;
5569 u8 i, idx;
5570
5571 sband = rtwdev->hw->wiphy->bands[NL80211_BAND_6GHZ];
5572 if (!sband) {
5573 option->prohib_chan = U64_MAX;
5574 return;
5575 }
5576
5577 for (i = 0; i < sband->n_channels; i++) {
5578 chan = &sband->channels[i];
5579 if (chan->flags & IEEE80211_CHAN_DISABLED) {
5580 idx = (chan->hw_value - 1) / 4;
5581 option->prohib_chan |= BIT(idx);
5582 }
5583 }
5584 }
5585
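/* Build the BE scan-offload command: a fixed configuration block followed
 * by flexible arrays of MAC-role and operating-channel descriptors. With
 * the SCAN_OFFLOAD_BE_V0 feature the configuration block ends at W8 and
 * the W9 size fields are skipped.
 */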
5586 int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
5587 struct rtw89_scan_option *option,
5588 struct rtw89_vif_link *rtwvif_link,
5589 bool wowlan)
5590 {
5591 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
5592 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
5593 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
5594 struct cfg80211_scan_request *req = rtwvif->scan_req;
5595 struct rtw89_h2c_scanofld_be_macc_role *macc_role;
5596 struct rtw89_chan *op = &scan_info->op_chan;
5597 struct rtw89_h2c_scanofld_be_opch *opch;
5598 struct rtw89_pktofld_info *pkt_info;
5599 struct rtw89_h2c_scanofld_be *h2c;
5600 struct sk_buff *skb;
5601 u8 macc_role_size = sizeof(*macc_role) * option->num_macc_role;
5602 u8 opch_size = sizeof(*opch) * option->num_opch;
5603 u8 probe_id[NUM_NL80211_BANDS];
5604 u8 scan_offload_ver = U8_MAX;
5605 u8 cfg_len = sizeof(*h2c);
5606 unsigned int cond;
5607 u8 ver = U8_MAX;
5608 void *ptr;
5609 int ret;
5610 u32 len;
5611 u8 i;
5612
5613 rtw89_scan_get_6g_disabled_chan(rtwdev, option);
5614
5615 if (RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD_BE_V0, &rtwdev->fw)) {
5616 cfg_len = offsetofend(typeof(*h2c), w8);
5617 scan_offload_ver = 0;
5618 }
5619
5620 len = cfg_len + macc_role_size + opch_size;
5621 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
5622 if (!skb) {
5623 rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n");
5624 return -ENOMEM;
5625 }
5626
5627 skb_put(skb, len);
5628 h2c = (struct rtw89_h2c_scanofld_be *)skb->data;
5629 ptr = skb->data;
5630
5631 memset(probe_id, RTW89_SCANOFLD_PKT_NONE, sizeof(probe_id));
5632
5633 if (RTW89_CHK_FW_FEATURE(CH_INFO_BE_V0, &rtwdev->fw))
5634 ver = 0;
5635
5636 if (!wowlan) {
5637 list_for_each_entry(pkt_info, &scan_info->pkt_list[NL80211_BAND_6GHZ], list) {
5638 if (pkt_info->wildcard_6ghz) {
5639 /* Provide wildcard as template */
5640 probe_id[NL80211_BAND_6GHZ] = pkt_info->id;
5641 break;
5642 }
5643 }
5644 }
5645
5646 h2c->w0 = le32_encode_bits(option->operation, RTW89_H2C_SCANOFLD_BE_W0_OP) |
5647 le32_encode_bits(option->scan_mode,
5648 RTW89_H2C_SCANOFLD_BE_W0_SCAN_MODE) |
5649 le32_encode_bits(option->repeat, RTW89_H2C_SCANOFLD_BE_W0_REPEAT) |
5650 le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_NOTIFY_END) |
5651 le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_LEARN_CH) |
5652 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_SCANOFLD_BE_W0_MACID) |
5653 le32_encode_bits(rtwvif_link->port, RTW89_H2C_SCANOFLD_BE_W0_PORT) |
5654 le32_encode_bits(option->band, RTW89_H2C_SCANOFLD_BE_W0_BAND);
5655
5656 h2c->w1 = le32_encode_bits(option->num_macc_role, RTW89_H2C_SCANOFLD_BE_W1_NUM_MACC_ROLE) |
5657 le32_encode_bits(option->num_opch, RTW89_H2C_SCANOFLD_BE_W1_NUM_OP) |
5658 le32_encode_bits(option->norm_pd, RTW89_H2C_SCANOFLD_BE_W1_NORM_PD);
5659
5660 h2c->w2 = le32_encode_bits(option->slow_pd, RTW89_H2C_SCANOFLD_BE_W2_SLOW_PD) |
5661 le32_encode_bits(option->norm_cy, RTW89_H2C_SCANOFLD_BE_W2_NORM_CY) |
5662 le32_encode_bits(option->opch_end, RTW89_H2C_SCANOFLD_BE_W2_OPCH_END);
5663
5664 h2c->w3 = le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_SSID) |
5665 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_SHORT_SSID) |
5666 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_BSSID) |
5667 le32_encode_bits(probe_id[NL80211_BAND_2GHZ], RTW89_H2C_SCANOFLD_BE_W3_PROBEID);
5668
5669 h2c->w4 = le32_encode_bits(probe_id[NL80211_BAND_5GHZ],
5670 RTW89_H2C_SCANOFLD_BE_W4_PROBE_5G) |
5671 le32_encode_bits(probe_id[NL80211_BAND_6GHZ],
5672 RTW89_H2C_SCANOFLD_BE_W4_PROBE_6G) |
5673 le32_encode_bits(option->delay, RTW89_H2C_SCANOFLD_BE_W4_DELAY_START);
5674
5675 h2c->w5 = le32_encode_bits(option->mlo_mode, RTW89_H2C_SCANOFLD_BE_W5_MLO_MODE);
5676
5677 h2c->w6 = le32_encode_bits(option->prohib_chan,
5678 RTW89_H2C_SCANOFLD_BE_W6_CHAN_PROHIB_LOW);
5679 h2c->w7 = le32_encode_bits(option->prohib_chan >> 32,
5680 RTW89_H2C_SCANOFLD_BE_W7_CHAN_PROHIB_HIGH);
5681 if (!wowlan && req->no_cck) {
5682 h2c->w0 |= le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_PROBE_WITH_RATE);
5683 h2c->w8 = le32_encode_bits(RTW89_HW_RATE_OFDM6,
5684 RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_2GHZ) |
5685 le32_encode_bits(RTW89_HW_RATE_OFDM6,
5686 RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_5GHZ) |
5687 le32_encode_bits(RTW89_HW_RATE_OFDM6,
5688 RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_6GHZ);
5689 }
5690
5691 if (scan_offload_ver == 0)
5692 goto flex_member;
5693
5694 h2c->w9 = le32_encode_bits(sizeof(*h2c) / sizeof(h2c->w0),
5695 RTW89_H2C_SCANOFLD_BE_W9_SIZE_CFG) |
5696 le32_encode_bits(sizeof(*macc_role) / sizeof(macc_role->w0),
5697 RTW89_H2C_SCANOFLD_BE_W9_SIZE_MACC) |
5698 le32_encode_bits(sizeof(*opch) / sizeof(opch->w0),
5699 RTW89_H2C_SCANOFLD_BE_W9_SIZE_OP);
5700
5701 flex_member:
5702 ptr += cfg_len;
5703
5704 for (i = 0; i < option->num_macc_role; i++) {
5705 macc_role = ptr;
5706 macc_role->w0 =
5707 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_BAND) |
5708 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_PORT) |
5709 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_MACID) |
5710 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_OPCH_END);
5711 ptr += sizeof(*macc_role);
5712 }
5713
5714 for (i = 0; i < option->num_opch; i++) {
5715 opch = ptr;
5716 opch->w0 = le32_encode_bits(rtwvif_link->mac_id,
5717 RTW89_H2C_SCANOFLD_BE_OPCH_W0_MACID) |
5718 le32_encode_bits(option->band,
5719 RTW89_H2C_SCANOFLD_BE_OPCH_W0_BAND) |
5720 le32_encode_bits(rtwvif_link->port,
5721 RTW89_H2C_SCANOFLD_BE_OPCH_W0_PORT) |
5722 le32_encode_bits(RTW89_SCAN_OPMODE_INTV,
5723 RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY) |
5724 le32_encode_bits(true,
5725 RTW89_H2C_SCANOFLD_BE_OPCH_W0_TXNULL) |
5726 le32_encode_bits(RTW89_OFF_CHAN_TIME / 10,
5727 RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY_VAL);
5728
5729 opch->w1 = le32_encode_bits(op->band_type,
5730 RTW89_H2C_SCANOFLD_BE_OPCH_W1_CH_BAND) |
5731 le32_encode_bits(op->band_width,
5732 RTW89_H2C_SCANOFLD_BE_OPCH_W1_BW) |
5733 le32_encode_bits(0x3,
5734 RTW89_H2C_SCANOFLD_BE_OPCH_W1_NOTIFY) |
5735 le32_encode_bits(op->primary_channel,
5736 RTW89_H2C_SCANOFLD_BE_OPCH_W1_PRI_CH) |
5737 le32_encode_bits(op->channel,
5738 RTW89_H2C_SCANOFLD_BE_OPCH_W1_CENTRAL_CH);
5739
5740 opch->w2 = le32_encode_bits(0,
5741 RTW89_H2C_SCANOFLD_BE_OPCH_W2_PKTS_CTRL) |
5742 le32_encode_bits(0,
5743 RTW89_H2C_SCANOFLD_BE_OPCH_W2_SW_DEF) |
5744 le32_encode_bits(2,
5745 RTW89_H2C_SCANOFLD_BE_OPCH_W2_SS);
5746
5747 opch->w3 = le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
5748 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT0) |
5749 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
5750 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT1) |
5751 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
5752 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT2) |
5753 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
5754 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT3);
5755
5756 if (ver == 0)
5757 opch->w1 |= le32_encode_bits(RTW89_CHANNEL_TIME,
5758 RTW89_H2C_SCANOFLD_BE_OPCH_W1_DURATION);
5759 else
5760 opch->w4 = le32_encode_bits(RTW89_CHANNEL_TIME,
5761 RTW89_H2C_SCANOFLD_BE_OPCH_W4_DURATION_V1);
5762 ptr += sizeof(*opch);
5763 }
5764
5765 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5766 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
5767 H2C_FUNC_SCANOFLD_BE, 1, 1,
5768 len);
5769
5770 if (option->enable)
5771 cond = RTW89_SCANOFLD_BE_WAIT_COND_START;
5772 else
5773 cond = RTW89_SCANOFLD_BE_WAIT_COND_STOP;
5774
5775 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
5776 if (ret) {
5777 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan be ofld\n");
5778 return ret;
5779 }
5780
5781 return 0;
5782 }
5783
5784 int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
5785 struct rtw89_fw_h2c_rf_reg_info *info,
5786 u16 len, u8 page)
5787 {
5788 struct sk_buff *skb;
5789 u8 class = info->rf_path == RF_PATH_A ?
5790 H2C_CL_OUTSRC_RF_REG_A : H2C_CL_OUTSRC_RF_REG_B;
5791 int ret;
5792
5793 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
5794 if (!skb) {
5795 rtw89_err(rtwdev, "failed to alloc skb for h2c rf reg\n");
5796 return -ENOMEM;
5797 }
5798 skb_put_data(skb, info->rtw89_phy_config_rf_h2c[page], len);
5799
5800 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5801 H2C_CAT_OUTSRC, class, page, 0, 0,
5802 len);
5803
5804 ret = rtw89_h2c_tx(rtwdev, skb, false);
5805 if (ret) {
5806 rtw89_err(rtwdev, "failed to send h2c\n");
5807 goto fail;
5808 }
5809
5810 return 0;
5811 fail:
5812 dev_kfree_skb_any(skb);
5813
5814 return ret;
5815 }
5816
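/* Notify RF firmware of the two MCC (multi-channel concurrency) channel
 * table entries and which of them is currently selected.
 */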
5817 int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev)
5818 {
5819 struct rtw89_rfk_mcc_info_data *rfk_mcc = rtwdev->rfk_mcc.data;
5820 struct rtw89_fw_h2c_rf_get_mccch *mccch;
5821 struct sk_buff *skb;
5822 int ret;
5823 u8 idx;
5824
5825 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, sizeof(*mccch));
5826 if (!skb) {
5827 rtw89_err(rtwdev, "failed to alloc skb for h2c rf_ntfy_mcc\n");
5828 return -ENOMEM;
5829 }
5830 skb_put(skb, sizeof(*mccch));
5831 mccch = (struct rtw89_fw_h2c_rf_get_mccch *)skb->data;
5832
5833 idx = rfk_mcc->table_idx;
5834 mccch->ch_0 = cpu_to_le32(rfk_mcc->ch[0]);
5835 mccch->ch_1 = cpu_to_le32(rfk_mcc->ch[1]);
5836 mccch->band_0 = cpu_to_le32(rfk_mcc->band[0]);
5837 mccch->band_1 = cpu_to_le32(rfk_mcc->band[1]);
5838 mccch->current_channel = cpu_to_le32(rfk_mcc->ch[idx]);
5839 mccch->current_band_type = cpu_to_le32(rfk_mcc->band[idx]);
5840
5841 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5842 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY,
5843 H2C_FUNC_OUTSRC_RF_GET_MCCCH, 0, 0,
5844 sizeof(*mccch));
5845
5846 ret = rtw89_h2c_tx(rtwdev, skb, false);
5847 if (ret) {
5848 rtw89_err(rtwdev, "failed to send h2c\n");
5849 goto fail;
5850 }
5851
5852 return 0;
5853 fail:
5854 dev_kfree_skb_any(skb);
5855
5856 return ret;
5857 }
5858 EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc);
5859
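/* Pre-RFK notification. The payload layout follows firmware feature flags:
 * the V0 format additionally snapshots IQC table selection and RF CFGCH
 * registers, while only the newest format (neither V0 nor V1) reports the
 * per-path current bandwidth.
 */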
5860 int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
5861 enum rtw89_phy_idx phy_idx)
5862 {
5863 struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
5864 struct rtw89_fw_h2c_rfk_pre_info_common *common;
5865 struct rtw89_fw_h2c_rfk_pre_info_v0 *h2c_v0;
5866 struct rtw89_fw_h2c_rfk_pre_info_v1 *h2c_v1;
5867 struct rtw89_fw_h2c_rfk_pre_info *h2c;
5868 u8 tbl_sel[NUM_OF_RTW89_FW_RFK_PATH];
5869 u32 len = sizeof(*h2c);
5870 struct sk_buff *skb;
5871 u8 ver = U8_MAX;
5872 u8 tbl, path;
5873 u32 val32;
5874 int ret;
5875
5876 if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V1, &rtwdev->fw)) {
5877 len = sizeof(*h2c_v1);
5878 ver = 1;
5879 } else if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V0, &rtwdev->fw)) {
5880 len = sizeof(*h2c_v0);
5881 ver = 0;
5882 }
5883
5884 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
5885 if (!skb) {
5886 rtw89_err(rtwdev, "failed to alloc skb for h2c rfk_pre_ntfy\n");
5887 return -ENOMEM;
5888 }
5889 skb_put(skb, len);
5890 h2c = (struct rtw89_fw_h2c_rfk_pre_info *)skb->data;
5891 common = &h2c->base_v1.common;
5892
5893 common->mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);
5894
5895 BUILD_BUG_ON(NUM_OF_RTW89_FW_RFK_TBL > RTW89_RFK_CHS_NR);
5896 BUILD_BUG_ON(ARRAY_SIZE(rfk_mcc->data) < NUM_OF_RTW89_FW_RFK_PATH);
5897
5898 for (tbl = 0; tbl < NUM_OF_RTW89_FW_RFK_TBL; tbl++) {
5899 for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) {
5900 common->dbcc.ch[path][tbl] =
5901 cpu_to_le32(rfk_mcc->data[path].ch[tbl]);
5902 common->dbcc.band[path][tbl] =
5903 cpu_to_le32(rfk_mcc->data[path].band[tbl]);
5904 }
5905 }
5906
5907 for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) {
5908 tbl_sel[path] = rfk_mcc->data[path].table_idx;
5909
5910 common->tbl.cur_ch[path] =
5911 cpu_to_le32(rfk_mcc->data[path].ch[tbl_sel[path]]);
5912 common->tbl.cur_band[path] =
5913 cpu_to_le32(rfk_mcc->data[path].band[tbl_sel[path]]);
5914
5915 if (ver <= 1)
5916 continue;
5917
5918 h2c->cur_bandwidth[path] =
5919 cpu_to_le32(rfk_mcc->data[path].bw[tbl_sel[path]]);
5920 }
5921
5922 common->phy_idx = cpu_to_le32(phy_idx);
5923
5924 if (ver == 0) { /* RFK_PRE_NOTIFY_V0 */
5925 h2c_v0 = (struct rtw89_fw_h2c_rfk_pre_info_v0 *)skb->data;
5926
5927 h2c_v0->cur_band = cpu_to_le32(rfk_mcc->data[0].band[tbl_sel[0]]);
5928 h2c_v0->cur_bw = cpu_to_le32(rfk_mcc->data[0].bw[tbl_sel[0]]);
5929 h2c_v0->cur_center_ch = cpu_to_le32(rfk_mcc->data[0].ch[tbl_sel[0]]);
5930
5931 val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL, B_COEF_SEL_IQC_V1);
5932 h2c_v0->ktbl_sel0 = cpu_to_le32(val32);
5933 val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL_C1, B_COEF_SEL_IQC_V1);
5934 h2c_v0->ktbl_sel1 = cpu_to_le32(val32);
5935 val32 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
5936 h2c_v0->rfmod0 = cpu_to_le32(val32);
5937 val32 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK);
5938 h2c_v0->rfmod1 = cpu_to_le32(val32);
5939
5940 if (rtw89_is_mlo_1_1(rtwdev))
5941 h2c_v0->mlo_1_1 = cpu_to_le32(1);
5942
5943 h2c_v0->rfe_type = cpu_to_le32(rtwdev->efuse.rfe_type);
5944
5945 goto done;
5946 }
5947
5948 if (rtw89_is_mlo_1_1(rtwdev)) {
5949 h2c_v1 = &h2c->base_v1;
5950 h2c_v1->mlo_1_1 = cpu_to_le32(1);
5951 }
5952 done:
5953 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5954 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
5955 H2C_FUNC_RFK_PRE_NOTIFY, 0, 0,
5956 len);
5957
5958 ret = rtw89_h2c_tx(rtwdev, skb, false);
5959 if (ret) {
5960 rtw89_err(rtwdev, "failed to send h2c\n");
5961 goto fail;
5962 }
5963
5964 return 0;
5965 fail:
5966 dev_kfree_skb_any(skb);
5967
5968 return ret;
5969 }
5970
5971 int rtw89_fw_h2c_rf_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
5972 const struct rtw89_chan *chan, enum rtw89_tssi_mode tssi_mode)
5973 {
5974 struct rtw89_hal *hal = &rtwdev->hal;
5975 struct rtw89_h2c_rf_tssi *h2c;
5976 u32 len = sizeof(*h2c);
5977 struct sk_buff *skb;
5978 int ret;
5979
5980 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
5981 if (!skb) {
5982 rtw89_err(rtwdev, "failed to alloc skb for h2c RF TSSI\n");
5983 return -ENOMEM;
5984 }
5985 skb_put(skb, len);
5986 h2c = (struct rtw89_h2c_rf_tssi *)skb->data;
5987
5988 h2c->len = cpu_to_le16(len);
5989 h2c->phy = phy_idx;
5990 h2c->ch = chan->channel;
5991 h2c->bw = chan->band_width;
5992 h2c->band = chan->band_type;
5993 h2c->hwtx_en = true;
5994 h2c->cv = hal->cv;
5995 h2c->tssi_mode = tssi_mode;
5996
5997 rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(rtwdev, phy_idx, chan, h2c);
5998 rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(rtwdev, phy_idx, chan, h2c);
5999
6000 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
6001 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
6002 H2C_FUNC_RFK_TSSI_OFFLOAD, 0, 0, len);
6003
6004 ret = rtw89_h2c_tx(rtwdev, skb, false);
6005 if (ret) {
6006 rtw89_err(rtwdev, "failed to send h2c\n");
6007 goto fail;
6008 }
6009
6010 return 0;
6011 fail:
6012 dev_kfree_skb_any(skb);
6013
6014 return ret;
6015 }
6016
6017 int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
6018 const struct rtw89_chan *chan)
6019 {
6020 struct rtw89_h2c_rf_iqk *h2c;
6021 u32 len = sizeof(*h2c);
6022 struct sk_buff *skb;
6023 int ret;
6024
6025 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
6026 if (!skb) {
6027 rtw89_err(rtwdev, "failed to alloc skb for h2c RF IQK\n");
6028 return -ENOMEM;
6029 }
6030 skb_put(skb, len);
6031 h2c = (struct rtw89_h2c_rf_iqk *)skb->data;
6032
6033 h2c->phy_idx = cpu_to_le32(phy_idx);
6034 h2c->dbcc = cpu_to_le32(rtwdev->dbcc_en);
6035
6036 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
6037 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
6038 H2C_FUNC_RFK_IQK_OFFLOAD, 0, 0, len);
6039
6040 ret = rtw89_h2c_tx(rtwdev, skb, false);
6041 if (ret) {
6042 rtw89_err(rtwdev, "failed to send h2c\n");
6043 goto fail;
6044 }
6045
6046 return 0;
6047 fail:
6048 dev_kfree_skb_any(skb);
6049
6050 return ret;
6051 }
6052
6053 int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
6054 const struct rtw89_chan *chan)
6055 {
6056 struct rtw89_h2c_rf_dpk *h2c;
6057 u32 len = sizeof(*h2c);
6058 struct sk_buff *skb;
6059 int ret;
6060
6061 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
6062 if (!skb) {
6063 rtw89_err(rtwdev, "failed to alloc skb for h2c RF DPK\n");
6064 return -ENOMEM;
6065 }
6066 skb_put(skb, len);
6067 h2c = (struct rtw89_h2c_rf_dpk *)skb->data;
6068
6069 h2c->len = len;
6070 h2c->phy = phy_idx;
6071 h2c->dpk_enable = true;
6072 h2c->kpath = RF_AB;
6073 h2c->cur_band = chan->band_type;
6074 h2c->cur_bw = chan->band_width;
6075 h2c->cur_ch = chan->channel;
6076 h2c->dpk_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK);
6077
6078 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
6079 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
6080 H2C_FUNC_RFK_DPK_OFFLOAD, 0, 0, len);
6081
6082 ret = rtw89_h2c_tx(rtwdev, skb, false);
6083 if (ret) {
6084 rtw89_err(rtwdev, "failed to send h2c\n");
6085 goto fail;
6086 }
6087
6088 return 0;
6089 fail:
6090 dev_kfree_skb_any(skb);
6091
6092 return ret;
6093 }
6094
6095 int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
6096 const struct rtw89_chan *chan)
6097 {
6098 struct rtw89_hal *hal = &rtwdev->hal;
6099 struct rtw89_h2c_rf_txgapk *h2c;
6100 u32 len = sizeof(*h2c);
6101 struct sk_buff *skb;
6102 int ret;
6103
6104 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
6105 if (!skb) {
6106 rtw89_err(rtwdev, "failed to alloc skb for h2c RF TXGAPK\n");
6107 return -ENOMEM;
6108 }
6109 skb_put(skb, len);
6110 h2c = (struct rtw89_h2c_rf_txgapk *)skb->data;
6111
6112 h2c->len = len;
6113 h2c->ktype = 2;
6114 h2c->phy = phy_idx;
6115 h2c->kpath = RF_AB;
6116 h2c->band = chan->band_type;
6117 h2c->bw = chan->band_width;
6118 h2c->ch = chan->channel;
6119 h2c->cv = hal->cv;
6120
6121 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
6122 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
6123 H2C_FUNC_RFK_TXGAPK_OFFLOAD, 0, 0, len);
6124
6125 ret = rtw89_h2c_tx(rtwdev, skb, false);
6126 if (ret) {
6127 rtw89_err(rtwdev, "failed to send h2c\n");
6128 goto fail;
6129 }
6130
6131 return 0;
6132 fail:
6133 dev_kfree_skb_any(skb);
6134
6135 return ret;
6136 }
6137
6138 int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
6139 const struct rtw89_chan *chan)
6140 {
6141 struct rtw89_h2c_rf_dack *h2c;
6142 u32 len = sizeof(*h2c);
6143 struct sk_buff *skb;
6144 int ret;
6145
6146 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
6147 if (!skb) {
6148 rtw89_err(rtwdev, "failed to alloc skb for h2c RF DACK\n");
6149 return -ENOMEM;
6150 }
6151 skb_put(skb, len);
6152 h2c = (struct rtw89_h2c_rf_dack *)skb->data;
6153
6154 h2c->len = cpu_to_le32(len);
6155 h2c->phy = cpu_to_le32(phy_idx);
6156 h2c->type = cpu_to_le32(0);
6157
6158 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
6159 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
6160 H2C_FUNC_RFK_DACK_OFFLOAD, 0, 0, len);
6161
6162 ret = rtw89_h2c_tx(rtwdev, skb, false);
6163 if (ret) {
6164 rtw89_err(rtwdev, "failed to send h2c\n");
6165 goto fail;
6166 }
6167
6168 return 0;
6169 fail:
6170 dev_kfree_skb_any(skb);
6171
6172 return ret;
6173 }
6174
6175 int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
6176 const struct rtw89_chan *chan, bool is_chl_k)
6177 {
6178 struct rtw89_h2c_rf_rxdck_v0 *v0;
6179 struct rtw89_h2c_rf_rxdck *h2c;
6180 u32 len = sizeof(*h2c);
6181 struct sk_buff *skb;
6182 int ver = -1;
6183 int ret;
6184
6185 if (RTW89_CHK_FW_FEATURE(RFK_RXDCK_V0, &rtwdev->fw)) {
6186 len = sizeof(*v0);
6187 ver = 0;
6188 }
6189
6190 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
6191 if (!skb) {
6192 rtw89_err(rtwdev, "failed to alloc skb for h2c RF RXDCK\n");
6193 return -ENOMEM;
6194 }
6195 skb_put(skb, len);
6196 v0 = (struct rtw89_h2c_rf_rxdck_v0 *)skb->data;
6197
6198 v0->len = len;
6199 v0->phy = phy_idx;
6200 v0->is_afe = false;
6201 v0->kpath = RF_AB;
6202 v0->cur_band = chan->band_type;
6203 v0->cur_bw = chan->band_width;
6204 v0->cur_ch = chan->channel;
6205 v0->rxdck_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK);
6206
6207 if (ver == 0)
6208 goto hdr;
6209
6210 h2c = (struct rtw89_h2c_rf_rxdck *)skb->data;
6211 h2c->is_chl_k = is_chl_k;
6212
6213 hdr:
6214 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
6215 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
6216 H2C_FUNC_RFK_RXDCK_OFFLOAD, 0, 0, len);
6217
6218 ret = rtw89_h2c_tx(rtwdev, skb, false);
6219 if (ret) {
6220 rtw89_err(rtwdev, "failed to send h2c\n");
6221 goto fail;
6222 }
6223
6224 return 0;
6225 fail:
6226 dev_kfree_skb_any(skb);
6227
6228 return ret;
6229 }
6230
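/* Send a caller-built outsource-category H2C. The raw payload is wrapped
 * with the standard H2C header; rack/dack are passed through as the
 * header's acknowledgment request flags.
 */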
6231 int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
6232 u8 h2c_class, u8 h2c_func, u8 *buf, u16 len,
6233 bool rack, bool dack)
6234 {
6235 struct sk_buff *skb;
6236 int ret;
6237
6238 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
6239 if (!skb) {
6240 rtw89_err(rtwdev, "failed to alloc skb for raw with hdr\n");
6241 return -ENOMEM;
6242 }
6243 skb_put_data(skb, buf, len);
6244
6245 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
6246 H2C_CAT_OUTSRC, h2c_class, h2c_func, rack, dack,
6247 len);
6248
6249 ret = rtw89_h2c_tx(rtwdev, skb, false);
6250 if (ret) {
6251 rtw89_err(rtwdev, "failed to send h2c\n");
6252 goto fail;
6253 }
6254
6255 return 0;
6256 fail:
6257 dev_kfree_skb_any(skb);
6258
6259 return ret;
6260 }
6261
6262 int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len)
6263 {
6264 struct sk_buff *skb;
6265 int ret;
6266
6267 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, len);
6268 if (!skb) {
6269 rtw89_err(rtwdev, "failed to alloc skb for h2c raw\n");
6270 return -ENOMEM;
6271 }
6272 skb_put_data(skb, buf, len);
6273
6274 ret = rtw89_h2c_tx(rtwdev, skb, false);
6275 if (ret) {
6276 rtw89_err(rtwdev, "failed to send h2c\n");
6277 goto fail;
6278 }
6279
6280 return 0;
6281 fail:
6282 dev_kfree_skb_any(skb);
6283
6284 return ret;
6285 }
6286
6287 void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev)
6288 {
6289 struct rtw89_early_h2c *early_h2c;
6290
6291 lockdep_assert_wiphy(rtwdev->hw->wiphy);
6292
6293 list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) {
6294 rtw89_fw_h2c_raw(rtwdev, early_h2c->h2c, early_h2c->h2c_len);
6295 }
6296 }
6297
6298 void __rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev)
6299 {
6300 struct rtw89_early_h2c *early_h2c, *tmp;
6301
6302 list_for_each_entry_safe(early_h2c, tmp, &rtwdev->early_h2c_list, list) {
6303 list_del(&early_h2c->list);
6304 kfree(early_h2c->h2c);
6305 kfree(early_h2c);
6306 }
6307 }
6308
6309 void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev)
6310 {
6311 lockdep_assert_wiphy(rtwdev->hw->wiphy);
6312
6313 __rtw89_fw_free_all_early_h2c(rtwdev);
6314 }
6315
6316 static void rtw89_fw_c2h_parse_attr(struct sk_buff *c2h)
6317 {
6318 const struct rtw89_c2h_hdr *hdr = (const struct rtw89_c2h_hdr *)c2h->data;
6319 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h);
6320
6321 attr->category = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CATEGORY);
6322 attr->class = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CLASS);
6323 attr->func = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_FUNC);
6324 attr->len = le32_get_bits(hdr->w1, RTW89_C2H_HDR_W1_LEN);
6325 }
6326
6327 static bool rtw89_fw_c2h_chk_atomic(struct rtw89_dev *rtwdev,
6328 struct sk_buff *c2h)
6329 {
6330 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h);
6331 u8 category = attr->category;
6332 u8 class = attr->class;
6333 u8 func = attr->func;
6334
6335 switch (category) {
6336 default:
6337 return false;
6338 case RTW89_C2H_CAT_MAC:
6339 return rtw89_mac_c2h_chk_atomic(rtwdev, c2h, class, func);
6340 case RTW89_C2H_CAT_OUTSRC:
6341 return rtw89_phy_c2h_chk_atomic(rtwdev, class, func);
6342 }
6343 }
6344
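/* Entry point for C2H events arriving from an IRQ-safe context: events
 * whose class is flagged as atomic-safe are handled and freed immediately,
 * everything else is queued to c2h_work for wiphy (process) context.
 */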
6345 void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h)
6346 {
6347 rtw89_fw_c2h_parse_attr(c2h);
6348 if (!rtw89_fw_c2h_chk_atomic(rtwdev, c2h))
6349 goto enqueue;
6350
6351 rtw89_fw_c2h_cmd_handle(rtwdev, c2h);
6352 dev_kfree_skb_any(c2h);
6353 return;
6354
6355 enqueue:
6356 skb_queue_tail(&rtwdev->c2h_queue, c2h);
6357 wiphy_work_queue(rtwdev->hw->wiphy, &rtwdev->c2h_work);
6358 }
6359
6360 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
6361 struct sk_buff *skb)
6362 {
6363 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(skb);
6364 u8 category = attr->category;
6365 u8 class = attr->class;
6366 u8 func = attr->func;
6367 u16 len = attr->len;
6368 bool dump = true;
6369
6370 if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags))
6371 return;
6372
6373 switch (category) {
6374 case RTW89_C2H_CAT_TEST:
6375 break;
6376 case RTW89_C2H_CAT_MAC:
6377 rtw89_mac_c2h_handle(rtwdev, skb, len, class, func);
6378 if (class == RTW89_MAC_C2H_CLASS_INFO &&
6379 func == RTW89_MAC_C2H_FUNC_C2H_LOG)
6380 dump = false;
6381 break;
6382 case RTW89_C2H_CAT_OUTSRC:
6383 if (class >= RTW89_PHY_C2H_CLASS_BTC_MIN &&
6384 class <= RTW89_PHY_C2H_CLASS_BTC_MAX)
6385 rtw89_btc_c2h_handle(rtwdev, skb, len, class, func);
6386 else
6387 rtw89_phy_c2h_handle(rtwdev, skb, len, class, func);
6388 break;
6389 }
6390
6391 if (dump)
6392 rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "C2H: ", skb->data, skb->len);
6393 }
6394
6395 void rtw89_fw_c2h_work(struct wiphy *wiphy, struct wiphy_work *work)
6396 {
6397 struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
6398 c2h_work);
6399 struct sk_buff *skb, *tmp;
6400
6401 lockdep_assert_wiphy(rtwdev->hw->wiphy);
6402
6403 skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) {
6404 skb_unlink(skb, &rtwdev->c2h_queue);
6405 rtw89_fw_c2h_cmd_handle(rtwdev, skb);
6406 dev_kfree_skb_any(skb);
6407 }
6408 }
6409
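/* Register-based H2C path: wait for firmware to clear the control
 * register, write the header and payload words into the H2C registers,
 * advance the rolling H2C counter and kick firmware via the trigger bit.
 */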
6410 static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev,
6411 struct rtw89_mac_h2c_info *info)
6412 {
6413 const struct rtw89_chip_info *chip = rtwdev->chip;
6414 struct rtw89_fw_info *fw_info = &rtwdev->fw;
6415 const u32 *h2c_reg = chip->h2c_regs;
6416 u8 i, val, len;
6417 int ret;
6418
6419 ret = read_poll_timeout(rtw89_read8, val, val == 0, 1000, 5000, false,
6420 rtwdev, chip->h2c_ctrl_reg);
6421 if (ret) {
6422 rtw89_warn(rtwdev, "FW does not process h2c registers\n");
6423 return ret;
6424 }
6425
6426 len = DIV_ROUND_UP(info->content_len + RTW89_H2CREG_HDR_LEN,
6427 sizeof(info->u.h2creg[0]));
6428
6429 u32p_replace_bits(&info->u.hdr.w0, info->id, RTW89_H2CREG_HDR_FUNC_MASK);
6430 u32p_replace_bits(&info->u.hdr.w0, len, RTW89_H2CREG_HDR_LEN_MASK);
6431
6432 for (i = 0; i < RTW89_H2CREG_MAX; i++)
6433 rtw89_write32(rtwdev, h2c_reg[i], info->u.h2creg[i]);
6434
6435 fw_info->h2c_counter++;
6436 rtw89_write8_mask(rtwdev, chip->h2c_counter_reg.addr,
6437 chip->h2c_counter_reg.mask, fw_info->h2c_counter);
6438 rtw89_write8(rtwdev, chip->h2c_ctrl_reg, B_AX_H2CREG_TRIGGER);
6439
6440 return 0;
6441 }
6442
6443 static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev,
6444 struct rtw89_mac_c2h_info *info)
6445 {
6446 const struct rtw89_chip_info *chip = rtwdev->chip;
6447 struct rtw89_fw_info *fw_info = &rtwdev->fw;
6448 const u32 *c2h_reg = chip->c2h_regs;
6449 u32 ret;
6450 u8 i, val;
6451
6452 info->id = RTW89_FWCMD_C2HREG_FUNC_NULL;
6453
6454 ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1,
6455 RTW89_C2H_TIMEOUT, false, rtwdev,
6456 chip->c2h_ctrl_reg);
6457 if (ret) {
6458 rtw89_warn(rtwdev, "c2h reg timeout\n");
6459 return ret;
6460 }
6461
6462 for (i = 0; i < RTW89_C2HREG_MAX; i++)
6463 info->u.c2hreg[i] = rtw89_read32(rtwdev, c2h_reg[i]);
6464
6465 rtw89_write8(rtwdev, chip->c2h_ctrl_reg, 0);
6466
6467 info->id = u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_FUNC_MASK);
6468 info->content_len =
6469 (u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_LEN_MASK) << 2) -
6470 RTW89_C2HREG_HDR_LEN;
6471
6472 fw_info->c2h_counter++;
6473 rtw89_write8_mask(rtwdev, chip->c2h_counter_reg.addr,
6474 chip->c2h_counter_reg.mask, fw_info->c2h_counter);
6475
6476 return 0;
6477 }
6478
6479 int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev,
6480 struct rtw89_mac_h2c_info *h2c_info,
6481 struct rtw89_mac_c2h_info *c2h_info)
6482 {
6483 u32 ret;
6484
6485 if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE)
6486 lockdep_assert_wiphy(rtwdev->hw->wiphy);
6487
6488 if (!h2c_info && !c2h_info)
6489 return -EINVAL;
6490
6491 if (!h2c_info)
6492 goto recv_c2h;
6493
6494 ret = rtw89_fw_write_h2c_reg(rtwdev, h2c_info);
6495 if (ret)
6496 return ret;
6497
6498 recv_c2h:
6499 if (!c2h_info)
6500 return 0;
6501
6502 ret = rtw89_fw_read_c2h_reg(rtwdev, c2h_info);
6503 if (ret)
6504 return ret;
6505
6506 return 0;
6507 }
6508
6509 void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev)
6510 {
6511 if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) {
6512 rtw89_err(rtwdev, "[ERR]pwr is off\n");
6513 return;
6514 }
6515
6516 rtw89_info(rtwdev, "FW status = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM0));
6517 rtw89_info(rtwdev, "FW BADADDR = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM1));
6518 rtw89_info(rtwdev, "FW EPC/RA = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM2));
6519 rtw89_info(rtwdev, "FW MISC = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM3));
6520 rtw89_info(rtwdev, "R_AX_HALT_C2H = 0x%x\n",
6521 rtw89_read32(rtwdev, R_AX_HALT_C2H));
6522 rtw89_info(rtwdev, "R_AX_SER_DBG_INFO = 0x%x\n",
6523 rtw89_read32(rtwdev, R_AX_SER_DBG_INFO));
6524
6525 rtw89_fw_prog_cnt_dump(rtwdev);
6526 }
6527
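/* Tear down the per-band probe request lists built for hardware scan,
 * asking firmware to delete every packet that is still marked as offloaded.
 */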
6528 static void rtw89_release_pkt_list(struct rtw89_dev *rtwdev)
6529 {
6530 struct list_head *pkt_list = rtwdev->scan_info.pkt_list;
6531 struct rtw89_pktofld_info *info, *tmp;
6532 u8 idx;
6533
6534 for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) {
6535 if (!(rtwdev->chip->support_bands & BIT(idx)))
6536 continue;
6537
6538 list_for_each_entry_safe(info, tmp, &pkt_list[idx], list) {
6539 if (test_bit(info->id, rtwdev->pkt_offload))
6540 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
6541 list_del(&info->list);
6542 kfree(info);
6543 }
6544 }
6545 }
6546
6547 static bool rtw89_is_6ghz_wildcard_probe_req(struct rtw89_dev *rtwdev,
6548 struct cfg80211_scan_request *req,
6549 struct rtw89_pktofld_info *info,
6550 enum nl80211_band band, u8 ssid_idx)
6551 {
6552 if (band != NL80211_BAND_6GHZ)
6553 return false;
6554
6555 if (req->ssids[ssid_idx].ssid_len) {
6556 memcpy(info->ssid, req->ssids[ssid_idx].ssid,
6557 req->ssids[ssid_idx].ssid_len);
6558 info->ssid_len = req->ssids[ssid_idx].ssid_len;
6559 return false;
6560 } else {
6561 info->wildcard_6ghz = true;
6562 return true;
6563 }
6564 }
6565
6566 static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev,
6567 struct rtw89_vif_link *rtwvif_link,
6568 struct sk_buff *skb, u8 ssid_idx)
6569 {
6570 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
6571 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
6572 struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
6573 struct cfg80211_scan_request *req = rtwvif->scan_req;
6574 struct rtw89_pktofld_info *info;
6575 struct sk_buff *new;
6576 int ret = 0;
6577 u8 band;
6578
6579 for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
6580 if (!(rtwdev->chip->support_bands & BIT(band)))
6581 continue;
6582
6583 new = skb_copy(skb, GFP_KERNEL);
6584 if (!new) {
6585 ret = -ENOMEM;
6586 goto out;
6587 }
6588 skb_put_data(new, ies->ies[band], ies->len[band]);
6589 skb_put_data(new, ies->common_ies, ies->common_ie_len);
6590
6591 info = kzalloc(sizeof(*info), GFP_KERNEL);
6592 if (!info) {
6593 ret = -ENOMEM;
6594 kfree_skb(new);
6595 goto out;
6596 }
6597
6598 rtw89_is_6ghz_wildcard_probe_req(rtwdev, req, info, band, ssid_idx);
6599
6600 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new);
6601 if (ret) {
6602 kfree_skb(new);
6603 kfree(info);
6604 goto out;
6605 }
6606
6607 list_add_tail(&info->list, &scan_info->pkt_list[band]);
6608 kfree_skb(new);
6609 }
6610 out:
6611 return ret;
6612 }
6613
6614 static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev,
6615 struct rtw89_vif_link *rtwvif_link)
6616 {
6617 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
6618 struct cfg80211_scan_request *req = rtwvif->scan_req;
6619 struct sk_buff *skb;
6620 u8 num = req->n_ssids, i;
6621 int ret;
6622
6623 for (i = 0; i < num; i++) {
6624 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif_link->mac_addr,
6625 req->ssids[i].ssid,
6626 req->ssids[i].ssid_len,
6627 req->ie_len);
6628 if (!skb)
6629 return -ENOMEM;
6630
6631 ret = rtw89_append_probe_req_ie(rtwdev, rtwvif_link, skb, i);
6632 kfree_skb(skb);
6633
6634 if (ret)
6635 return ret;
6636 }
6637
6638 return 0;
6639 }
6640
6641 static int rtw89_update_6ghz_rnr_chan(struct rtw89_dev *rtwdev,
6642 struct ieee80211_scan_ies *ies,
6643 struct cfg80211_scan_request *req,
6644 struct rtw89_mac_chinfo *ch_info)
6645 {
6646 struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif;
6647 struct list_head *pkt_list = rtwdev->scan_info.pkt_list;
6648 struct cfg80211_scan_6ghz_params *params;
6649 struct rtw89_pktofld_info *info, *tmp;
6650 struct ieee80211_hdr *hdr;
6651 struct sk_buff *skb;
6652 bool found;
6653 int ret = 0;
6654 u8 i;
6655
6656 if (!req->n_6ghz_params)
6657 return 0;
6658
6659 for (i = 0; i < req->n_6ghz_params; i++) {
6660 params = &req->scan_6ghz_params[i];
6661
6662 if (req->channels[params->channel_idx]->hw_value !=
6663 ch_info->pri_ch)
6664 continue;
6665
6666 found = false;
6667 list_for_each_entry(tmp, &pkt_list[NL80211_BAND_6GHZ], list) {
6668 if (ether_addr_equal(tmp->bssid, params->bssid)) {
6669 found = true;
6670 break;
6671 }
6672 }
6673 if (found)
6674 continue;
6675
6676 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif_link->mac_addr,
6677 NULL, 0, req->ie_len);
6678 if (!skb)
6679 return -ENOMEM;
6680
6681 skb_put_data(skb, ies->ies[NL80211_BAND_6GHZ], ies->len[NL80211_BAND_6GHZ]);
6682 skb_put_data(skb, ies->common_ies, ies->common_ie_len);
6683 hdr = (struct ieee80211_hdr *)skb->data;
6684 ether_addr_copy(hdr->addr3, params->bssid);
6685
6686 info = kzalloc(sizeof(*info), GFP_KERNEL);
6687 if (!info) {
6688 ret = -ENOMEM;
6689 kfree_skb(skb);
6690 goto out;
6691 }
6692
6693 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb);
6694 if (ret) {
6695 kfree_skb(skb);
6696 kfree(info);
6697 goto out;
6698 }
6699
6700 ether_addr_copy(info->bssid, params->bssid);
6701 info->channel_6ghz = req->channels[params->channel_idx]->hw_value;
6702 list_add_tail(&info->list, &rtwdev->scan_info.pkt_list[NL80211_BAND_6GHZ]);
6703
6704 ch_info->tx_pkt = true;
6705 ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G;
6706
6707 kfree_skb(skb);
6708 }
6709
6710 out:
6711 return ret;
6712 }
6713
6714 static void rtw89_pno_scan_add_chan_ax(struct rtw89_dev *rtwdev,
6715 int chan_type, int ssid_num,
6716 struct rtw89_mac_chinfo *ch_info)
6717 {
6718 struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
6719 struct rtw89_pktofld_info *info;
6720 u8 probe_count = 0;
6721
6722 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
6723 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
6724 ch_info->bw = RTW89_SCAN_WIDTH;
6725 ch_info->tx_pkt = true;
6726 ch_info->cfg_tx_pwr = false;
6727 ch_info->tx_pwr_idx = 0;
6728 ch_info->tx_null = false;
6729 ch_info->pause_data = false;
6730 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
6731
6732 if (ssid_num) {
6733 list_for_each_entry(info, &rtw_wow->pno_pkt_list, list) {
6734 if (info->channel_6ghz &&
6735 ch_info->pri_ch != info->channel_6ghz)
6736 continue;
6737 else if (info->channel_6ghz && probe_count != 0)
6738 ch_info->period += RTW89_CHANNEL_TIME_6G;
6739
6740 if (info->wildcard_6ghz)
6741 continue;
6742
6743 ch_info->pkt_id[probe_count++] = info->id;
6744 if (probe_count >= RTW89_SCANOFLD_MAX_SSID)
6745 break;
6746 }
6747 ch_info->num_pkt = probe_count;
6748 }
6749
6750 switch (chan_type) {
6751 case RTW89_CHAN_DFS:
6752 if (ch_info->ch_band != RTW89_BAND_6G)
6753 ch_info->period = max_t(u8, ch_info->period,
6754 RTW89_DFS_CHAN_TIME);
6755 ch_info->dwell_time = RTW89_DWELL_TIME;
6756 break;
6757 case RTW89_CHAN_ACTIVE:
6758 break;
6759 default:
6760 rtw89_err(rtwdev, "Channel type out of bounds\n");
6761 }
6762 }
6763
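/* Fill one AX scan channel entry. Passive behaviour is chosen for 6 GHz
 * channels that are non-PSC or have only a wildcard SSID; operating-channel
 * entries take channel/bandwidth from op_chan and send NULL data instead of
 * probes; DFS entries get an extended period and a dwell time.
 */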
6764 static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type,
6765 int ssid_num,
6766 struct rtw89_mac_chinfo *ch_info)
6767 {
6768 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
6769 struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif;
6770 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
6771 struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
6772 struct cfg80211_scan_request *req = rtwvif->scan_req;
6773 struct rtw89_chan *op = &rtwdev->scan_info.op_chan;
6774 struct rtw89_pktofld_info *info;
6775 u8 band, probe_count = 0;
6776 int ret;
6777
6778 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
6779 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
6780 ch_info->bw = RTW89_SCAN_WIDTH;
6781 ch_info->tx_pkt = true;
6782 ch_info->cfg_tx_pwr = false;
6783 ch_info->tx_pwr_idx = 0;
6784 ch_info->tx_null = false;
6785 ch_info->pause_data = false;
6786 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
6787
6788 if (ch_info->ch_band == RTW89_BAND_6G) {
6789 if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) ||
6790 !ch_info->is_psc) {
6791 ch_info->tx_pkt = false;
6792 if (!req->duration_mandatory)
6793 ch_info->period -= RTW89_DWELL_TIME_6G;
6794 }
6795 }
6796
6797 ret = rtw89_update_6ghz_rnr_chan(rtwdev, ies, req, ch_info);
6798 if (ret)
6799 rtw89_warn(rtwdev, "RNR fails: %d\n", ret);
6800
6801 if (ssid_num) {
6802 band = rtw89_hw_to_nl80211_band(ch_info->ch_band);
6803
6804 list_for_each_entry(info, &scan_info->pkt_list[band], list) {
6805 if (info->channel_6ghz &&
6806 ch_info->pri_ch != info->channel_6ghz)
6807 continue;
6808 else if (info->channel_6ghz && probe_count != 0)
6809 ch_info->period += RTW89_CHANNEL_TIME_6G;
6810
6811 if (info->wildcard_6ghz)
6812 continue;
6813
6814 ch_info->pkt_id[probe_count++] = info->id;
6815 if (probe_count >= RTW89_SCANOFLD_MAX_SSID)
6816 break;
6817 }
6818 ch_info->num_pkt = probe_count;
6819 }
6820
6821 switch (chan_type) {
6822 case RTW89_CHAN_OPERATE:
6823 ch_info->central_ch = op->channel;
6824 ch_info->pri_ch = op->primary_channel;
6825 ch_info->ch_band = op->band_type;
6826 ch_info->bw = op->band_width;
6827 ch_info->tx_null = true;
6828 ch_info->num_pkt = 0;
6829 break;
6830 case RTW89_CHAN_DFS:
6831 if (ch_info->ch_band != RTW89_BAND_6G)
6832 ch_info->period = max_t(u8, ch_info->period,
6833 RTW89_DFS_CHAN_TIME);
6834 ch_info->dwell_time = RTW89_DWELL_TIME;
6835 ch_info->pause_data = true;
6836 break;
6837 case RTW89_CHAN_ACTIVE:
6838 ch_info->pause_data = true;
6839 break;
6840 default:
6841 		rtw89_err(rtwdev, "Channel type out of bounds\n");
6842 }
6843 }
6844
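/* BE-chip counterpart of rtw89_pno_scan_add_chan_ax(): fill a
 * rtw89_mac_chinfo_be entry for PNO scan offload and pad the unused packet ID
 * slots with RTW89_SCANOFLD_PKT_NONE.
 */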
6845 static void rtw89_pno_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type,
6846 int ssid_num,
6847 struct rtw89_mac_chinfo_be *ch_info)
6848 {
6849 struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
6850 struct rtw89_pktofld_info *info;
6851 u8 probe_count = 0, i;
6852
6853 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
6854 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
6855 ch_info->bw = RTW89_SCAN_WIDTH;
6856 ch_info->tx_null = false;
6857 ch_info->pause_data = false;
6858 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
6859
6860 if (ssid_num) {
6861 list_for_each_entry(info, &rtw_wow->pno_pkt_list, list) {
6862 ch_info->pkt_id[probe_count++] = info->id;
6863 if (probe_count >= RTW89_SCANOFLD_MAX_SSID)
6864 break;
6865 }
6866 }
6867
6868 for (i = probe_count; i < RTW89_SCANOFLD_MAX_SSID; i++)
6869 ch_info->pkt_id[i] = RTW89_SCANOFLD_PKT_NONE;
6870
6871 switch (chan_type) {
6872 case RTW89_CHAN_DFS:
6873 ch_info->period = max_t(u8, ch_info->period, RTW89_DFS_CHAN_TIME);
6874 ch_info->dwell_time = RTW89_DWELL_TIME;
6875 break;
6876 case RTW89_CHAN_ACTIVE:
6877 break;
6878 default:
6879 		rtw89_warn(rtwdev, "Channel type out of bounds\n");
6880 break;
6881 }
6882 }
6883
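/* BE-chip counterpart of rtw89_hw_scan_add_chan(): fill a rtw89_mac_chinfo_be
 * entry for a normal HW scan, attach matching probe request packet IDs and
 * pad the remaining slots with RTW89_SCANOFLD_PKT_NONE.
 */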
6884 static void rtw89_hw_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type,
6885 int ssid_num,
6886 struct rtw89_mac_chinfo_be *ch_info)
6887 {
6888 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
6889 struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif;
6890 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
6891 struct cfg80211_scan_request *req = rtwvif->scan_req;
6892 struct rtw89_pktofld_info *info;
6893 u8 band, probe_count = 0, i;
6894
6895 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
6896 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
6897 ch_info->bw = RTW89_SCAN_WIDTH;
6898 ch_info->tx_null = false;
6899 ch_info->pause_data = false;
6900 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
6901
6902 if (ssid_num) {
6903 band = rtw89_hw_to_nl80211_band(ch_info->ch_band);
6904
6905 list_for_each_entry(info, &scan_info->pkt_list[band], list) {
6906 if (info->channel_6ghz &&
6907 ch_info->pri_ch != info->channel_6ghz)
6908 continue;
6909
6910 if (info->wildcard_6ghz)
6911 continue;
6912
6913 ch_info->pkt_id[probe_count++] = info->id;
6914 if (probe_count >= RTW89_SCANOFLD_MAX_SSID)
6915 break;
6916 }
6917 }
6918
6919 if (ch_info->ch_band == RTW89_BAND_6G) {
6920 if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) ||
6921 !ch_info->is_psc) {
6922 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
6923 if (!req->duration_mandatory)
6924 ch_info->period -= RTW89_DWELL_TIME_6G;
6925 }
6926 }
6927
6928 for (i = probe_count; i < RTW89_SCANOFLD_MAX_SSID; i++)
6929 ch_info->pkt_id[i] = RTW89_SCANOFLD_PKT_NONE;
6930
6931 switch (chan_type) {
6932 case RTW89_CHAN_DFS:
6933 if (ch_info->ch_band != RTW89_BAND_6G)
6934 ch_info->period =
6935 max_t(u8, ch_info->period, RTW89_DFS_CHAN_TIME);
6936 ch_info->dwell_time = RTW89_DWELL_TIME;
6937 ch_info->pause_data = true;
6938 break;
6939 case RTW89_CHAN_ACTIVE:
6940 ch_info->pause_data = true;
6941 break;
6942 default:
6943 		rtw89_warn(rtwdev, "Channel type out of bounds\n");
6944 break;
6945 }
6946 }
6947
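/* Build the firmware channel list for PNO scan offload on AX chips from the
 * net-detect configuration and download it via
 * rtw89_fw_h2c_scan_list_offload().
 */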
6948 int rtw89_pno_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
6949 struct rtw89_vif_link *rtwvif_link)
6950 {
6951 struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
6952 struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config;
6953 struct rtw89_mac_chinfo *ch_info, *tmp;
6954 struct ieee80211_channel *channel;
6955 struct list_head chan_list;
6956 int list_len;
6957 enum rtw89_chan_type type;
6958 int ret = 0;
6959 u32 idx;
6960
6961 INIT_LIST_HEAD(&chan_list);
6962 for (idx = 0, list_len = 0;
6963 idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_AX;
6964 idx++, list_len++) {
6965 channel = nd_config->channels[idx];
6966 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
6967 if (!ch_info) {
6968 ret = -ENOMEM;
6969 goto out;
6970 }
6971
6972 ch_info->period = RTW89_CHANNEL_TIME;
6973 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band);
6974 ch_info->central_ch = channel->hw_value;
6975 ch_info->pri_ch = channel->hw_value;
6976 ch_info->is_psc = cfg80211_channel_is_psc(channel);
6977
6978 if (channel->flags &
6979 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
6980 type = RTW89_CHAN_DFS;
6981 else
6982 type = RTW89_CHAN_ACTIVE;
6983
6984 rtw89_pno_scan_add_chan_ax(rtwdev, type, nd_config->n_match_sets, ch_info);
6985 list_add_tail(&ch_info->list, &chan_list);
6986 }
6987 ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list);
6988
6989 out:
6990 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
6991 list_del(&ch_info->list);
6992 kfree(ch_info);
6993 }
6994
6995 return ret;
6996 }
6997
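/* Build the firmware channel list for a HW scan on AX chips from the cfg80211
 * scan request, starting at the last downloaded channel index. When
 * connected, an operating-channel entry is inserted whenever the accumulated
 * off-channel time exceeds RTW89_OFF_CHAN_TIME so traffic on the home channel
 * is not starved.
 */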
6998 int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
6999 struct rtw89_vif_link *rtwvif_link, bool connected)
7000 {
7001 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
7002 struct cfg80211_scan_request *req = rtwvif->scan_req;
7003 struct rtw89_mac_chinfo *ch_info, *tmp;
7004 struct ieee80211_channel *channel;
7005 struct list_head chan_list;
7006 bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN;
7007 int list_len, off_chan_time = 0;
7008 enum rtw89_chan_type type;
7009 int ret = 0;
7010 u32 idx;
7011
7012 INIT_LIST_HEAD(&chan_list);
7013 for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0;
7014 idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_AX;
7015 idx++, list_len++) {
7016 channel = req->channels[idx];
7017 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
7018 if (!ch_info) {
7019 ret = -ENOMEM;
7020 goto out;
7021 }
7022
7023 if (req->duration)
7024 ch_info->period = req->duration;
7025 else if (channel->band == NL80211_BAND_6GHZ)
7026 ch_info->period = RTW89_CHANNEL_TIME_6G +
7027 RTW89_DWELL_TIME_6G;
7028 else
7029 ch_info->period = RTW89_CHANNEL_TIME;
7030
7031 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band);
7032 ch_info->central_ch = channel->hw_value;
7033 ch_info->pri_ch = channel->hw_value;
7034 ch_info->rand_seq_num = random_seq;
7035 ch_info->is_psc = cfg80211_channel_is_psc(channel);
7036
7037 if (channel->flags &
7038 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
7039 type = RTW89_CHAN_DFS;
7040 else
7041 type = RTW89_CHAN_ACTIVE;
7042 rtw89_hw_scan_add_chan(rtwdev, type, req->n_ssids, ch_info);
7043
7044 if (connected &&
7045 off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME) {
7046 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
7047 if (!tmp) {
7048 ret = -ENOMEM;
7049 kfree(ch_info);
7050 goto out;
7051 }
7052
7053 type = RTW89_CHAN_OPERATE;
7054 tmp->period = req->duration_mandatory ?
7055 req->duration : RTW89_CHANNEL_TIME;
7056 rtw89_hw_scan_add_chan(rtwdev, type, 0, tmp);
7057 list_add_tail(&tmp->list, &chan_list);
7058 off_chan_time = 0;
7059 list_len++;
7060 }
7061 list_add_tail(&ch_info->list, &chan_list);
7062 off_chan_time += ch_info->period;
7063 }
7064 rtwdev->scan_info.last_chan_idx = idx;
7065 ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list);
7066
7067 out:
7068 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
7069 list_del(&ch_info->list);
7070 kfree(ch_info);
7071 }
7072
7073 return ret;
7074 }
7075
7076 int rtw89_pno_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
7077 struct rtw89_vif_link *rtwvif_link)
7078 {
7079 struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
7080 struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config;
7081 struct rtw89_mac_chinfo_be *ch_info, *tmp;
7082 struct ieee80211_channel *channel;
7083 struct list_head chan_list;
7084 enum rtw89_chan_type type;
7085 int list_len, ret;
7086 u32 idx;
7087
7088 INIT_LIST_HEAD(&chan_list);
7089
7090 for (idx = 0, list_len = 0;
7091 idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_BE;
7092 idx++, list_len++) {
7093 channel = nd_config->channels[idx];
7094 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
7095 if (!ch_info) {
7096 ret = -ENOMEM;
7097 goto out;
7098 }
7099
7100 ch_info->period = RTW89_CHANNEL_TIME;
7101 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band);
7102 ch_info->central_ch = channel->hw_value;
7103 ch_info->pri_ch = channel->hw_value;
7104 ch_info->is_psc = cfg80211_channel_is_psc(channel);
7105
7106 if (channel->flags &
7107 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
7108 type = RTW89_CHAN_DFS;
7109 else
7110 type = RTW89_CHAN_ACTIVE;
7111
7112 rtw89_pno_scan_add_chan_be(rtwdev, type,
7113 nd_config->n_match_sets, ch_info);
7114 list_add_tail(&ch_info->list, &chan_list);
7115 }
7116
7117 ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &chan_list,
7118 rtwvif_link);
7119
7120 out:
7121 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
7122 list_del(&ch_info->list);
7123 kfree(ch_info);
7124 }
7125
7126 return ret;
7127 }
7128
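/* BE-chip counterpart of rtw89_hw_scan_add_chan_list_ax(). No explicit
 * operating-channel entries are interleaved here; for BE chips the operating
 * channel is conveyed through the scan option (num_opch) in
 * rtw89_hw_scan_offload().
 */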
7129 int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
7130 struct rtw89_vif_link *rtwvif_link, bool connected)
7131 {
7132 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
7133 struct cfg80211_scan_request *req = rtwvif->scan_req;
7134 struct rtw89_mac_chinfo_be *ch_info, *tmp;
7135 struct ieee80211_channel *channel;
7136 struct list_head chan_list;
7137 enum rtw89_chan_type type;
7138 int list_len, ret;
7139 bool random_seq;
7140 u32 idx;
7141
7142 random_seq = !!(req->flags & NL80211_SCAN_FLAG_RANDOM_SN);
7143 INIT_LIST_HEAD(&chan_list);
7144
7145 for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0;
7146 idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_BE;
7147 idx++, list_len++) {
7148 channel = req->channels[idx];
7149 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
7150 if (!ch_info) {
7151 ret = -ENOMEM;
7152 goto out;
7153 }
7154
7155 if (req->duration)
7156 ch_info->period = req->duration;
7157 else if (channel->band == NL80211_BAND_6GHZ)
7158 ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G;
7159 else
7160 ch_info->period = RTW89_CHANNEL_TIME;
7161
7162 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band);
7163 ch_info->central_ch = channel->hw_value;
7164 ch_info->pri_ch = channel->hw_value;
7165 ch_info->rand_seq_num = random_seq;
7166 ch_info->is_psc = cfg80211_channel_is_psc(channel);
7167
7168 if (channel->flags & (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
7169 type = RTW89_CHAN_DFS;
7170 else
7171 type = RTW89_CHAN_ACTIVE;
7172 rtw89_hw_scan_add_chan_be(rtwdev, type, req->n_ssids, ch_info);
7173
7174 list_add_tail(&ch_info->list, &chan_list);
7175 }
7176
7177 rtwdev->scan_info.last_chan_idx = idx;
7178 ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &chan_list,
7179 rtwvif_link);
7180
7181 out:
7182 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
7183 list_del(&ch_info->list);
7184 kfree(ch_info);
7185 }
7186
7187 return ret;
7188 }
7189
7190 static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev,
7191 struct rtw89_vif_link *rtwvif_link, bool connected)
7192 {
7193 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
7194 int ret;
7195
7196 ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif_link);
7197 if (ret) {
7198 rtw89_err(rtwdev, "Update probe request failed\n");
7199 goto out;
7200 }
7201 ret = mac->add_chan_list(rtwdev, rtwvif_link, connected);
7202 out:
7203 return ret;
7204 }
7205
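/* Prepare driver state for a HW scan: snapshot the operating channel, stop
 * the TX queues, program a (possibly randomized) scan MAC address, mask the
 * beacon-check, broadcast and A1-match bits out of the RX filter, and pause
 * the channel context for the duration of the scan.
 */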
7206 void rtw89_hw_scan_start(struct rtw89_dev *rtwdev,
7207 struct rtw89_vif_link *rtwvif_link,
7208 struct ieee80211_scan_request *scan_req)
7209 {
7210 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
7211 struct cfg80211_scan_request *req = &scan_req->req;
7212 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
7213 rtwvif_link->chanctx_idx);
7214 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
7215 u32 rx_fltr = rtwdev->hal.rx_fltr;
7216 u8 mac_addr[ETH_ALEN];
7217 u32 reg;
7218
7219 /* clone op and keep it during scan */
7220 rtwdev->scan_info.op_chan = *chan;
7221
7222 rtwdev->scan_info.scanning_vif = rtwvif_link;
7223 rtwdev->scan_info.last_chan_idx = 0;
7224 rtwdev->scan_info.abort = false;
7225 rtwvif->scan_ies = &scan_req->ies;
7226 rtwvif->scan_req = req;
7227 ieee80211_stop_queues(rtwdev->hw);
7228 rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif_link, false);
7229
7230 if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
7231 get_random_mask_addr(mac_addr, req->mac_addr,
7232 req->mac_addr_mask);
7233 else
7234 ether_addr_copy(mac_addr, rtwvif_link->mac_addr);
7235 rtw89_core_scan_start(rtwdev, rtwvif_link, mac_addr, true);
7236
7237 rx_fltr &= ~B_AX_A_BCN_CHK_EN;
7238 rx_fltr &= ~B_AX_A_BC;
7239 rx_fltr &= ~B_AX_A_A1_MATCH;
7240
7241 reg = rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, rtwvif_link->mac_idx);
7242 rtw89_write32_mask(rtwdev, reg, B_AX_RX_FLTR_CFG_MASK, rx_fltr);
7243
7244 rtw89_chanctx_pause(rtwdev, RTW89_CHANCTX_PAUSE_REASON_HW_SCAN);
7245 }
7246
7247 struct rtw89_hw_scan_complete_cb_data {
7248 struct rtw89_vif_link *rtwvif_link;
7249 bool aborted;
7250 };
7251
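/* Callback run by the chanctx framework once the scan has finished: restore
 * the RX filter, report scan completion to mac80211, wake the TX queues,
 * re-enable beaconing for AP vifs and reset the scan state.
 */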
7252 static int rtw89_hw_scan_complete_cb(struct rtw89_dev *rtwdev, void *data)
7253 {
7254 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
7255 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
7256 struct rtw89_hw_scan_complete_cb_data *cb_data = data;
7257 struct rtw89_vif_link *rtwvif_link = cb_data->rtwvif_link;
7258 struct cfg80211_scan_info info = {
7259 .aborted = cb_data->aborted,
7260 };
7261 struct rtw89_vif *rtwvif;
7262 u32 reg;
7263
7264 if (!rtwvif_link)
7265 return -EINVAL;
7266
7267 rtwvif = rtwvif_link->rtwvif;
7268
7269 reg = rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, rtwvif_link->mac_idx);
7270 rtw89_write32_mask(rtwdev, reg, B_AX_RX_FLTR_CFG_MASK, rtwdev->hal.rx_fltr);
7271
7272 rtw89_core_scan_complete(rtwdev, rtwvif_link, true);
7273 ieee80211_scan_completed(rtwdev->hw, &info);
7274 ieee80211_wake_queues(rtwdev->hw);
7275 rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif_link, true);
7276 rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, true);
7277
7278 rtw89_release_pkt_list(rtwdev);
7279 rtwvif->scan_req = NULL;
7280 rtwvif->scan_ies = NULL;
7281 scan_info->last_chan_idx = 0;
7282 scan_info->scanning_vif = NULL;
7283 scan_info->abort = false;
7284
7285 return 0;
7286 }
7287
7288 void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev,
7289 struct rtw89_vif_link *rtwvif_link,
7290 bool aborted)
7291 {
7292 struct rtw89_hw_scan_complete_cb_data cb_data = {
7293 .rtwvif_link = rtwvif_link,
7294 .aborted = aborted,
7295 };
7296 const struct rtw89_chanctx_cb_parm cb_parm = {
7297 .cb = rtw89_hw_scan_complete_cb,
7298 .data = &cb_data,
7299 .caller = __func__,
7300 };
7301
7302 	/* The things done here need to happen after setting the channel (for
7303 	 * coex) and before proceeding with entity mode (for MCC). So, pass them
7304 	 * as a callback for the right sequence rather than doing them directly.
7305 	 */
7306 rtw89_chanctx_proceed(rtwdev, &cb_parm);
7307 }
7308
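/* Abort an in-progress HW scan: tell the firmware to stop scan offload and
 * then run the normal completion path with aborted = true.
 */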
7309 void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev,
7310 struct rtw89_vif_link *rtwvif_link)
7311 {
7312 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
7313 int ret;
7314
7315 scan_info->abort = true;
7316
7317 ret = rtw89_hw_scan_offload(rtwdev, rtwvif_link, false);
7318 if (ret)
7319 rtw89_warn(rtwdev, "rtw89_hw_scan_offload failed ret %d\n", ret);
7320
7321 /* Indicate ieee80211_scan_completed() before returning, which is safe
7322 	 * because the scan abort command always waits for completion of
7323 * RTW89_SCAN_END_SCAN_NOTIFY, so that ieee80211_stop() can flush scan
7324 * work properly.
7325 */
7326 rtw89_hw_scan_complete(rtwdev, rtwvif_link, true);
7327 }
7328
7329 static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev)
7330 {
7331 struct rtw89_vif_link *rtwvif_link;
7332 struct rtw89_vif *rtwvif;
7333 unsigned int link_id;
7334
7335 rtw89_for_each_rtwvif(rtwdev, rtwvif) {
7336 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
7337 			/* A non-zero BSSID implies the link is connected or connecting */
7338 if (!is_zero_ether_addr(rtwvif_link->bssid))
7339 return true;
7340 }
7341 }
7342
7343 return false;
7344 }
7345
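/* Enable or disable firmware scan offload. When enabling, the probe request
 * templates and channel list are downloaded first; BE chips additionally fill
 * extended scan options (operation, band, MLO mode, operating-channel count)
 * before calling into the MAC layer.
 */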
7346 int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev,
7347 struct rtw89_vif_link *rtwvif_link,
7348 bool enable)
7349 {
7350 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
7351 struct rtw89_scan_option opt = {0};
7352 bool connected;
7353 int ret = 0;
7354
7355 if (!rtwvif_link)
7356 return -EINVAL;
7357
7358 connected = rtw89_is_any_vif_connected_or_connecting(rtwdev);
7359 opt.enable = enable;
7360 opt.target_ch_mode = connected;
7361 if (enable) {
7362 ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif_link, connected);
7363 if (ret)
7364 goto out;
7365 }
7366
7367 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) {
7368 opt.operation = enable ? RTW89_SCAN_OP_START : RTW89_SCAN_OP_STOP;
7369 opt.scan_mode = RTW89_SCAN_MODE_SA;
7370 opt.band = rtwvif_link->mac_idx;
7371 opt.num_macc_role = 0;
7372 opt.mlo_mode = rtwdev->mlo_dbcc_mode;
7373 opt.num_opch = connected ? 1 : 0;
7374 opt.opch_end = connected ? 0 : RTW89_CHAN_INVALID;
7375 }
7376
7377 ret = mac->scan_offload(rtwdev, &opt, rtwvif_link, false);
7378 out:
7379 return ret;
7380 }
7381
7382 #define H2C_FW_CPU_EXCEPTION_LEN 4
7383 #define H2C_FW_CPU_EXCEPTION_TYPE_DEF 0x5566
7384 int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev)
7385 {
7386 struct sk_buff *skb;
7387 int ret;
7388
7389 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_FW_CPU_EXCEPTION_LEN);
7390 if (!skb) {
7391 rtw89_err(rtwdev,
7392 "failed to alloc skb for fw cpu exception\n");
7393 return -ENOMEM;
7394 }
7395
7396 skb_put(skb, H2C_FW_CPU_EXCEPTION_LEN);
7397 RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(skb->data,
7398 H2C_FW_CPU_EXCEPTION_TYPE_DEF);
7399
7400 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7401 H2C_CAT_TEST,
7402 H2C_CL_FW_STATUS_TEST,
7403 H2C_FUNC_CPU_EXCEPTION, 0, 0,
7404 H2C_FW_CPU_EXCEPTION_LEN);
7405
7406 ret = rtw89_h2c_tx(rtwdev, skb, false);
7407 if (ret) {
7408 rtw89_err(rtwdev, "failed to send h2c\n");
7409 goto fail;
7410 }
7411
7412 return 0;
7413
7414 fail:
7415 dev_kfree_skb_any(skb);
7416 return ret;
7417 }
7418
7419 #define H2C_PKT_DROP_LEN 24
7420 int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev,
7421 const struct rtw89_pkt_drop_params *params)
7422 {
7423 struct sk_buff *skb;
7424 int ret;
7425
7426 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_PKT_DROP_LEN);
7427 if (!skb) {
7428 rtw89_err(rtwdev,
7429 "failed to alloc skb for packet drop\n");
7430 return -ENOMEM;
7431 }
7432
7433 switch (params->sel) {
7434 case RTW89_PKT_DROP_SEL_MACID_BE_ONCE:
7435 case RTW89_PKT_DROP_SEL_MACID_BK_ONCE:
7436 case RTW89_PKT_DROP_SEL_MACID_VI_ONCE:
7437 case RTW89_PKT_DROP_SEL_MACID_VO_ONCE:
7438 case RTW89_PKT_DROP_SEL_BAND_ONCE:
7439 break;
7440 default:
7441 rtw89_debug(rtwdev, RTW89_DBG_FW,
7442 "H2C of pkt drop might not fully support sel: %d yet\n",
7443 params->sel);
7444 break;
7445 }
7446
7447 skb_put(skb, H2C_PKT_DROP_LEN);
7448 RTW89_SET_FWCMD_PKT_DROP_SEL(skb->data, params->sel);
7449 RTW89_SET_FWCMD_PKT_DROP_MACID(skb->data, params->macid);
7450 RTW89_SET_FWCMD_PKT_DROP_BAND(skb->data, params->mac_band);
7451 RTW89_SET_FWCMD_PKT_DROP_PORT(skb->data, params->port);
7452 RTW89_SET_FWCMD_PKT_DROP_MBSSID(skb->data, params->mbssid);
7453 RTW89_SET_FWCMD_PKT_DROP_ROLE_A_INFO_TF_TRS(skb->data, params->tf_trs);
7454 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_0(skb->data,
7455 params->macid_band_sel[0]);
7456 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_1(skb->data,
7457 params->macid_band_sel[1]);
7458 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_2(skb->data,
7459 params->macid_band_sel[2]);
7460 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_3(skb->data,
7461 params->macid_band_sel[3]);
7462
7463 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7464 H2C_CAT_MAC,
7465 H2C_CL_MAC_FW_OFLD,
7466 H2C_FUNC_PKT_DROP, 0, 0,
7467 H2C_PKT_DROP_LEN);
7468
7469 ret = rtw89_h2c_tx(rtwdev, skb, false);
7470 if (ret) {
7471 rtw89_err(rtwdev, "failed to send h2c\n");
7472 goto fail;
7473 }
7474
7475 return 0;
7476
7477 fail:
7478 dev_kfree_skb_any(skb);
7479 return ret;
7480 }
7481
7482 #define H2C_KEEP_ALIVE_LEN 4
7483 int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
7484 bool enable)
7485 {
7486 struct sk_buff *skb;
7487 u8 pkt_id = 0;
7488 int ret;
7489
7490 if (enable) {
7491 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
7492 RTW89_PKT_OFLD_TYPE_NULL_DATA,
7493 &pkt_id);
7494 if (ret)
7495 return -EPERM;
7496 }
7497
7498 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_KEEP_ALIVE_LEN);
7499 if (!skb) {
7500 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n");
7501 return -ENOMEM;
7502 }
7503
7504 skb_put(skb, H2C_KEEP_ALIVE_LEN);
7505
7506 RTW89_SET_KEEP_ALIVE_ENABLE(skb->data, enable);
7507 RTW89_SET_KEEP_ALIVE_PKT_NULL_ID(skb->data, pkt_id);
7508 RTW89_SET_KEEP_ALIVE_PERIOD(skb->data, 5);
7509 RTW89_SET_KEEP_ALIVE_MACID(skb->data, rtwvif_link->mac_id);
7510
7511 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7512 H2C_CAT_MAC,
7513 H2C_CL_MAC_WOW,
7514 H2C_FUNC_KEEP_ALIVE, 0, 1,
7515 H2C_KEEP_ALIVE_LEN);
7516
7517 ret = rtw89_h2c_tx(rtwdev, skb, false);
7518 if (ret) {
7519 rtw89_err(rtwdev, "failed to send h2c\n");
7520 goto fail;
7521 }
7522
7523 return 0;
7524
7525 fail:
7526 dev_kfree_skb_any(skb);
7527
7528 return ret;
7529 }
7530
7531 int rtw89_fw_h2c_arp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
7532 bool enable)
7533 {
7534 struct rtw89_h2c_arp_offload *h2c;
7535 u32 len = sizeof(*h2c);
7536 struct sk_buff *skb;
7537 u8 pkt_id = 0;
7538 int ret;
7539
7540 if (enable) {
7541 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
7542 RTW89_PKT_OFLD_TYPE_ARP_RSP,
7543 &pkt_id);
7544 if (ret)
7545 return ret;
7546 }
7547
7548 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
7549 if (!skb) {
7550 rtw89_err(rtwdev, "failed to alloc skb for arp offload\n");
7551 return -ENOMEM;
7552 }
7553
7554 skb_put(skb, len);
7555 h2c = (struct rtw89_h2c_arp_offload *)skb->data;
7556
7557 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_ARP_OFFLOAD_W0_ENABLE) |
7558 le32_encode_bits(0, RTW89_H2C_ARP_OFFLOAD_W0_ACTION) |
7559 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_ARP_OFFLOAD_W0_MACID) |
7560 le32_encode_bits(pkt_id, RTW89_H2C_ARP_OFFLOAD_W0_PKT_ID);
7561
7562 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7563 H2C_CAT_MAC,
7564 H2C_CL_MAC_WOW,
7565 H2C_FUNC_ARP_OFLD, 0, 1,
7566 len);
7567
7568 ret = rtw89_h2c_tx(rtwdev, skb, false);
7569 if (ret) {
7570 rtw89_err(rtwdev, "failed to send h2c\n");
7571 goto fail;
7572 }
7573
7574 return 0;
7575
7576 fail:
7577 dev_kfree_skb_any(skb);
7578
7579 return ret;
7580 }
7581
7582 #define H2C_DISCONNECT_DETECT_LEN 8
7583 int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev,
7584 struct rtw89_vif_link *rtwvif_link, bool enable)
7585 {
7586 struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
7587 struct sk_buff *skb;
7588 u8 macid = rtwvif_link->mac_id;
7589 int ret;
7590
7591 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DISCONNECT_DETECT_LEN);
7592 if (!skb) {
7593 		rtw89_err(rtwdev, "failed to alloc skb for disconnect detect\n");
7594 return -ENOMEM;
7595 }
7596
7597 skb_put(skb, H2C_DISCONNECT_DETECT_LEN);
7598
7599 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) {
7600 RTW89_SET_DISCONNECT_DETECT_ENABLE(skb->data, enable);
7601 RTW89_SET_DISCONNECT_DETECT_DISCONNECT(skb->data, !enable);
7602 RTW89_SET_DISCONNECT_DETECT_MAC_ID(skb->data, macid);
7603 RTW89_SET_DISCONNECT_DETECT_CHECK_PERIOD(skb->data, 100);
7604 RTW89_SET_DISCONNECT_DETECT_TRY_PKT_COUNT(skb->data, 5);
7605 }
7606
7607 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7608 H2C_CAT_MAC,
7609 H2C_CL_MAC_WOW,
7610 H2C_FUNC_DISCONNECT_DETECT, 0, 1,
7611 H2C_DISCONNECT_DETECT_LEN);
7612
7613 ret = rtw89_h2c_tx(rtwdev, skb, false);
7614 if (ret) {
7615 rtw89_err(rtwdev, "failed to send h2c\n");
7616 goto fail;
7617 }
7618
7619 return 0;
7620
7621 fail:
7622 dev_kfree_skb_any(skb);
7623
7624 return ret;
7625 }
7626
7627 int rtw89_fw_h2c_cfg_pno(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
7628 bool enable)
7629 {
7630 struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
7631 struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config;
7632 struct rtw89_h2c_cfg_nlo *h2c;
7633 u32 len = sizeof(*h2c);
7634 struct sk_buff *skb;
7635 int ret, i;
7636
7637 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
7638 if (!skb) {
7639 rtw89_err(rtwdev, "failed to alloc skb for nlo\n");
7640 return -ENOMEM;
7641 }
7642
7643 skb_put(skb, len);
7644 h2c = (struct rtw89_h2c_cfg_nlo *)skb->data;
7645
7646 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_NLO_W0_ENABLE) |
7647 le32_encode_bits(enable, RTW89_H2C_NLO_W0_IGNORE_CIPHER) |
7648 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_NLO_W0_MACID);
7649
7650 if (enable) {
7651 h2c->nlo_cnt = nd_config->n_match_sets;
7652 for (i = 0 ; i < nd_config->n_match_sets; i++) {
7653 h2c->ssid_len[i] = nd_config->match_sets[i].ssid.ssid_len;
7654 memcpy(h2c->ssid[i], nd_config->match_sets[i].ssid.ssid,
7655 nd_config->match_sets[i].ssid.ssid_len);
7656 }
7657 }
7658
7659 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7660 H2C_CAT_MAC,
7661 H2C_CL_MAC_WOW,
7662 H2C_FUNC_NLO, 0, 1,
7663 len);
7664
7665 ret = rtw89_h2c_tx(rtwdev, skb, false);
7666 if (ret) {
7667 rtw89_err(rtwdev, "failed to send h2c\n");
7668 goto fail;
7669 }
7670
7671 return 0;
7672
7673 fail:
7674 dev_kfree_skb_any(skb);
7675 return ret;
7676 }
7677
7678 int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
7679 bool enable)
7680 {
7681 struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
7682 struct rtw89_h2c_wow_global *h2c;
7683 u8 macid = rtwvif_link->mac_id;
7684 u32 len = sizeof(*h2c);
7685 struct sk_buff *skb;
7686 int ret;
7687
7688 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
7689 if (!skb) {
7690 rtw89_err(rtwdev, "failed to alloc skb for wow global\n");
7691 return -ENOMEM;
7692 }
7693
7694 skb_put(skb, len);
7695 h2c = (struct rtw89_h2c_wow_global *)skb->data;
7696
7697 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_WOW_GLOBAL_W0_ENABLE) |
7698 le32_encode_bits(macid, RTW89_H2C_WOW_GLOBAL_W0_MAC_ID) |
7699 le32_encode_bits(rtw_wow->ptk_alg,
7700 RTW89_H2C_WOW_GLOBAL_W0_PAIRWISE_SEC_ALGO) |
7701 le32_encode_bits(rtw_wow->gtk_alg,
7702 RTW89_H2C_WOW_GLOBAL_W0_GROUP_SEC_ALGO);
7703 h2c->key_info = rtw_wow->key_info;
7704
7705 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7706 H2C_CAT_MAC,
7707 H2C_CL_MAC_WOW,
7708 H2C_FUNC_WOW_GLOBAL, 0, 1,
7709 len);
7710
7711 ret = rtw89_h2c_tx(rtwdev, skb, false);
7712 if (ret) {
7713 rtw89_err(rtwdev, "failed to send h2c\n");
7714 goto fail;
7715 }
7716
7717 return 0;
7718
7719 fail:
7720 dev_kfree_skb_any(skb);
7721
7722 return ret;
7723 }
7724
7725 #define H2C_WAKEUP_CTRL_LEN 4
7726 int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev,
7727 struct rtw89_vif_link *rtwvif_link,
7728 bool enable)
7729 {
7730 struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
7731 struct sk_buff *skb;
7732 u8 macid = rtwvif_link->mac_id;
7733 int ret;
7734
7735 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WAKEUP_CTRL_LEN);
7736 if (!skb) {
7737 rtw89_err(rtwdev, "failed to alloc skb for wakeup ctrl\n");
7738 return -ENOMEM;
7739 }
7740
7741 skb_put(skb, H2C_WAKEUP_CTRL_LEN);
7742
7743 if (rtw_wow->pattern_cnt)
7744 RTW89_SET_WOW_WAKEUP_CTRL_PATTERN_MATCH_ENABLE(skb->data, enable);
7745 if (test_bit(RTW89_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags))
7746 RTW89_SET_WOW_WAKEUP_CTRL_MAGIC_ENABLE(skb->data, enable);
7747 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags))
7748 RTW89_SET_WOW_WAKEUP_CTRL_DEAUTH_ENABLE(skb->data, enable);
7749
7750 RTW89_SET_WOW_WAKEUP_CTRL_MAC_ID(skb->data, macid);
7751
7752 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7753 H2C_CAT_MAC,
7754 H2C_CL_MAC_WOW,
7755 H2C_FUNC_WAKEUP_CTRL, 0, 1,
7756 H2C_WAKEUP_CTRL_LEN);
7757
7758 ret = rtw89_h2c_tx(rtwdev, skb, false);
7759 if (ret) {
7760 rtw89_err(rtwdev, "failed to send h2c\n");
7761 goto fail;
7762 }
7763
7764 return 0;
7765
7766 fail:
7767 dev_kfree_skb_any(skb);
7768
7769 return ret;
7770 }
7771
7772 #define H2C_WOW_CAM_UPD_LEN 24
7773 int rtw89_fw_wow_cam_update(struct rtw89_dev *rtwdev,
7774 struct rtw89_wow_cam_info *cam_info)
7775 {
7776 struct sk_buff *skb;
7777 int ret;
7778
7779 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_CAM_UPD_LEN);
7780 if (!skb) {
7781 		rtw89_err(rtwdev, "failed to alloc skb for wow cam update\n");
7782 return -ENOMEM;
7783 }
7784
7785 skb_put(skb, H2C_WOW_CAM_UPD_LEN);
7786
7787 RTW89_SET_WOW_CAM_UPD_R_W(skb->data, cam_info->r_w);
7788 RTW89_SET_WOW_CAM_UPD_IDX(skb->data, cam_info->idx);
7789 if (cam_info->valid) {
7790 RTW89_SET_WOW_CAM_UPD_WKFM1(skb->data, cam_info->mask[0]);
7791 RTW89_SET_WOW_CAM_UPD_WKFM2(skb->data, cam_info->mask[1]);
7792 RTW89_SET_WOW_CAM_UPD_WKFM3(skb->data, cam_info->mask[2]);
7793 RTW89_SET_WOW_CAM_UPD_WKFM4(skb->data, cam_info->mask[3]);
7794 RTW89_SET_WOW_CAM_UPD_CRC(skb->data, cam_info->crc);
7795 RTW89_SET_WOW_CAM_UPD_NEGATIVE_PATTERN_MATCH(skb->data,
7796 cam_info->negative_pattern_match);
7797 RTW89_SET_WOW_CAM_UPD_SKIP_MAC_HDR(skb->data,
7798 cam_info->skip_mac_hdr);
7799 RTW89_SET_WOW_CAM_UPD_UC(skb->data, cam_info->uc);
7800 RTW89_SET_WOW_CAM_UPD_MC(skb->data, cam_info->mc);
7801 RTW89_SET_WOW_CAM_UPD_BC(skb->data, cam_info->bc);
7802 }
7803 RTW89_SET_WOW_CAM_UPD_VALID(skb->data, cam_info->valid);
7804
7805 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7806 H2C_CAT_MAC,
7807 H2C_CL_MAC_WOW,
7808 H2C_FUNC_WOW_CAM_UPD, 0, 1,
7809 H2C_WOW_CAM_UPD_LEN);
7810
7811 ret = rtw89_h2c_tx(rtwdev, skb, false);
7812 if (ret) {
7813 rtw89_err(rtwdev, "failed to send h2c\n");
7814 goto fail;
7815 }
7816
7817 return 0;
7818 fail:
7819 dev_kfree_skb_any(skb);
7820
7821 return ret;
7822 }
7823
7824 int rtw89_fw_h2c_wow_gtk_ofld(struct rtw89_dev *rtwdev,
7825 struct rtw89_vif_link *rtwvif_link,
7826 bool enable)
7827 {
7828 struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
7829 struct rtw89_wow_gtk_info *gtk_info = &rtw_wow->gtk_info;
7830 struct rtw89_h2c_wow_gtk_ofld *h2c;
7831 u8 macid = rtwvif_link->mac_id;
7832 u32 len = sizeof(*h2c);
7833 u8 pkt_id_sa_query = 0;
7834 struct sk_buff *skb;
7835 u8 pkt_id_eapol = 0;
7836 int ret;
7837
7838 if (!rtw_wow->gtk_alg)
7839 return 0;
7840
7841 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
7842 if (!skb) {
7843 rtw89_err(rtwdev, "failed to alloc skb for gtk ofld\n");
7844 return -ENOMEM;
7845 }
7846
7847 skb_put(skb, len);
7848 h2c = (struct rtw89_h2c_wow_gtk_ofld *)skb->data;
7849
7850 if (!enable)
7851 goto hdr;
7852
7853 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
7854 RTW89_PKT_OFLD_TYPE_EAPOL_KEY,
7855 &pkt_id_eapol);
7856 if (ret)
7857 goto fail;
7858
7859 if (gtk_info->igtk_keyid) {
7860 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
7861 RTW89_PKT_OFLD_TYPE_SA_QUERY,
7862 &pkt_id_sa_query);
7863 if (ret)
7864 goto fail;
7865 }
7866
7867 	/* TKIP is not supported yet */
7868 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_WOW_GTK_OFLD_W0_EN) |
7869 le32_encode_bits(0, RTW89_H2C_WOW_GTK_OFLD_W0_TKIP_EN) |
7870 le32_encode_bits(gtk_info->igtk_keyid ? 1 : 0,
7871 RTW89_H2C_WOW_GTK_OFLD_W0_IEEE80211W_EN) |
7872 le32_encode_bits(macid, RTW89_H2C_WOW_GTK_OFLD_W0_MAC_ID) |
7873 le32_encode_bits(pkt_id_eapol, RTW89_H2C_WOW_GTK_OFLD_W0_GTK_RSP_ID);
7874 h2c->w1 = le32_encode_bits(gtk_info->igtk_keyid ? pkt_id_sa_query : 0,
7875 RTW89_H2C_WOW_GTK_OFLD_W1_PMF_SA_QUERY_ID) |
7876 le32_encode_bits(rtw_wow->akm, RTW89_H2C_WOW_GTK_OFLD_W1_ALGO_AKM_SUIT);
7877 h2c->gtk_info = rtw_wow->gtk_info;
7878
7879 hdr:
7880 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7881 H2C_CAT_MAC,
7882 H2C_CL_MAC_WOW,
7883 H2C_FUNC_GTK_OFLD, 0, 1,
7884 len);
7885
7886 ret = rtw89_h2c_tx(rtwdev, skb, false);
7887 if (ret) {
7888 rtw89_err(rtwdev, "failed to send h2c\n");
7889 goto fail;
7890 }
7891 return 0;
7892 fail:
7893 dev_kfree_skb_any(skb);
7894
7895 return ret;
7896 }
7897
7898 int rtw89_fw_h2c_fwips(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
7899 bool enable)
7900 {
7901 struct rtw89_wait_info *wait = &rtwdev->mac.ps_wait;
7902 struct rtw89_h2c_fwips *h2c;
7903 u32 len = sizeof(*h2c);
7904 struct sk_buff *skb;
7905
7906 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
7907 if (!skb) {
7908 rtw89_err(rtwdev, "failed to alloc skb for fw ips\n");
7909 return -ENOMEM;
7910 }
7911 skb_put(skb, len);
7912 h2c = (struct rtw89_h2c_fwips *)skb->data;
7913
7914 h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_FW_IPS_W0_MACID) |
7915 le32_encode_bits(enable, RTW89_H2C_FW_IPS_W0_ENABLE);
7916
7917 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7918 H2C_CAT_MAC,
7919 H2C_CL_MAC_PS,
7920 H2C_FUNC_IPS_CFG, 0, 1,
7921 len);
7922
7923 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_PS_WAIT_COND_IPS_CFG);
7924 }
7925
7926 int rtw89_fw_h2c_wow_request_aoac(struct rtw89_dev *rtwdev)
7927 {
7928 struct rtw89_wait_info *wait = &rtwdev->wow.wait;
7929 struct rtw89_h2c_wow_aoac *h2c;
7930 u32 len = sizeof(*h2c);
7931 struct sk_buff *skb;
7932
7933 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
7934 if (!skb) {
7935 rtw89_err(rtwdev, "failed to alloc skb for aoac\n");
7936 return -ENOMEM;
7937 }
7938
7939 skb_put(skb, len);
7940
7941 	/* This H2C only notifies the firmware to generate an AOAC report C2H;
7942 	 * it carries no parameters.
7943 	 */
7944 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7945 H2C_CAT_MAC,
7946 H2C_CL_MAC_WOW,
7947 H2C_FUNC_AOAC_REPORT_REQ, 1, 0,
7948 len);
7949
7950 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_WOW_WAIT_COND_AOAC);
7951 }
7952
7953 /* Return < 0 if a failure happens while waiting for the condition.
7954  * Return 0 when waiting for the condition succeeds.
7955  * Return > 0 if the wait is considered unreachable due to driver/FW design,
7956  * where 1 means during SER.
7957  */
7958 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
7959 struct rtw89_wait_info *wait, unsigned int cond)
7960 {
7961 int ret;
7962
7963 ret = rtw89_h2c_tx(rtwdev, skb, false);
7964 if (ret) {
7965 rtw89_err(rtwdev, "failed to send h2c\n");
7966 dev_kfree_skb_any(skb);
7967 return -EBUSY;
7968 }
7969
7970 if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags))
7971 return 1;
7972
7973 return rtw89_wait_for_cond(wait, cond);
7974 }
7975
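/* The MCC (multi-channel concurrency) H2C commands below share a common
 * pattern: build a fixed-size command and hand it to rtw89_h2c_tx_and_wait()
 * with the wait condition that matches the expected C2H response.
 */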
7976 #define H2C_ADD_MCC_LEN 16
7977 int rtw89_fw_h2c_add_mcc(struct rtw89_dev *rtwdev,
7978 const struct rtw89_fw_mcc_add_req *p)
7979 {
7980 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
7981 struct sk_buff *skb;
7982 unsigned int cond;
7983
7984 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ADD_MCC_LEN);
7985 if (!skb) {
7986 rtw89_err(rtwdev,
7987 "failed to alloc skb for add mcc\n");
7988 return -ENOMEM;
7989 }
7990
7991 skb_put(skb, H2C_ADD_MCC_LEN);
7992 RTW89_SET_FWCMD_ADD_MCC_MACID(skb->data, p->macid);
7993 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG0(skb->data, p->central_ch_seg0);
7994 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG1(skb->data, p->central_ch_seg1);
7995 RTW89_SET_FWCMD_ADD_MCC_PRIMARY_CH(skb->data, p->primary_ch);
7996 RTW89_SET_FWCMD_ADD_MCC_BANDWIDTH(skb->data, p->bandwidth);
7997 RTW89_SET_FWCMD_ADD_MCC_GROUP(skb->data, p->group);
7998 RTW89_SET_FWCMD_ADD_MCC_C2H_RPT(skb->data, p->c2h_rpt);
7999 RTW89_SET_FWCMD_ADD_MCC_DIS_TX_NULL(skb->data, p->dis_tx_null);
8000 RTW89_SET_FWCMD_ADD_MCC_DIS_SW_RETRY(skb->data, p->dis_sw_retry);
8001 RTW89_SET_FWCMD_ADD_MCC_IN_CURR_CH(skb->data, p->in_curr_ch);
8002 RTW89_SET_FWCMD_ADD_MCC_SW_RETRY_COUNT(skb->data, p->sw_retry_count);
8003 RTW89_SET_FWCMD_ADD_MCC_TX_NULL_EARLY(skb->data, p->tx_null_early);
8004 RTW89_SET_FWCMD_ADD_MCC_BTC_IN_2G(skb->data, p->btc_in_2g);
8005 RTW89_SET_FWCMD_ADD_MCC_PTA_EN(skb->data, p->pta_en);
8006 RTW89_SET_FWCMD_ADD_MCC_RFK_BY_PASS(skb->data, p->rfk_by_pass);
8007 RTW89_SET_FWCMD_ADD_MCC_CH_BAND_TYPE(skb->data, p->ch_band_type);
8008 RTW89_SET_FWCMD_ADD_MCC_DURATION(skb->data, p->duration);
8009 RTW89_SET_FWCMD_ADD_MCC_COURTESY_EN(skb->data, p->courtesy_en);
8010 RTW89_SET_FWCMD_ADD_MCC_COURTESY_NUM(skb->data, p->courtesy_num);
8011 RTW89_SET_FWCMD_ADD_MCC_COURTESY_TARGET(skb->data, p->courtesy_target);
8012
8013 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8014 H2C_CAT_MAC,
8015 H2C_CL_MCC,
8016 H2C_FUNC_ADD_MCC, 0, 0,
8017 H2C_ADD_MCC_LEN);
8018
8019 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_ADD_MCC);
8020 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
8021 }
8022
8023 #define H2C_START_MCC_LEN 12
8024 int rtw89_fw_h2c_start_mcc(struct rtw89_dev *rtwdev,
8025 const struct rtw89_fw_mcc_start_req *p)
8026 {
8027 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
8028 struct sk_buff *skb;
8029 unsigned int cond;
8030
8031 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_START_MCC_LEN);
8032 if (!skb) {
8033 rtw89_err(rtwdev,
8034 "failed to alloc skb for start mcc\n");
8035 return -ENOMEM;
8036 }
8037
8038 skb_put(skb, H2C_START_MCC_LEN);
8039 RTW89_SET_FWCMD_START_MCC_GROUP(skb->data, p->group);
8040 RTW89_SET_FWCMD_START_MCC_BTC_IN_GROUP(skb->data, p->btc_in_group);
8041 RTW89_SET_FWCMD_START_MCC_OLD_GROUP_ACTION(skb->data, p->old_group_action);
8042 RTW89_SET_FWCMD_START_MCC_OLD_GROUP(skb->data, p->old_group);
8043 RTW89_SET_FWCMD_START_MCC_NOTIFY_CNT(skb->data, p->notify_cnt);
8044 RTW89_SET_FWCMD_START_MCC_NOTIFY_RXDBG_EN(skb->data, p->notify_rxdbg_en);
8045 RTW89_SET_FWCMD_START_MCC_MACID(skb->data, p->macid);
8046 RTW89_SET_FWCMD_START_MCC_TSF_LOW(skb->data, p->tsf_low);
8047 RTW89_SET_FWCMD_START_MCC_TSF_HIGH(skb->data, p->tsf_high);
8048
8049 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8050 H2C_CAT_MAC,
8051 H2C_CL_MCC,
8052 H2C_FUNC_START_MCC, 0, 0,
8053 H2C_START_MCC_LEN);
8054
8055 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_START_MCC);
8056 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
8057 }
8058
8059 #define H2C_STOP_MCC_LEN 4
8060 int rtw89_fw_h2c_stop_mcc(struct rtw89_dev *rtwdev, u8 group, u8 macid,
8061 bool prev_groups)
8062 {
8063 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
8064 struct sk_buff *skb;
8065 unsigned int cond;
8066
8067 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_STOP_MCC_LEN);
8068 if (!skb) {
8069 rtw89_err(rtwdev,
8070 "failed to alloc skb for stop mcc\n");
8071 return -ENOMEM;
8072 }
8073
8074 skb_put(skb, H2C_STOP_MCC_LEN);
8075 RTW89_SET_FWCMD_STOP_MCC_MACID(skb->data, macid);
8076 RTW89_SET_FWCMD_STOP_MCC_GROUP(skb->data, group);
8077 RTW89_SET_FWCMD_STOP_MCC_PREV_GROUPS(skb->data, prev_groups);
8078
8079 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8080 H2C_CAT_MAC,
8081 H2C_CL_MCC,
8082 H2C_FUNC_STOP_MCC, 0, 0,
8083 H2C_STOP_MCC_LEN);
8084
8085 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_STOP_MCC);
8086 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
8087 }
8088
8089 #define H2C_DEL_MCC_GROUP_LEN 4
8090 int rtw89_fw_h2c_del_mcc_group(struct rtw89_dev *rtwdev, u8 group,
8091 bool prev_groups)
8092 {
8093 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
8094 struct sk_buff *skb;
8095 unsigned int cond;
8096
8097 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DEL_MCC_GROUP_LEN);
8098 if (!skb) {
8099 rtw89_err(rtwdev,
8100 "failed to alloc skb for del mcc group\n");
8101 return -ENOMEM;
8102 }
8103
8104 skb_put(skb, H2C_DEL_MCC_GROUP_LEN);
8105 RTW89_SET_FWCMD_DEL_MCC_GROUP_GROUP(skb->data, group);
8106 RTW89_SET_FWCMD_DEL_MCC_GROUP_PREV_GROUPS(skb->data, prev_groups);
8107
8108 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8109 H2C_CAT_MAC,
8110 H2C_CL_MCC,
8111 H2C_FUNC_DEL_MCC_GROUP, 0, 0,
8112 H2C_DEL_MCC_GROUP_LEN);
8113
8114 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_DEL_MCC_GROUP);
8115 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
8116 }
8117
8118 #define H2C_RESET_MCC_GROUP_LEN 4
8119 int rtw89_fw_h2c_reset_mcc_group(struct rtw89_dev *rtwdev, u8 group)
8120 {
8121 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
8122 struct sk_buff *skb;
8123 unsigned int cond;
8124
8125 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RESET_MCC_GROUP_LEN);
8126 if (!skb) {
8127 rtw89_err(rtwdev,
8128 "failed to alloc skb for reset mcc group\n");
8129 return -ENOMEM;
8130 }
8131
8132 skb_put(skb, H2C_RESET_MCC_GROUP_LEN);
8133 RTW89_SET_FWCMD_RESET_MCC_GROUP_GROUP(skb->data, group);
8134
8135 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8136 H2C_CAT_MAC,
8137 H2C_CL_MCC,
8138 H2C_FUNC_RESET_MCC_GROUP, 0, 0,
8139 H2C_RESET_MCC_GROUP_LEN);
8140
8141 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_RESET_MCC_GROUP);
8142 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
8143 }
8144
8145 #define H2C_MCC_REQ_TSF_LEN 4
8146 int rtw89_fw_h2c_mcc_req_tsf(struct rtw89_dev *rtwdev,
8147 const struct rtw89_fw_mcc_tsf_req *req,
8148 struct rtw89_mac_mcc_tsf_rpt *rpt)
8149 {
8150 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
8151 struct rtw89_mac_mcc_tsf_rpt *tmp;
8152 struct sk_buff *skb;
8153 unsigned int cond;
8154 int ret;
8155
8156 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_REQ_TSF_LEN);
8157 if (!skb) {
8158 rtw89_err(rtwdev,
8159 "failed to alloc skb for mcc req tsf\n");
8160 return -ENOMEM;
8161 }
8162
8163 skb_put(skb, H2C_MCC_REQ_TSF_LEN);
8164 RTW89_SET_FWCMD_MCC_REQ_TSF_GROUP(skb->data, req->group);
8165 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_X(skb->data, req->macid_x);
8166 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_Y(skb->data, req->macid_y);
8167
8168 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8169 H2C_CAT_MAC,
8170 H2C_CL_MCC,
8171 H2C_FUNC_MCC_REQ_TSF, 0, 0,
8172 H2C_MCC_REQ_TSF_LEN);
8173
8174 cond = RTW89_MCC_WAIT_COND(req->group, H2C_FUNC_MCC_REQ_TSF);
8175 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
8176 if (ret)
8177 return ret;
8178
8179 tmp = (struct rtw89_mac_mcc_tsf_rpt *)wait->data.buf;
8180 *rpt = *tmp;
8181
8182 return 0;
8183 }
8184
8185 #define H2C_MCC_MACID_BITMAP_DSC_LEN 4
8186 int rtw89_fw_h2c_mcc_macid_bitmap(struct rtw89_dev *rtwdev, u8 group, u8 macid,
8187 u8 *bitmap)
8188 {
8189 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
8190 struct sk_buff *skb;
8191 unsigned int cond;
8192 u8 map_len;
8193 u8 h2c_len;
8194
8195 BUILD_BUG_ON(RTW89_MAX_MAC_ID_NUM % 8);
8196 map_len = RTW89_MAX_MAC_ID_NUM / 8;
8197 h2c_len = H2C_MCC_MACID_BITMAP_DSC_LEN + map_len;
8198 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, h2c_len);
8199 if (!skb) {
8200 rtw89_err(rtwdev,
8201 "failed to alloc skb for mcc macid bitmap\n");
8202 return -ENOMEM;
8203 }
8204
8205 skb_put(skb, h2c_len);
8206 RTW89_SET_FWCMD_MCC_MACID_BITMAP_GROUP(skb->data, group);
8207 RTW89_SET_FWCMD_MCC_MACID_BITMAP_MACID(skb->data, macid);
8208 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP_LENGTH(skb->data, map_len);
8209 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP(skb->data, bitmap, map_len);
8210
8211 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8212 H2C_CAT_MAC,
8213 H2C_CL_MCC,
8214 H2C_FUNC_MCC_MACID_BITMAP, 0, 0,
8215 h2c_len);
8216
8217 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_MACID_BITMAP);
8218 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
8219 }
8220
8221 #define H2C_MCC_SYNC_LEN 4
8222 int rtw89_fw_h2c_mcc_sync(struct rtw89_dev *rtwdev, u8 group, u8 source,
8223 u8 target, u8 offset)
8224 {
8225 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
8226 struct sk_buff *skb;
8227 unsigned int cond;
8228
8229 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SYNC_LEN);
8230 if (!skb) {
8231 rtw89_err(rtwdev,
8232 "failed to alloc skb for mcc sync\n");
8233 return -ENOMEM;
8234 }
8235
8236 skb_put(skb, H2C_MCC_SYNC_LEN);
8237 RTW89_SET_FWCMD_MCC_SYNC_GROUP(skb->data, group);
8238 RTW89_SET_FWCMD_MCC_SYNC_MACID_SOURCE(skb->data, source);
8239 RTW89_SET_FWCMD_MCC_SYNC_MACID_TARGET(skb->data, target);
8240 RTW89_SET_FWCMD_MCC_SYNC_SYNC_OFFSET(skb->data, offset);
8241
8242 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8243 H2C_CAT_MAC,
8244 H2C_CL_MCC,
8245 H2C_FUNC_MCC_SYNC, 0, 0,
8246 H2C_MCC_SYNC_LEN);
8247
8248 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_SYNC);
8249 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
8250 }
8251
8252 #define H2C_MCC_SET_DURATION_LEN 20
8253 int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev,
8254 const struct rtw89_fw_mcc_duration *p)
8255 {
8256 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
8257 struct sk_buff *skb;
8258 unsigned int cond;
8259
8260 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SET_DURATION_LEN);
8261 if (!skb) {
8262 rtw89_err(rtwdev,
8263 "failed to alloc skb for mcc set duration\n");
8264 return -ENOMEM;
8265 }
8266
8267 skb_put(skb, H2C_MCC_SET_DURATION_LEN);
8268 RTW89_SET_FWCMD_MCC_SET_DURATION_GROUP(skb->data, p->group);
8269 RTW89_SET_FWCMD_MCC_SET_DURATION_BTC_IN_GROUP(skb->data, p->btc_in_group);
8270 RTW89_SET_FWCMD_MCC_SET_DURATION_START_MACID(skb->data, p->start_macid);
8271 RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_X(skb->data, p->macid_x);
8272 RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_Y(skb->data, p->macid_y);
8273 RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_LOW(skb->data,
8274 p->start_tsf_low);
8275 RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_HIGH(skb->data,
8276 p->start_tsf_high);
8277 RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_X(skb->data, p->duration_x);
8278 RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_Y(skb->data, p->duration_y);
8279
8280 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8281 H2C_CAT_MAC,
8282 H2C_CL_MCC,
8283 H2C_FUNC_MCC_SET_DURATION, 0, 0,
8284 H2C_MCC_SET_DURATION_LEN);
8285
8286 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_MCC_SET_DURATION);
8287 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
8288 }
8289
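/* Serialize one MRC slot into the H2C buffer. When slot_h2c is NULL, nothing
 * is written and only the required length is returned, so the caller can size
 * the skb before filling it in.
 */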
8290 static
rtw89_fw_h2c_mrc_add_slot(struct rtw89_dev * rtwdev,const struct rtw89_fw_mrc_add_slot_arg * slot_arg,struct rtw89_h2c_mrc_add_slot * slot_h2c)8291 u32 rtw89_fw_h2c_mrc_add_slot(struct rtw89_dev *rtwdev,
8292 const struct rtw89_fw_mrc_add_slot_arg *slot_arg,
8293 struct rtw89_h2c_mrc_add_slot *slot_h2c)
8294 {
8295 bool fill_h2c = !!slot_h2c;
8296 unsigned int i;
8297
8298 if (!fill_h2c)
8299 goto calc_len;
8300
8301 slot_h2c->w0 = le32_encode_bits(slot_arg->duration,
8302 RTW89_H2C_MRC_ADD_SLOT_W0_DURATION) |
8303 le32_encode_bits(slot_arg->courtesy_en,
8304 RTW89_H2C_MRC_ADD_SLOT_W0_COURTESY_EN) |
8305 le32_encode_bits(slot_arg->role_num,
8306 RTW89_H2C_MRC_ADD_SLOT_W0_ROLE_NUM);
8307 slot_h2c->w1 = le32_encode_bits(slot_arg->courtesy_period,
8308 RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_PERIOD) |
8309 le32_encode_bits(slot_arg->courtesy_target,
8310 RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_TARGET);
8311
8312 for (i = 0; i < slot_arg->role_num; i++) {
8313 slot_h2c->roles[i].w0 =
8314 le32_encode_bits(slot_arg->roles[i].macid,
8315 RTW89_H2C_MRC_ADD_ROLE_W0_MACID) |
8316 le32_encode_bits(slot_arg->roles[i].role_type,
8317 RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_TYPE) |
8318 le32_encode_bits(slot_arg->roles[i].is_master,
8319 RTW89_H2C_MRC_ADD_ROLE_W0_IS_MASTER) |
8320 le32_encode_bits(slot_arg->roles[i].en_tx_null,
8321 RTW89_H2C_MRC_ADD_ROLE_W0_TX_NULL_EN) |
8322 le32_encode_bits(false,
8323 RTW89_H2C_MRC_ADD_ROLE_W0_IS_ALT_ROLE) |
8324 le32_encode_bits(false,
8325 RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_ALT_EN);
8326 slot_h2c->roles[i].w1 =
8327 le32_encode_bits(slot_arg->roles[i].central_ch,
8328 RTW89_H2C_MRC_ADD_ROLE_W1_CENTRAL_CH_SEG) |
8329 le32_encode_bits(slot_arg->roles[i].primary_ch,
8330 RTW89_H2C_MRC_ADD_ROLE_W1_PRI_CH) |
8331 le32_encode_bits(slot_arg->roles[i].bw,
8332 RTW89_H2C_MRC_ADD_ROLE_W1_BW) |
8333 le32_encode_bits(slot_arg->roles[i].band,
8334 RTW89_H2C_MRC_ADD_ROLE_W1_CH_BAND_TYPE) |
8335 le32_encode_bits(slot_arg->roles[i].null_early,
8336 RTW89_H2C_MRC_ADD_ROLE_W1_NULL_EARLY) |
8337 le32_encode_bits(false,
8338 RTW89_H2C_MRC_ADD_ROLE_W1_RFK_BY_PASS) |
8339 le32_encode_bits(true,
8340 RTW89_H2C_MRC_ADD_ROLE_W1_CAN_BTC);
8341 slot_h2c->roles[i].macid_main_bitmap =
8342 cpu_to_le32(slot_arg->roles[i].macid_main_bitmap);
8343 slot_h2c->roles[i].macid_paired_bitmap =
8344 cpu_to_le32(slot_arg->roles[i].macid_paired_bitmap);
8345 }
8346
8347 calc_len:
8348 return struct_size(slot_h2c, roles, slot_arg->role_num);
8349 }
8350
8351 int rtw89_fw_h2c_mrc_add(struct rtw89_dev *rtwdev,
8352 const struct rtw89_fw_mrc_add_arg *arg)
8353 {
8354 struct rtw89_h2c_mrc_add *h2c_head;
8355 struct sk_buff *skb;
8356 unsigned int i;
8357 void *tmp;
8358 u32 len;
8359 int ret;
8360
8361 len = sizeof(*h2c_head);
8362 for (i = 0; i < arg->slot_num; i++)
8363 len += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], NULL);
8364
8365 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
8366 if (!skb) {
8367 rtw89_err(rtwdev, "failed to alloc skb for mrc add\n");
8368 return -ENOMEM;
8369 }
8370
8371 skb_put(skb, len);
8372 tmp = skb->data;
8373
8374 h2c_head = tmp;
8375 h2c_head->w0 = le32_encode_bits(arg->sch_idx,
8376 RTW89_H2C_MRC_ADD_W0_SCH_IDX) |
8377 le32_encode_bits(arg->sch_type,
8378 RTW89_H2C_MRC_ADD_W0_SCH_TYPE) |
8379 le32_encode_bits(arg->slot_num,
8380 RTW89_H2C_MRC_ADD_W0_SLOT_NUM) |
8381 le32_encode_bits(arg->btc_in_sch,
8382 RTW89_H2C_MRC_ADD_W0_BTC_IN_SCH);
8383
8384 tmp += sizeof(*h2c_head);
8385 for (i = 0; i < arg->slot_num; i++)
8386 tmp += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], tmp);
8387
8388 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8389 H2C_CAT_MAC,
8390 H2C_CL_MRC,
8391 H2C_FUNC_ADD_MRC, 0, 0,
8392 len);
8393
8394 ret = rtw89_h2c_tx(rtwdev, skb, false);
8395 if (ret) {
8396 rtw89_err(rtwdev, "failed to send h2c\n");
8397 dev_kfree_skb_any(skb);
8398 return -EBUSY;
8399 }
8400
8401 return 0;
8402 }
8403
8404 int rtw89_fw_h2c_mrc_start(struct rtw89_dev *rtwdev,
8405 const struct rtw89_fw_mrc_start_arg *arg)
8406 {
8407 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
8408 struct rtw89_h2c_mrc_start *h2c;
8409 u32 len = sizeof(*h2c);
8410 struct sk_buff *skb;
8411 unsigned int cond;
8412
8413 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
8414 if (!skb) {
8415 rtw89_err(rtwdev, "failed to alloc skb for mrc start\n");
8416 return -ENOMEM;
8417 }
8418
8419 skb_put(skb, len);
8420 h2c = (struct rtw89_h2c_mrc_start *)skb->data;
8421
8422 h2c->w0 = le32_encode_bits(arg->sch_idx,
8423 RTW89_H2C_MRC_START_W0_SCH_IDX) |
8424 le32_encode_bits(arg->old_sch_idx,
8425 RTW89_H2C_MRC_START_W0_OLD_SCH_IDX) |
8426 le32_encode_bits(arg->action,
8427 RTW89_H2C_MRC_START_W0_ACTION);
8428
8429 h2c->start_tsf_high = cpu_to_le32(arg->start_tsf >> 32);
8430 h2c->start_tsf_low = cpu_to_le32(arg->start_tsf);
8431
8432 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8433 H2C_CAT_MAC,
8434 H2C_CL_MRC,
8435 H2C_FUNC_START_MRC, 0, 0,
8436 len);
8437
8438 cond = RTW89_MRC_WAIT_COND(arg->sch_idx, H2C_FUNC_START_MRC);
8439 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
8440 }
8441
8442 int rtw89_fw_h2c_mrc_del(struct rtw89_dev *rtwdev, u8 sch_idx, u8 slot_idx)
8443 {
8444 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
8445 struct rtw89_h2c_mrc_del *h2c;
8446 u32 len = sizeof(*h2c);
8447 struct sk_buff *skb;
8448 unsigned int cond;
8449
8450 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
8451 if (!skb) {
8452 rtw89_err(rtwdev, "failed to alloc skb for mrc del\n");
8453 return -ENOMEM;
8454 }
8455
8456 skb_put(skb, len);
8457 h2c = (struct rtw89_h2c_mrc_del *)skb->data;
8458
8459 h2c->w0 = le32_encode_bits(sch_idx, RTW89_H2C_MRC_DEL_W0_SCH_IDX) |
8460 le32_encode_bits(slot_idx, RTW89_H2C_MRC_DEL_W0_STOP_SLOT_IDX);
8461
8462 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8463 H2C_CAT_MAC,
8464 H2C_CL_MRC,
8465 H2C_FUNC_DEL_MRC, 0, 0,
8466 len);
8467
8468 cond = RTW89_MRC_WAIT_COND(sch_idx, H2C_FUNC_DEL_MRC);
8469 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
8470 }
8471
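/* Request the TSF of each band/port pair described in @arg. The report the
 * firmware returns through the wait data buffer is copied into @rpt.
 */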
8472 int rtw89_fw_h2c_mrc_req_tsf(struct rtw89_dev *rtwdev,
8473 const struct rtw89_fw_mrc_req_tsf_arg *arg,
8474 struct rtw89_mac_mrc_tsf_rpt *rpt)
8475 {
8476 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
8477 struct rtw89_h2c_mrc_req_tsf *h2c;
8478 struct rtw89_mac_mrc_tsf_rpt *tmp;
8479 struct sk_buff *skb;
8480 unsigned int i;
8481 u32 len;
8482 int ret;
8483
8484 len = struct_size(h2c, infos, arg->num);
8485 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
8486 if (!skb) {
8487 rtw89_err(rtwdev, "failed to alloc skb for mrc req tsf\n");
8488 return -ENOMEM;
8489 }
8490
8491 skb_put(skb, len);
8492 h2c = (struct rtw89_h2c_mrc_req_tsf *)skb->data;
8493
8494 h2c->req_tsf_num = arg->num;
8495 for (i = 0; i < arg->num; i++)
8496 h2c->infos[i] =
8497 u8_encode_bits(arg->infos[i].band,
8498 RTW89_H2C_MRC_REQ_TSF_INFO_BAND) |
8499 u8_encode_bits(arg->infos[i].port,
8500 RTW89_H2C_MRC_REQ_TSF_INFO_PORT);
8501
8502 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8503 H2C_CAT_MAC,
8504 H2C_CL_MRC,
8505 H2C_FUNC_MRC_REQ_TSF, 0, 0,
8506 len);
8507
8508 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_MRC_WAIT_COND_REQ_TSF);
8509 if (ret)
8510 return ret;
8511
8512 tmp = (struct rtw89_mac_mrc_tsf_rpt *)wait->data.buf;
8513 *rpt = *tmp;
8514
8515 return 0;
8516 }
8517
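/* Update the MRC MAC ID bitmap of schedule @arg->sch_idx according to
 * @arg->action, covering @arg->macid and @arg->client_macid.
 */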
8518 int rtw89_fw_h2c_mrc_upd_bitmap(struct rtw89_dev *rtwdev,
8519 const struct rtw89_fw_mrc_upd_bitmap_arg *arg)
8520 {
8521 struct rtw89_h2c_mrc_upd_bitmap *h2c;
8522 u32 len = sizeof(*h2c);
8523 struct sk_buff *skb;
8524 int ret;
8525
8526 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
8527 if (!skb) {
8528 rtw89_err(rtwdev, "failed to alloc skb for mrc upd bitmap\n");
8529 return -ENOMEM;
8530 }
8531
8532 skb_put(skb, len);
8533 h2c = (struct rtw89_h2c_mrc_upd_bitmap *)skb->data;
8534
8535 h2c->w0 = le32_encode_bits(arg->sch_idx,
8536 RTW89_H2C_MRC_UPD_BITMAP_W0_SCH_IDX) |
8537 le32_encode_bits(arg->action,
8538 RTW89_H2C_MRC_UPD_BITMAP_W0_ACTION) |
8539 le32_encode_bits(arg->macid,
8540 RTW89_H2C_MRC_UPD_BITMAP_W0_MACID);
8541 h2c->w1 = le32_encode_bits(arg->client_macid,
8542 RTW89_H2C_MRC_UPD_BITMAP_W1_CLIENT_MACID);
8543
8544 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8545 H2C_CAT_MAC,
8546 H2C_CL_MRC,
8547 H2C_FUNC_MRC_UPD_BITMAP, 0, 0,
8548 len);
8549
8550 ret = rtw89_h2c_tx(rtwdev, skb, false);
8551 if (ret) {
8552 rtw89_err(rtwdev, "failed to send h2c\n");
8553 dev_kfree_skb_any(skb);
8554 return -EBUSY;
8555 }
8556
8557 return 0;
8558 }
8559
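/* Enable TSF synchronization from the source port/band to the destination
 * port/band given in @arg, applying the TSF offset @arg->offset.
 */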
8560 int rtw89_fw_h2c_mrc_sync(struct rtw89_dev *rtwdev,
8561 const struct rtw89_fw_mrc_sync_arg *arg)
8562 {
8563 struct rtw89_h2c_mrc_sync *h2c;
8564 u32 len = sizeof(*h2c);
8565 struct sk_buff *skb;
8566 int ret;
8567
8568 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
8569 if (!skb) {
8570 rtw89_err(rtwdev, "failed to alloc skb for mrc sync\n");
8571 return -ENOMEM;
8572 }
8573
8574 skb_put(skb, len);
8575 h2c = (struct rtw89_h2c_mrc_sync *)skb->data;
8576
8577 h2c->w0 = le32_encode_bits(true, RTW89_H2C_MRC_SYNC_W0_SYNC_EN) |
8578 le32_encode_bits(arg->src.port,
8579 RTW89_H2C_MRC_SYNC_W0_SRC_PORT) |
8580 le32_encode_bits(arg->src.band,
8581 RTW89_H2C_MRC_SYNC_W0_SRC_BAND) |
8582 le32_encode_bits(arg->dest.port,
8583 RTW89_H2C_MRC_SYNC_W0_DEST_PORT) |
8584 le32_encode_bits(arg->dest.band,
8585 RTW89_H2C_MRC_SYNC_W0_DEST_BAND);
8586 h2c->w1 = le32_encode_bits(arg->offset, RTW89_H2C_MRC_SYNC_W1_OFFSET);
8587
8588 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8589 H2C_CAT_MAC,
8590 H2C_CL_MRC,
8591 H2C_FUNC_MRC_SYNC, 0, 0,
8592 len);
8593
8594 ret = rtw89_h2c_tx(rtwdev, skb, false);
8595 if (ret) {
8596 rtw89_err(rtwdev, "failed to send h2c\n");
8597 dev_kfree_skb_any(skb);
8598 return -EBUSY;
8599 }
8600
8601 return 0;
8602 }
8603
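/* Update the duration of the slots listed in @arg for schedule
 * @arg->sch_idx, taking effect at @arg->start_tsf.
 */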
8604 int rtw89_fw_h2c_mrc_upd_duration(struct rtw89_dev *rtwdev,
8605 const struct rtw89_fw_mrc_upd_duration_arg *arg)
8606 {
8607 struct rtw89_h2c_mrc_upd_duration *h2c;
8608 struct sk_buff *skb;
8609 unsigned int i;
8610 u32 len;
8611 int ret;
8612
8613 len = struct_size(h2c, slots, arg->slot_num);
8614 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
8615 if (!skb) {
8616 rtw89_err(rtwdev, "failed to alloc skb for mrc upd duration\n");
8617 return -ENOMEM;
8618 }
8619
8620 skb_put(skb, len);
8621 h2c = (struct rtw89_h2c_mrc_upd_duration *)skb->data;
8622
8623 h2c->w0 = le32_encode_bits(arg->sch_idx,
8624 RTW89_H2C_MRC_UPD_DURATION_W0_SCH_IDX) |
8625 le32_encode_bits(arg->slot_num,
8626 RTW89_H2C_MRC_UPD_DURATION_W0_SLOT_NUM) |
8627 le32_encode_bits(false,
8628 RTW89_H2C_MRC_UPD_DURATION_W0_BTC_IN_SCH);
8629
8630 h2c->start_tsf_high = cpu_to_le32(arg->start_tsf >> 32);
8631 h2c->start_tsf_low = cpu_to_le32(arg->start_tsf);
8632
8633 for (i = 0; i < arg->slot_num; i++) {
8634 h2c->slots[i] =
8635 le32_encode_bits(arg->slots[i].slot_idx,
8636 RTW89_H2C_MRC_UPD_DURATION_SLOT_SLOT_IDX) |
8637 le32_encode_bits(arg->slots[i].duration,
8638 RTW89_H2C_MRC_UPD_DURATION_SLOT_DURATION);
8639 }
8640
8641 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8642 H2C_CAT_MAC,
8643 H2C_CL_MRC,
8644 H2C_FUNC_MRC_UPD_DURATION, 0, 0,
8645 len);
8646
8647 ret = rtw89_h2c_tx(rtwdev, skb, false);
8648 if (ret) {
8649 rtw89_err(rtwdev, "failed to send h2c\n");
8650 dev_kfree_skb_any(skb);
8651 return -EBUSY;
8652 }
8653
8654 return 0;
8655 }
8656
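/* Build and send the AP_INFO H2C, setting the PWR_INT_EN bit according
 * to @en.
 */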
8657 static int rtw89_fw_h2c_ap_info(struct rtw89_dev *rtwdev, bool en)
8658 {
8659 struct rtw89_h2c_ap_info *h2c;
8660 u32 len = sizeof(*h2c);
8661 struct sk_buff *skb;
8662 int ret;
8663
8664 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
8665 if (!skb) {
8666 rtw89_err(rtwdev, "failed to alloc skb for ap info\n");
8667 return -ENOMEM;
8668 }
8669
8670 skb_put(skb, len);
8671 h2c = (struct rtw89_h2c_ap_info *)skb->data;
8672
8673 h2c->w0 = le32_encode_bits(en, RTW89_H2C_AP_INFO_W0_PWR_INT_EN);
8674
8675 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8676 H2C_CAT_MAC,
8677 H2C_CL_AP,
8678 H2C_FUNC_AP_INFO, 0, 0,
8679 len);
8680
8681 ret = rtw89_h2c_tx(rtwdev, skb, false);
8682 if (ret) {
8683 rtw89_err(rtwdev, "failed to send h2c\n");
8684 dev_kfree_skb_any(skb);
8685 return -EBUSY;
8686 }
8687
8688 return 0;
8689 }
8690
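/* Reference-counted wrapper around rtw89_fw_h2c_ap_info(): only the first
 * enable and the last disable actually send the H2C command.
 */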
8691 int rtw89_fw_h2c_ap_info_refcount(struct rtw89_dev *rtwdev, bool en)
8692 {
8693 int ret;
8694
8695 if (en) {
8696 if (refcount_inc_not_zero(&rtwdev->refcount_ap_info))
8697 return 0;
8698 } else {
8699 if (!refcount_dec_and_test(&rtwdev->refcount_ap_info))
8700 return 0;
8701 }
8702
8703 ret = rtw89_fw_h2c_ap_info(rtwdev, en);
8704 if (ret) {
8705 if (!test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags))
8706 return ret;
8707
8708 /* During recovery, neither the driver nor the stack has full error
8709  * handling, so just warn here but still return 0 with the refcount
8710  * increased as usual. This avoids a refcount underflow when this
8711  * function is later called with @en == false.
8712  */
8713 rtw89_warn(rtwdev, "h2c ap_info failed during SER\n");
8714 }
8715
8716 if (en)
8717 refcount_set(&rtwdev->refcount_ap_info, 1);
8718
8719 return 0;
8720 }
8721
8722 static bool __fw_txpwr_entry_zero_ext(const void *ext_ptr, u8 ext_len)
8723 {
8724 static const u8 zeros[U8_MAX] = {};
8725
8726 return memcmp(ext_ptr, zeros, ext_len) == 0;
8727 }
8728
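/* An entry read from the firmware file is acceptable if the parsed struct
 * already covers the file's entry size, or if every byte beyond the struct
 * (an unused extension) is zero.
 */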
8729 #define __fw_txpwr_entry_acceptable(e, cursor, ent_sz) \
8730 ({ \
8731 u8 __var_sz = sizeof(*(e)); \
8732 bool __accept; \
8733 if (__var_sz >= (ent_sz)) \
8734 __accept = true; \
8735 else \
8736 __accept = __fw_txpwr_entry_zero_ext((cursor) + __var_sz,\
8737 (ent_sz) - __var_sz);\
8738 __accept; \
8739 })
8740
8741 static bool
8742 fw_txpwr_byrate_entry_valid(const struct rtw89_fw_txpwr_byrate_entry *e,
8743 const void *cursor,
8744 const struct rtw89_txpwr_conf *conf)
8745 {
8746 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
8747 return false;
8748
8749 if (e->band >= RTW89_BAND_NUM || e->bw >= RTW89_BYR_BW_NUM)
8750 return false;
8751
8752 switch (e->rs) {
8753 case RTW89_RS_CCK:
8754 if (e->shf + e->len > RTW89_RATE_CCK_NUM)
8755 return false;
8756 break;
8757 case RTW89_RS_OFDM:
8758 if (e->shf + e->len > RTW89_RATE_OFDM_NUM)
8759 return false;
8760 break;
8761 case RTW89_RS_MCS:
8762 if (e->shf + e->len > __RTW89_RATE_MCS_NUM ||
8763 e->nss >= RTW89_NSS_NUM ||
8764 e->ofdma >= RTW89_OFDMA_NUM)
8765 return false;
8766 break;
8767 case RTW89_RS_HEDCM:
8768 if (e->shf + e->len > RTW89_RATE_HEDCM_NUM ||
8769 e->nss >= RTW89_NSS_HEDCM_NUM ||
8770 e->ofdma >= RTW89_OFDMA_NUM)
8771 return false;
8772 break;
8773 case RTW89_RS_OFFSET:
8774 if (e->shf + e->len > __RTW89_RATE_OFFSET_NUM)
8775 return false;
8776 break;
8777 default:
8778 return false;
8779 }
8780
8781 return true;
8782 }
8783
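/* Each valid by-rate entry packs up to entry.len power values, one byte
 * each, little-endian in entry.data; they are written into the driver's
 * by-rate table starting at rate index entry.shf.
 */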
8784 static
8785 void rtw89_fw_load_txpwr_byrate(struct rtw89_dev *rtwdev,
8786 const struct rtw89_txpwr_table *tbl)
8787 {
8788 const struct rtw89_txpwr_conf *conf = tbl->data;
8789 struct rtw89_fw_txpwr_byrate_entry entry = {};
8790 struct rtw89_txpwr_byrate *byr_head;
8791 struct rtw89_rate_desc desc = {};
8792 const void *cursor;
8793 u32 data;
8794 s8 *byr;
8795 int i;
8796
8797 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
8798 if (!fw_txpwr_byrate_entry_valid(&entry, cursor, conf))
8799 continue;
8800
8801 byr_head = &rtwdev->byr[entry.band][entry.bw];
8802 data = le32_to_cpu(entry.data);
8803 desc.ofdma = entry.ofdma;
8804 desc.nss = entry.nss;
8805 desc.rs = entry.rs;
8806
8807 for (i = 0; i < entry.len; i++, data >>= 8) {
8808 desc.idx = entry.shf + i;
8809 byr = rtw89_phy_raw_byr_seek(rtwdev, byr_head, &desc);
8810 *byr = data & 0xff;
8811 }
8812 }
8813 }
8814
8815 static bool
8816 fw_txpwr_lmt_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_2ghz_entry *e,
8817 const void *cursor,
8818 const struct rtw89_txpwr_conf *conf)
8819 {
8820 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
8821 return false;
8822
8823 if (e->bw >= RTW89_2G_BW_NUM)
8824 return false;
8825 if (e->nt >= RTW89_NTX_NUM)
8826 return false;
8827 if (e->rs >= RTW89_RS_LMT_NUM)
8828 return false;
8829 if (e->bf >= RTW89_BF_NUM)
8830 return false;
8831 if (e->regd >= RTW89_REGD_NUM)
8832 return false;
8833 if (e->ch_idx >= RTW89_2G_CH_NUM)
8834 return false;
8835
8836 return true;
8837 }
8838
8839 static
8840 void rtw89_fw_load_txpwr_lmt_2ghz(struct rtw89_txpwr_lmt_2ghz_data *data)
8841 {
8842 const struct rtw89_txpwr_conf *conf = &data->conf;
8843 struct rtw89_fw_txpwr_lmt_2ghz_entry entry = {};
8844 const void *cursor;
8845
8846 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
8847 if (!fw_txpwr_lmt_2ghz_entry_valid(&entry, cursor, conf))
8848 continue;
8849
8850 data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd]
8851 [entry.ch_idx] = entry.v;
8852 }
8853 }
8854
8855 static bool
8856 fw_txpwr_lmt_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_5ghz_entry *e,
8857 const void *cursor,
8858 const struct rtw89_txpwr_conf *conf)
8859 {
8860 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
8861 return false;
8862
8863 if (e->bw >= RTW89_5G_BW_NUM)
8864 return false;
8865 if (e->nt >= RTW89_NTX_NUM)
8866 return false;
8867 if (e->rs >= RTW89_RS_LMT_NUM)
8868 return false;
8869 if (e->bf >= RTW89_BF_NUM)
8870 return false;
8871 if (e->regd >= RTW89_REGD_NUM)
8872 return false;
8873 if (e->ch_idx >= RTW89_5G_CH_NUM)
8874 return false;
8875
8876 return true;
8877 }
8878
8879 static
8880 void rtw89_fw_load_txpwr_lmt_5ghz(struct rtw89_txpwr_lmt_5ghz_data *data)
8881 {
8882 const struct rtw89_txpwr_conf *conf = &data->conf;
8883 struct rtw89_fw_txpwr_lmt_5ghz_entry entry = {};
8884 const void *cursor;
8885
8886 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
8887 if (!fw_txpwr_lmt_5ghz_entry_valid(&entry, cursor, conf))
8888 continue;
8889
8890 data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd]
8891 [entry.ch_idx] = entry.v;
8892 }
8893 }
8894
8895 static bool
8896 fw_txpwr_lmt_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_6ghz_entry *e,
8897 const void *cursor,
8898 const struct rtw89_txpwr_conf *conf)
8899 {
8900 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
8901 return false;
8902
8903 if (e->bw >= RTW89_6G_BW_NUM)
8904 return false;
8905 if (e->nt >= RTW89_NTX_NUM)
8906 return false;
8907 if (e->rs >= RTW89_RS_LMT_NUM)
8908 return false;
8909 if (e->bf >= RTW89_BF_NUM)
8910 return false;
8911 if (e->regd >= RTW89_REGD_NUM)
8912 return false;
8913 if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER)
8914 return false;
8915 if (e->ch_idx >= RTW89_6G_CH_NUM)
8916 return false;
8917
8918 return true;
8919 }
8920
8921 static
8922 void rtw89_fw_load_txpwr_lmt_6ghz(struct rtw89_txpwr_lmt_6ghz_data *data)
8923 {
8924 const struct rtw89_txpwr_conf *conf = &data->conf;
8925 struct rtw89_fw_txpwr_lmt_6ghz_entry entry = {};
8926 const void *cursor;
8927
8928 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
8929 if (!fw_txpwr_lmt_6ghz_entry_valid(&entry, cursor, conf))
8930 continue;
8931
8932 data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd]
8933 [entry.reg_6ghz_power][entry.ch_idx] = entry.v;
8934 }
8935 }
8936
8937 static bool
8938 fw_txpwr_lmt_ru_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_2ghz_entry *e,
8939 const void *cursor,
8940 const struct rtw89_txpwr_conf *conf)
8941 {
8942 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
8943 return false;
8944
8945 if (e->ru >= RTW89_RU_NUM)
8946 return false;
8947 if (e->nt >= RTW89_NTX_NUM)
8948 return false;
8949 if (e->regd >= RTW89_REGD_NUM)
8950 return false;
8951 if (e->ch_idx >= RTW89_2G_CH_NUM)
8952 return false;
8953
8954 return true;
8955 }
8956
8957 static
8958 void rtw89_fw_load_txpwr_lmt_ru_2ghz(struct rtw89_txpwr_lmt_ru_2ghz_data *data)
8959 {
8960 const struct rtw89_txpwr_conf *conf = &data->conf;
8961 struct rtw89_fw_txpwr_lmt_ru_2ghz_entry entry = {};
8962 const void *cursor;
8963
8964 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
8965 if (!fw_txpwr_lmt_ru_2ghz_entry_valid(&entry, cursor, conf))
8966 continue;
8967
8968 data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v;
8969 }
8970 }
8971
8972 static bool
8973 fw_txpwr_lmt_ru_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_5ghz_entry *e,
8974 const void *cursor,
8975 const struct rtw89_txpwr_conf *conf)
8976 {
8977 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
8978 return false;
8979
8980 if (e->ru >= RTW89_RU_NUM)
8981 return false;
8982 if (e->nt >= RTW89_NTX_NUM)
8983 return false;
8984 if (e->regd >= RTW89_REGD_NUM)
8985 return false;
8986 if (e->ch_idx >= RTW89_5G_CH_NUM)
8987 return false;
8988
8989 return true;
8990 }
8991
8992 static
8993 void rtw89_fw_load_txpwr_lmt_ru_5ghz(struct rtw89_txpwr_lmt_ru_5ghz_data *data)
8994 {
8995 const struct rtw89_txpwr_conf *conf = &data->conf;
8996 struct rtw89_fw_txpwr_lmt_ru_5ghz_entry entry = {};
8997 const void *cursor;
8998
8999 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
9000 if (!fw_txpwr_lmt_ru_5ghz_entry_valid(&entry, cursor, conf))
9001 continue;
9002
9003 data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v;
9004 }
9005 }
9006
9007 static bool
9008 fw_txpwr_lmt_ru_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_6ghz_entry *e,
9009 const void *cursor,
9010 const struct rtw89_txpwr_conf *conf)
9011 {
9012 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
9013 return false;
9014
9015 if (e->ru >= RTW89_RU_NUM)
9016 return false;
9017 if (e->nt >= RTW89_NTX_NUM)
9018 return false;
9019 if (e->regd >= RTW89_REGD_NUM)
9020 return false;
9021 if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER)
9022 return false;
9023 if (e->ch_idx >= RTW89_6G_CH_NUM)
9024 return false;
9025
9026 return true;
9027 }
9028
9029 static
9030 void rtw89_fw_load_txpwr_lmt_ru_6ghz(struct rtw89_txpwr_lmt_ru_6ghz_data *data)
9031 {
9032 const struct rtw89_txpwr_conf *conf = &data->conf;
9033 struct rtw89_fw_txpwr_lmt_ru_6ghz_entry entry = {};
9034 const void *cursor;
9035
9036 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
9037 if (!fw_txpwr_lmt_ru_6ghz_entry_valid(&entry, cursor, conf))
9038 continue;
9039
9040 data->v[entry.ru][entry.nt][entry.regd][entry.reg_6ghz_power]
9041 [entry.ch_idx] = entry.v;
9042 }
9043 }
9044
9045 static bool
9046 fw_tx_shape_lmt_entry_valid(const struct rtw89_fw_tx_shape_lmt_entry *e,
9047 const void *cursor,
9048 const struct rtw89_txpwr_conf *conf)
9049 {
9050 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
9051 return false;
9052
9053 if (e->band >= RTW89_BAND_NUM)
9054 return false;
9055 if (e->tx_shape_rs >= RTW89_RS_TX_SHAPE_NUM)
9056 return false;
9057 if (e->regd >= RTW89_REGD_NUM)
9058 return false;
9059
9060 return true;
9061 }
9062
9063 static
9064 void rtw89_fw_load_tx_shape_lmt(struct rtw89_tx_shape_lmt_data *data)
9065 {
9066 const struct rtw89_txpwr_conf *conf = &data->conf;
9067 struct rtw89_fw_tx_shape_lmt_entry entry = {};
9068 const void *cursor;
9069
9070 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
9071 if (!fw_tx_shape_lmt_entry_valid(&entry, cursor, conf))
9072 continue;
9073
9074 data->v[entry.band][entry.tx_shape_rs][entry.regd] = entry.v;
9075 }
9076 }
9077
9078 static bool
9079 fw_tx_shape_lmt_ru_entry_valid(const struct rtw89_fw_tx_shape_lmt_ru_entry *e,
9080 const void *cursor,
9081 const struct rtw89_txpwr_conf *conf)
9082 {
9083 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
9084 return false;
9085
9086 if (e->band >= RTW89_BAND_NUM)
9087 return false;
9088 if (e->regd >= RTW89_REGD_NUM)
9089 return false;
9090
9091 return true;
9092 }
9093
9094 static
9095 void rtw89_fw_load_tx_shape_lmt_ru(struct rtw89_tx_shape_lmt_ru_data *data)
9096 {
9097 const struct rtw89_txpwr_conf *conf = &data->conf;
9098 struct rtw89_fw_tx_shape_lmt_ru_entry entry = {};
9099 const void *cursor;
9100
9101 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
9102 if (!fw_tx_shape_lmt_ru_entry_valid(&entry, cursor, conf))
9103 continue;
9104
9105 data->v[entry.band][entry.regd] = entry.v;
9106 }
9107 }
9108
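/* Start from the chip's initial RFE parms (if given) and override each TX
 * power table whose firmware-loaded conf section is valid; return @init
 * unchanged when no rfe_data was allocated.
 */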
9109 const struct rtw89_rfe_parms *
9110 rtw89_load_rfe_data_from_fw(struct rtw89_dev *rtwdev,
9111 const struct rtw89_rfe_parms *init)
9112 {
9113 struct rtw89_rfe_data *rfe_data = rtwdev->rfe_data;
9114 struct rtw89_rfe_parms *parms;
9115
9116 if (!rfe_data)
9117 return init;
9118
9119 parms = &rfe_data->rfe_parms;
9120 if (init)
9121 *parms = *init;
9122
9123 if (rtw89_txpwr_conf_valid(&rfe_data->byrate.conf)) {
9124 rfe_data->byrate.tbl.data = &rfe_data->byrate.conf;
9125 rfe_data->byrate.tbl.size = 0; /* don't care here */
9126 rfe_data->byrate.tbl.load = rtw89_fw_load_txpwr_byrate;
9127 parms->byr_tbl = &rfe_data->byrate.tbl;
9128 }
9129
9130 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_2ghz.conf)) {
9131 rtw89_fw_load_txpwr_lmt_2ghz(&rfe_data->lmt_2ghz);
9132 parms->rule_2ghz.lmt = &rfe_data->lmt_2ghz.v;
9133 }
9134
9135 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_5ghz.conf)) {
9136 rtw89_fw_load_txpwr_lmt_5ghz(&rfe_data->lmt_5ghz);
9137 parms->rule_5ghz.lmt = &rfe_data->lmt_5ghz.v;
9138 }
9139
9140 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_6ghz.conf)) {
9141 rtw89_fw_load_txpwr_lmt_6ghz(&rfe_data->lmt_6ghz);
9142 parms->rule_6ghz.lmt = &rfe_data->lmt_6ghz.v;
9143 }
9144
9145 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_2ghz.conf)) {
9146 rtw89_fw_load_txpwr_lmt_ru_2ghz(&rfe_data->lmt_ru_2ghz);
9147 parms->rule_2ghz.lmt_ru = &rfe_data->lmt_ru_2ghz.v;
9148 }
9149
9150 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_5ghz.conf)) {
9151 rtw89_fw_load_txpwr_lmt_ru_5ghz(&rfe_data->lmt_ru_5ghz);
9152 parms->rule_5ghz.lmt_ru = &rfe_data->lmt_ru_5ghz.v;
9153 }
9154
9155 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_6ghz.conf)) {
9156 rtw89_fw_load_txpwr_lmt_ru_6ghz(&rfe_data->lmt_ru_6ghz);
9157 parms->rule_6ghz.lmt_ru = &rfe_data->lmt_ru_6ghz.v;
9158 }
9159
9160 if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt.conf)) {
9161 rtw89_fw_load_tx_shape_lmt(&rfe_data->tx_shape_lmt);
9162 parms->tx_shape.lmt = &rfe_data->tx_shape_lmt.v;
9163 }
9164
9165 if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt_ru.conf)) {
9166 rtw89_fw_load_tx_shape_lmt_ru(&rfe_data->tx_shape_lmt_ru);
9167 parms->tx_shape.lmt_ru = &rfe_data->tx_shape_lmt_ru.v;
9168 }
9169
9170 return parms;
9171 }
9172