1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright(c) 2019-2020 Realtek Corporation
3 */
4
5 #include <linux/if_arp.h>
6 #include "cam.h"
7 #include "chan.h"
8 #include "coex.h"
9 #include "debug.h"
10 #include "fw.h"
11 #include "mac.h"
12 #include "phy.h"
13 #include "ps.h"
14 #include "reg.h"
15 #include "util.h"
16 #include "wow.h"
17
18 static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev);
19
20 struct rtw89_eapol_2_of_2 {
21 u8 gtkbody[14];
22 u8 key_des_ver;
23 u8 rsvd[92];
24 } __packed;
25
26 struct rtw89_sa_query {
27 u8 category;
28 u8 action;
29 } __packed;
30
31 struct rtw89_arp_rsp {
32 u8 llc_hdr[sizeof(rfc1042_header)];
33 __be16 llc_type;
34 struct arphdr arp_hdr;
35 u8 sender_hw[ETH_ALEN];
36 __be32 sender_ip;
37 u8 target_hw[ETH_ALEN];
38 __be32 target_ip;
39 } __packed;
40
41 static const u8 mss_signature[] = {0x4D, 0x53, 0x53, 0x4B, 0x50, 0x4F, 0x4F, 0x4C};
42
43 const struct rtw89_fw_blacklist rtw89_fw_blacklist_default = {
44 .ver = 0x00,
45 .list = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
46 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
47 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
48 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
49 },
50 };
51 EXPORT_SYMBOL(rtw89_fw_blacklist_default);
52
53 union rtw89_fw_element_arg {
54 size_t offset;
55 enum rtw89_rf_path rf_path;
56 enum rtw89_fw_type fw_type;
57 };
58
59 struct rtw89_fw_element_handler {
60 int (*fn)(struct rtw89_dev *rtwdev,
61 const struct rtw89_fw_element_hdr *elm,
62 const union rtw89_fw_element_arg arg);
63 const union rtw89_fw_element_arg arg;
64 const char *name;
65 };
66
67 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
68 struct sk_buff *skb);
69 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
70 struct rtw89_wait_info *wait, unsigned int cond);
71 static int __parse_security_section(struct rtw89_dev *rtwdev,
72 struct rtw89_fw_bin_info *info,
73 struct rtw89_fw_hdr_section_info *section_info,
74 const void *content,
75 u32 *mssc_len);
76
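/* Allocate an skb for an H2C command, reserving headroom for the chip's
 * H2C descriptor and, when requested, the H2C header, so they can be
 * pushed later without reallocation.
 */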
77 static struct sk_buff *rtw89_fw_h2c_alloc_skb(struct rtw89_dev *rtwdev, u32 len,
78 bool header)
79 {
80 struct sk_buff *skb;
81 u32 header_len = 0;
82 u32 h2c_desc_size = rtwdev->chip->h2c_desc_size;
83
84 if (header)
85 header_len = H2C_HEADER_LEN;
86
87 skb = dev_alloc_skb(len + header_len + h2c_desc_size);
88 if (!skb)
89 return NULL;
90 skb_reserve(skb, header_len + h2c_desc_size);
91 memset(skb->data, 0, len);
92
93 return skb;
94 }
95
96 struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len)
97 {
98 return rtw89_fw_h2c_alloc_skb(rtwdev, len, true);
99 }
100
101 struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len)
102 {
103 return rtw89_fw_h2c_alloc_skb(rtwdev, len, false);
104 }
105
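/* Poll the firmware-download status until the WCPU reports init-ready,
 * translate checksum/security/cut-version failures into errors, and set
 * RTW89_FLAG_FW_RDY on success.
 */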
106 int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev, enum rtw89_fwdl_check_type type)
107 {
108 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
109 u8 val;
110 int ret;
111
112 ret = read_poll_timeout_atomic(mac->fwdl_get_status, val,
113 val == RTW89_FWDL_WCPU_FW_INIT_RDY,
114 1, FWDL_WAIT_CNT, false, rtwdev, type);
115 if (ret) {
116 switch (val) {
117 case RTW89_FWDL_CHECKSUM_FAIL:
118 rtw89_err(rtwdev, "fw checksum fail\n");
119 return -EINVAL;
120
121 case RTW89_FWDL_SECURITY_FAIL:
122 rtw89_err(rtwdev, "fw security fail\n");
123 return -EINVAL;
124
125 case RTW89_FWDL_CV_NOT_MATCH:
126 rtw89_err(rtwdev, "fw cv not match\n");
127 return -EINVAL;
128
129 default:
130 rtw89_err(rtwdev, "fw unexpected status %d\n", val);
131 return -EBUSY;
132 }
133 }
134
135 set_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);
136
137 return 0;
138 }
139
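/* Parse a v0 firmware header: record each section's type, length, download
 * address and, for security sections, the key material to use, then verify
 * that the accumulated section lengths exactly cover the firmware binary.
 */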
140 static int rtw89_fw_hdr_parser_v0(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
141 struct rtw89_fw_bin_info *info)
142 {
143 const struct rtw89_fw_hdr *fw_hdr = (const struct rtw89_fw_hdr *)fw;
144 const struct rtw89_chip_info *chip = rtwdev->chip;
145 struct rtw89_fw_hdr_section_info *section_info;
146 struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
147 const struct rtw89_fw_dynhdr_hdr *fwdynhdr;
148 const struct rtw89_fw_hdr_section *section;
149 const u8 *fw_end = fw + len;
150 const u8 *bin;
151 u32 base_hdr_len;
152 u32 mssc_len;
153 int ret;
154 u32 i;
155
156 if (!info)
157 return -EINVAL;
158
159 info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_W6_SEC_NUM);
160 base_hdr_len = struct_size(fw_hdr, sections, info->section_num);
161 info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_W7_DYN_HDR);
162 info->idmem_share_mode = le32_get_bits(fw_hdr->w7, FW_HDR_W7_IDMEM_SHARE_MODE);
163
164 if (info->dynamic_hdr_en) {
165 info->hdr_len = le32_get_bits(fw_hdr->w3, FW_HDR_W3_LEN);
166 info->dynamic_hdr_len = info->hdr_len - base_hdr_len;
167 fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len);
168 if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) {
169 rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n");
170 return -EINVAL;
171 }
172 } else {
173 info->hdr_len = base_hdr_len;
174 info->dynamic_hdr_len = 0;
175 }
176
177 bin = fw + info->hdr_len;
178
179 /* jump to section header */
180 section_info = info->section_info;
181 for (i = 0; i < info->section_num; i++) {
182 section = &fw_hdr->sections[i];
183 section_info->type =
184 le32_get_bits(section->w1, FWSECTION_HDR_W1_SECTIONTYPE);
185 section_info->len = le32_get_bits(section->w1, FWSECTION_HDR_W1_SEC_SIZE);
186
187 if (le32_get_bits(section->w1, FWSECTION_HDR_W1_CHECKSUM))
188 section_info->len += FWDL_SECTION_CHKSUM_LEN;
189 section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_W1_REDL);
190 section_info->dladdr =
191 le32_get_bits(section->w0, FWSECTION_HDR_W0_DL_ADDR) & 0x1fffffff;
192 section_info->addr = bin;
193
194 if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
195 section_info->mssc =
196 le32_get_bits(section->w2, FWSECTION_HDR_W2_MSSC);
197
198 ret = __parse_security_section(rtwdev, info, section_info,
199 bin, &mssc_len);
200 if (ret)
201 return ret;
202
203 if (sec->secure_boot && chip->chip_id == RTL8852B)
204 section_info->len_override = 960;
205 } else {
206 section_info->mssc = 0;
207 mssc_len = 0;
208 }
209
210 rtw89_debug(rtwdev, RTW89_DBG_FW,
211 "section[%d] type=%d len=0x%-6x mssc=%d mssc_len=%d addr=%tx\n",
212 i, section_info->type, section_info->len,
213 section_info->mssc, mssc_len, bin - fw);
214 rtw89_debug(rtwdev, RTW89_DBG_FW,
215 " ignore=%d key_addr=%p (0x%tx) key_len=%d key_idx=%d\n",
216 section_info->ignore, section_info->key_addr,
217 section_info->key_addr ?
218 section_info->key_addr - section_info->addr : 0,
219 section_info->key_len, section_info->key_idx);
220
221 bin += section_info->len + mssc_len;
222 section_info++;
223 }
224
225 if (fw_end != bin) {
226 rtw89_err(rtwdev, "[ERR]fw bin size\n");
227 return -EINVAL;
228 }
229
230 return 0;
231 }
232
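/* Map the configured MSS device type, customer index and key number onto a
 * bit of the key-pool remap table, then count the set bits before it to get
 * the index of the matching key actually present in the pool.
 */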
233 static int __get_mssc_key_idx(struct rtw89_dev *rtwdev,
234 const struct rtw89_fw_mss_pool_hdr *mss_hdr,
235 u32 rmp_tbl_size, u32 *key_idx)
236 {
237 struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
238 u32 sel_byte_idx;
239 u32 mss_sel_idx;
240 u8 sel_bit_idx;
241 int i;
242
243 if (sec->mss_dev_type == RTW89_FW_MSS_DEV_TYPE_FWSEC_DEF) {
244 if (!mss_hdr->defen)
245 return -ENOENT;
246
247 mss_sel_idx = sec->mss_cust_idx * le16_to_cpu(mss_hdr->msskey_num_max) +
248 sec->mss_key_num;
249 } else {
250 if (mss_hdr->defen)
251 mss_sel_idx = FWDL_MSS_POOL_DEFKEYSETS_SIZE << 3;
252 else
253 mss_sel_idx = 0;
254 mss_sel_idx += sec->mss_dev_type * le16_to_cpu(mss_hdr->msskey_num_max) *
255 le16_to_cpu(mss_hdr->msscust_max) +
256 sec->mss_cust_idx * le16_to_cpu(mss_hdr->msskey_num_max) +
257 sec->mss_key_num;
258 }
259
260 sel_byte_idx = mss_sel_idx >> 3;
261 sel_bit_idx = mss_sel_idx & 0x7;
262
263 if (sel_byte_idx >= rmp_tbl_size)
264 return -EFAULT;
265
266 if (!(mss_hdr->rmp_tbl[sel_byte_idx] & BIT(sel_bit_idx)))
267 return -ENOENT;
268
269 *key_idx = hweight8(mss_hdr->rmp_tbl[sel_byte_idx] & (BIT(sel_bit_idx) - 1));
270
271 for (i = 0; i < sel_byte_idx; i++)
272 *key_idx += hweight8(mss_hdr->rmp_tbl[i]);
273
274 return 0;
275 }
276
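/* Parse a formatted MSS key pool appended to a security section: check its
 * signature and layout, compute the total pool length, and, under secure
 * boot, locate the key blob that matches this device.
 */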
277 static int __parse_formatted_mssc(struct rtw89_dev *rtwdev,
278 struct rtw89_fw_bin_info *info,
279 struct rtw89_fw_hdr_section_info *section_info,
280 const void *content,
281 u32 *mssc_len)
282 {
283 const struct rtw89_fw_mss_pool_hdr *mss_hdr = content + section_info->len;
284 const union rtw89_fw_section_mssc_content *section_content = content;
285 struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
286 u32 rmp_tbl_size;
287 u32 key_sign_len;
288 u32 real_key_idx;
289 u32 sb_sel_ver;
290 int ret;
291
292 if (memcmp(mss_signature, mss_hdr->signature, sizeof(mss_signature)) != 0) {
293 rtw89_err(rtwdev, "[ERR] wrong MSS signature\n");
294 return -ENOENT;
295 }
296
297 if (mss_hdr->rmpfmt == MSS_POOL_RMP_TBL_BITMASK) {
298 rmp_tbl_size = (le16_to_cpu(mss_hdr->msskey_num_max) *
299 le16_to_cpu(mss_hdr->msscust_max) *
300 mss_hdr->mssdev_max) >> 3;
301 if (mss_hdr->defen)
302 rmp_tbl_size += FWDL_MSS_POOL_DEFKEYSETS_SIZE;
303 } else {
304 rtw89_err(rtwdev, "[ERR] MSS Key Pool Remap Table Format Unsupport:%X\n",
305 mss_hdr->rmpfmt);
306 return -EINVAL;
307 }
308
309 if (rmp_tbl_size + sizeof(*mss_hdr) != le32_to_cpu(mss_hdr->key_raw_offset)) {
310 rtw89_err(rtwdev, "[ERR] MSS Key Pool Format Error:0x%X + 0x%X != 0x%X\n",
311 rmp_tbl_size, (int)sizeof(*mss_hdr),
312 le32_to_cpu(mss_hdr->key_raw_offset));
313 return -EINVAL;
314 }
315
316 key_sign_len = le16_to_cpu(section_content->key_sign_len.v) >> 2;
317 if (!key_sign_len)
318 key_sign_len = 512;
319
320 if (info->dsp_checksum)
321 key_sign_len += FWDL_SECURITY_CHKSUM_LEN;
322
323 *mssc_len = sizeof(*mss_hdr) + rmp_tbl_size +
324 le16_to_cpu(mss_hdr->keypair_num) * key_sign_len;
325
326 if (!sec->secure_boot)
327 goto out;
328
329 sb_sel_ver = get_unaligned_le32(&section_content->sb_sel_ver.v);
330 if (sb_sel_ver && sb_sel_ver != sec->sb_sel_mgn)
331 goto ignore;
332
333 ret = __get_mssc_key_idx(rtwdev, mss_hdr, rmp_tbl_size, &real_key_idx);
334 if (ret)
335 goto ignore;
336
337 section_info->key_addr = content + section_info->len +
338 le32_to_cpu(mss_hdr->key_raw_offset) +
339 key_sign_len * real_key_idx;
340 section_info->key_len = key_sign_len;
341 section_info->key_idx = real_key_idx;
342
343 out:
344 if (info->secure_section_exist) {
345 section_info->ignore = true;
346 return 0;
347 }
348
349 info->secure_section_exist = true;
350
351 return 0;
352
353 ignore:
354 section_info->ignore = true;
355
356 return 0;
357 }
358
359 static int __check_secure_blacklist(struct rtw89_dev *rtwdev,
360 struct rtw89_fw_bin_info *info,
361 struct rtw89_fw_hdr_section_info *section_info,
362 const void *content)
363 {
364 const struct rtw89_fw_blacklist *chip_blacklist = rtwdev->chip->fw_blacklist;
365 const union rtw89_fw_section_mssc_content *section_content = content;
366 struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
367 u8 byte_idx;
368 u8 bit_mask;
369
370 if (!sec->secure_boot)
371 return 0;
372
373 if (!info->secure_section_exist || section_info->ignore)
374 return 0;
375
376 if (!chip_blacklist) {
377 rtw89_warn(rtwdev, "chip no blacklist for secure firmware\n");
378 return -ENOENT;
379 }
380
381 byte_idx = section_content->blacklist.bit_in_chip_list >> 3;
382 bit_mask = BIT(section_content->blacklist.bit_in_chip_list & 0x7);
383
384 if (section_content->blacklist.ver > chip_blacklist->ver) {
385 rtw89_warn(rtwdev, "chip blacklist out of date (%u, %u)\n",
386 section_content->blacklist.ver, chip_blacklist->ver);
387 return -EINVAL;
388 }
389
390 if (chip_blacklist->list[byte_idx] & bit_mask) {
391 rtw89_warn(rtwdev, "firmware %u in chip blacklist\n",
392 section_content->blacklist.ver);
393 return -EPERM;
394 }
395
396 return 0;
397 }
398
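/* Work out how much trailing security data (MSS content) follows this
 * section and, under secure boot, which signature/key inside it should be
 * downloaded instead of the default one.
 */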
399 static int __parse_security_section(struct rtw89_dev *rtwdev,
400 struct rtw89_fw_bin_info *info,
401 struct rtw89_fw_hdr_section_info *section_info,
402 const void *content,
403 u32 *mssc_len)
404 {
405 struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
406 int ret;
407
408 if ((section_info->mssc & FORMATTED_MSSC_MASK) == FORMATTED_MSSC) {
409 ret = __parse_formatted_mssc(rtwdev, info, section_info,
410 content, mssc_len);
411 if (ret)
412 return -EINVAL;
413 } else {
414 *mssc_len = section_info->mssc * FWDL_SECURITY_SIGLEN;
415 if (info->dsp_checksum)
416 *mssc_len += section_info->mssc * FWDL_SECURITY_CHKSUM_LEN;
417
418 if (sec->secure_boot) {
419 if (sec->mss_idx >= section_info->mssc) {
420 rtw89_err(rtwdev, "unexpected MSS %d >= %d\n",
421 sec->mss_idx, section_info->mssc);
422 return -EFAULT;
423 }
424 section_info->key_addr = content + section_info->len +
425 sec->mss_idx * FWDL_SECURITY_SIGLEN;
426 section_info->key_len = FWDL_SECURITY_SIGLEN;
427 }
428
429 info->secure_section_exist = true;
430 }
431
432 ret = __check_secure_blacklist(rtwdev, info, section_info, content);
433 WARN_ONCE(ret, "Current firmware in blacklist. Please update firmware.\n");
434
435 return 0;
436 }
437
438 static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
439 struct rtw89_fw_bin_info *info)
440 {
441 const struct rtw89_fw_hdr_v1 *fw_hdr = (const struct rtw89_fw_hdr_v1 *)fw;
442 struct rtw89_fw_hdr_section_info *section_info;
443 const struct rtw89_fw_dynhdr_hdr *fwdynhdr;
444 const struct rtw89_fw_hdr_section_v1 *section;
445 const u8 *fw_end = fw + len;
446 const u8 *bin;
447 u32 base_hdr_len;
448 u32 mssc_len;
449 int ret;
450 u32 i;
451
452 info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_SEC_NUM);
453 info->dsp_checksum = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_DSP_CHKSUM);
454 base_hdr_len = struct_size(fw_hdr, sections, info->section_num);
455 info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_DYN_HDR);
456 info->idmem_share_mode = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_IDMEM_SHARE_MODE);
457
458 if (info->dynamic_hdr_en) {
459 info->hdr_len = le32_get_bits(fw_hdr->w5, FW_HDR_V1_W5_HDR_SIZE);
460 info->dynamic_hdr_len = info->hdr_len - base_hdr_len;
461 fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len);
462 if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) {
463 rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n");
464 return -EINVAL;
465 }
466 } else {
467 info->hdr_len = base_hdr_len;
468 info->dynamic_hdr_len = 0;
469 }
470
471 bin = fw + info->hdr_len;
472
473 /* jump to section header */
474 section_info = info->section_info;
475 for (i = 0; i < info->section_num; i++) {
476 section = &fw_hdr->sections[i];
477
478 section_info->type =
479 le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SECTIONTYPE);
480 section_info->len =
481 le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SEC_SIZE);
482 if (le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_CHECKSUM))
483 section_info->len += FWDL_SECTION_CHKSUM_LEN;
484 section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_REDL);
485 section_info->dladdr =
486 le32_get_bits(section->w0, FWSECTION_HDR_V1_W0_DL_ADDR);
487 section_info->addr = bin;
488
489 if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
490 section_info->mssc =
491 le32_get_bits(section->w2, FWSECTION_HDR_V1_W2_MSSC);
492
493 ret = __parse_security_section(rtwdev, info, section_info,
494 bin, &mssc_len);
495 if (ret)
496 return ret;
497 } else {
498 section_info->mssc = 0;
499 mssc_len = 0;
500 }
501
502 rtw89_debug(rtwdev, RTW89_DBG_FW,
503 "section[%d] type=%d len=0x%-6x mssc=%d mssc_len=%d addr=%tx\n",
504 i, section_info->type, section_info->len,
505 section_info->mssc, mssc_len, bin - fw);
506 rtw89_debug(rtwdev, RTW89_DBG_FW,
507 " ignore=%d key_addr=%p (0x%tx) key_len=%d key_idx=%d\n",
508 section_info->ignore, section_info->key_addr,
509 section_info->key_addr ?
510 section_info->key_addr - section_info->addr : 0,
511 section_info->key_len, section_info->key_idx);
512
513 bin += section_info->len + mssc_len;
514 section_info++;
515 }
516
517 if (fw_end != bin) {
518 rtw89_err(rtwdev, "[ERR]fw bin size\n");
519 return -EINVAL;
520 }
521
522 if (!info->secure_section_exist)
523 rtw89_warn(rtwdev, "no firmware secure section\n");
524
525 return 0;
526 }
527
528 static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev,
529 const struct rtw89_fw_suit *fw_suit,
530 struct rtw89_fw_bin_info *info)
531 {
532 const u8 *fw = fw_suit->data;
533 u32 len = fw_suit->size;
534
535 if (!fw || !len) {
536 rtw89_err(rtwdev, "fw type %d isn't recognized\n", fw_suit->type);
537 return -ENOENT;
538 }
539
540 switch (fw_suit->hdr_ver) {
541 case 0:
542 return rtw89_fw_hdr_parser_v0(rtwdev, fw, len, info);
543 case 1:
544 return rtw89_fw_hdr_parser_v1(rtwdev, fw, len, info);
545 default:
546 return -ENOENT;
547 }
548 }
549
550 static
551 const struct rtw89_mfw_hdr *rtw89_mfw_get_hdr_ptr(struct rtw89_dev *rtwdev,
552 const struct firmware *firmware)
553 {
554 const struct rtw89_mfw_hdr *mfw_hdr;
555
556 if (sizeof(*mfw_hdr) > firmware->size)
557 return NULL;
558
559 mfw_hdr = (const struct rtw89_mfw_hdr *)&firmware->data[0];
560
561 if (mfw_hdr->sig != RTW89_MFW_SIG)
562 return NULL;
563
564 return mfw_hdr;
565 }
566
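/* Sanity-check a multi-firmware (mfw) header: it must describe at least one
 * firmware entry, and its info array must not run past the end of the file.
 */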
567 static int rtw89_mfw_validate_hdr(struct rtw89_dev *rtwdev,
568 const struct firmware *firmware,
569 const struct rtw89_mfw_hdr *mfw_hdr)
570 {
571 const void *mfw = firmware->data;
572 u32 mfw_len = firmware->size;
573 u8 fw_nr = mfw_hdr->fw_nr;
574 const void *ptr;
575
576 if (fw_nr == 0) {
577 rtw89_err(rtwdev, "mfw header has no fw entry\n");
578 return -ENOENT;
579 }
580
581 ptr = &mfw_hdr->info[fw_nr];
582
583 if (ptr > mfw + mfw_len) {
584 rtw89_err(rtwdev, "mfw header out of address\n");
585 return -EFAULT;
586 }
587
588 return 0;
589 }
590
591 static
592 int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
593 struct rtw89_fw_suit *fw_suit, bool nowarn)
594 {
595 struct rtw89_fw_info *fw_info = &rtwdev->fw;
596 const struct firmware *firmware = fw_info->req.firmware;
597 const struct rtw89_mfw_info *mfw_info = NULL, *tmp;
598 const struct rtw89_mfw_hdr *mfw_hdr;
599 const u8 *mfw = firmware->data;
600 u32 mfw_len = firmware->size;
601 int ret;
602 int i;
603
604 mfw_hdr = rtw89_mfw_get_hdr_ptr(rtwdev, firmware);
605 if (!mfw_hdr) {
606 rtw89_debug(rtwdev, RTW89_DBG_FW, "use legacy firmware\n");
607 /* legacy firmware supports the normal type only */
608 if (type != RTW89_FW_NORMAL)
609 return -EINVAL;
610 fw_suit->data = mfw;
611 fw_suit->size = mfw_len;
612 return 0;
613 }
614
615 ret = rtw89_mfw_validate_hdr(rtwdev, firmware, mfw_hdr);
616 if (ret)
617 return ret;
618
619 for (i = 0; i < mfw_hdr->fw_nr; i++) {
620 tmp = &mfw_hdr->info[i];
621 if (tmp->type != type)
622 continue;
623
624 if (type == RTW89_FW_LOGFMT) {
625 mfw_info = tmp;
626 goto found;
627 }
628
629 /* WiFi firmware entries in the firmware file are not sorted by version,
630 * so walk them all and take the closest version equal to or below the
631 * chip's cut version.
632 */
632 if (tmp->cv <= rtwdev->hal.cv && !tmp->mp) {
633 if (!mfw_info || mfw_info->cv < tmp->cv)
634 mfw_info = tmp;
635 }
636 }
637
638 if (mfw_info)
639 goto found;
640
641 if (!nowarn)
642 rtw89_err(rtwdev, "no suitable firmware found\n");
643 return -ENOENT;
644
645 found:
646 fw_suit->data = mfw + le32_to_cpu(mfw_info->shift);
647 fw_suit->size = le32_to_cpu(mfw_info->size);
648
649 if (fw_suit->data + fw_suit->size > mfw + mfw_len) {
650 rtw89_err(rtwdev, "fw_suit %d out of address\n", type);
651 return -EFAULT;
652 }
653
654 return 0;
655 }
656
657 static u32 rtw89_mfw_get_size(struct rtw89_dev *rtwdev)
658 {
659 struct rtw89_fw_info *fw_info = &rtwdev->fw;
660 const struct firmware *firmware = fw_info->req.firmware;
661 const struct rtw89_mfw_info *mfw_info;
662 const struct rtw89_mfw_hdr *mfw_hdr;
663 u32 size;
664 int ret;
665
666 mfw_hdr = rtw89_mfw_get_hdr_ptr(rtwdev, firmware);
667 if (!mfw_hdr) {
668 rtw89_warn(rtwdev, "not mfw format\n");
669 return 0;
670 }
671
672 ret = rtw89_mfw_validate_hdr(rtwdev, firmware, mfw_hdr);
673 if (ret)
674 return ret;
675
676 mfw_info = &mfw_hdr->info[mfw_hdr->fw_nr - 1];
677 size = le32_to_cpu(mfw_info->shift) + le32_to_cpu(mfw_info->size);
678
679 return size;
680 }
681
682 static void rtw89_fw_update_ver_v0(struct rtw89_dev *rtwdev,
683 struct rtw89_fw_suit *fw_suit,
684 const struct rtw89_fw_hdr *hdr)
685 {
686 fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MAJOR_VERSION);
687 fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MINOR_VERSION);
688 fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_W1_SUBVERSION);
689 fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_W1_SUBINDEX);
690 fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_W2_COMMITID);
691 fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_W5_YEAR);
692 fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_W4_MONTH);
693 fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_W4_DATE);
694 fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_W4_HOUR);
695 fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_W4_MIN);
696 fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_W7_CMD_VERSERION);
697 }
698
699 static void rtw89_fw_update_ver_v1(struct rtw89_dev *rtwdev,
700 struct rtw89_fw_suit *fw_suit,
701 const struct rtw89_fw_hdr_v1 *hdr)
702 {
703 fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MAJOR_VERSION);
704 fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MINOR_VERSION);
705 fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBVERSION);
706 fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBINDEX);
707 fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_V1_W2_COMMITID);
708 fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_V1_W5_YEAR);
709 fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MONTH);
710 fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_V1_W4_DATE);
711 fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_V1_W4_HOUR);
712 fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MIN);
713 fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_V1_W3_CMD_VERSERION);
714 }
715
716 static int rtw89_fw_update_ver(struct rtw89_dev *rtwdev,
717 enum rtw89_fw_type type,
718 struct rtw89_fw_suit *fw_suit)
719 {
720 const struct rtw89_fw_hdr *v0 = (const struct rtw89_fw_hdr *)fw_suit->data;
721 const struct rtw89_fw_hdr_v1 *v1 = (const struct rtw89_fw_hdr_v1 *)fw_suit->data;
722
723 if (type == RTW89_FW_LOGFMT)
724 return 0;
725
726 fw_suit->type = type;
727 fw_suit->hdr_ver = le32_get_bits(v0->w3, FW_HDR_W3_HDR_VER);
728
729 switch (fw_suit->hdr_ver) {
730 case 0:
731 rtw89_fw_update_ver_v0(rtwdev, fw_suit, v0);
732 break;
733 case 1:
734 rtw89_fw_update_ver_v1(rtwdev, fw_suit, v1);
735 break;
736 default:
737 rtw89_err(rtwdev, "Unknown firmware header version %u\n",
738 fw_suit->hdr_ver);
739 return -ENOENT;
740 }
741
742 rtw89_info(rtwdev,
743 "Firmware version %u.%u.%u.%u (%08x), cmd version %u, type %u\n",
744 fw_suit->major_ver, fw_suit->minor_ver, fw_suit->sub_ver,
745 fw_suit->sub_idex, fw_suit->commitid, fw_suit->cmd_ver, type);
746
747 return 0;
748 }
749
750 static
751 int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
752 bool nowarn)
753 {
754 struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
755 int ret;
756
757 ret = rtw89_mfw_recognize(rtwdev, type, fw_suit, nowarn);
758 if (ret)
759 return ret;
760
761 return rtw89_fw_update_ver(rtwdev, type, fw_suit);
762 }
763
764 static
765 int __rtw89_fw_recognize_from_elm(struct rtw89_dev *rtwdev,
766 const struct rtw89_fw_element_hdr *elm,
767 const union rtw89_fw_element_arg arg)
768 {
769 enum rtw89_fw_type type = arg.fw_type;
770 struct rtw89_hal *hal = &rtwdev->hal;
771 struct rtw89_fw_suit *fw_suit;
772
773 /* BB MCU versions appear in decreasing order in the firmware file, so take
774 * the first version equal to or below the chip's cut version; it is the
775 * closest match.
776 */
776 if (hal->cv < elm->u.bbmcu.cv)
777 return 1; /* ignore this element */
778
779 fw_suit = rtw89_fw_suit_get(rtwdev, type);
780 if (fw_suit->data)
781 return 1; /* ignore this element (a firmware is taken already) */
782
783 fw_suit->data = elm->u.bbmcu.contents;
784 fw_suit->size = le32_to_cpu(elm->size);
785
786 return rtw89_fw_update_ver(rtwdev, type, fw_suit);
787 }
788
789 #define __DEF_FW_FEAT_COND(__cond, __op) \
790 static bool __fw_feat_cond_ ## __cond(u32 suit_ver_code, u32 comp_ver_code) \
791 { \
792 return suit_ver_code __op comp_ver_code; \
793 }
794
795 __DEF_FW_FEAT_COND(ge, >=); /* greater or equal */
796 __DEF_FW_FEAT_COND(le, <=); /* less or equal */
797 __DEF_FW_FEAT_COND(lt, <); /* less than */
798
799 struct __fw_feat_cfg {
800 enum rtw89_core_chip_id chip_id;
801 enum rtw89_fw_feature feature;
802 u32 ver_code;
803 bool (*cond)(u32 suit_ver_code, u32 comp_ver_code);
804 };
805
806 #define __CFG_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat) \
807 { \
808 .chip_id = _chip, \
809 .feature = RTW89_FW_FEATURE_ ## _feat, \
810 .ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \
811 .cond = __fw_feat_cond_ ## _cond, \
812 }
813
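/* Per-chip firmware feature table: a feature is set when the recognized
 * firmware version satisfies the listed condition against the version code.
 */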
814 static const struct __fw_feat_cfg fw_feat_tbl[] = {
815 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, TX_WAKE),
816 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, SCAN_OFFLOAD),
817 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 41, 0, CRASH_TRIGGER_TYPE_0),
818 __CFG_FW_FEAT(RTL8852A, le, 0, 13, 29, 0, OLD_HT_RA_FORMAT),
819 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD),
820 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE),
821 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER_TYPE_0),
822 __CFG_FW_FEAT(RTL8852A, lt, 0, 13, 37, 0, NO_WOW_CPU_IO_RX),
823 __CFG_FW_FEAT(RTL8852A, lt, 0, 13, 38, 0, NO_PACKET_DROP),
824 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, NO_LPS_PG),
825 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, TX_WAKE),
826 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, CRASH_TRIGGER_TYPE_0),
827 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, SCAN_OFFLOAD),
828 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 7, BEACON_FILTER),
829 __CFG_FW_FEAT(RTL8852B, lt, 0, 29, 30, 0, NO_WOW_CPU_IO_RX),
830 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 127, 0, LPS_DACK_BY_C2H_REG),
831 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 128, 0, CRASH_TRIGGER_TYPE_1),
832 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 128, 0, SCAN_OFFLOAD_EXTRA_OP),
833 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, NO_LPS_PG),
834 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, TX_WAKE),
835 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 90, 0, CRASH_TRIGGER_TYPE_0),
836 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 91, 0, SCAN_OFFLOAD),
837 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 110, 0, BEACON_FILTER),
838 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 127, 0, SCAN_OFFLOAD_EXTRA_OP),
839 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 127, 0, LPS_DACK_BY_C2H_REG),
840 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 127, 0, CRASH_TRIGGER_TYPE_1),
841 __CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS),
842 __CFG_FW_FEAT(RTL8852C, ge, 0, 0, 0, 0, RFK_NTFY_MCC_V0),
843 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE),
844 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD),
845 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER_TYPE_0),
846 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 56, 10, BEACON_FILTER),
847 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 80, 0, WOW_REASON_V1),
848 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 128, 0, BEACON_LOSS_COUNT_V1),
849 __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 30, 0, CRASH_TRIGGER_TYPE_0),
850 __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 11, 0, MACID_PAUSE_SLEEP),
851 __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 35, 0, SCAN_OFFLOAD),
852 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 21, 0, SCAN_OFFLOAD_BE_V0),
853 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 12, 0, BEACON_FILTER),
854 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 22, 0, WOW_REASON_V1),
855 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 28, 0, RFK_IQK_V0),
856 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 31, 0, RFK_PRE_NOTIFY_V0),
857 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 31, 0, LPS_CH_INFO),
858 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 42, 0, RFK_RXDCK_V0),
859 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 46, 0, NOTIFY_AP_INFO),
860 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 47, 0, CH_INFO_BE_V0),
861 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 49, 0, RFK_PRE_NOTIFY_V1),
862 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 51, 0, NO_PHYCAP_P1),
863 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 64, 0, NO_POWER_DIFFERENCE),
864 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 71, 0, BEACON_LOSS_COUNT_V1),
865 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 76, 0, LPS_DACK_BY_C2H_REG),
866 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 79, 0, CRASH_TRIGGER_TYPE_1),
867 };
868
869 static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw,
870 const struct rtw89_chip_info *chip,
871 u32 ver_code)
872 {
873 int i;
874
875 for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) {
876 const struct __fw_feat_cfg *ent = &fw_feat_tbl[i];
877
878 if (chip->chip_id != ent->chip_id)
879 continue;
880
881 if (ent->cond(ver_code, ent->ver_code))
882 RTW89_SET_FW_FEATURE(ent->feature, fw);
883 }
884 }
885
886 static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev)
887 {
888 const struct rtw89_chip_info *chip = rtwdev->chip;
889 const struct rtw89_fw_suit *fw_suit;
890 u32 suit_ver_code;
891
892 fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL);
893 suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit);
894
895 rtw89_fw_iterate_feature_cfg(&rtwdev->fw, chip, suit_ver_code);
896 }
897
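/* Request the firmware file early, trying the newest supported file format
 * first, and derive feature flags from its header version when possible.
 */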
898 const struct firmware *
899 rtw89_early_fw_feature_recognize(struct device *device,
900 const struct rtw89_chip_info *chip,
901 struct rtw89_fw_info *early_fw,
902 int *used_fw_format)
903 {
904 const struct firmware *firmware;
905 char fw_name[64];
906 int fw_format;
907 u32 ver_code;
908 int ret;
909
910 for (fw_format = chip->fw_format_max; fw_format >= 0; fw_format--) {
911 rtw89_fw_get_filename(fw_name, sizeof(fw_name),
912 chip->fw_basename, fw_format);
913
914 ret = request_firmware(&firmware, fw_name, device);
915 if (!ret) {
916 dev_info(device, "loaded firmware %s\n", fw_name);
917 *used_fw_format = fw_format;
918 break;
919 }
920 }
921
922 if (ret) {
923 dev_err(device, "failed to early request firmware: %d\n", ret);
924 return NULL;
925 }
926
927 ver_code = rtw89_compat_fw_hdr_ver_code(firmware->data);
928
929 if (!ver_code)
930 goto out;
931
932 rtw89_fw_iterate_feature_cfg(early_fw, chip, ver_code);
933
934 out:
935 return firmware;
936 }
937
938 static int rtw89_fw_validate_ver_required(struct rtw89_dev *rtwdev)
939 {
940 const struct rtw89_chip_variant *variant = rtwdev->variant;
941 const struct rtw89_fw_suit *fw_suit;
942 u32 suit_ver_code;
943
944 if (!variant)
945 return 0;
946
947 fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL);
948 suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit);
949
950 if (variant->fw_min_ver_code > suit_ver_code) {
951 rtw89_err(rtwdev, "minimum required firmware version is 0x%x\n",
952 variant->fw_min_ver_code);
953 return -ENOENT;
954 }
955
956 return 0;
957 }
958
959 int rtw89_fw_recognize(struct rtw89_dev *rtwdev)
960 {
961 const struct rtw89_chip_info *chip = rtwdev->chip;
962 int ret;
963
964 if (chip->try_ce_fw) {
965 ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL_CE, true);
966 if (!ret)
967 goto normal_done;
968 }
969
970 ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL, false);
971 if (ret)
972 return ret;
973
974 normal_done:
975 ret = rtw89_fw_validate_ver_required(rtwdev);
976 if (ret)
977 return ret;
978
979 /* It still works if the wowlan firmware doesn't exist. */
980 __rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN, false);
981
982 /* It still works if the log format file doesn't exist. */
983 __rtw89_fw_recognize(rtwdev, RTW89_FW_LOGFMT, true);
984
985 rtw89_fw_recognize_features(rtwdev);
986
987 rtw89_coex_recognize_ver(rtwdev);
988
989 return 0;
990 }
991
992 static
993 int rtw89_build_phy_tbl_from_elm(struct rtw89_dev *rtwdev,
994 const struct rtw89_fw_element_hdr *elm,
995 const union rtw89_fw_element_arg arg)
996 {
997 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
998 struct rtw89_phy_table *tbl;
999 struct rtw89_reg2_def *regs;
1000 enum rtw89_rf_path rf_path;
1001 u32 n_regs, i;
1002 u8 idx;
1003
1004 tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
1005 if (!tbl)
1006 return -ENOMEM;
1007
1008 switch (le32_to_cpu(elm->id)) {
1009 case RTW89_FW_ELEMENT_ID_BB_REG:
1010 elm_info->bb_tbl = tbl;
1011 break;
1012 case RTW89_FW_ELEMENT_ID_BB_GAIN:
1013 elm_info->bb_gain = tbl;
1014 break;
1015 case RTW89_FW_ELEMENT_ID_RADIO_A:
1016 case RTW89_FW_ELEMENT_ID_RADIO_B:
1017 case RTW89_FW_ELEMENT_ID_RADIO_C:
1018 case RTW89_FW_ELEMENT_ID_RADIO_D:
1019 rf_path = arg.rf_path;
1020 idx = elm->u.reg2.idx;
1021
1022 elm_info->rf_radio[idx] = tbl;
1023 tbl->rf_path = rf_path;
1024 tbl->config = rtw89_phy_config_rf_reg_v1;
1025 break;
1026 case RTW89_FW_ELEMENT_ID_RF_NCTL:
1027 elm_info->rf_nctl = tbl;
1028 break;
1029 default:
1030 kfree(tbl);
1031 return -ENOENT;
1032 }
1033
1034 n_regs = le32_to_cpu(elm->size) / sizeof(tbl->regs[0]);
1035 regs = kcalloc(n_regs, sizeof(*regs), GFP_KERNEL);
1036 if (!regs)
1037 goto out;
1038
1039 for (i = 0; i < n_regs; i++) {
1040 regs[i].addr = le32_to_cpu(elm->u.reg2.regs[i].addr);
1041 regs[i].data = le32_to_cpu(elm->u.reg2.regs[i].data);
1042 }
1043
1044 tbl->n_regs = n_regs;
1045 tbl->regs = regs;
1046
1047 return 0;
1048
1049 out:
1050 kfree(tbl);
1051 return -ENOMEM;
1052 }
1053
1054 static
1055 int rtw89_fw_recognize_txpwr_from_elm(struct rtw89_dev *rtwdev,
1056 const struct rtw89_fw_element_hdr *elm,
1057 const union rtw89_fw_element_arg arg)
1058 {
1059 const struct __rtw89_fw_txpwr_element *txpwr_elm = &elm->u.txpwr;
1060 const unsigned long offset = arg.offset;
1061 struct rtw89_efuse *efuse = &rtwdev->efuse;
1062 struct rtw89_txpwr_conf *conf;
1063
1064 if (!rtwdev->rfe_data) {
1065 rtwdev->rfe_data = kzalloc(sizeof(*rtwdev->rfe_data), GFP_KERNEL);
1066 if (!rtwdev->rfe_data)
1067 return -ENOMEM;
1068 }
1069
1070 conf = (void *)rtwdev->rfe_data + offset;
1071
1072 /* if multiple entries match, the last one processed takes effect */
1073 if (txpwr_elm->rfe_type == efuse->rfe_type)
1074 goto setup;
1075
1076 /* if none has matched, accept the default entry */
1077 if (txpwr_elm->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE &&
1078 (!rtw89_txpwr_conf_valid(conf) ||
1079 conf->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE))
1080 goto setup;
1081
1082 rtw89_debug(rtwdev, RTW89_DBG_FW, "skip txpwr element ID %u RFE %u\n",
1083 elm->id, txpwr_elm->rfe_type);
1084 return 0;
1085
1086 setup:
1087 rtw89_debug(rtwdev, RTW89_DBG_FW, "take txpwr element ID %u RFE %u\n",
1088 elm->id, txpwr_elm->rfe_type);
1089
1090 conf->rfe_type = txpwr_elm->rfe_type;
1091 conf->ent_sz = txpwr_elm->ent_sz;
1092 conf->num_ents = le32_to_cpu(txpwr_elm->num_ents);
1093 conf->data = txpwr_elm->content;
1094 return 0;
1095 }
1096
1097 static
1098 int rtw89_build_txpwr_trk_tbl_from_elm(struct rtw89_dev *rtwdev,
1099 const struct rtw89_fw_element_hdr *elm,
1100 const union rtw89_fw_element_arg arg)
1101 {
1102 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
1103 const struct rtw89_chip_info *chip = rtwdev->chip;
1104 u32 needed_bitmap = 0;
1105 u32 offset = 0;
1106 int subband;
1107 u32 bitmap;
1108 int type;
1109
1110 if (chip->support_bands & BIT(NL80211_BAND_6GHZ))
1111 needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_6GHZ;
1112 if (chip->support_bands & BIT(NL80211_BAND_5GHZ))
1113 needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_5GHZ;
1114 if (chip->support_bands & BIT(NL80211_BAND_2GHZ))
1115 needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_2GHZ;
1116
1117 bitmap = le32_to_cpu(elm->u.txpwr_trk.bitmap);
1118
1119 if ((bitmap & needed_bitmap) != needed_bitmap) {
1120 rtw89_warn(rtwdev, "needed txpwr trk bitmap %08x but %08x\n",
1121 needed_bitmap, bitmap);
1122 return -ENOENT;
1123 }
1124
1125 elm_info->txpwr_trk = kzalloc(sizeof(*elm_info->txpwr_trk), GFP_KERNEL);
1126 if (!elm_info->txpwr_trk)
1127 return -ENOMEM;
1128
1129 for (type = 0; bitmap; type++, bitmap >>= 1) {
1130 if (!(bitmap & BIT(0)))
1131 continue;
1132
1133 if (type >= __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_START &&
1134 type <= __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_MAX)
1135 subband = 4;
1136 else if (type >= __RTW89_FW_TXPWR_TRK_TYPE_5GHZ_START &&
1137 type <= __RTW89_FW_TXPWR_TRK_TYPE_5GHZ_MAX)
1138 subband = 3;
1139 else if (type >= __RTW89_FW_TXPWR_TRK_TYPE_2GHZ_START &&
1140 type <= __RTW89_FW_TXPWR_TRK_TYPE_2GHZ_MAX)
1141 subband = 1;
1142 else
1143 break;
1144
1145 elm_info->txpwr_trk->delta[type] = &elm->u.txpwr_trk.contents[offset];
1146
1147 offset += subband;
1148 if (offset * DELTA_SWINGIDX_SIZE > le32_to_cpu(elm->size))
1149 goto err;
1150 }
1151
1152 return 0;
1153
1154 err:
1155 rtw89_warn(rtwdev, "unexpected txpwr trk offset %d over size %d\n",
1156 offset, le32_to_cpu(elm->size));
1157 kfree(elm_info->txpwr_trk);
1158 elm_info->txpwr_trk = NULL;
1159
1160 return -EFAULT;
1161 }
1162
1163 static
1164 int rtw89_build_rfk_log_fmt_from_elm(struct rtw89_dev *rtwdev,
1165 const struct rtw89_fw_element_hdr *elm,
1166 const union rtw89_fw_element_arg arg)
1167 {
1168 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
1169 u8 rfk_id;
1170
1171 if (elm_info->rfk_log_fmt)
1172 goto allocated;
1173
1174 elm_info->rfk_log_fmt = kzalloc(sizeof(*elm_info->rfk_log_fmt), GFP_KERNEL);
1175 if (!elm_info->rfk_log_fmt)
1176 return 1; /* this is an optional element, so just ignore this */
1177
1178 allocated:
1179 rfk_id = elm->u.rfk_log_fmt.rfk_id;
1180 if (rfk_id >= RTW89_PHY_C2H_RFK_LOG_FUNC_NUM)
1181 return 1;
1182
1183 elm_info->rfk_log_fmt->elm[rfk_id] = elm;
1184
1185 return 0;
1186 }
1187
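/* Copy one regulatory entry out of the element, tolerating shorter (older)
 * or longer (newer) entry sizes, and expand its function bitmap.
 */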
1188 static bool rtw89_regd_entcpy(struct rtw89_regd *regd, const void *cursor,
1189 u8 cursor_size)
1190 {
1191 /* fill default values if needed for backward compatibility */
1192 struct rtw89_fw_regd_entry entry = {
1193 .rule_2ghz = RTW89_NA,
1194 .rule_5ghz = RTW89_NA,
1195 .rule_6ghz = RTW89_NA,
1196 .fmap = cpu_to_le32(0x0),
1197 };
1198 u8 valid_size = min_t(u8, sizeof(entry), cursor_size);
1199 unsigned int i;
1200 u32 fmap;
1201
1202 memcpy(&entry, cursor, valid_size);
1203 memset(regd, 0, sizeof(*regd));
1204
1205 regd->alpha2[0] = entry.alpha2_0;
1206 regd->alpha2[1] = entry.alpha2_1;
1207 regd->alpha2[2] = '\0';
1208
1209 /* also need to consider forward compatibility */
1210 regd->txpwr_regd[RTW89_BAND_2G] = entry.rule_2ghz < RTW89_REGD_NUM ?
1211 entry.rule_2ghz : RTW89_NA;
1212 regd->txpwr_regd[RTW89_BAND_5G] = entry.rule_5ghz < RTW89_REGD_NUM ?
1213 entry.rule_5ghz : RTW89_NA;
1214 regd->txpwr_regd[RTW89_BAND_6G] = entry.rule_6ghz < RTW89_REGD_NUM ?
1215 entry.rule_6ghz : RTW89_NA;
1216
1217 BUILD_BUG_ON(sizeof(fmap) != sizeof(entry.fmap));
1218 BUILD_BUG_ON(sizeof(fmap) * 8 < NUM_OF_RTW89_REGD_FUNC);
1219
1220 fmap = le32_to_cpu(entry.fmap);
1221 for (i = 0; i < NUM_OF_RTW89_REGD_FUNC; i++) {
1222 if (fmap & BIT(i))
1223 set_bit(i, regd->func_bitmap);
1224 }
1225
1226 return true;
1227 }
1228
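/* Iterate over the regd entries of a firmware element, copying each entry
 * into 'regd' via rtw89_regd_entcpy() before the loop body runs.
 */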
1229 #define rtw89_for_each_in_regd_element(regd, element) \
1230 for (const void *cursor = (element)->content, \
1231 *end = (element)->content + \
1232 le32_to_cpu((element)->num_ents) * (element)->ent_sz; \
1233 cursor < end; cursor += (element)->ent_sz) \
1234 if (rtw89_regd_entcpy(regd, cursor, (element)->ent_sz))
1235
1236 static
1237 int rtw89_recognize_regd_from_elm(struct rtw89_dev *rtwdev,
1238 const struct rtw89_fw_element_hdr *elm,
1239 const union rtw89_fw_element_arg arg)
1240 {
1241 const struct __rtw89_fw_regd_element *regd_elm = &elm->u.regd;
1242 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
1243 u32 num_ents = le32_to_cpu(regd_elm->num_ents);
1244 struct rtw89_regd_data *p;
1245 struct rtw89_regd regd;
1246 u32 i = 0;
1247
1248 if (num_ents > RTW89_REGD_MAX_COUNTRY_NUM) {
1249 rtw89_warn(rtwdev,
1250 "regd element ents (%d) are over max num (%d)\n",
1251 num_ents, RTW89_REGD_MAX_COUNTRY_NUM);
1252 rtw89_warn(rtwdev,
1253 "regd element ignore and take another/common\n");
1254 return 1;
1255 }
1256
1257 if (elm_info->regd) {
1258 rtw89_debug(rtwdev, RTW89_DBG_REGD,
1259 "regd element take the latter\n");
1260 devm_kfree(rtwdev->dev, elm_info->regd);
1261 elm_info->regd = NULL;
1262 }
1263
1264 p = devm_kzalloc(rtwdev->dev, struct_size(p, map, num_ents), GFP_KERNEL);
1265 if (!p)
1266 return -ENOMEM;
1267
1268 p->nr = num_ents;
1269 rtw89_for_each_in_regd_element(&regd, regd_elm)
1270 p->map[i++] = regd;
1271
1272 if (i != num_ents) {
1273 rtw89_err(rtwdev, "regd element has %d invalid ents\n",
1274 num_ents - i);
1275 devm_kfree(rtwdev->dev, p);
1276 return -EINVAL;
1277 }
1278
1279 elm_info->regd = p;
1280 return 0;
1281 }
1282
1283 static const struct rtw89_fw_element_handler __fw_element_handlers[] = {
1284 [RTW89_FW_ELEMENT_ID_BBMCU0] = {__rtw89_fw_recognize_from_elm,
1285 { .fw_type = RTW89_FW_BBMCU0 }, NULL},
1286 [RTW89_FW_ELEMENT_ID_BBMCU1] = {__rtw89_fw_recognize_from_elm,
1287 { .fw_type = RTW89_FW_BBMCU1 }, NULL},
1288 [RTW89_FW_ELEMENT_ID_BB_REG] = {rtw89_build_phy_tbl_from_elm, {}, "BB"},
1289 [RTW89_FW_ELEMENT_ID_BB_GAIN] = {rtw89_build_phy_tbl_from_elm, {}, NULL},
1290 [RTW89_FW_ELEMENT_ID_RADIO_A] = {rtw89_build_phy_tbl_from_elm,
1291 { .rf_path = RF_PATH_A }, "radio A"},
1292 [RTW89_FW_ELEMENT_ID_RADIO_B] = {rtw89_build_phy_tbl_from_elm,
1293 { .rf_path = RF_PATH_B }, NULL},
1294 [RTW89_FW_ELEMENT_ID_RADIO_C] = {rtw89_build_phy_tbl_from_elm,
1295 { .rf_path = RF_PATH_C }, NULL},
1296 [RTW89_FW_ELEMENT_ID_RADIO_D] = {rtw89_build_phy_tbl_from_elm,
1297 { .rf_path = RF_PATH_D }, NULL},
1298 [RTW89_FW_ELEMENT_ID_RF_NCTL] = {rtw89_build_phy_tbl_from_elm, {}, "NCTL"},
1299 [RTW89_FW_ELEMENT_ID_TXPWR_BYRATE] = {
1300 rtw89_fw_recognize_txpwr_from_elm,
1301 { .offset = offsetof(struct rtw89_rfe_data, byrate.conf) }, "TXPWR",
1302 },
1303 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_2GHZ] = {
1304 rtw89_fw_recognize_txpwr_from_elm,
1305 { .offset = offsetof(struct rtw89_rfe_data, lmt_2ghz.conf) }, NULL,
1306 },
1307 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_5GHZ] = {
1308 rtw89_fw_recognize_txpwr_from_elm,
1309 { .offset = offsetof(struct rtw89_rfe_data, lmt_5ghz.conf) }, NULL,
1310 },
1311 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_6GHZ] = {
1312 rtw89_fw_recognize_txpwr_from_elm,
1313 { .offset = offsetof(struct rtw89_rfe_data, lmt_6ghz.conf) }, NULL,
1314 },
1315 [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_2GHZ] = {
1316 rtw89_fw_recognize_txpwr_from_elm,
1317 { .offset = offsetof(struct rtw89_rfe_data, da_lmt_2ghz.conf) }, NULL,
1318 },
1319 [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_5GHZ] = {
1320 rtw89_fw_recognize_txpwr_from_elm,
1321 { .offset = offsetof(struct rtw89_rfe_data, da_lmt_5ghz.conf) }, NULL,
1322 },
1323 [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_6GHZ] = {
1324 rtw89_fw_recognize_txpwr_from_elm,
1325 { .offset = offsetof(struct rtw89_rfe_data, da_lmt_6ghz.conf) }, NULL,
1326 },
1327 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_2GHZ] = {
1328 rtw89_fw_recognize_txpwr_from_elm,
1329 { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_2ghz.conf) }, NULL,
1330 },
1331 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_5GHZ] = {
1332 rtw89_fw_recognize_txpwr_from_elm,
1333 { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_5ghz.conf) }, NULL,
1334 },
1335 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_6GHZ] = {
1336 rtw89_fw_recognize_txpwr_from_elm,
1337 { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_6ghz.conf) }, NULL,
1338 },
1339 [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_2GHZ] = {
1340 rtw89_fw_recognize_txpwr_from_elm,
1341 { .offset = offsetof(struct rtw89_rfe_data, da_lmt_ru_2ghz.conf) }, NULL,
1342 },
1343 [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_5GHZ] = {
1344 rtw89_fw_recognize_txpwr_from_elm,
1345 { .offset = offsetof(struct rtw89_rfe_data, da_lmt_ru_5ghz.conf) }, NULL,
1346 },
1347 [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_6GHZ] = {
1348 rtw89_fw_recognize_txpwr_from_elm,
1349 { .offset = offsetof(struct rtw89_rfe_data, da_lmt_ru_6ghz.conf) }, NULL,
1350 },
1351 [RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT] = {
1352 rtw89_fw_recognize_txpwr_from_elm,
1353 { .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt.conf) }, NULL,
1354 },
1355 [RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT_RU] = {
1356 rtw89_fw_recognize_txpwr_from_elm,
1357 { .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt_ru.conf) }, NULL,
1358 },
1359 [RTW89_FW_ELEMENT_ID_TXPWR_TRK] = {
1360 rtw89_build_txpwr_trk_tbl_from_elm, {}, "PWR_TRK",
1361 },
1362 [RTW89_FW_ELEMENT_ID_RFKLOG_FMT] = {
1363 rtw89_build_rfk_log_fmt_from_elm, {}, NULL,
1364 },
1365 [RTW89_FW_ELEMENT_ID_REGD] = {
1366 rtw89_recognize_regd_from_elm, {}, "REGD",
1367 },
1368 };
1369
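/* Walk the extension elements appended after the multi-firmware blob,
 * dispatch each one to its handler, and fail if any element required by the
 * chip was not recognized.
 */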
1370 int rtw89_fw_recognize_elements(struct rtw89_dev *rtwdev)
1371 {
1372 struct rtw89_fw_info *fw_info = &rtwdev->fw;
1373 const struct firmware *firmware = fw_info->req.firmware;
1374 const struct rtw89_chip_info *chip = rtwdev->chip;
1375 u32 unrecognized_elements = chip->needed_fw_elms;
1376 const struct rtw89_fw_element_handler *handler;
1377 const struct rtw89_fw_element_hdr *hdr;
1378 u32 elm_size;
1379 u32 elem_id;
1380 u32 offset;
1381 int ret;
1382
1383 BUILD_BUG_ON(sizeof(chip->needed_fw_elms) * 8 < RTW89_FW_ELEMENT_ID_NUM);
1384
1385 offset = rtw89_mfw_get_size(rtwdev);
1386 offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN);
1387 if (offset == 0)
1388 return -EINVAL;
1389
1390 while (offset + sizeof(*hdr) < firmware->size) {
1391 hdr = (const struct rtw89_fw_element_hdr *)(firmware->data + offset);
1392
1393 elm_size = le32_to_cpu(hdr->size);
1394 if (offset + elm_size >= firmware->size) {
1395 rtw89_warn(rtwdev, "firmware element size exceeds\n");
1396 break;
1397 }
1398
1399 elem_id = le32_to_cpu(hdr->id);
1400 if (elem_id >= ARRAY_SIZE(__fw_element_handlers))
1401 goto next;
1402
1403 handler = &__fw_element_handlers[elem_id];
1404 if (!handler->fn)
1405 goto next;
1406
1407 ret = handler->fn(rtwdev, hdr, handler->arg);
1408 if (ret == 1) /* ignore this element */
1409 goto next;
1410 if (ret)
1411 return ret;
1412
1413 if (handler->name)
1414 rtw89_info(rtwdev, "Firmware element %s version: %4ph\n",
1415 handler->name, hdr->ver);
1416
1417 unrecognized_elements &= ~BIT(elem_id);
1418 next:
1419 offset += sizeof(*hdr) + elm_size;
1420 offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN);
1421 }
1422
1423 if (unrecognized_elements) {
1424 rtw89_err(rtwdev, "Firmware elements 0x%08x are unrecognized\n",
1425 unrecognized_elements);
1426 return -ENOENT;
1427 }
1428
1429 return 0;
1430 }
1431
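/* Prepend the 8-byte H2C command header. A receive-ack is forced on every
 * fourth sequence number, presumably to keep host and firmware sequence
 * numbers loosely synchronized.
 */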
1432 void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb,
1433 u8 type, u8 cat, u8 class, u8 func,
1434 bool rack, bool dack, u32 len)
1435 {
1436 struct fwcmd_hdr *hdr;
1437
1438 hdr = (struct fwcmd_hdr *)skb_push(skb, 8);
1439
1440 if (!(rtwdev->fw.h2c_seq % 4))
1441 rack = true;
1442 hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
1443 FIELD_PREP(H2C_HDR_CAT, cat) |
1444 FIELD_PREP(H2C_HDR_CLASS, class) |
1445 FIELD_PREP(H2C_HDR_FUNC, func) |
1446 FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));
1447
1448 hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
1449 len + H2C_HEADER_LEN) |
1450 (rack ? H2C_HDR_REC_ACK : 0) |
1451 (dack ? H2C_HDR_DONE_ACK : 0));
1452
1453 rtwdev->fw.h2c_seq++;
1454 }
1455
1456 static void rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev *rtwdev,
1457 struct sk_buff *skb,
1458 u8 type, u8 cat, u8 class, u8 func,
1459 u32 len)
1460 {
1461 struct fwcmd_hdr *hdr;
1462
1463 hdr = (struct fwcmd_hdr *)skb_push(skb, 8);
1464
1465 hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
1466 FIELD_PREP(H2C_HDR_CAT, cat) |
1467 FIELD_PREP(H2C_HDR_CLASS, class) |
1468 FIELD_PREP(H2C_HDR_FUNC, func) |
1469 FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));
1470
1471 hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
1472 len + H2C_HEADER_LEN));
1473 }
1474
1475 static u32 __rtw89_fw_download_tweak_hdr_v0(struct rtw89_dev *rtwdev,
1476 struct rtw89_fw_bin_info *info,
1477 struct rtw89_fw_hdr *fw_hdr)
1478 {
1479 struct rtw89_fw_hdr_section_info *section_info;
1480 struct rtw89_fw_hdr_section *section;
1481 int i;
1482
1483 le32p_replace_bits(&fw_hdr->w7, FWDL_SECTION_PER_PKT_LEN,
1484 FW_HDR_W7_PART_SIZE);
1485
1486 for (i = 0; i < info->section_num; i++) {
1487 section_info = &info->section_info[i];
1488
1489 if (!section_info->len_override)
1490 continue;
1491
1492 section = &fw_hdr->sections[i];
1493 le32p_replace_bits(&section->w1, section_info->len_override,
1494 FWSECTION_HDR_W1_SEC_SIZE);
1495 }
1496
1497 return 0;
1498 }
1499
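/* For v1 headers, drop section descriptors marked "ignore" by compacting the
 * section array in place; the return value is the number of header bytes
 * trimmed as a result.
 */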
1500 static u32 __rtw89_fw_download_tweak_hdr_v1(struct rtw89_dev *rtwdev,
1501 struct rtw89_fw_bin_info *info,
1502 struct rtw89_fw_hdr_v1 *fw_hdr)
1503 {
1504 struct rtw89_fw_hdr_section_info *section_info;
1505 struct rtw89_fw_hdr_section_v1 *section;
1506 u8 dst_sec_idx = 0;
1507 u8 sec_idx;
1508
1509 le32p_replace_bits(&fw_hdr->w7, FWDL_SECTION_PER_PKT_LEN,
1510 FW_HDR_V1_W7_PART_SIZE);
1511
1512 for (sec_idx = 0; sec_idx < info->section_num; sec_idx++) {
1513 section_info = &info->section_info[sec_idx];
1514 section = &fw_hdr->sections[sec_idx];
1515
1516 if (section_info->ignore)
1517 continue;
1518
1519 if (dst_sec_idx != sec_idx)
1520 fw_hdr->sections[dst_sec_idx] = *section;
1521
1522 dst_sec_idx++;
1523 }
1524
1525 le32p_replace_bits(&fw_hdr->w6, dst_sec_idx, FW_HDR_V1_W6_SEC_NUM);
1526
1527 return (info->section_num - dst_sec_idx) * sizeof(*section);
1528 }
1529
1530 static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev,
1531 const struct rtw89_fw_suit *fw_suit,
1532 struct rtw89_fw_bin_info *info)
1533 {
1534 u32 len = info->hdr_len - info->dynamic_hdr_len;
1535 struct rtw89_fw_hdr_v1 *fw_hdr_v1;
1536 const u8 *fw = fw_suit->data;
1537 struct rtw89_fw_hdr *fw_hdr;
1538 struct sk_buff *skb;
1539 u32 truncated;
1540 u32 ret = 0;
1541
1542 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
1543 if (!skb) {
1544 rtw89_err(rtwdev, "failed to alloc skb for fw hdr dl\n");
1545 return -ENOMEM;
1546 }
1547
1548 skb_put_data(skb, fw, len);
1549
1550 switch (fw_suit->hdr_ver) {
1551 case 0:
1552 fw_hdr = (struct rtw89_fw_hdr *)skb->data;
1553 truncated = __rtw89_fw_download_tweak_hdr_v0(rtwdev, info, fw_hdr);
1554 break;
1555 case 1:
1556 fw_hdr_v1 = (struct rtw89_fw_hdr_v1 *)skb->data;
1557 truncated = __rtw89_fw_download_tweak_hdr_v1(rtwdev, info, fw_hdr_v1);
1558 break;
1559 default:
1560 ret = -EOPNOTSUPP;
1561 goto fail;
1562 }
1563
1564 if (truncated) {
1565 len -= truncated;
1566 skb_trim(skb, len);
1567 }
1568
1569 rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C,
1570 H2C_CAT_MAC, H2C_CL_MAC_FWDL,
1571 H2C_FUNC_MAC_FWHDR_DL, len);
1572
1573 ret = rtw89_h2c_tx(rtwdev, skb, false);
1574 if (ret) {
1575 rtw89_err(rtwdev, "failed to send h2c\n");
1576 goto fail;
1577 }
1578
1579 return 0;
1580 fail:
1581 dev_kfree_skb_any(skb);
1582
1583 return ret;
1584 }
1585
1586 static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev,
1587 const struct rtw89_fw_suit *fw_suit,
1588 struct rtw89_fw_bin_info *info)
1589 {
1590 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
1591 int ret;
1592
1593 ret = __rtw89_fw_download_hdr(rtwdev, fw_suit, info);
1594 if (ret) {
1595 rtw89_err(rtwdev, "[ERR]FW header download\n");
1596 return ret;
1597 }
1598
1599 ret = mac->fwdl_check_path_ready(rtwdev, false);
1600 if (ret) {
1601 rtw89_err(rtwdev, "[ERR]FWDL path ready\n");
1602 return ret;
1603 }
1604
1605 rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0);
1606 rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0);
1607
1608 return 0;
1609 }
1610
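/* Download one firmware section, splitting it into FWDL_SECTION_PER_PKT_LEN
 * chunks. When a secure-boot key has been selected and the section fits in a
 * single packet, the key is copied over the packet's tail before sending.
 */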
1611 static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev,
1612 struct rtw89_fw_hdr_section_info *info)
1613 {
1614 struct sk_buff *skb;
1615 const u8 *section = info->addr;
1616 u32 residue_len = info->len;
1617 bool copy_key = false;
1618 u32 pkt_len;
1619 int ret;
1620
1621 if (info->ignore)
1622 return 0;
1623
1624 if (info->len_override) {
1625 if (info->len_override > info->len)
1626 rtw89_warn(rtwdev, "override length %u larger than original %u\n",
1627 info->len_override, info->len);
1628 else
1629 residue_len = info->len_override;
1630 }
1631
1632 if (info->key_addr && info->key_len) {
1633 if (residue_len > FWDL_SECTION_PER_PKT_LEN || info->len < info->key_len)
1634 rtw89_warn(rtwdev,
1635 "ignore to copy key data because of len %d, %d, %d, %d\n",
1636 info->len, FWDL_SECTION_PER_PKT_LEN,
1637 info->key_len, residue_len);
1638 else
1639 copy_key = true;
1640 }
1641
1642 while (residue_len) {
1643 if (residue_len >= FWDL_SECTION_PER_PKT_LEN)
1644 pkt_len = FWDL_SECTION_PER_PKT_LEN;
1645 else
1646 pkt_len = residue_len;
1647
1648 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, pkt_len);
1649 if (!skb) {
1650 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
1651 return -ENOMEM;
1652 }
1653 skb_put_data(skb, section, pkt_len);
1654
1655 if (copy_key)
1656 memcpy(skb->data + pkt_len - info->key_len,
1657 info->key_addr, info->key_len);
1658
1659 ret = rtw89_h2c_tx(rtwdev, skb, true);
1660 if (ret) {
1661 rtw89_err(rtwdev, "failed to send h2c\n");
1662 goto fail;
1663 }
1664
1665 section += pkt_len;
1666 residue_len -= pkt_len;
1667 }
1668
1669 return 0;
1670 fail:
1671 dev_kfree_skb_any(skb);
1672
1673 return ret;
1674 }
1675
1676 static enum rtw89_fwdl_check_type
1677 rtw89_fw_get_fwdl_chk_type_from_suit(struct rtw89_dev *rtwdev,
1678 const struct rtw89_fw_suit *fw_suit)
1679 {
1680 switch (fw_suit->type) {
1681 case RTW89_FW_BBMCU0:
1682 return RTW89_FWDL_CHECK_BB0_FWDL_DONE;
1683 case RTW89_FW_BBMCU1:
1684 return RTW89_FWDL_CHECK_BB1_FWDL_DONE;
1685 default:
1686 return RTW89_FWDL_CHECK_WCPU_FWDL_DONE;
1687 }
1688 }
1689
1690 static int rtw89_fw_download_main(struct rtw89_dev *rtwdev,
1691 const struct rtw89_fw_suit *fw_suit,
1692 struct rtw89_fw_bin_info *info)
1693 {
1694 struct rtw89_fw_hdr_section_info *section_info = info->section_info;
1695 const struct rtw89_chip_info *chip = rtwdev->chip;
1696 enum rtw89_fwdl_check_type chk_type;
1697 u8 section_num = info->section_num;
1698 int ret;
1699
1700 while (section_num--) {
1701 ret = __rtw89_fw_download_main(rtwdev, section_info);
1702 if (ret)
1703 return ret;
1704 section_info++;
1705 }
1706
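/* AX-generation chips only signal readiness via the caller's final WCPU
 * check; later generations additionally poll a per-suit FWDL-done flag
 * here.
 */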
1707 if (chip->chip_gen == RTW89_CHIP_AX)
1708 return 0;
1709
1710 chk_type = rtw89_fw_get_fwdl_chk_type_from_suit(rtwdev, fw_suit);
1711 ret = rtw89_fw_check_rdy(rtwdev, chk_type);
1712 if (ret) {
1713 rtw89_warn(rtwdev, "failed to download firmware type %u\n",
1714 fw_suit->type);
1715 return ret;
1716 }
1717
1718 return 0;
1719 }
1720
1721 static void rtw89_fw_prog_cnt_dump(struct rtw89_dev *rtwdev)
1722 {
1723 enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
1724 u32 addr = R_AX_DBG_PORT_SEL;
1725 u32 val32;
1726 u16 index;
1727
1728 if (chip_gen == RTW89_CHIP_BE) {
1729 addr = R_BE_WLCPU_PORT_PC;
1730 goto dump;
1731 }
1732
1733 rtw89_write32(rtwdev, R_AX_DBG_CTRL,
1734 FIELD_PREP(B_AX_DBG_SEL0, FW_PROG_CNTR_DBG_SEL) |
1735 FIELD_PREP(B_AX_DBG_SEL1, FW_PROG_CNTR_DBG_SEL));
1736 rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_SEL_0XC0_MASK, MAC_DBG_SEL);
1737
1738 dump:
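/* Sample the firmware program counter repeatedly so the log shows whether
 * the CPU is advancing or stuck at one address.
 */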
1739 for (index = 0; index < 15; index++) {
1740 val32 = rtw89_read32(rtwdev, addr);
1741 rtw89_err(rtwdev, "[ERR]fw PC = 0x%x\n", val32);
1742 fsleep(10);
1743 }
1744 }
1745
1746 static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev)
1747 {
1748 u32 val32;
1749
1750 val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL);
1751 rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32);
1752
1753 val32 = rtw89_read32(rtwdev, R_AX_BOOT_DBG);
1754 rtw89_err(rtwdev, "[ERR]fwdl 0x83F0 = 0x%x\n", val32);
1755
1756 rtw89_fw_prog_cnt_dump(rtwdev);
1757 }
1758
1759 static int rtw89_fw_download_suit(struct rtw89_dev *rtwdev,
1760 struct rtw89_fw_suit *fw_suit)
1761 {
1762 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
1763 struct rtw89_fw_bin_info info = {};
1764 int ret;
1765
1766 ret = rtw89_fw_hdr_parser(rtwdev, fw_suit, &info);
1767 if (ret) {
1768 rtw89_err(rtwdev, "failed to parse fw header\n");
1769 return ret;
1770 }
1771
1772 rtw89_fwdl_secure_idmem_share_mode(rtwdev, info.idmem_share_mode);
1773
1774 if (rtwdev->chip->chip_id == RTL8922A &&
1775 (fw_suit->type == RTW89_FW_NORMAL || fw_suit->type == RTW89_FW_WOWLAN))
1776 rtw89_write32(rtwdev, R_BE_SECURE_BOOT_MALLOC_INFO, 0x20248000);
1777
1778 ret = mac->fwdl_check_path_ready(rtwdev, true);
1779 if (ret) {
1780 rtw89_err(rtwdev, "[ERR]H2C path ready\n");
1781 return ret;
1782 }
1783
1784 ret = rtw89_fw_download_hdr(rtwdev, fw_suit, &info);
1785 if (ret)
1786 return ret;
1787
1788 ret = rtw89_fw_download_main(rtwdev, fw_suit, &info);
1789 if (ret)
1790 return ret;
1791
1792 return 0;
1793 }
1794
1795 static
1796 int __rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
1797 bool include_bb)
1798 {
1799 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
1800 struct rtw89_fw_info *fw_info = &rtwdev->fw;
1801 struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
1802 u8 bbmcu_nr = rtwdev->chip->bbmcu_nr;
1803 int ret;
1804 int i;
1805
1806 mac->disable_cpu(rtwdev);
1807 ret = mac->fwdl_enable_wcpu(rtwdev, 0, true, include_bb);
1808 if (ret)
1809 return ret;
1810
1811 ret = rtw89_fw_download_suit(rtwdev, fw_suit);
1812 if (ret)
1813 goto fwdl_err;
1814
1815 for (i = 0; i < bbmcu_nr && include_bb; i++) {
1816 fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_BBMCU0 + i);
1817
1818 ret = rtw89_fw_download_suit(rtwdev, fw_suit);
1819 if (ret)
1820 goto fwdl_err;
1821 }
1822
1823 fw_info->h2c_seq = 0;
1824 fw_info->rec_seq = 0;
1825 fw_info->h2c_counter = 0;
1826 fw_info->c2h_counter = 0;
1827 rtwdev->mac.rpwm_seq_num = RPWM_SEQ_NUM_MAX;
1828 rtwdev->mac.cpwm_seq_num = CPWM_SEQ_NUM_MAX;
1829
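/* Brief settling delay before polling for the firmware's FreeRTOS-ready
 * indication.
 */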
1830 mdelay(5);
1831
1832 ret = rtw89_fw_check_rdy(rtwdev, RTW89_FWDL_CHECK_FREERTOS_DONE);
1833 if (ret) {
1834 rtw89_warn(rtwdev, "failed to download firmware\n");
1835 goto fwdl_err;
1836 }
1837
1838 return ret;
1839
1840 fwdl_err:
1841 rtw89_fw_dl_fail_dump(rtwdev);
1842 return ret;
1843 }
1844
1845 int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
1846 bool include_bb)
1847 {
1848 int retry;
1849 int ret;
1850
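/* Retry the complete download sequence (CPU halt, header, sections and
 * ready polling) a few times before reporting failure.
 */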
1851 for (retry = 0; retry < 5; retry++) {
1852 ret = __rtw89_fw_download(rtwdev, type, include_bb);
1853 if (!ret)
1854 return 0;
1855 }
1856
1857 return ret;
1858 }
1859
1860 int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev)
1861 {
1862 struct rtw89_fw_info *fw = &rtwdev->fw;
1863
1864 wait_for_completion(&fw->req.completion);
1865 if (!fw->req.firmware)
1866 return -EINVAL;
1867
1868 return 0;
1869 }
1870
1871 static int rtw89_load_firmware_req(struct rtw89_dev *rtwdev,
1872 struct rtw89_fw_req_info *req,
1873 const char *fw_name, bool nowarn)
1874 {
1875 int ret;
1876
1877 if (req->firmware) {
1878 rtw89_debug(rtwdev, RTW89_DBG_FW,
1879 "full firmware has been early requested\n");
1880 complete_all(&req->completion);
1881 return 0;
1882 }
1883
1884 if (nowarn)
1885 ret = firmware_request_nowarn(&req->firmware, fw_name, rtwdev->dev);
1886 else
1887 ret = request_firmware(&req->firmware, fw_name, rtwdev->dev);
1888
1889 complete_all(&req->completion);
1890
1891 return ret;
1892 }
1893
1894 void rtw89_load_firmware_work(struct work_struct *work)
1895 {
1896 struct rtw89_dev *rtwdev =
1897 container_of(work, struct rtw89_dev, load_firmware_work);
1898 const struct rtw89_chip_info *chip = rtwdev->chip;
1899 char fw_name[64];
1900
1901 rtw89_fw_get_filename(fw_name, sizeof(fw_name),
1902 chip->fw_basename, rtwdev->fw.fw_format);
1903
1904 rtw89_load_firmware_req(rtwdev, &rtwdev->fw.req, fw_name, false);
1905 }
1906
1907 static void rtw89_free_phy_tbl_from_elm(struct rtw89_phy_table *tbl)
1908 {
1909 if (!tbl)
1910 return;
1911
1912 kfree(tbl->regs);
1913 kfree(tbl);
1914 }
1915
1916 static void rtw89_unload_firmware_elements(struct rtw89_dev *rtwdev)
1917 {
1918 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
1919 int i;
1920
1921 rtw89_free_phy_tbl_from_elm(elm_info->bb_tbl);
1922 rtw89_free_phy_tbl_from_elm(elm_info->bb_gain);
1923 for (i = 0; i < ARRAY_SIZE(elm_info->rf_radio); i++)
1924 rtw89_free_phy_tbl_from_elm(elm_info->rf_radio[i]);
1925 rtw89_free_phy_tbl_from_elm(elm_info->rf_nctl);
1926
1927 kfree(elm_info->txpwr_trk);
1928 kfree(elm_info->rfk_log_fmt);
1929 }
1930
1931 void rtw89_unload_firmware(struct rtw89_dev *rtwdev)
1932 {
1933 struct rtw89_fw_info *fw = &rtwdev->fw;
1934
1935 cancel_work_sync(&rtwdev->load_firmware_work);
1936
1937 if (fw->req.firmware) {
1938 release_firmware(fw->req.firmware);
1939
1940 /* assign NULL back in case rtw89_free_ieee80211_hw()
1941 * tries to release the same one again.
1942 */
1943 fw->req.firmware = NULL;
1944 }
1945
1946 kfree(fw->log.fmts);
1947 rtw89_unload_firmware_elements(rtwdev);
1948 }
1949
1950 static u32 rtw89_fw_log_get_fmt_idx(struct rtw89_dev *rtwdev, u32 fmt_id)
1951 {
1952 struct rtw89_fw_log *fw_log = &rtwdev->fw.log;
1953 u32 i;
1954
1955 if (fmt_id > fw_log->last_fmt_id)
1956 return 0;
1957
1958 for (i = 0; i < fw_log->fmt_count; i++) {
1959 if (le32_to_cpu(fw_log->fmt_ids[i]) == fmt_id)
1960 return i;
1961 }
1962 return 0;
1963 }
1964
1965 static int rtw89_fw_log_create_fmts_dict(struct rtw89_dev *rtwdev)
1966 {
1967 struct rtw89_fw_log *log = &rtwdev->fw.log;
1968 const struct rtw89_fw_logsuit_hdr *suit_hdr;
1969 struct rtw89_fw_suit *suit = &log->suit;
1970 const void *fmts_ptr, *fmts_end_ptr;
1971 u32 fmt_count;
1972 int i;
1973
1974 suit_hdr = (const struct rtw89_fw_logsuit_hdr *)suit->data;
1975 fmt_count = le32_to_cpu(suit_hdr->count);
1976 log->fmt_ids = suit_hdr->ids;
1977 fmts_ptr = &suit_hdr->ids[fmt_count];
1978 fmts_end_ptr = suit->data + suit->size;
1979 log->fmts = kcalloc(fmt_count, sizeof(char *), GFP_KERNEL);
1980 if (!log->fmts)
1981 return -ENOMEM;
1982
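/* The log-format blob is a table of IDs followed by NUL-separated printf
 * strings; memchr_inv() skips padding NULs to locate the start of each
 * string, and strlen() advances past it.
 */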
1983 for (i = 0; i < fmt_count; i++) {
1984 fmts_ptr = memchr_inv(fmts_ptr, 0, fmts_end_ptr - fmts_ptr);
1985 if (!fmts_ptr)
1986 break;
1987
1988 (*log->fmts)[i] = fmts_ptr;
1989 log->last_fmt_id = le32_to_cpu(log->fmt_ids[i]);
1990 log->fmt_count++;
1991 fmts_ptr += strlen(fmts_ptr);
1992 }
1993
1994 return 0;
1995 }
1996
1997 int rtw89_fw_log_prepare(struct rtw89_dev *rtwdev)
1998 {
1999 struct rtw89_fw_log *log = &rtwdev->fw.log;
2000 struct rtw89_fw_suit *suit = &log->suit;
2001
2002 if (!suit || !suit->data) {
2003 rtw89_debug(rtwdev, RTW89_DBG_FW, "no log format file\n");
2004 return -EINVAL;
2005 }
2006 if (log->fmts)
2007 return 0;
2008
2009 return rtw89_fw_log_create_fmts_dict(rtwdev);
2010 }
2011
2012 static void rtw89_fw_log_dump_data(struct rtw89_dev *rtwdev,
2013 const struct rtw89_fw_c2h_log_fmt *log_fmt,
2014 u32 fmt_idx, u8 para_int, bool raw_data)
2015 {
2016 const char *(*fmts)[] = rtwdev->fw.log.fmts;
2017 char str_buf[RTW89_C2H_FW_LOG_STR_BUF_SIZE];
2018 u32 args[RTW89_C2H_FW_LOG_MAX_PARA_NUM] = {0};
2019 int i;
2020
2021 if (log_fmt->argc > RTW89_C2H_FW_LOG_MAX_PARA_NUM) {
2022 rtw89_warn(rtwdev, "C2H log: Arg count is unexpected %d\n",
2023 log_fmt->argc);
2024 return;
2025 }
2026
2027 if (para_int)
2028 for (i = 0 ; i < log_fmt->argc; i++)
2029 args[i] = le32_to_cpu(log_fmt->u.argv[i]);
2030
2031 if (raw_data) {
2032 if (para_int)
2033 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE,
2034 "fw_enc(%d, %d, %d) %*ph", le32_to_cpu(log_fmt->fmt_id),
2035 para_int, log_fmt->argc, (int)sizeof(args), args);
2036 else
2037 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE,
2038 "fw_enc(%d, %d, %d, %s)", le32_to_cpu(log_fmt->fmt_id),
2039 para_int, log_fmt->argc, log_fmt->u.raw);
2040 } else {
2041 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, (*fmts)[fmt_idx],
2042 args[0x0], args[0x1], args[0x2], args[0x3], args[0x4],
2043 args[0x5], args[0x6], args[0x7], args[0x8], args[0x9],
2044 args[0xa], args[0xb], args[0xc], args[0xd], args[0xe],
2045 args[0xf]);
2046 }
2047
2048 rtw89_info(rtwdev, "C2H log: %s", str_buf);
2049 }
2050
2051 void rtw89_fw_log_dump(struct rtw89_dev *rtwdev, u8 *buf, u32 len)
2052 {
2053 const struct rtw89_fw_c2h_log_fmt *log_fmt;
2054 u8 para_int;
2055 u32 fmt_idx;
2056
2057 if (len < RTW89_C2H_HEADER_LEN) {
2058 rtw89_err(rtwdev, "c2h log length is wrong!\n");
2059 return;
2060 }
2061
2062 buf += RTW89_C2H_HEADER_LEN;
2063 len -= RTW89_C2H_HEADER_LEN;
2064 log_fmt = (const struct rtw89_fw_c2h_log_fmt *)buf;
2065
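/* Entries that are too short or lack the signature are printed as plain
 * text; otherwise the fmt_id is looked up in the dictionary built by
 * rtw89_fw_log_create_fmts_dict() and expanded below.
 */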
2066 if (len < RTW89_C2H_FW_FORMATTED_LOG_MIN_LEN)
2067 goto plain_log;
2068
2069 if (log_fmt->signature != cpu_to_le16(RTW89_C2H_FW_LOG_SIGNATURE))
2070 goto plain_log;
2071
2072 if (!rtwdev->fw.log.fmts)
2073 return;
2074
2075 para_int = u8_get_bits(log_fmt->feature, RTW89_C2H_FW_LOG_FEATURE_PARA_INT);
2076 fmt_idx = rtw89_fw_log_get_fmt_idx(rtwdev, le32_to_cpu(log_fmt->fmt_id));
2077
2078 if (!para_int && log_fmt->argc != 0 && fmt_idx != 0)
2079 rtw89_info(rtwdev, "C2H log: %s%s",
2080 (*rtwdev->fw.log.fmts)[fmt_idx], log_fmt->u.raw);
2081 else if (fmt_idx != 0 && para_int)
2082 rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, false);
2083 else
2084 rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, true);
2085 return;
2086
2087 plain_log:
2088 rtw89_info(rtwdev, "C2H log: %.*s", len, buf);
2089
2090 }
2091
2092 #define H2C_CAM_LEN 60
2093 int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
2094 struct rtw89_sta_link *rtwsta_link, const u8 *scan_mac_addr)
2095 {
2096 struct sk_buff *skb;
2097 int ret;
2098
2099 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CAM_LEN);
2100 if (!skb) {
2101 rtw89_err(rtwdev, "failed to alloc skb for h2c cam\n");
2102 return -ENOMEM;
2103 }
2104 skb_put(skb, H2C_CAM_LEN);
2105 rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif_link, rtwsta_link, scan_mac_addr,
2106 skb->data);
2107 rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif_link, rtwsta_link, skb->data);
2108
2109 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2110 H2C_CAT_MAC,
2111 H2C_CL_MAC_ADDR_CAM_UPDATE,
2112 H2C_FUNC_MAC_ADDR_CAM_UPD, 0, 1,
2113 H2C_CAM_LEN);
2114
2115 ret = rtw89_h2c_tx(rtwdev, skb, false);
2116 if (ret) {
2117 rtw89_err(rtwdev, "failed to send h2c\n");
2118 goto fail;
2119 }
2120
2121 return 0;
2122 fail:
2123 dev_kfree_skb_any(skb);
2124
2125 return ret;
2126 }
2127
2128 int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev,
2129 struct rtw89_vif_link *rtwvif_link,
2130 struct rtw89_sta_link *rtwsta_link)
2131 {
2132 struct rtw89_h2c_dctlinfo_ud_v1 *h2c;
2133 u32 len = sizeof(*h2c);
2134 struct sk_buff *skb;
2135 int ret;
2136
2137 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2138 if (!skb) {
2139 rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n");
2140 return -ENOMEM;
2141 }
2142 skb_put(skb, len);
2143 h2c = (struct rtw89_h2c_dctlinfo_ud_v1 *)skb->data;
2144
2145 rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif_link, rtwsta_link, h2c);
2146
2147 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2148 H2C_CAT_MAC,
2149 H2C_CL_MAC_FR_EXCHG,
2150 H2C_FUNC_MAC_DCTLINFO_UD_V1, 0, 0,
2151 len);
2152
2153 ret = rtw89_h2c_tx(rtwdev, skb, false);
2154 if (ret) {
2155 rtw89_err(rtwdev, "failed to send h2c\n");
2156 goto fail;
2157 }
2158
2159 return 0;
2160 fail:
2161 dev_kfree_skb_any(skb);
2162
2163 return ret;
2164 }
2165 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1);
2166
2167 int rtw89_fw_h2c_dctl_sec_cam_v2(struct rtw89_dev *rtwdev,
2168 struct rtw89_vif_link *rtwvif_link,
2169 struct rtw89_sta_link *rtwsta_link)
2170 {
2171 struct rtw89_h2c_dctlinfo_ud_v2 *h2c;
2172 u32 len = sizeof(*h2c);
2173 struct sk_buff *skb;
2174 int ret;
2175
2176 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2177 if (!skb) {
2178 rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n");
2179 return -ENOMEM;
2180 }
2181 skb_put(skb, len);
2182 h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data;
2183
2184 rtw89_cam_fill_dctl_sec_cam_info_v2(rtwdev, rtwvif_link, rtwsta_link, h2c);
2185
2186 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2187 H2C_CAT_MAC,
2188 H2C_CL_MAC_FR_EXCHG,
2189 H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0,
2190 len);
2191
2192 ret = rtw89_h2c_tx(rtwdev, skb, false);
2193 if (ret) {
2194 rtw89_err(rtwdev, "failed to send h2c\n");
2195 goto fail;
2196 }
2197
2198 return 0;
2199 fail:
2200 dev_kfree_skb_any(skb);
2201
2202 return ret;
2203 }
2204 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v2);
2205
2206 int rtw89_fw_h2c_default_dmac_tbl_v2(struct rtw89_dev *rtwdev,
2207 struct rtw89_vif_link *rtwvif_link,
2208 struct rtw89_sta_link *rtwsta_link)
2209 {
2210 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
2211 struct rtw89_h2c_dctlinfo_ud_v2 *h2c;
2212 u32 len = sizeof(*h2c);
2213 struct sk_buff *skb;
2214 int ret;
2215
2216 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2217 if (!skb) {
2218 rtw89_err(rtwdev, "failed to alloc skb for dctl v2\n");
2219 return -ENOMEM;
2220 }
2221 skb_put(skb, len);
2222 h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data;
2223
2224 h2c->c0 = le32_encode_bits(mac_id, DCTLINFO_V2_C0_MACID) |
2225 le32_encode_bits(1, DCTLINFO_V2_C0_OP);
2226
2227 h2c->m0 = cpu_to_le32(DCTLINFO_V2_W0_ALL);
2228 h2c->m1 = cpu_to_le32(DCTLINFO_V2_W1_ALL);
2229 h2c->m2 = cpu_to_le32(DCTLINFO_V2_W2_ALL);
2230 h2c->m3 = cpu_to_le32(DCTLINFO_V2_W3_ALL);
2231 h2c->m4 = cpu_to_le32(DCTLINFO_V2_W4_ALL);
2232 h2c->m5 = cpu_to_le32(DCTLINFO_V2_W5_ALL);
2233 h2c->m6 = cpu_to_le32(DCTLINFO_V2_W6_ALL);
2234 h2c->m7 = cpu_to_le32(DCTLINFO_V2_W7_ALL);
2235 h2c->m8 = cpu_to_le32(DCTLINFO_V2_W8_ALL);
2236 h2c->m9 = cpu_to_le32(DCTLINFO_V2_W9_ALL);
2237 h2c->m10 = cpu_to_le32(DCTLINFO_V2_W10_ALL);
2238 h2c->m11 = cpu_to_le32(DCTLINFO_V2_W11_ALL);
2239 h2c->m12 = cpu_to_le32(DCTLINFO_V2_W12_ALL);
2240
2241 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2242 H2C_CAT_MAC,
2243 H2C_CL_MAC_FR_EXCHG,
2244 H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0,
2245 len);
2246
2247 ret = rtw89_h2c_tx(rtwdev, skb, false);
2248 if (ret) {
2249 rtw89_err(rtwdev, "failed to send h2c\n");
2250 goto fail;
2251 }
2252
2253 return 0;
2254 fail:
2255 dev_kfree_skb_any(skb);
2256
2257 return ret;
2258 }
2259 EXPORT_SYMBOL(rtw89_fw_h2c_default_dmac_tbl_v2);
2260
2261 int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev,
2262 struct rtw89_vif_link *rtwvif_link,
2263 struct rtw89_sta_link *rtwsta_link,
2264 bool valid, struct ieee80211_ampdu_params *params)
2265 {
2266 const struct rtw89_chip_info *chip = rtwdev->chip;
2267 struct rtw89_h2c_ba_cam *h2c;
2268 u8 macid = rtwsta_link->mac_id;
2269 u32 len = sizeof(*h2c);
2270 struct sk_buff *skb;
2271 u8 entry_idx;
2272 int ret;
2273
2274 ret = valid ?
2275 rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta_link, params->tid,
2276 &entry_idx) :
2277 rtw89_core_release_sta_ba_entry(rtwdev, rtwsta_link, params->tid,
2278 &entry_idx);
2279 if (ret) {
2280 /* it still works even if we don't have static BA CAM, because
2281 * hardware can create dynamic BA CAM automatically.
2282 */
2283 rtw89_debug(rtwdev, RTW89_DBG_TXRX,
2284 "failed to %s entry tid=%d for h2c ba cam\n",
2285 valid ? "alloc" : "free", params->tid);
2286 return 0;
2287 }
2288
2289 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2290 if (!skb) {
2291 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n");
2292 return -ENOMEM;
2293 }
2294 skb_put(skb, len);
2295 h2c = (struct rtw89_h2c_ba_cam *)skb->data;
2296
2297 h2c->w0 = le32_encode_bits(macid, RTW89_H2C_BA_CAM_W0_MACID);
2298 if (chip->bacam_ver == RTW89_BACAM_V0_EXT)
2299 h2c->w1 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1);
2300 else
2301 h2c->w0 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W0_ENTRY_IDX);
2302 if (!valid)
2303 goto end;
2304 h2c->w0 |= le32_encode_bits(valid, RTW89_H2C_BA_CAM_W0_VALID) |
2305 le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_W0_TID);
2306 if (params->buf_size > 64)
2307 h2c->w0 |= le32_encode_bits(4, RTW89_H2C_BA_CAM_W0_BMAP_SIZE);
2308 else
2309 h2c->w0 |= le32_encode_bits(0, RTW89_H2C_BA_CAM_W0_BMAP_SIZE);
2310 /* If init req is set, hw will set the ssn */
2311 h2c->w0 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_INIT_REQ) |
2312 le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_W0_SSN);
2313
2314 if (chip->bacam_ver == RTW89_BACAM_V0_EXT) {
2315 h2c->w1 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W1_STD_EN) |
2316 le32_encode_bits(rtwvif_link->mac_idx,
2317 RTW89_H2C_BA_CAM_W1_BAND);
2318 }
2319
2320 end:
2321 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2322 H2C_CAT_MAC,
2323 H2C_CL_BA_CAM,
2324 H2C_FUNC_MAC_BA_CAM, 0, 1,
2325 len);
2326
2327 ret = rtw89_h2c_tx(rtwdev, skb, false);
2328 if (ret) {
2329 rtw89_err(rtwdev, "failed to send h2c\n");
2330 goto fail;
2331 }
2332
2333 return 0;
2334 fail:
2335 dev_kfree_skb_any(skb);
2336
2337 return ret;
2338 }
2339 EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam);
2340
2341 static int rtw89_fw_h2c_init_ba_cam_v0_ext(struct rtw89_dev *rtwdev,
2342 u8 entry_idx, u8 uid)
2343 {
2344 struct rtw89_h2c_ba_cam *h2c;
2345 u32 len = sizeof(*h2c);
2346 struct sk_buff *skb;
2347 int ret;
2348
2349 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2350 if (!skb) {
2351 rtw89_err(rtwdev, "failed to alloc skb for dynamic h2c ba cam\n");
2352 return -ENOMEM;
2353 }
2354 skb_put(skb, len);
2355 h2c = (struct rtw89_h2c_ba_cam *)skb->data;
2356
2357 h2c->w0 = le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_VALID);
2358 h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1) |
2359 le32_encode_bits(uid, RTW89_H2C_BA_CAM_W1_UID) |
2360 le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_BAND) |
2361 le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_STD_EN);
2362
2363 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2364 H2C_CAT_MAC,
2365 H2C_CL_BA_CAM,
2366 H2C_FUNC_MAC_BA_CAM, 0, 1,
2367 len);
2368
2369 ret = rtw89_h2c_tx(rtwdev, skb, false);
2370 if (ret) {
2371 rtw89_err(rtwdev, "failed to send h2c\n");
2372 goto fail;
2373 }
2374
2375 return 0;
2376 fail:
2377 dev_kfree_skb_any(skb);
2378
2379 return ret;
2380 }
2381
2382 void rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(struct rtw89_dev *rtwdev)
2383 {
2384 const struct rtw89_chip_info *chip = rtwdev->chip;
2385 u8 entry_idx = chip->bacam_num;
2386 u8 uid = 0;
2387 int i;
2388
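/* Dynamic BA CAM entries are appended right after the chip's static
 * entries, each tagged with its own uid.
 */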
2389 for (i = 0; i < chip->bacam_dynamic_num; i++) {
2390 rtw89_fw_h2c_init_ba_cam_v0_ext(rtwdev, entry_idx, uid);
2391 entry_idx++;
2392 uid++;
2393 }
2394 }
2395
2396 int rtw89_fw_h2c_ba_cam_v1(struct rtw89_dev *rtwdev,
2397 struct rtw89_vif_link *rtwvif_link,
2398 struct rtw89_sta_link *rtwsta_link,
2399 bool valid, struct ieee80211_ampdu_params *params)
2400 {
2401 const struct rtw89_chip_info *chip = rtwdev->chip;
2402 struct rtw89_h2c_ba_cam_v1 *h2c;
2403 u8 macid = rtwsta_link->mac_id;
2404 u32 len = sizeof(*h2c);
2405 struct sk_buff *skb;
2406 u8 entry_idx;
2407 u8 bmap_size;
2408 int ret;
2409
2410 ret = valid ?
2411 rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta_link, params->tid,
2412 &entry_idx) :
2413 rtw89_core_release_sta_ba_entry(rtwdev, rtwsta_link, params->tid,
2414 &entry_idx);
2415 if (ret) {
2416 /* it still works even if we don't have static BA CAM, because
2417 * hardware can create dynamic BA CAM automatically.
2418 */
2419 rtw89_debug(rtwdev, RTW89_DBG_TXRX,
2420 "failed to %s entry tid=%d for h2c ba cam\n",
2421 valid ? "alloc" : "free", params->tid);
2422 return 0;
2423 }
2424
2425 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2426 if (!skb) {
2427 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n");
2428 return -ENOMEM;
2429 }
2430 skb_put(skb, len);
2431 h2c = (struct rtw89_h2c_ba_cam_v1 *)skb->data;
2432
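/* Encode the negotiated BA window into the firmware's bitmap-size code;
 * larger reorder windows select larger bitmap encodings.
 */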
2433 if (params->buf_size > 512)
2434 bmap_size = 10;
2435 else if (params->buf_size > 256)
2436 bmap_size = 8;
2437 else if (params->buf_size > 64)
2438 bmap_size = 4;
2439 else
2440 bmap_size = 0;
2441
2442 h2c->w0 = le32_encode_bits(valid, RTW89_H2C_BA_CAM_V1_W0_VALID) |
2443 le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W0_INIT_REQ) |
2444 le32_encode_bits(macid, RTW89_H2C_BA_CAM_V1_W0_MACID_MASK) |
2445 le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_V1_W0_TID_MASK) |
2446 le32_encode_bits(bmap_size, RTW89_H2C_BA_CAM_V1_W0_BMAP_SIZE_MASK) |
2447 le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_V1_W0_SSN_MASK);
2448
2449 entry_idx += chip->bacam_dynamic_num; /* std entry right after dynamic ones */
2450 h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_V1_W1_ENTRY_IDX_MASK) |
2451 le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W1_STD_ENTRY_EN) |
2452 le32_encode_bits(!!rtwvif_link->mac_idx,
2453 RTW89_H2C_BA_CAM_V1_W1_BAND_SEL);
2454
2455 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2456 H2C_CAT_MAC,
2457 H2C_CL_BA_CAM,
2458 H2C_FUNC_MAC_BA_CAM_V1, 0, 1,
2459 len);
2460
2461 ret = rtw89_h2c_tx(rtwdev, skb, false);
2462 if (ret) {
2463 rtw89_err(rtwdev, "failed to send h2c\n");
2464 goto fail;
2465 }
2466
2467 return 0;
2468 fail:
2469 dev_kfree_skb_any(skb);
2470
2471 return ret;
2472 }
2473 EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam_v1);
2474
2475 int rtw89_fw_h2c_init_ba_cam_users(struct rtw89_dev *rtwdev, u8 users,
2476 u8 offset, u8 mac_idx)
2477 {
2478 struct rtw89_h2c_ba_cam_init *h2c;
2479 u32 len = sizeof(*h2c);
2480 struct sk_buff *skb;
2481 int ret;
2482
2483 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2484 if (!skb) {
2485 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam init\n");
2486 return -ENOMEM;
2487 }
2488 skb_put(skb, len);
2489 h2c = (struct rtw89_h2c_ba_cam_init *)skb->data;
2490
2491 h2c->w0 = le32_encode_bits(users, RTW89_H2C_BA_CAM_INIT_USERS_MASK) |
2492 le32_encode_bits(offset, RTW89_H2C_BA_CAM_INIT_OFFSET_MASK) |
2493 le32_encode_bits(mac_idx, RTW89_H2C_BA_CAM_INIT_BAND_SEL);
2494
2495 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2496 H2C_CAT_MAC,
2497 H2C_CL_BA_CAM,
2498 H2C_FUNC_MAC_BA_CAM_INIT, 0, 1,
2499 len);
2500
2501 ret = rtw89_h2c_tx(rtwdev, skb, false);
2502 if (ret) {
2503 rtw89_err(rtwdev, "failed to send h2c\n");
2504 goto fail;
2505 }
2506
2507 return 0;
2508 fail:
2509 dev_kfree_skb_any(skb);
2510
2511 return ret;
2512 }
2513
2514 #define H2C_LOG_CFG_LEN 12
2515 int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable)
2516 {
2517 struct sk_buff *skb;
2518 u32 comp = 0;
2519 int ret;
2520
2521 if (enable)
2522 comp = BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) |
2523 BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) |
2524 BIT(RTW89_FW_LOG_COMP_MLO) | BIT(RTW89_FW_LOG_COMP_SCAN);
2525
2526 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN);
2527 if (!skb) {
2528 rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n");
2529 return -ENOMEM;
2530 }
2531
2532 skb_put(skb, H2C_LOG_CFG_LEN);
2533 SET_LOG_CFG_LEVEL(skb->data, RTW89_FW_LOG_LEVEL_LOUD);
2534 SET_LOG_CFG_PATH(skb->data, BIT(RTW89_FW_LOG_LEVEL_C2H));
2535 SET_LOG_CFG_COMP(skb->data, comp);
2536 SET_LOG_CFG_COMP_EXT(skb->data, 0);
2537
2538 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2539 H2C_CAT_MAC,
2540 H2C_CL_FW_INFO,
2541 H2C_FUNC_LOG_CFG, 0, 0,
2542 H2C_LOG_CFG_LEN);
2543
2544 ret = rtw89_h2c_tx(rtwdev, skb, false);
2545 if (ret) {
2546 rtw89_err(rtwdev, "failed to send h2c\n");
2547 goto fail;
2548 }
2549
2550 return 0;
2551 fail:
2552 dev_kfree_skb_any(skb);
2553
2554 return ret;
2555 }
2556
2557 static struct sk_buff *rtw89_eapol_get(struct rtw89_dev *rtwdev,
2558 struct rtw89_vif_link *rtwvif_link)
2559 {
2560 static const u8 gtkbody[] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00, 0x88,
2561 0x8E, 0x01, 0x03, 0x00, 0x5F, 0x02, 0x03};
2562 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev);
2563 struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
2564 struct rtw89_eapol_2_of_2 *eapol_pkt;
2565 struct ieee80211_bss_conf *bss_conf;
2566 struct ieee80211_hdr_3addr *hdr;
2567 struct sk_buff *skb;
2568 u8 key_des_ver;
2569
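/* Select the EAPOL key descriptor version advertised in the canned
 * message 2/2 from the pairwise cipher and AKM recorded for WoWLAN.
 */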
2570 if (rtw_wow->ptk_alg == 3)
2571 key_des_ver = 1;
2572 else if (rtw_wow->akm == 1 || rtw_wow->akm == 2)
2573 key_des_ver = 2;
2574 else if (rtw_wow->akm > 2 && rtw_wow->akm < 7)
2575 key_des_ver = 3;
2576 else
2577 key_des_ver = 0;
2578
2579 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*eapol_pkt));
2580 if (!skb)
2581 return NULL;
2582
2583 hdr = skb_put_zero(skb, sizeof(*hdr));
2584 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
2585 IEEE80211_FCTL_TODS |
2586 IEEE80211_FCTL_PROTECTED);
2587
2588 rcu_read_lock();
2589
2590 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
2591
2592 ether_addr_copy(hdr->addr1, bss_conf->bssid);
2593 ether_addr_copy(hdr->addr2, bss_conf->addr);
2594 ether_addr_copy(hdr->addr3, bss_conf->bssid);
2595
2596 rcu_read_unlock();
2597
2598 skb_put_zero(skb, sec_hdr_len);
2599
2600 eapol_pkt = skb_put_zero(skb, sizeof(*eapol_pkt));
2601 memcpy(eapol_pkt->gtkbody, gtkbody, sizeof(gtkbody));
2602 eapol_pkt->key_des_ver = key_des_ver;
2603
2604 return skb;
2605 }
2606
2607 static struct sk_buff *rtw89_sa_query_get(struct rtw89_dev *rtwdev,
2608 struct rtw89_vif_link *rtwvif_link)
2609 {
2610 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev);
2611 struct ieee80211_bss_conf *bss_conf;
2612 struct ieee80211_hdr_3addr *hdr;
2613 struct rtw89_sa_query *sa_query;
2614 struct sk_buff *skb;
2615
2616 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*sa_query));
2617 if (!skb)
2618 return NULL;
2619
2620 hdr = skb_put_zero(skb, sizeof(*hdr));
2621 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
2622 IEEE80211_STYPE_ACTION |
2623 IEEE80211_FCTL_PROTECTED);
2624
2625 rcu_read_lock();
2626
2627 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
2628
2629 ether_addr_copy(hdr->addr1, bss_conf->bssid);
2630 ether_addr_copy(hdr->addr2, bss_conf->addr);
2631 ether_addr_copy(hdr->addr3, bss_conf->bssid);
2632
2633 rcu_read_unlock();
2634
2635 skb_put_zero(skb, sec_hdr_len);
2636
2637 sa_query = skb_put_zero(skb, sizeof(*sa_query));
2638 sa_query->category = WLAN_CATEGORY_SA_QUERY;
2639 sa_query->action = WLAN_ACTION_SA_QUERY_RESPONSE;
2640
2641 return skb;
2642 }
2643
2644 static struct sk_buff *rtw89_arp_response_get(struct rtw89_dev *rtwdev,
2645 struct rtw89_vif_link *rtwvif_link)
2646 {
2647 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
2648 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev);
2649 struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
2650 struct ieee80211_hdr_3addr *hdr;
2651 struct rtw89_arp_rsp *arp_skb;
2652 struct arphdr *arp_hdr;
2653 struct sk_buff *skb;
2654 __le16 fc;
2655
2656 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*arp_skb));
2657 if (!skb)
2658 return NULL;
2659
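/* Build an 802.11 data frame carrying an LLC/SNAP encapsulated ARP reply;
 * this template is later registered with firmware as a packet offload.
 */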
2660 hdr = skb_put_zero(skb, sizeof(*hdr));
2661
2662 if (rtw_wow->ptk_alg)
2663 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS |
2664 IEEE80211_FCTL_PROTECTED);
2665 else
2666 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS);
2667
2668 hdr->frame_control = fc;
2669 ether_addr_copy(hdr->addr1, rtwvif_link->bssid);
2670 ether_addr_copy(hdr->addr2, rtwvif_link->mac_addr);
2671 ether_addr_copy(hdr->addr3, rtwvif_link->bssid);
2672
2673 skb_put_zero(skb, sec_hdr_len);
2674
2675 arp_skb = skb_put_zero(skb, sizeof(*arp_skb));
2676 memcpy(arp_skb->llc_hdr, rfc1042_header, sizeof(rfc1042_header));
2677 arp_skb->llc_type = htons(ETH_P_ARP);
2678
2679 arp_hdr = &arp_skb->arp_hdr;
2680 arp_hdr->ar_hrd = htons(ARPHRD_ETHER);
2681 arp_hdr->ar_pro = htons(ETH_P_IP);
2682 arp_hdr->ar_hln = ETH_ALEN;
2683 arp_hdr->ar_pln = 4;
2684 arp_hdr->ar_op = htons(ARPOP_REPLY);
2685
2686 ether_addr_copy(arp_skb->sender_hw, rtwvif_link->mac_addr);
2687 arp_skb->sender_ip = rtwvif->ip_addr;
2688
2689 return skb;
2690 }
2691
2692 static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev,
2693 struct rtw89_vif_link *rtwvif_link,
2694 enum rtw89_fw_pkt_ofld_type type,
2695 u8 *id)
2696 {
2697 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
2698 int link_id = ieee80211_vif_is_mld(vif) ? rtwvif_link->link_id : -1;
2699 struct rtw89_pktofld_info *info;
2700 struct sk_buff *skb;
2701 int ret;
2702
2703 info = kzalloc(sizeof(*info), GFP_KERNEL);
2704 if (!info)
2705 return -ENOMEM;
2706
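/* Build a template frame for the requested offload type, register it with
 * firmware as a packet-offload entry, and remember the returned id on the
 * vif's list so it can be released later.
 */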
2707 switch (type) {
2708 case RTW89_PKT_OFLD_TYPE_PS_POLL:
2709 skb = ieee80211_pspoll_get(rtwdev->hw, vif);
2710 break;
2711 case RTW89_PKT_OFLD_TYPE_PROBE_RSP:
2712 skb = ieee80211_proberesp_get(rtwdev->hw, vif);
2713 break;
2714 case RTW89_PKT_OFLD_TYPE_NULL_DATA:
2715 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, false);
2716 break;
2717 case RTW89_PKT_OFLD_TYPE_QOS_NULL:
2718 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, true);
2719 break;
2720 case RTW89_PKT_OFLD_TYPE_EAPOL_KEY:
2721 skb = rtw89_eapol_get(rtwdev, rtwvif_link);
2722 break;
2723 case RTW89_PKT_OFLD_TYPE_SA_QUERY:
2724 skb = rtw89_sa_query_get(rtwdev, rtwvif_link);
2725 break;
2726 case RTW89_PKT_OFLD_TYPE_ARP_RSP:
2727 skb = rtw89_arp_response_get(rtwdev, rtwvif_link);
2728 break;
2729 default:
2730 goto err;
2731 }
2732
2733 if (!skb)
2734 goto err;
2735
2736 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb);
2737 kfree_skb(skb);
2738
2739 if (ret)
2740 goto err;
2741
2742 list_add_tail(&info->list, &rtwvif_link->general_pkt_list);
2743 *id = info->id;
2744 return 0;
2745
2746 err:
2747 kfree(info);
2748 return -ENOMEM;
2749 }
2750
2751 void rtw89_fw_release_general_pkt_list_vif(struct rtw89_dev *rtwdev,
2752 struct rtw89_vif_link *rtwvif_link,
2753 bool notify_fw)
2754 {
2755 struct list_head *pkt_list = &rtwvif_link->general_pkt_list;
2756 struct rtw89_pktofld_info *info, *tmp;
2757
2758 list_for_each_entry_safe(info, tmp, pkt_list, list) {
2759 if (notify_fw)
2760 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
2761 else
2762 rtw89_core_release_bit_map(rtwdev->pkt_offload, info->id);
2763 list_del(&info->list);
2764 kfree(info);
2765 }
2766 }
2767
2768 void rtw89_fw_release_general_pkt_list(struct rtw89_dev *rtwdev, bool notify_fw)
2769 {
2770 struct rtw89_vif_link *rtwvif_link;
2771 struct rtw89_vif *rtwvif;
2772 unsigned int link_id;
2773
2774 rtw89_for_each_rtwvif(rtwdev, rtwvif)
2775 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
2776 rtw89_fw_release_general_pkt_list_vif(rtwdev, rtwvif_link,
2777 notify_fw);
2778 }
2779
2780 #define H2C_GENERAL_PKT_LEN 6
2781 #define H2C_GENERAL_PKT_ID_UND 0xff
2782 int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev,
2783 struct rtw89_vif_link *rtwvif_link, u8 macid)
2784 {
2785 u8 pkt_id_ps_poll = H2C_GENERAL_PKT_ID_UND;
2786 u8 pkt_id_null = H2C_GENERAL_PKT_ID_UND;
2787 u8 pkt_id_qos_null = H2C_GENERAL_PKT_ID_UND;
2788 struct sk_buff *skb;
2789 int ret;
2790
2791 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
2792 RTW89_PKT_OFLD_TYPE_PS_POLL, &pkt_id_ps_poll);
2793 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
2794 RTW89_PKT_OFLD_TYPE_NULL_DATA, &pkt_id_null);
2795 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
2796 RTW89_PKT_OFLD_TYPE_QOS_NULL, &pkt_id_qos_null);
2797
2798 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_GENERAL_PKT_LEN);
2799 if (!skb) {
2800 rtw89_err(rtwdev, "failed to alloc skb for h2c general packet\n");
2801 return -ENOMEM;
2802 }
2803 skb_put(skb, H2C_GENERAL_PKT_LEN);
2804 SET_GENERAL_PKT_MACID(skb->data, macid);
2805 SET_GENERAL_PKT_PROBRSP_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
2806 SET_GENERAL_PKT_PSPOLL_ID(skb->data, pkt_id_ps_poll);
2807 SET_GENERAL_PKT_NULL_ID(skb->data, pkt_id_null);
2808 SET_GENERAL_PKT_QOS_NULL_ID(skb->data, pkt_id_qos_null);
2809 SET_GENERAL_PKT_CTS2SELF_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
2810
2811 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2812 H2C_CAT_MAC,
2813 H2C_CL_FW_INFO,
2814 H2C_FUNC_MAC_GENERAL_PKT, 0, 1,
2815 H2C_GENERAL_PKT_LEN);
2816
2817 ret = rtw89_h2c_tx(rtwdev, skb, false);
2818 if (ret) {
2819 rtw89_err(rtwdev, "failed to send h2c\n");
2820 goto fail;
2821 }
2822
2823 return 0;
2824 fail:
2825 dev_kfree_skb_any(skb);
2826
2827 return ret;
2828 }
2829
2830 #define H2C_LPS_PARM_LEN 8
2831 int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
2832 struct rtw89_lps_parm *lps_param)
2833 {
2834 struct sk_buff *skb;
2835 bool done_ack;
2836 int ret;
2837
2838 if (RTW89_CHK_FW_FEATURE(LPS_DACK_BY_C2H_REG, &rtwdev->fw))
2839 done_ack = false;
2840 else
2841 done_ack = !lps_param->psmode;
2842
2843 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LPS_PARM_LEN);
2844 if (!skb) {
2845 rtw89_err(rtwdev, "failed to alloc skb for h2c lps parm\n");
2846 return -ENOMEM;
2847 }
2848 skb_put(skb, H2C_LPS_PARM_LEN);
2849
2850 SET_LPS_PARM_MACID(skb->data, lps_param->macid);
2851 SET_LPS_PARM_PSMODE(skb->data, lps_param->psmode);
2852 SET_LPS_PARM_LASTRPWM(skb->data, lps_param->lastrpwm);
2853 SET_LPS_PARM_RLBM(skb->data, 1);
2854 SET_LPS_PARM_SMARTPS(skb->data, 1);
2855 SET_LPS_PARM_AWAKEINTERVAL(skb->data, 1);
2856 SET_LPS_PARM_VOUAPSD(skb->data, 0);
2857 SET_LPS_PARM_VIUAPSD(skb->data, 0);
2858 SET_LPS_PARM_BEUAPSD(skb->data, 0);
2859 SET_LPS_PARM_BKUAPSD(skb->data, 0);
2860
2861 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2862 H2C_CAT_MAC,
2863 H2C_CL_MAC_PS,
2864 H2C_FUNC_MAC_LPS_PARM, 0, done_ack,
2865 H2C_LPS_PARM_LEN);
2866
2867 ret = rtw89_h2c_tx(rtwdev, skb, false);
2868 if (ret) {
2869 rtw89_err(rtwdev, "failed to send h2c\n");
2870 goto fail;
2871 }
2872
2873 return 0;
2874 fail:
2875 dev_kfree_skb_any(skb);
2876
2877 return ret;
2878 }
2879
2880 int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
2881 {
2882 const struct rtw89_chip_info *chip = rtwdev->chip;
2883 const struct rtw89_chan *chan;
2884 struct rtw89_vif_link *rtwvif_link;
2885 struct rtw89_h2c_lps_ch_info *h2c;
2886 u32 len = sizeof(*h2c);
2887 unsigned int link_id;
2888 struct sk_buff *skb;
2889 bool no_chan = true;
2890 u8 phy_idx;
2891 u32 done;
2892 int ret;
2893
2894 if (chip->chip_gen != RTW89_CHIP_BE)
2895 return 0;
2896
2897 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2898 if (!skb) {
2899 rtw89_err(rtwdev, "failed to alloc skb for h2c lps_ch_info\n");
2900 return -ENOMEM;
2901 }
2902 skb_put(skb, len);
2903 h2c = (struct rtw89_h2c_lps_ch_info *)skb->data;
2904
2905 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
2906 phy_idx = rtwvif_link->phy_idx;
2907 if (phy_idx >= ARRAY_SIZE(h2c->info))
2908 continue;
2909
2910 chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
2911 no_chan = false;
2912
2913 h2c->info[phy_idx].central_ch = chan->channel;
2914 h2c->info[phy_idx].pri_ch = chan->primary_channel;
2915 h2c->info[phy_idx].band = chan->band_type;
2916 h2c->info[phy_idx].bw = chan->band_width;
2917 }
2918
2919 if (no_chan) {
2920 rtw89_err(rtwdev, "no chan for h2c lps_ch_info\n");
2921 ret = -ENOENT;
2922 goto fail;
2923 }
2924
2925 h2c->mlo_dbcc_mode_lps = cpu_to_le32(rtwdev->mlo_dbcc_mode);
2926
2927 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2928 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM,
2929 H2C_FUNC_FW_LPS_CH_INFO, 0, 0, len);
2930
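/* Clear the handshake bit, send the H2C, then poll until firmware sets
 * B_CHK_LPS_STAT to confirm it has consumed the channel info.
 */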
2931 rtw89_phy_write32_mask(rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT, 0);
2932 ret = rtw89_h2c_tx(rtwdev, skb, false);
2933 if (ret) {
2934 rtw89_err(rtwdev, "failed to send h2c\n");
2935 goto fail;
2936 }
2937
2938 ret = read_poll_timeout(rtw89_phy_read32_mask, done, done, 50, 5000,
2939 true, rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT);
2940 if (ret)
2941 rtw89_warn(rtwdev, "h2c_lps_ch_info done polling timeout\n");
2942
2943 return 0;
2944 fail:
2945 dev_kfree_skb_any(skb);
2946
2947 return ret;
2948 }
2949
2950 int rtw89_fw_h2c_lps_ml_cmn_info(struct rtw89_dev *rtwdev,
2951 struct rtw89_vif *rtwvif)
2952 {
2953 const struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be;
2954 struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.cur_pkt_stat;
2955 static const u8 bcn_bw_ofst[] = {0, 0, 0, 3, 6, 9, 0, 12};
2956 const struct rtw89_chip_info *chip = rtwdev->chip;
2957 struct rtw89_efuse *efuse = &rtwdev->efuse;
2958 struct rtw89_h2c_lps_ml_cmn_info *h2c;
2959 struct rtw89_vif_link *rtwvif_link;
2960 const struct rtw89_chan *chan;
2961 u8 bw_idx = RTW89_BB_BW_20_40;
2962 u32 len = sizeof(*h2c);
2963 unsigned int link_id;
2964 struct sk_buff *skb;
2965 u8 beacon_bw_ofst;
2966 u8 gain_band;
2967 u32 done;
2968 u8 path;
2969 int ret;
2970 int i;
2971
2972 if (chip->chip_gen != RTW89_CHIP_BE)
2973 return 0;
2974
2975 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2976 if (!skb) {
2977 rtw89_err(rtwdev, "failed to alloc skb for h2c lps_ml_cmn_info\n");
2978 return -ENOMEM;
2979 }
2980 skb_put(skb, len);
2981 h2c = (struct rtw89_h2c_lps_ml_cmn_info *)skb->data;
2982
2983 h2c->fmt_id = 0x3;
2984
2985 h2c->mlo_dbcc_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);
2986 h2c->rfe_type = efuse->rfe_type;
2987
2988 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
2989 path = rtwvif_link->phy_idx == RTW89_PHY_1 ? RF_PATH_B : RF_PATH_A;
2990 chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
2991 gain_band = rtw89_subband_to_gain_band_be(chan->subband_type);
2992
2993 h2c->central_ch[rtwvif_link->phy_idx] = chan->channel;
2994 h2c->pri_ch[rtwvif_link->phy_idx] = chan->primary_channel;
2995 h2c->band[rtwvif_link->phy_idx] = chan->band_type;
2996 h2c->bw[rtwvif_link->phy_idx] = chan->band_width;
2997 if (pkt_stat->beacon_rate < RTW89_HW_RATE_OFDM6)
2998 h2c->bcn_rate_type[rtwvif_link->phy_idx] = 0x1;
2999 else
3000 h2c->bcn_rate_type[rtwvif_link->phy_idx] = 0x2;
3001
3002 /* Fill BW20 RX gain table for beacon mode */
3003 for (i = 0; i < TIA_GAIN_NUM; i++) {
3004 h2c->tia_gain[rtwvif_link->phy_idx][i] =
3005 cpu_to_le16(gain->tia_gain[gain_band][bw_idx][path][i]);
3006 }
3007
3008 if (rtwvif_link->bcn_bw_idx < ARRAY_SIZE(bcn_bw_ofst)) {
3009 beacon_bw_ofst = bcn_bw_ofst[rtwvif_link->bcn_bw_idx];
3010 h2c->dup_bcn_ofst[rtwvif_link->phy_idx] = beacon_bw_ofst;
3011 }
3012
3013 memcpy(h2c->lna_gain[rtwvif_link->phy_idx],
3014 gain->lna_gain[gain_band][bw_idx][path],
3015 LNA_GAIN_NUM);
3016 memcpy(h2c->tia_lna_op1db[rtwvif_link->phy_idx],
3017 gain->tia_lna_op1db[gain_band][bw_idx][path],
3018 LNA_GAIN_NUM + 1);
3019 memcpy(h2c->lna_op1db[rtwvif_link->phy_idx],
3020 gain->lna_op1db[gain_band][bw_idx][path],
3021 LNA_GAIN_NUM);
3022 }
3023
3024 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3025 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM,
3026 H2C_FUNC_FW_LPS_ML_CMN_INFO, 0, 0, len);
3027
3028 rtw89_phy_write32_mask(rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT, 0);
3029 ret = rtw89_h2c_tx(rtwdev, skb, false);
3030 if (ret) {
3031 rtw89_err(rtwdev, "failed to send h2c\n");
3032 goto fail;
3033 }
3034
3035 ret = read_poll_timeout(rtw89_phy_read32_mask, done, done, 50, 5000,
3036 true, rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT);
3037 if (ret)
3038 rtw89_warn(rtwdev, "h2c_lps_ml_cmn_info done polling timeout\n");
3039
3040 return 0;
3041 fail:
3042 dev_kfree_skb_any(skb);
3043
3044 return ret;
3045 }
3046
3047 #define H2C_P2P_ACT_LEN 20
3048 int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev,
3049 struct rtw89_vif_link *rtwvif_link,
3050 struct ieee80211_p2p_noa_desc *desc,
3051 u8 act, u8 noa_id, u8 ctwindow_oppps)
3052 {
3053 bool p2p_type_gc = rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT;
3054 struct sk_buff *skb;
3055 u8 *cmd;
3056 int ret;
3057
3058 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_P2P_ACT_LEN);
3059 if (!skb) {
3060 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n");
3061 return -ENOMEM;
3062 }
3063 skb_put(skb, H2C_P2P_ACT_LEN);
3064 cmd = skb->data;
3065
3066 RTW89_SET_FWCMD_P2P_MACID(cmd, rtwvif_link->mac_id);
3067 RTW89_SET_FWCMD_P2P_P2PID(cmd, 0);
3068 RTW89_SET_FWCMD_P2P_NOAID(cmd, noa_id);
3069 RTW89_SET_FWCMD_P2P_ACT(cmd, act);
3070 RTW89_SET_FWCMD_P2P_TYPE(cmd, p2p_type_gc);
3071 RTW89_SET_FWCMD_P2P_ALL_SLEP(cmd, 0);
3072 if (desc) {
3073 RTW89_SET_FWCMD_NOA_START_TIME(cmd, desc->start_time);
3074 RTW89_SET_FWCMD_NOA_INTERVAL(cmd, desc->interval);
3075 RTW89_SET_FWCMD_NOA_DURATION(cmd, desc->duration);
3076 RTW89_SET_FWCMD_NOA_COUNT(cmd, desc->count);
3077 RTW89_SET_FWCMD_NOA_CTWINDOW(cmd, ctwindow_oppps);
3078 }
3079
3080 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3081 H2C_CAT_MAC, H2C_CL_MAC_PS,
3082 H2C_FUNC_P2P_ACT, 0, 0,
3083 H2C_P2P_ACT_LEN);
3084
3085 ret = rtw89_h2c_tx(rtwdev, skb, false);
3086 if (ret) {
3087 rtw89_err(rtwdev, "failed to send h2c\n");
3088 goto fail;
3089 }
3090
3091 return 0;
3092 fail:
3093 dev_kfree_skb_any(skb);
3094
3095 return ret;
3096 }
3097
3098 static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev,
3099 struct sk_buff *skb)
3100 {
3101 const struct rtw89_chip_info *chip = rtwdev->chip;
3102 struct rtw89_hal *hal = &rtwdev->hal;
3103 u8 ntx_path;
3104 u8 map_b;
3105
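/* Single RF path chips always transmit on path A; otherwise use the
 * configured TX antenna selection, defaulting to both paths.
 */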
3106 if (chip->rf_path_num == 1) {
3107 ntx_path = RF_A;
3108 map_b = 0;
3109 } else {
3110 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_AB;
3111 map_b = ntx_path == RF_AB ? 1 : 0;
3112 }
3113
3114 SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path);
3115 SET_CMC_TBL_PATH_MAP_A(skb->data, 0);
3116 SET_CMC_TBL_PATH_MAP_B(skb->data, map_b);
3117 SET_CMC_TBL_PATH_MAP_C(skb->data, 0);
3118 SET_CMC_TBL_PATH_MAP_D(skb->data, 0);
3119 }
3120
3121 #define H2C_CMC_TBL_LEN 68
3122 int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
3123 struct rtw89_vif_link *rtwvif_link,
3124 struct rtw89_sta_link *rtwsta_link)
3125 {
3126 const struct rtw89_chip_info *chip = rtwdev->chip;
3127 u8 macid = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
3128 struct sk_buff *skb;
3129 int ret;
3130
3131 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
3132 if (!skb) {
3133 rtw89_err(rtwdev, "failed to alloc skb for h2c cmac tbl\n");
3134 return -ENOMEM;
3135 }
3136 skb_put(skb, H2C_CMC_TBL_LEN);
3137 SET_CTRL_INFO_MACID(skb->data, macid);
3138 SET_CTRL_INFO_OPERATION(skb->data, 1);
3139 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) {
3140 SET_CMC_TBL_TXPWR_MODE(skb->data, 0);
3141 __rtw89_fw_h2c_set_tx_path(rtwdev, skb);
3142 SET_CMC_TBL_ANTSEL_A(skb->data, 0);
3143 SET_CMC_TBL_ANTSEL_B(skb->data, 0);
3144 SET_CMC_TBL_ANTSEL_C(skb->data, 0);
3145 SET_CMC_TBL_ANTSEL_D(skb->data, 0);
3146 }
3147 SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0);
3148 SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0);
3149 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE)
3150 SET_CMC_TBL_DATA_DCM(skb->data, 0);
3151
3152 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3153 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
3154 chip->h2c_cctl_func_id, 0, 1,
3155 H2C_CMC_TBL_LEN);
3156
3157 ret = rtw89_h2c_tx(rtwdev, skb, false);
3158 if (ret) {
3159 rtw89_err(rtwdev, "failed to send h2c\n");
3160 goto fail;
3161 }
3162
3163 return 0;
3164 fail:
3165 dev_kfree_skb_any(skb);
3166
3167 return ret;
3168 }
3169 EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl);
3170
3171 int rtw89_fw_h2c_default_cmac_tbl_g7(struct rtw89_dev *rtwdev,
3172 struct rtw89_vif_link *rtwvif_link,
3173 struct rtw89_sta_link *rtwsta_link)
3174 {
3175 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
3176 struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
3177 u32 len = sizeof(*h2c);
3178 struct sk_buff *skb;
3179 int ret;
3180
3181 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
3182 if (!skb) {
3183 rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n");
3184 return -ENOMEM;
3185 }
3186 skb_put(skb, len);
3187 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data;
3188
3189 h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) |
3190 le32_encode_bits(1, CCTLINFO_G7_C0_OP);
3191
3192 h2c->w0 = le32_encode_bits(4, CCTLINFO_G7_W0_DATARATE);
3193 h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_ALL);
3194
3195 h2c->w1 = le32_encode_bits(4, CCTLINFO_G7_W1_DATA_RTY_LOWEST_RATE) |
3196 le32_encode_bits(0xa, CCTLINFO_G7_W1_RTSRATE) |
3197 le32_encode_bits(4, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE);
3198 h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_ALL);
3199
3200 h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_ALL);
3201
3202 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_ALL);
3203
3204 h2c->w4 = le32_encode_bits(0xFFFF, CCTLINFO_G7_W4_ACT_SUBCH_CBW);
3205 h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_ALL);
3206
3207 h2c->w5 = le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) |
3208 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) |
3209 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) |
3210 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) |
3211 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4);
3212 h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_ALL);
3213
3214 h2c->w6 = le32_encode_bits(0xb, CCTLINFO_G7_W6_RESP_REF_RATE);
3215 h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ALL);
3216
3217 h2c->w7 = le32_encode_bits(1, CCTLINFO_G7_W7_NC) |
3218 le32_encode_bits(1, CCTLINFO_G7_W7_NR) |
3219 le32_encode_bits(1, CCTLINFO_G7_W7_CB) |
3220 le32_encode_bits(0x1, CCTLINFO_G7_W7_CSI_PARA_EN) |
3221 le32_encode_bits(0xb, CCTLINFO_G7_W7_CSI_FIX_RATE);
3222 h2c->m7 = cpu_to_le32(CCTLINFO_G7_W7_ALL);
3223
3224 h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_ALL);
3225
3226 h2c->w14 = le32_encode_bits(0, CCTLINFO_G7_W14_VO_CURR_RATE) |
3227 le32_encode_bits(0, CCTLINFO_G7_W14_VI_CURR_RATE) |
3228 le32_encode_bits(0, CCTLINFO_G7_W14_BE_CURR_RATE_L);
3229 h2c->m14 = cpu_to_le32(CCTLINFO_G7_W14_ALL);
3230
3231 h2c->w15 = le32_encode_bits(0, CCTLINFO_G7_W15_BE_CURR_RATE_H) |
3232 le32_encode_bits(0, CCTLINFO_G7_W15_BK_CURR_RATE) |
3233 le32_encode_bits(0, CCTLINFO_G7_W15_MGNT_CURR_RATE);
3234 h2c->m15 = cpu_to_le32(CCTLINFO_G7_W15_ALL);
3235
3236 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3237 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
3238 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1,
3239 len);
3240
3241 ret = rtw89_h2c_tx(rtwdev, skb, false);
3242 if (ret) {
3243 rtw89_err(rtwdev, "failed to send h2c\n");
3244 goto fail;
3245 }
3246
3247 return 0;
3248 fail:
3249 dev_kfree_skb_any(skb);
3250
3251 return ret;
3252 }
3253 EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl_g7);
3254
3255 static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev,
3256 struct ieee80211_link_sta *link_sta,
3257 u8 *pads)
3258 {
3259 bool ppe_th;
3260 u8 ppe16, ppe8;
3261 u8 nss = min(link_sta->rx_nss, rtwdev->hal.tx_nss) - 1;
3262 u8 ppe_thres_hdr = link_sta->he_cap.ppe_thres[0];
3263 u8 ru_bitmap;
3264 u8 n, idx, sh;
3265 u16 ppe;
3266 int i;
3267
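/* If the peer advertises no PPE thresholds, apply its nominal packet
 * padding to every bandwidth; otherwise walk the per-RU PPET16/PPET8
 * pairs to pick a PE duration for each bandwidth index.
 */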
3268 ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
3269 link_sta->he_cap.he_cap_elem.phy_cap_info[6]);
3270 if (!ppe_th) {
3271 u8 pad;
3272
3273 pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK,
3274 link_sta->he_cap.he_cap_elem.phy_cap_info[9]);
3275
3276 for (i = 0; i < RTW89_PPE_BW_NUM; i++)
3277 pads[i] = pad;
3278
3279 return;
3280 }
3281
3282 ru_bitmap = FIELD_GET(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, ppe_thres_hdr);
3283 n = hweight8(ru_bitmap);
3284 n = 7 + (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) * nss;
3285
3286 for (i = 0; i < RTW89_PPE_BW_NUM; i++) {
3287 if (!(ru_bitmap & BIT(i))) {
3288 pads[i] = 1;
3289 continue;
3290 }
3291
3292 idx = n >> 3;
3293 sh = n & 7;
3294 n += IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2;
3295
3296 ppe = le16_to_cpu(*((__le16 *)&link_sta->he_cap.ppe_thres[idx]));
3297 ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
3298 sh += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
3299 ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
3300
3301 if (ppe16 != 7 && ppe8 == 7)
3302 pads[i] = RTW89_PE_DURATION_16;
3303 else if (ppe8 != 7)
3304 pads[i] = RTW89_PE_DURATION_8;
3305 else
3306 pads[i] = RTW89_PE_DURATION_0;
3307 }
3308 }
3309
3310 int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
3311 struct rtw89_vif_link *rtwvif_link,
3312 struct rtw89_sta_link *rtwsta_link)
3313 {
3314 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
3315 const struct rtw89_chip_info *chip = rtwdev->chip;
3316 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
3317 rtwvif_link->chanctx_idx);
3318 struct ieee80211_link_sta *link_sta;
3319 struct sk_buff *skb;
3320 u8 pads[RTW89_PPE_BW_NUM];
3321 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
3322 u16 lowest_rate;
3323 int ret;
3324
3325 memset(pads, 0, sizeof(pads));
3326
3327 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
3328 if (!skb) {
3329 rtw89_err(rtwdev, "failed to alloc skb for h2c cmac tbl\n");
3330 return -ENOMEM;
3331 }
3332
3333 rcu_read_lock();
3334
3335 if (rtwsta_link)
3336 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
3337
3338 if (rtwsta_link && link_sta->he_cap.has_he)
3339 __get_sta_he_pkt_padding(rtwdev, link_sta, pads);
3340
3341 if (vif->p2p)
3342 lowest_rate = RTW89_HW_RATE_OFDM6;
3343 else if (chan->band_type == RTW89_BAND_2G)
3344 lowest_rate = RTW89_HW_RATE_CCK1;
3345 else
3346 lowest_rate = RTW89_HW_RATE_OFDM6;
3347
3348 skb_put(skb, H2C_CMC_TBL_LEN);
3349 SET_CTRL_INFO_MACID(skb->data, mac_id);
3350 SET_CTRL_INFO_OPERATION(skb->data, 1);
3351 SET_CMC_TBL_DISRTSFB(skb->data, 1);
3352 SET_CMC_TBL_DISDATAFB(skb->data, 1);
3353 SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, lowest_rate);
3354 SET_CMC_TBL_RTS_TXCNT_LMT_SEL(skb->data, 0);
3355 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 0);
3356 if (vif->type == NL80211_IFTYPE_STATION)
3357 SET_CMC_TBL_ULDL(skb->data, 1);
3358 else
3359 SET_CMC_TBL_ULDL(skb->data, 0);
3360 SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif_link->port);
3361 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD_V1) {
3362 SET_CMC_TBL_NOMINAL_PKT_PADDING_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
3363 SET_CMC_TBL_NOMINAL_PKT_PADDING40_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
3364 SET_CMC_TBL_NOMINAL_PKT_PADDING80_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
3365 SET_CMC_TBL_NOMINAL_PKT_PADDING160_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_160]);
3366 } else if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) {
3367 SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
3368 SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
3369 SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
3370 SET_CMC_TBL_NOMINAL_PKT_PADDING160(skb->data, pads[RTW89_CHANNEL_WIDTH_160]);
3371 }
3372 if (rtwsta_link)
3373 SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data,
3374 link_sta->he_cap.has_he);
3375 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE)
3376 SET_CMC_TBL_DATA_DCM(skb->data, 0);
3377
3378 rcu_read_unlock();
3379
3380 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3381 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
3382 chip->h2c_cctl_func_id, 0, 1,
3383 H2C_CMC_TBL_LEN);
3384
3385 ret = rtw89_h2c_tx(rtwdev, skb, false);
3386 if (ret) {
3387 rtw89_err(rtwdev, "failed to send h2c\n");
3388 goto fail;
3389 }
3390
3391 return 0;
3392 fail:
3393 dev_kfree_skb_any(skb);
3394
3395 return ret;
3396 }
3397 EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl);
3398
3399 static void __get_sta_eht_pkt_padding(struct rtw89_dev *rtwdev,
3400 struct ieee80211_link_sta *link_sta,
3401 u8 *pads)
3402 {
3403 u8 nss = min(link_sta->rx_nss, rtwdev->hal.tx_nss) - 1;
3404 u16 ppe_thres_hdr;
3405 u8 ppe16, ppe8;
3406 u8 n, idx, sh;
3407 u8 ru_bitmap;
3408 bool ppe_th;
3409 u16 ppe;
3410 int i;
3411
3412 ppe_th = !!u8_get_bits(link_sta->eht_cap.eht_cap_elem.phy_cap_info[5],
3413 IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT);
3414 if (!ppe_th) {
3415 u8 pad;
3416
3417 pad = u8_get_bits(link_sta->eht_cap.eht_cap_elem.phy_cap_info[5],
3418 IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK);
3419
3420 for (i = 0; i < RTW89_PPE_BW_NUM; i++)
3421 pads[i] = pad;
3422
3423 return;
3424 }
3425
3426 ppe_thres_hdr = get_unaligned_le16(link_sta->eht_cap.eht_ppe_thres);
3427 ru_bitmap = u16_get_bits(ppe_thres_hdr,
3428 IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK);
3429 n = hweight8(ru_bitmap);
3430 n = IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE +
3431 (n * IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2) * nss;
3432
3433 for (i = 0; i < RTW89_PPE_BW_NUM; i++) {
3434 if (!(ru_bitmap & BIT(i))) {
3435 pads[i] = 1;
3436 continue;
3437 }
3438
3439 idx = n >> 3;
3440 sh = n & 7;
3441 n += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2;
3442
3443 ppe = get_unaligned_le16(link_sta->eht_cap.eht_ppe_thres + idx);
3444 ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
3445 sh += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE;
3446 ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
3447
3448 if (ppe16 != 7 && ppe8 == 7)
3449 pads[i] = RTW89_PE_DURATION_16_20;
3450 else if (ppe8 != 7)
3451 pads[i] = RTW89_PE_DURATION_8;
3452 else
3453 pads[i] = RTW89_PE_DURATION_0;
3454 }
3455 }
3456
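/* Build and send the CMAC control table (G7 layout) for a link at
 * association time: disable rate fallback, set the lowest RTS retry rate,
 * multi-port ID, punctured sub-channel bitmap (EHT only), nominal packet
 * padding per bandwidth (20 MHz through 320 MHz) and the BSR queue size
 * format for HE peers.
 */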
3457 int rtw89_fw_h2c_assoc_cmac_tbl_g7(struct rtw89_dev *rtwdev,
3458 struct rtw89_vif_link *rtwvif_link,
3459 struct rtw89_sta_link *rtwsta_link)
3460 {
3461 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
3462 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
3463 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
3464 struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
3465 struct ieee80211_bss_conf *bss_conf;
3466 struct ieee80211_link_sta *link_sta;
3467 u8 pads[RTW89_PPE_BW_NUM];
3468 u32 len = sizeof(*h2c);
3469 struct sk_buff *skb;
3470 u16 lowest_rate;
3471 int ret;
3472
3473 memset(pads, 0, sizeof(pads));
3474
3475 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
3476 if (!skb) {
3477 rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n");
3478 return -ENOMEM;
3479 }
3480
3481 rcu_read_lock();
3482
3483 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
3484
3485 if (rtwsta_link) {
3486 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
3487
3488 if (link_sta->eht_cap.has_eht)
3489 __get_sta_eht_pkt_padding(rtwdev, link_sta, pads);
3490 else if (link_sta->he_cap.has_he)
3491 __get_sta_he_pkt_padding(rtwdev, link_sta, pads);
3492 }
3493
3494 if (vif->p2p)
3495 lowest_rate = RTW89_HW_RATE_OFDM6;
3496 else if (chan->band_type == RTW89_BAND_2G)
3497 lowest_rate = RTW89_HW_RATE_CCK1;
3498 else
3499 lowest_rate = RTW89_HW_RATE_OFDM6;
3500
3501 skb_put(skb, len);
3502 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data;
3503
3504 h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) |
3505 le32_encode_bits(1, CCTLINFO_G7_C0_OP);
3506
3507 h2c->w0 = le32_encode_bits(1, CCTLINFO_G7_W0_DISRTSFB) |
3508 le32_encode_bits(1, CCTLINFO_G7_W0_DISDATAFB);
3509 h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_DISRTSFB |
3510 CCTLINFO_G7_W0_DISDATAFB);
3511
3512 h2c->w1 = le32_encode_bits(lowest_rate, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE);
3513 h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE);
3514
3515 h2c->w2 = le32_encode_bits(0, CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL);
3516 h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL);
3517
3518 h2c->w3 = le32_encode_bits(0, CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL);
3519 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL);
3520
3521 h2c->w4 = le32_encode_bits(rtwvif_link->port, CCTLINFO_G7_W4_MULTI_PORT_ID);
3522 h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_MULTI_PORT_ID);
3523
3524 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) {
3525 h2c->w4 |= le32_encode_bits(0, CCTLINFO_G7_W4_DATA_DCM);
3526 h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_DATA_DCM);
3527 }
3528
3529 if (bss_conf->eht_support) {
3530 u16 punct = bss_conf->chanreq.oper.punctured;
3531
3532 h2c->w4 |= le32_encode_bits(~punct,
3533 CCTLINFO_G7_W4_ACT_SUBCH_CBW);
3534 h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_ACT_SUBCH_CBW);
3535 }
3536
3537 h2c->w5 = le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_20],
3538 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) |
3539 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_40],
3540 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) |
3541 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_80],
3542 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) |
3543 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_160],
3544 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) |
3545 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_320],
3546 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4);
3547 h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0 |
3548 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1 |
3549 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2 |
3550 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3 |
3551 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4);
3552
3553 h2c->w6 = le32_encode_bits(vif->cfg.aid, CCTLINFO_G7_W6_AID12_PAID) |
3554 le32_encode_bits(vif->type == NL80211_IFTYPE_STATION ? 1 : 0,
3555 CCTLINFO_G7_W6_ULDL);
3556 h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_AID12_PAID | CCTLINFO_G7_W6_ULDL);
3557
3558 if (rtwsta_link) {
3559 h2c->w8 = le32_encode_bits(link_sta->he_cap.has_he,
3560 CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT);
3561 h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT);
3562 }
3563
3564 rcu_read_unlock();
3565
3566 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3567 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
3568 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1,
3569 len);
3570
3571 ret = rtw89_h2c_tx(rtwdev, skb, false);
3572 if (ret) {
3573 rtw89_err(rtwdev, "failed to send h2c\n");
3574 goto fail;
3575 }
3576
3577 return 0;
3578 fail:
3579 dev_kfree_skb_any(skb);
3580
3581 return ret;
3582 }
3583 EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl_g7);
3584
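/* Update the block-ack bitmap selector in the G7 CMAC table. The selector
 * is derived from the smallest A-MPDU aggregation number found across the
 * station's active TIDs.
 */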
3585 int rtw89_fw_h2c_ampdu_cmac_tbl_g7(struct rtw89_dev *rtwdev,
3586 struct rtw89_vif_link *rtwvif_link,
3587 struct rtw89_sta_link *rtwsta_link)
3588 {
3589 struct rtw89_sta *rtwsta = rtwsta_link->rtwsta;
3590 struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
3591 u32 len = sizeof(*h2c);
3592 struct sk_buff *skb;
3593 u16 agg_num = 0;
3594 u8 ba_bmap = 0;
3595 int ret;
3596 u8 tid;
3597
3598 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
3599 if (!skb) {
3600 rtw89_err(rtwdev, "failed to alloc skb for ampdu cmac g7\n");
3601 return -ENOMEM;
3602 }
3603 skb_put(skb, len);
3604 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data;
3605
3606 for_each_set_bit(tid, rtwsta->ampdu_map, IEEE80211_NUM_TIDS) {
3607 if (agg_num == 0)
3608 agg_num = rtwsta->ampdu_params[tid].agg_num;
3609 else
3610 agg_num = min(agg_num, rtwsta->ampdu_params[tid].agg_num);
3611 }
3612
3613 if (agg_num <= 0x20)
3614 ba_bmap = 3;
3615 else if (agg_num > 0x20 && agg_num <= 0x40)
3616 ba_bmap = 0;
3617 else if (agg_num > 0x40 && agg_num <= 0x80)
3618 ba_bmap = 1;
3619 else if (agg_num > 0x80 && agg_num <= 0x100)
3620 ba_bmap = 2;
3621 else if (agg_num > 0x100 && agg_num <= 0x200)
3622 ba_bmap = 4;
3623 else if (agg_num > 0x200 && agg_num <= 0x400)
3624 ba_bmap = 5;
3625
3626 h2c->c0 = le32_encode_bits(rtwsta_link->mac_id, CCTLINFO_G7_C0_MACID) |
3627 le32_encode_bits(1, CCTLINFO_G7_C0_OP);
3628
3629 h2c->w3 = le32_encode_bits(ba_bmap, CCTLINFO_G7_W3_BA_BMAP);
3630 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_BA_BMAP);
3631
3632 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3633 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
3634 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 0,
3635 len);
3636
3637 ret = rtw89_h2c_tx(rtwdev, skb, false);
3638 if (ret) {
3639 rtw89_err(rtwdev, "failed to send h2c\n");
3640 goto fail;
3641 }
3642
3643 return 0;
3644 fail:
3645 dev_kfree_skb_any(skb);
3646
3647 return ret;
3648 }
3649 EXPORT_SYMBOL(rtw89_fw_h2c_ampdu_cmac_tbl_g7);
3650
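/* Program per-station TX time limits into the CMAC table: the A-MPDU
 * maximum duration and the data TX count (retry) limit, each applied only
 * when the corresponding cctl flag is set on the link.
 */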
3651 int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
3652 struct rtw89_sta_link *rtwsta_link)
3653 {
3654 const struct rtw89_chip_info *chip = rtwdev->chip;
3655 struct sk_buff *skb;
3656 int ret;
3657
3658 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
3659 if (!skb) {
3660 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
3661 return -ENOMEM;
3662 }
3663 skb_put(skb, H2C_CMC_TBL_LEN);
3664 SET_CTRL_INFO_MACID(skb->data, rtwsta_link->mac_id);
3665 SET_CTRL_INFO_OPERATION(skb->data, 1);
3666 if (rtwsta_link->cctl_tx_time) {
3667 SET_CMC_TBL_AMPDU_TIME_SEL(skb->data, 1);
3668 SET_CMC_TBL_AMPDU_MAX_TIME(skb->data, rtwsta_link->ampdu_max_time);
3669 }
3670 if (rtwsta_link->cctl_tx_retry_limit) {
3671 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 1);
3672 SET_CMC_TBL_DATA_TX_CNT_LMT(skb->data, rtwsta_link->data_tx_cnt_lmt);
3673 }
3674
3675 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3676 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
3677 chip->h2c_cctl_func_id, 0, 1,
3678 H2C_CMC_TBL_LEN);
3679
3680 ret = rtw89_h2c_tx(rtwdev, skb, false);
3681 if (ret) {
3682 rtw89_err(rtwdev, "failed to send h2c\n");
3683 goto fail;
3684 }
3685
3686 return 0;
3687 fail:
3688 dev_kfree_skb_any(skb);
3689
3690 return ret;
3691 }
3692 EXPORT_SYMBOL(rtw89_fw_h2c_txtime_cmac_tbl);
3693
3694 int rtw89_fw_h2c_txtime_cmac_tbl_g7(struct rtw89_dev *rtwdev,
3695 struct rtw89_sta_link *rtwsta_link)
3696 {
3697 struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
3698 u32 len = sizeof(*h2c);
3699 struct sk_buff *skb;
3700 int ret;
3701
3702 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
3703 if (!skb) {
3704 rtw89_err(rtwdev, "failed to alloc skb for txtime_cmac_g7\n");
3705 return -ENOMEM;
3706 }
3707 skb_put(skb, len);
3708 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data;
3709
3710 h2c->c0 = le32_encode_bits(rtwsta_link->mac_id, CCTLINFO_G7_C0_MACID) |
3711 le32_encode_bits(1, CCTLINFO_G7_C0_OP);
3712
3713 if (rtwsta_link->cctl_tx_time) {
3714 h2c->w3 |= le32_encode_bits(1, CCTLINFO_G7_W3_AMPDU_TIME_SEL);
3715 h2c->m3 |= cpu_to_le32(CCTLINFO_G7_W3_AMPDU_TIME_SEL);
3716
3717 h2c->w2 |= le32_encode_bits(rtwsta_link->ampdu_max_time,
3718 CCTLINFO_G7_W2_AMPDU_MAX_TIME);
3719 h2c->m2 |= cpu_to_le32(CCTLINFO_G7_W2_AMPDU_MAX_TIME);
3720 }
3721 if (rtwsta_link->cctl_tx_retry_limit) {
3722 h2c->w2 |= le32_encode_bits(1, CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL) |
3723 le32_encode_bits(rtwsta_link->data_tx_cnt_lmt,
3724 CCTLINFO_G7_W2_DATA_TX_CNT_LMT);
3725 h2c->m2 |= cpu_to_le32(CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL |
3726 CCTLINFO_G7_W2_DATA_TX_CNT_LMT);
3727 }
3728
3729 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3730 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
3731 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1,
3732 len);
3733
3734 ret = rtw89_h2c_tx(rtwdev, skb, false);
3735 if (ret) {
3736 rtw89_err(rtwdev, "failed to send h2c\n");
3737 goto fail;
3738 }
3739
3740 return 0;
3741 fail:
3742 dev_kfree_skb_any(skb);
3743
3744 return ret;
3745 }
3746 EXPORT_SYMBOL(rtw89_fw_h2c_txtime_cmac_tbl_g7);
3747
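/* Update the active sub-channel bitmap in a G7 CMAC table entry. The
 * firmware field describes active sub-channels, so the mac80211 puncturing
 * bitmap is inverted before being written.
 */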
3748 int rtw89_fw_h2c_punctured_cmac_tbl_g7(struct rtw89_dev *rtwdev,
3749 struct rtw89_vif_link *rtwvif_link,
3750 u16 punctured)
3751 {
3752 struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
3753 u32 len = sizeof(*h2c);
3754 struct sk_buff *skb;
3755 int ret;
3756
3757 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
3758 if (!skb) {
3759 rtw89_err(rtwdev, "failed to alloc skb for punctured cmac g7\n");
3760 return -ENOMEM;
3761 }
3762
3763 skb_put(skb, len);
3764 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data;
3765
3766 h2c->c0 = le32_encode_bits(rtwvif_link->mac_id, CCTLINFO_G7_C0_MACID) |
3767 le32_encode_bits(1, CCTLINFO_G7_C0_OP);
3768
3769 h2c->w4 = le32_encode_bits(~punctured, CCTLINFO_G7_W4_ACT_SUBCH_CBW);
3770 h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_ACT_SUBCH_CBW);
3771
3772 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3773 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
3774 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1,
3775 len);
3776
3777 ret = rtw89_h2c_tx(rtwdev, skb, false);
3778 if (ret) {
3779 rtw89_err(rtwdev, "failed to send h2c\n");
3780 goto fail;
3781 }
3782
3783 return 0;
3784 fail:
3785 dev_kfree_skb_any(skb);
3786
3787 return ret;
3788 }
3789 EXPORT_SYMBOL(rtw89_fw_h2c_punctured_cmac_tbl_g7);
3790
3791 int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev,
3792 struct rtw89_sta_link *rtwsta_link)
3793 {
3794 const struct rtw89_chip_info *chip = rtwdev->chip;
3795 struct sk_buff *skb;
3796 int ret;
3797
3798 if (chip->h2c_cctl_func_id != H2C_FUNC_MAC_CCTLINFO_UD)
3799 return 0;
3800
3801 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
3802 if (!skb) {
3803 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
3804 return -ENOMEM;
3805 }
3806 skb_put(skb, H2C_CMC_TBL_LEN);
3807 SET_CTRL_INFO_MACID(skb->data, rtwsta_link->mac_id);
3808 SET_CTRL_INFO_OPERATION(skb->data, 1);
3809
3810 __rtw89_fw_h2c_set_tx_path(rtwdev, skb);
3811
3812 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3813 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
3814 H2C_FUNC_MAC_CCTLINFO_UD, 0, 1,
3815 H2C_CMC_TBL_LEN);
3816
3817 ret = rtw89_h2c_tx(rtwdev, skb, false);
3818 if (ret) {
3819 rtw89_err(rtwdev, "failed to send h2c\n");
3820 goto fail;
3821 }
3822
3823 return 0;
3824 fail:
3825 dev_kfree_skb_any(skb);
3826
3827 return ret;
3828 }
3829
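/* Download an updated beacon template to the firmware. The beacon is taken
 * from mac80211, optionally extended with a P2P NoA attribute, and sent
 * together with port, band, TIM offset and beacon rate in one H2C command.
 */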
3830 int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
3831 struct rtw89_vif_link *rtwvif_link)
3832 {
3833 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
3834 rtwvif_link->chanctx_idx);
3835 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
3836 struct rtw89_h2c_bcn_upd *h2c;
3837 struct sk_buff *skb_beacon;
3838 struct ieee80211_hdr *hdr;
3839 u32 len = sizeof(*h2c);
3840 struct sk_buff *skb;
3841 int bcn_total_len;
3842 u16 beacon_rate;
3843 u16 tim_offset;
3844 void *noa_data;
3845 u8 noa_len;
3846 int ret;
3847
3848 if (vif->p2p)
3849 beacon_rate = RTW89_HW_RATE_OFDM6;
3850 else if (chan->band_type == RTW89_BAND_2G)
3851 beacon_rate = RTW89_HW_RATE_CCK1;
3852 else
3853 beacon_rate = RTW89_HW_RATE_OFDM6;
3854
3855 skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset,
3856 NULL, 0);
3857 if (!skb_beacon) {
3858 rtw89_err(rtwdev, "failed to get beacon skb\n");
3859 return -ENOMEM;
3860 }
3861
3862 noa_len = rtw89_p2p_noa_fetch(rtwvif_link, &noa_data);
3863 if (noa_len &&
3864 (noa_len <= skb_tailroom(skb_beacon) ||
3865 pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) {
3866 skb_put_data(skb_beacon, noa_data, noa_len);
3867 }
3868
3869 hdr = (struct ieee80211_hdr *)skb_beacon->data;
3870 tim_offset -= ieee80211_hdrlen(hdr->frame_control);
3871
3872 bcn_total_len = len + skb_beacon->len;
3873 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len);
3874 if (!skb) {
3875 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
3876 dev_kfree_skb_any(skb_beacon);
3877 return -ENOMEM;
3878 }
3879 skb_put(skb, len);
3880 h2c = (struct rtw89_h2c_bcn_upd *)skb->data;
3881
3882 h2c->w0 = le32_encode_bits(rtwvif_link->port, RTW89_H2C_BCN_UPD_W0_PORT) |
3883 le32_encode_bits(0, RTW89_H2C_BCN_UPD_W0_MBSSID) |
3884 le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_BCN_UPD_W0_BAND) |
3885 le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_W0_GRP_IE_OFST);
3886 h2c->w1 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCN_UPD_W1_MACID) |
3887 le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_W1_SSN_SEL) |
3888 le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_W1_SSN_MODE) |
3889 le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_W1_RATE);
3890
3891 skb_put_data(skb, skb_beacon->data, skb_beacon->len);
3892 dev_kfree_skb_any(skb_beacon);
3893
3894 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3895 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
3896 H2C_FUNC_MAC_BCN_UPD, 0, 1,
3897 bcn_total_len);
3898
3899 ret = rtw89_h2c_tx(rtwdev, skb, false);
3900 if (ret) {
3901 rtw89_err(rtwdev, "failed to send h2c\n");
3902 dev_kfree_skb_any(skb);
3903 return ret;
3904 }
3905
3906 return 0;
3907 }
3908 EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon);
3909
3910 int rtw89_fw_h2c_update_beacon_be(struct rtw89_dev *rtwdev,
3911 struct rtw89_vif_link *rtwvif_link)
3912 {
3913 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
3914 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
3915 struct rtw89_h2c_bcn_upd_be *h2c;
3916 struct sk_buff *skb_beacon;
3917 struct ieee80211_hdr *hdr;
3918 u32 len = sizeof(*h2c);
3919 struct sk_buff *skb;
3920 int bcn_total_len;
3921 u16 beacon_rate;
3922 u16 tim_offset;
3923 void *noa_data;
3924 u8 noa_len;
3925 int ret;
3926
3927 if (vif->p2p)
3928 beacon_rate = RTW89_HW_RATE_OFDM6;
3929 else if (chan->band_type == RTW89_BAND_2G)
3930 beacon_rate = RTW89_HW_RATE_CCK1;
3931 else
3932 beacon_rate = RTW89_HW_RATE_OFDM6;
3933
3934 skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset,
3935 NULL, 0);
3936 if (!skb_beacon) {
3937 rtw89_err(rtwdev, "failed to get beacon skb\n");
3938 return -ENOMEM;
3939 }
3940
3941 noa_len = rtw89_p2p_noa_fetch(rtwvif_link, &noa_data);
3942 if (noa_len &&
3943 (noa_len <= skb_tailroom(skb_beacon) ||
3944 pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) {
3945 skb_put_data(skb_beacon, noa_data, noa_len);
3946 }
3947
3948 hdr = (struct ieee80211_hdr *)skb_beacon->data;
3949 tim_offset -= ieee80211_hdrlen(hdr->frame_control);
3950
3951 bcn_total_len = len + skb_beacon->len;
3952 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len);
3953 if (!skb) {
3954 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
3955 dev_kfree_skb_any(skb_beacon);
3956 return -ENOMEM;
3957 }
3958 skb_put(skb, len);
3959 h2c = (struct rtw89_h2c_bcn_upd_be *)skb->data;
3960
3961 h2c->w0 = le32_encode_bits(rtwvif_link->port, RTW89_H2C_BCN_UPD_BE_W0_PORT) |
3962 le32_encode_bits(0, RTW89_H2C_BCN_UPD_BE_W0_MBSSID) |
3963 le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_BCN_UPD_BE_W0_BAND) |
3964 le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_BE_W0_GRP_IE_OFST);
3965 h2c->w1 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCN_UPD_BE_W1_MACID) |
3966 le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_BE_W1_SSN_SEL) |
3967 le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_BE_W1_SSN_MODE) |
3968 le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_BE_W1_RATE);
3969
3970 skb_put_data(skb, skb_beacon->data, skb_beacon->len);
3971 dev_kfree_skb_any(skb_beacon);
3972
3973 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3974 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
3975 H2C_FUNC_MAC_BCN_UPD_BE, 0, 1,
3976 bcn_total_len);
3977
3978 ret = rtw89_h2c_tx(rtwdev, skb, false);
3979 if (ret) {
3980 rtw89_err(rtwdev, "failed to send h2c\n");
3981 goto fail;
3982 }
3983
3984 return 0;
3985
3986 fail:
3987 dev_kfree_skb_any(skb);
3988
3989 return ret;
3990 }
3991 EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon_be);
3992
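/* Report a role update for a MAC ID to the firmware. When the VIF runs in
 * AP mode and a station link is given, the self role is reported as
 * AP-client instead of the VIF's own role.
 */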
3993 int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
3994 struct rtw89_vif_link *rtwvif_link,
3995 struct rtw89_sta_link *rtwsta_link,
3996 enum rtw89_upd_mode upd_mode)
3997 {
3998 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
3999 struct rtw89_h2c_role_maintain *h2c;
4000 u32 len = sizeof(*h2c);
4001 struct sk_buff *skb;
4002 u8 self_role;
4003 int ret;
4004
4005 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) {
4006 if (rtwsta_link)
4007 self_role = RTW89_SELF_ROLE_AP_CLIENT;
4008 else
4009 self_role = rtwvif_link->self_role;
4010 } else {
4011 self_role = rtwvif_link->self_role;
4012 }
4013
4014 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4015 if (!skb) {
4016 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
4017 return -ENOMEM;
4018 }
4019 skb_put(skb, len);
4020 h2c = (struct rtw89_h2c_role_maintain *)skb->data;
4021
4022 h2c->w0 = le32_encode_bits(mac_id, RTW89_H2C_ROLE_MAINTAIN_W0_MACID) |
4023 le32_encode_bits(self_role, RTW89_H2C_ROLE_MAINTAIN_W0_SELF_ROLE) |
4024 le32_encode_bits(upd_mode, RTW89_H2C_ROLE_MAINTAIN_W0_UPD_MODE) |
4025 le32_encode_bits(rtwvif_link->wifi_role,
4026 RTW89_H2C_ROLE_MAINTAIN_W0_WIFI_ROLE) |
4027 le32_encode_bits(rtwvif_link->mac_idx,
4028 RTW89_H2C_ROLE_MAINTAIN_W0_BAND) |
4029 le32_encode_bits(rtwvif_link->port, RTW89_H2C_ROLE_MAINTAIN_W0_PORT);
4030
4031 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4032 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
4033 H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1,
4034 len);
4035
4036 ret = rtw89_h2c_tx(rtwdev, skb, false);
4037 if (ret) {
4038 rtw89_err(rtwdev, "failed to send h2c\n");
4039 goto fail;
4040 }
4041
4042 return 0;
4043 fail:
4044 dev_kfree_skb_any(skb);
4045
4046 return ret;
4047 }
4048
4049 static enum rtw89_fw_sta_type
4050 rtw89_fw_get_sta_type(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
4051 struct rtw89_sta_link *rtwsta_link)
4052 {
4053 struct ieee80211_bss_conf *bss_conf;
4054 struct ieee80211_link_sta *link_sta;
4055 enum rtw89_fw_sta_type type;
4056
4057 rcu_read_lock();
4058
4059 if (!rtwsta_link)
4060 goto by_vif;
4061
4062 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
4063
4064 if (link_sta->eht_cap.has_eht)
4065 type = RTW89_FW_BE_STA;
4066 else if (link_sta->he_cap.has_he)
4067 type = RTW89_FW_AX_STA;
4068 else
4069 type = RTW89_FW_N_AC_STA;
4070
4071 goto out;
4072
4073 by_vif:
4074 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
4075
4076 if (bss_conf->eht_support)
4077 type = RTW89_FW_BE_STA;
4078 else if (bss_conf->he_support)
4079 type = RTW89_FW_AX_STA;
4080 else
4081 type = RTW89_FW_N_AC_STA;
4082
4083 out:
4084 rcu_read_unlock();
4085
4086 return type;
4087 }
4088
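/* Send join (connect/disconnect) information for a MAC ID. On BE
 * generation chips the extended v1 layout is used, which additionally
 * carries the firmware station type, MLD flag, main MAC ID and the
 * MLO/EMLSR parameters.
 */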
4089 int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
4090 struct rtw89_sta_link *rtwsta_link, bool dis_conn)
4091 {
4092 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
4093 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
4094 bool is_mld = ieee80211_vif_is_mld(vif);
4095 u8 self_role = rtwvif_link->self_role;
4096 enum rtw89_fw_sta_type sta_type;
4097 u8 net_type = rtwvif_link->net_type;
4098 struct rtw89_h2c_join_v1 *h2c_v1;
4099 struct rtw89_h2c_join *h2c;
4100 u32 len = sizeof(*h2c);
4101 bool format_v1 = false;
4102 struct sk_buff *skb;
4103 u8 main_mac_id;
4104 bool init_ps;
4105 int ret;
4106
4107 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) {
4108 len = sizeof(*h2c_v1);
4109 format_v1 = true;
4110 }
4111
4112 if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta_link) {
4113 self_role = RTW89_SELF_ROLE_AP_CLIENT;
4114 net_type = dis_conn ? RTW89_NET_TYPE_NO_LINK : net_type;
4115 }
4116
4117 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4118 if (!skb) {
4119 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
4120 return -ENOMEM;
4121 }
4122 skb_put(skb, len);
4123 h2c = (struct rtw89_h2c_join *)skb->data;
4124
4125 h2c->w0 = le32_encode_bits(mac_id, RTW89_H2C_JOININFO_W0_MACID) |
4126 le32_encode_bits(dis_conn, RTW89_H2C_JOININFO_W0_OP) |
4127 le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_JOININFO_W0_BAND) |
4128 le32_encode_bits(rtwvif_link->wmm, RTW89_H2C_JOININFO_W0_WMM) |
4129 le32_encode_bits(rtwvif_link->trigger, RTW89_H2C_JOININFO_W0_TGR) |
4130 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_ISHESTA) |
4131 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DLBW) |
4132 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_TF_MAC_PAD) |
4133 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DL_T_PE) |
4134 le32_encode_bits(rtwvif_link->port, RTW89_H2C_JOININFO_W0_PORT_ID) |
4135 le32_encode_bits(net_type, RTW89_H2C_JOININFO_W0_NET_TYPE) |
4136 le32_encode_bits(rtwvif_link->wifi_role,
4137 RTW89_H2C_JOININFO_W0_WIFI_ROLE) |
4138 le32_encode_bits(self_role, RTW89_H2C_JOININFO_W0_SELF_ROLE);
4139
4140 if (!format_v1)
4141 goto done;
4142
4143 h2c_v1 = (struct rtw89_h2c_join_v1 *)skb->data;
4144
4145 sta_type = rtw89_fw_get_sta_type(rtwdev, rtwvif_link, rtwsta_link);
4146 init_ps = rtwvif_link != rtw89_get_designated_link(rtwvif_link->rtwvif);
4147
4148 if (rtwsta_link)
4149 main_mac_id = rtw89_sta_get_main_macid(rtwsta_link->rtwsta);
4150 else
4151 main_mac_id = rtw89_vif_get_main_macid(rtwvif_link->rtwvif);
4152
4153 h2c_v1->w1 = le32_encode_bits(sta_type, RTW89_H2C_JOININFO_W1_STA_TYPE) |
4154 le32_encode_bits(is_mld, RTW89_H2C_JOININFO_W1_IS_MLD) |
4155 le32_encode_bits(main_mac_id, RTW89_H2C_JOININFO_W1_MAIN_MACID) |
4156 le32_encode_bits(RTW89_H2C_JOININFO_MLO_MODE_MLSR,
4157 RTW89_H2C_JOININFO_W1_MLO_MODE) |
4158 le32_encode_bits(0, RTW89_H2C_JOININFO_W1_EMLSR_CAB) |
4159 le32_encode_bits(0, RTW89_H2C_JOININFO_W1_NSTR_EN) |
4160 le32_encode_bits(init_ps, RTW89_H2C_JOININFO_W1_INIT_PWR_STATE) |
4161 le32_encode_bits(IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_256US,
4162 RTW89_H2C_JOININFO_W1_EMLSR_PADDING) |
4163 le32_encode_bits(IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_256US,
4164 RTW89_H2C_JOININFO_W1_EMLSR_TRANS_DELAY) |
4165 le32_encode_bits(0, RTW89_H2C_JOININFO_W2_MACID_EXT) |
4166 le32_encode_bits(0, RTW89_H2C_JOININFO_W2_MAIN_MACID_EXT);
4167
4168 h2c_v1->w2 = 0;
4169
4170 done:
4171 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4172 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
4173 H2C_FUNC_MAC_JOININFO, 0, 1,
4174 len);
4175
4176 ret = rtw89_h2c_tx(rtwdev, skb, false);
4177 if (ret) {
4178 rtw89_err(rtwdev, "failed to send h2c\n");
4179 goto fail;
4180 }
4181
4182 return 0;
4183 fail:
4184 dev_kfree_skb_any(skb);
4185
4186 return ret;
4187 }
4188
4189 int rtw89_fw_h2c_notify_dbcc(struct rtw89_dev *rtwdev, bool en)
4190 {
4191 struct rtw89_h2c_notify_dbcc *h2c;
4192 u32 len = sizeof(*h2c);
4193 struct sk_buff *skb;
4194 int ret;
4195
4196 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4197 if (!skb) {
4198 rtw89_err(rtwdev, "failed to alloc skb for h2c notify dbcc\n");
4199 return -ENOMEM;
4200 }
4201 skb_put(skb, len);
4202 h2c = (struct rtw89_h2c_notify_dbcc *)skb->data;
4203
4204 h2c->w0 = le32_encode_bits(en, RTW89_H2C_NOTIFY_DBCC_EN);
4205
4206 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4207 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
4208 H2C_FUNC_NOTIFY_DBCC, 0, 1,
4209 len);
4210
4211 ret = rtw89_h2c_tx(rtwdev, skb, false);
4212 if (ret) {
4213 rtw89_err(rtwdev, "failed to send h2c\n");
4214 goto fail;
4215 }
4216
4217 return 0;
4218 fail:
4219 dev_kfree_skb_any(skb);
4220
4221 return ret;
4222 }
4223
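/* Pause or resume TX for a single MAC ID. If the firmware supports the
 * combined MACID_PAUSE_SLEEP command, both the pause and sleep bitmaps are
 * updated; otherwise only the legacy pause group is written.
 */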
4224 int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
4225 bool pause)
4226 {
4227 struct rtw89_fw_macid_pause_sleep_grp *h2c_new;
4228 struct rtw89_fw_macid_pause_grp *h2c;
4229 __le32 set = cpu_to_le32(BIT(sh));
4230 u8 h2c_macid_pause_id;
4231 struct sk_buff *skb;
4232 u32 len;
4233 int ret;
4234
4235 if (RTW89_CHK_FW_FEATURE(MACID_PAUSE_SLEEP, &rtwdev->fw)) {
4236 h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE_SLEEP;
4237 len = sizeof(*h2c_new);
4238 } else {
4239 h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE;
4240 len = sizeof(*h2c);
4241 }
4242
4243 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4244 if (!skb) {
4245 rtw89_err(rtwdev, "failed to alloc skb for h2c macid pause\n");
4246 return -ENOMEM;
4247 }
4248 skb_put(skb, len);
4249
4250 if (h2c_macid_pause_id == H2C_FUNC_MAC_MACID_PAUSE_SLEEP) {
4251 h2c_new = (struct rtw89_fw_macid_pause_sleep_grp *)skb->data;
4252
4253 h2c_new->n[0].pause_mask_grp[grp] = set;
4254 h2c_new->n[0].sleep_mask_grp[grp] = set;
4255 if (pause) {
4256 h2c_new->n[0].pause_grp[grp] = set;
4257 h2c_new->n[0].sleep_grp[grp] = set;
4258 }
4259 } else {
4260 h2c = (struct rtw89_fw_macid_pause_grp *)skb->data;
4261
4262 h2c->mask_grp[grp] = set;
4263 if (pause)
4264 h2c->pause_grp[grp] = set;
4265 }
4266
4267 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4268 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
4269 h2c_macid_pause_id, 1, 0,
4270 len);
4271
4272 ret = rtw89_h2c_tx(rtwdev, skb, false);
4273 if (ret) {
4274 rtw89_err(rtwdev, "failed to send h2c\n");
4275 goto fail;
4276 }
4277
4278 return 0;
4279 fail:
4280 dev_kfree_skb_any(skb);
4281
4282 return ret;
4283 }
4284
4285 #define H2C_EDCA_LEN 12
4286 int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
4287 u8 ac, u32 val)
4288 {
4289 struct sk_buff *skb;
4290 int ret;
4291
4292 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_EDCA_LEN);
4293 if (!skb) {
4294 rtw89_err(rtwdev, "failed to alloc skb for h2c edca\n");
4295 return -ENOMEM;
4296 }
4297 skb_put(skb, H2C_EDCA_LEN);
4298 RTW89_SET_EDCA_SEL(skb->data, 0);
4299 RTW89_SET_EDCA_BAND(skb->data, rtwvif_link->mac_idx);
4300 RTW89_SET_EDCA_WMM(skb->data, 0);
4301 RTW89_SET_EDCA_AC(skb->data, ac);
4302 RTW89_SET_EDCA_PARAM(skb->data, val);
4303
4304 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4305 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
4306 H2C_FUNC_USR_EDCA, 0, 1,
4307 H2C_EDCA_LEN);
4308
4309 ret = rtw89_h2c_tx(rtwdev, skb, false);
4310 if (ret) {
4311 rtw89_err(rtwdev, "failed to send h2c\n");
4312 goto fail;
4313 }
4314
4315 return 0;
4316 fail:
4317 dev_kfree_skb_any(skb);
4318
4319 return ret;
4320 }
4321
4322 #define H2C_TSF32_TOGL_LEN 4
4323 int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev,
4324 struct rtw89_vif_link *rtwvif_link,
4325 bool en)
4326 {
4327 struct sk_buff *skb;
4328 u16 early_us = en ? 2000 : 0;
4329 u8 *cmd;
4330 int ret;
4331
4332 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_TSF32_TOGL_LEN);
4333 if (!skb) {
4334 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n");
4335 return -ENOMEM;
4336 }
4337 skb_put(skb, H2C_TSF32_TOGL_LEN);
4338 cmd = skb->data;
4339
4340 RTW89_SET_FWCMD_TSF32_TOGL_BAND(cmd, rtwvif_link->mac_idx);
4341 RTW89_SET_FWCMD_TSF32_TOGL_EN(cmd, en);
4342 RTW89_SET_FWCMD_TSF32_TOGL_PORT(cmd, rtwvif_link->port);
4343 RTW89_SET_FWCMD_TSF32_TOGL_EARLY(cmd, early_us);
4344
4345 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4346 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
4347 H2C_FUNC_TSF32_TOGL, 0, 0,
4348 H2C_TSF32_TOGL_LEN);
4349
4350 ret = rtw89_h2c_tx(rtwdev, skb, false);
4351 if (ret) {
4352 rtw89_err(rtwdev, "failed to send h2c\n");
4353 goto fail;
4354 }
4355
4356 return 0;
4357 fail:
4358 dev_kfree_skb_any(skb);
4359
4360 return ret;
4361 }
4362
4363 #define H2C_OFLD_CFG_LEN 8
4364 int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev)
4365 {
4366 static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00};
4367 struct sk_buff *skb;
4368 int ret;
4369
4370 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_OFLD_CFG_LEN);
4371 if (!skb) {
4372 rtw89_err(rtwdev, "failed to alloc skb for h2c ofld\n");
4373 return -ENOMEM;
4374 }
4375 skb_put_data(skb, cfg, H2C_OFLD_CFG_LEN);
4376
4377 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4378 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
4379 H2C_FUNC_OFLD_CFG, 0, 1,
4380 H2C_OFLD_CFG_LEN);
4381
4382 ret = rtw89_h2c_tx(rtwdev, skb, false);
4383 if (ret) {
4384 rtw89_err(rtwdev, "failed to send h2c\n");
4385 goto fail;
4386 }
4387
4388 return 0;
4389 fail:
4390 dev_kfree_skb_any(skb);
4391
4392 return ret;
4393 }
4394
4395 int rtw89_fw_h2c_tx_duty(struct rtw89_dev *rtwdev, u8 lv)
4396 {
4397 struct rtw89_h2c_tx_duty *h2c;
4398 u32 len = sizeof(*h2c);
4399 struct sk_buff *skb;
4400 u16 pause, active;
4401 int ret;
4402
4403 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4404 if (!skb) {
4405 rtw89_err(rtwdev, "failed to alloc skb for h2c tx duty\n");
4406 return -ENOMEM;
4407 }
4408
4409 skb_put(skb, len);
4410 h2c = (struct rtw89_h2c_tx_duty *)skb->data;
4411
4412 static_assert(RTW89_THERMAL_PROT_LV_MAX * RTW89_THERMAL_PROT_STEP < 100);
4413
4414 if (lv == 0 || lv > RTW89_THERMAL_PROT_LV_MAX) {
4415 h2c->w1 = le32_encode_bits(1, RTW89_H2C_TX_DUTY_W1_STOP);
4416 } else {
4417 active = 100 - lv * RTW89_THERMAL_PROT_STEP;
4418 pause = 100 - active;
4419
4420 h2c->w0 = le32_encode_bits(pause, RTW89_H2C_TX_DUTY_W0_PAUSE_INTVL_MASK) |
4421 le32_encode_bits(active, RTW89_H2C_TX_DUTY_W0_TX_INTVL_MASK);
4422 }
4423
4424 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4425 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
4426 H2C_FUNC_TX_DUTY, 0, 0, len);
4427
4428 ret = rtw89_h2c_tx(rtwdev, skb, false);
4429 if (ret) {
4430 rtw89_err(rtwdev, "failed to send h2c\n");
4431 goto fail;
4432 }
4433
4434 return 0;
4435 fail:
4436 dev_kfree_skb_any(skb);
4437
4438 return ret;
4439 }
4440
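/* Configure the firmware beacon filter and RSSI monitor for a station-mode
 * link. CQM threshold and hysteresis come from the BSS configuration when
 * set, otherwise driver defaults are used, and the beacon loss count is
 * capped according to the firmware feature set.
 */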
4441 int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev,
4442 struct rtw89_vif_link *rtwvif_link,
4443 bool connect)
4444 {
4445 struct ieee80211_bss_conf *bss_conf;
4446 s32 thold = RTW89_DEFAULT_CQM_THOLD;
4447 u32 hyst = RTW89_DEFAULT_CQM_HYST;
4448 struct rtw89_h2c_bcnfltr *h2c;
4449 u32 len = sizeof(*h2c);
4450 struct sk_buff *skb;
4451 u8 max_cnt, cnt;
4452 int ret;
4453
4454 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw))
4455 return -EINVAL;
4456
4457 if (!rtwvif_link || rtwvif_link->net_type != RTW89_NET_TYPE_INFRA)
4458 return -EINVAL;
4459
4460 rcu_read_lock();
4461
4462 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, false);
4463
4464 if (bss_conf->cqm_rssi_hyst)
4465 hyst = bss_conf->cqm_rssi_hyst;
4466 if (bss_conf->cqm_rssi_thold)
4467 thold = bss_conf->cqm_rssi_thold;
4468
4469 rcu_read_unlock();
4470
4471 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4472 if (!skb) {
4473 rtw89_err(rtwdev, "failed to alloc skb for h2c bcn filter\n");
4474 return -ENOMEM;
4475 }
4476
4477 skb_put(skb, len);
4478 h2c = (struct rtw89_h2c_bcnfltr *)skb->data;
4479
4480 if (RTW89_CHK_FW_FEATURE(BEACON_LOSS_COUNT_V1, &rtwdev->fw))
4481 max_cnt = BIT(7) - 1;
4482 else
4483 max_cnt = BIT(4) - 1;
4484
4485 cnt = min(RTW89_BCN_LOSS_CNT, max_cnt);
4486
4487 h2c->w0 = le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_RSSI) |
4488 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_BCN) |
4489 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_EN) |
4490 le32_encode_bits(RTW89_BCN_FLTR_OFFLOAD_MODE_DEFAULT,
4491 RTW89_H2C_BCNFLTR_W0_MODE) |
4492 le32_encode_bits(cnt >> 4, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT_H3) |
4493 le32_encode_bits(cnt & 0xf, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT_L4) |
4494 le32_encode_bits(hyst, RTW89_H2C_BCNFLTR_W0_RSSI_HYST) |
4495 le32_encode_bits(thold + MAX_RSSI,
4496 RTW89_H2C_BCNFLTR_W0_RSSI_THRESHOLD) |
4497 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCNFLTR_W0_MAC_ID);
4498
4499 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4500 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
4501 H2C_FUNC_CFG_BCNFLTR, 0, 1, len);
4502
4503 ret = rtw89_h2c_tx(rtwdev, skb, false);
4504 if (ret) {
4505 rtw89_err(rtwdev, "failed to send h2c\n");
4506 goto fail;
4507 }
4508
4509 return 0;
4510 fail:
4511 dev_kfree_skb_any(skb);
4512
4513 return ret;
4514 }
4515
4516 int rtw89_fw_h2c_rssi_offload(struct rtw89_dev *rtwdev,
4517 struct rtw89_rx_phy_ppdu *phy_ppdu)
4518 {
4519 struct rtw89_h2c_ofld_rssi *h2c;
4520 u32 len = sizeof(*h2c);
4521 struct sk_buff *skb;
4522 s8 rssi;
4523 int ret;
4524
4525 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw))
4526 return -EINVAL;
4527
4528 if (!phy_ppdu)
4529 return -EINVAL;
4530
4531 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4532 if (!skb) {
4533 rtw89_err(rtwdev, "failed to alloc skb for h2c rssi\n");
4534 return -ENOMEM;
4535 }
4536
4537 rssi = phy_ppdu->rssi_avg >> RSSI_FACTOR;
4538 skb_put(skb, len);
4539 h2c = (struct rtw89_h2c_ofld_rssi *)skb->data;
4540
4541 h2c->w0 = le32_encode_bits(phy_ppdu->mac_id, RTW89_H2C_OFLD_RSSI_W0_MACID) |
4542 le32_encode_bits(1, RTW89_H2C_OFLD_RSSI_W0_NUM);
4543 h2c->w1 = le32_encode_bits(rssi, RTW89_H2C_OFLD_RSSI_W1_VAL);
4544
4545 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4546 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
4547 H2C_FUNC_OFLD_RSSI, 0, 1, len);
4548
4549 ret = rtw89_h2c_tx(rtwdev, skb, false);
4550 if (ret) {
4551 rtw89_err(rtwdev, "failed to send h2c\n");
4552 goto fail;
4553 }
4554
4555 return 0;
4556 fail:
4557 dev_kfree_skb_any(skb);
4558
4559 return ret;
4560 }
4561
4562 int rtw89_fw_h2c_tp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link)
4563 {
4564 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
4565 struct rtw89_traffic_stats *stats = &rtwvif->stats;
4566 struct rtw89_h2c_ofld *h2c;
4567 u32 len = sizeof(*h2c);
4568 struct sk_buff *skb;
4569 int ret;
4570
4571 if (rtwvif_link->net_type != RTW89_NET_TYPE_INFRA)
4572 return -EINVAL;
4573
4574 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4575 if (!skb) {
4576 rtw89_err(rtwdev, "failed to alloc skb for h2c tp\n");
4577 return -ENOMEM;
4578 }
4579
4580 skb_put(skb, len);
4581 h2c = (struct rtw89_h2c_ofld *)skb->data;
4582
4583 h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_OFLD_W0_MAC_ID) |
4584 le32_encode_bits(stats->tx_throughput, RTW89_H2C_OFLD_W0_TX_TP) |
4585 le32_encode_bits(stats->rx_throughput, RTW89_H2C_OFLD_W0_RX_TP);
4586
4587 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4588 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
4589 H2C_FUNC_OFLD_TP, 0, 1, len);
4590
4591 ret = rtw89_h2c_tx(rtwdev, skb, false);
4592 if (ret) {
4593 rtw89_err(rtwdev, "failed to send h2c\n");
4594 goto fail;
4595 }
4596
4597 return 0;
4598 fail:
4599 dev_kfree_skb_any(skb);
4600
4601 return ret;
4602 }
4603
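/* Push a rate adaptation configuration to the firmware. BE generation
 * chips use the v1 layout with extra EHT mode/bandwidth fields, and when
 * @csi is set the fixed CSI rate parameters are filled in as well.
 */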
4604 int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi)
4605 {
4606 const struct rtw89_chip_info *chip = rtwdev->chip;
4607 struct rtw89_h2c_ra_v1 *h2c_v1;
4608 struct rtw89_h2c_ra *h2c;
4609 u32 len = sizeof(*h2c);
4610 bool format_v1 = false;
4611 struct sk_buff *skb;
4612 int ret;
4613
4614 if (chip->chip_gen == RTW89_CHIP_BE) {
4615 len = sizeof(*h2c_v1);
4616 format_v1 = true;
4617 }
4618
4619 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4620 if (!skb) {
4621 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
4622 return -ENOMEM;
4623 }
4624 skb_put(skb, len);
4625 h2c = (struct rtw89_h2c_ra *)skb->data;
4626 rtw89_debug(rtwdev, RTW89_DBG_RA,
4627 "ra cmd msk: %llx ", ra->ra_mask);
4628
4629 h2c->w0 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_W0_MODE) |
4630 le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_W0_BW_CAP) |
4631 le32_encode_bits(ra->macid, RTW89_H2C_RA_W0_MACID) |
4632 le32_encode_bits(ra->dcm_cap, RTW89_H2C_RA_W0_DCM) |
4633 le32_encode_bits(ra->er_cap, RTW89_H2C_RA_W0_ER) |
4634 le32_encode_bits(ra->init_rate_lv, RTW89_H2C_RA_W0_INIT_RATE_LV) |
4635 le32_encode_bits(ra->upd_all, RTW89_H2C_RA_W0_UPD_ALL) |
4636 le32_encode_bits(ra->en_sgi, RTW89_H2C_RA_W0_SGI) |
4637 le32_encode_bits(ra->ldpc_cap, RTW89_H2C_RA_W0_LDPC) |
4638 le32_encode_bits(ra->stbc_cap, RTW89_H2C_RA_W0_STBC) |
4639 le32_encode_bits(ra->ss_num, RTW89_H2C_RA_W0_SS_NUM) |
4640 le32_encode_bits(ra->giltf, RTW89_H2C_RA_W0_GILTF) |
4641 le32_encode_bits(ra->upd_bw_nss_mask, RTW89_H2C_RA_W0_UPD_BW_NSS_MASK) |
4642 le32_encode_bits(ra->upd_mask, RTW89_H2C_RA_W0_UPD_MASK);
4643 h2c->w1 = le32_encode_bits(ra->ra_mask, RTW89_H2C_RA_W1_RAMASK_LO32);
4644 h2c->w2 = le32_encode_bits(ra->ra_mask >> 32, RTW89_H2C_RA_W2_RAMASK_HI32);
4645 h2c->w3 = le32_encode_bits(ra->fix_giltf_en, RTW89_H2C_RA_W3_FIX_GILTF_EN) |
4646 le32_encode_bits(ra->fix_giltf, RTW89_H2C_RA_W3_FIX_GILTF);
4647
4648 if (!format_v1)
4649 goto csi;
4650
4651 h2c_v1 = (struct rtw89_h2c_ra_v1 *)h2c;
4652 h2c_v1->w4 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_V1_W4_MODE_EHT) |
4653 le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_V1_W4_BW_EHT);
4654
4655 csi:
4656 if (!csi)
4657 goto done;
4658
4659 h2c->w2 |= le32_encode_bits(1, RTW89_H2C_RA_W2_BFEE_CSI_CTL);
4660 h2c->w3 |= le32_encode_bits(ra->band_num, RTW89_H2C_RA_W3_BAND_NUM) |
4661 le32_encode_bits(ra->cr_tbl_sel, RTW89_H2C_RA_W3_CR_TBL_SEL) |
4662 le32_encode_bits(ra->fixed_csi_rate_en, RTW89_H2C_RA_W3_FIXED_CSI_RATE_EN) |
4663 le32_encode_bits(ra->ra_csi_rate_en, RTW89_H2C_RA_W3_RA_CSI_RATE_EN) |
4664 le32_encode_bits(ra->csi_mcs_ss_idx, RTW89_H2C_RA_W3_FIXED_CSI_MCS_SS_IDX) |
4665 le32_encode_bits(ra->csi_mode, RTW89_H2C_RA_W3_FIXED_CSI_MODE) |
4666 le32_encode_bits(ra->csi_gi_ltf, RTW89_H2C_RA_W3_FIXED_CSI_GI_LTF) |
4667 le32_encode_bits(ra->csi_bw, RTW89_H2C_RA_W3_FIXED_CSI_BW);
4668
4669 done:
4670 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4671 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RA,
4672 H2C_FUNC_OUTSRC_RA_MACIDCFG, 0, 0,
4673 len);
4674
4675 ret = rtw89_h2c_tx(rtwdev, skb, false);
4676 if (ret) {
4677 rtw89_err(rtwdev, "failed to send h2c\n");
4678 goto fail;
4679 }
4680
4681 return 0;
4682 fail:
4683 dev_kfree_skb_any(skb);
4684
4685 return ret;
4686 }
4687
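/* Send BT-coexistence driver init information (antenna, module and initial
 * WL state) to the firmware using the fixed legacy cxinit layout.
 */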
4688 int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev, u8 type)
4689 {
4690 struct rtw89_btc *btc = &rtwdev->btc;
4691 struct rtw89_btc_dm *dm = &btc->dm;
4692 struct rtw89_btc_init_info *init_info = &dm->init_info.init;
4693 struct rtw89_btc_module *module = &init_info->module;
4694 struct rtw89_btc_ant_info *ant = &module->ant;
4695 struct rtw89_h2c_cxinit *h2c;
4696 u32 len = sizeof(*h2c);
4697 struct sk_buff *skb;
4698 int ret;
4699
4700 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4701 if (!skb) {
4702 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init\n");
4703 return -ENOMEM;
4704 }
4705 skb_put(skb, len);
4706 h2c = (struct rtw89_h2c_cxinit *)skb->data;
4707
4708 h2c->hdr.type = type;
4709 h2c->hdr.len = len - H2C_LEN_CXDRVHDR;
4710
4711 h2c->ant_type = ant->type;
4712 h2c->ant_num = ant->num;
4713 h2c->ant_iso = ant->isolation;
4714 h2c->ant_info =
4715 u8_encode_bits(ant->single_pos, RTW89_H2C_CXINIT_ANT_INFO_POS) |
4716 u8_encode_bits(ant->diversity, RTW89_H2C_CXINIT_ANT_INFO_DIVERSITY) |
4717 u8_encode_bits(ant->btg_pos, RTW89_H2C_CXINIT_ANT_INFO_BTG_POS) |
4718 u8_encode_bits(ant->stream_cnt, RTW89_H2C_CXINIT_ANT_INFO_STREAM_CNT);
4719
4720 h2c->mod_rfe = module->rfe_type;
4721 h2c->mod_cv = module->cv;
4722 h2c->mod_info =
4723 u8_encode_bits(module->bt_solo, RTW89_H2C_CXINIT_MOD_INFO_BT_SOLO) |
4724 u8_encode_bits(module->bt_pos, RTW89_H2C_CXINIT_MOD_INFO_BT_POS) |
4725 u8_encode_bits(module->switch_type, RTW89_H2C_CXINIT_MOD_INFO_SW_TYPE) |
4726 u8_encode_bits(module->wa_type, RTW89_H2C_CXINIT_MOD_INFO_WA_TYPE);
4727 h2c->mod_adie_kt = module->kt_ver_adie;
4728 h2c->wl_gch = init_info->wl_guard_ch;
4729
4730 h2c->info =
4731 u8_encode_bits(init_info->wl_only, RTW89_H2C_CXINIT_INFO_WL_ONLY) |
4732 u8_encode_bits(init_info->wl_init_ok, RTW89_H2C_CXINIT_INFO_WL_INITOK) |
4733 u8_encode_bits(init_info->dbcc_en, RTW89_H2C_CXINIT_INFO_DBCC_EN) |
4734 u8_encode_bits(init_info->cx_other, RTW89_H2C_CXINIT_INFO_CX_OTHER) |
4735 u8_encode_bits(init_info->bt_only, RTW89_H2C_CXINIT_INFO_BT_ONLY);
4736
4737 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4738 H2C_CAT_OUTSRC, BTFC_SET,
4739 SET_DRV_INFO, 0, 0,
4740 len);
4741
4742 ret = rtw89_h2c_tx(rtwdev, skb, false);
4743 if (ret) {
4744 rtw89_err(rtwdev, "failed to send h2c\n");
4745 goto fail;
4746 }
4747
4748 return 0;
4749 fail:
4750 dev_kfree_skb_any(skb);
4751
4752 return ret;
4753 }
4754
4755 int rtw89_fw_h2c_cxdrv_init_v7(struct rtw89_dev *rtwdev, u8 type)
4756 {
4757 struct rtw89_btc *btc = &rtwdev->btc;
4758 struct rtw89_btc_dm *dm = &btc->dm;
4759 struct rtw89_btc_init_info_v7 *init_info = &dm->init_info.init_v7;
4760 struct rtw89_h2c_cxinit_v7 *h2c;
4761 u32 len = sizeof(*h2c);
4762 struct sk_buff *skb;
4763 int ret;
4764
4765 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4766 if (!skb) {
4767 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init_v7\n");
4768 return -ENOMEM;
4769 }
4770 skb_put(skb, len);
4771 h2c = (struct rtw89_h2c_cxinit_v7 *)skb->data;
4772
4773 h2c->hdr.type = type;
4774 h2c->hdr.ver = btc->ver->fcxinit;
4775 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7;
4776 h2c->init = *init_info;
4777
4778 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4779 H2C_CAT_OUTSRC, BTFC_SET,
4780 SET_DRV_INFO, 0, 0,
4781 len);
4782
4783 ret = rtw89_h2c_tx(rtwdev, skb, false);
4784 if (ret) {
4785 rtw89_err(rtwdev, "failed to send h2c\n");
4786 goto fail;
4787 }
4788
4789 return 0;
4790 fail:
4791 dev_kfree_skb_any(skb);
4792
4793 return ret;
4794 }
4795
4796 #define PORT_DATA_OFFSET 4
4797 #define H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN 12
4798 #define H2C_LEN_CXDRVINFO_ROLE_SIZE(max_role_num) \
4799 (4 + 12 * (max_role_num) + H2C_LEN_CXDRVHDR)
4800
4801 int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev, u8 type)
4802 {
4803 struct rtw89_btc *btc = &rtwdev->btc;
4804 const struct rtw89_btc_ver *ver = btc->ver;
4805 struct rtw89_btc_wl_info *wl = &btc->cx.wl;
4806 struct rtw89_btc_wl_role_info *role_info = &wl->role_info;
4807 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
4808 struct rtw89_btc_wl_active_role *active = role_info->active_role;
4809 struct sk_buff *skb;
4810 u32 len;
4811 u8 offset = 0;
4812 u8 *cmd;
4813 int ret;
4814 int i;
4815
4816 len = H2C_LEN_CXDRVINFO_ROLE_SIZE(ver->max_role_num);
4817
4818 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4819 if (!skb) {
4820 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
4821 return -ENOMEM;
4822 }
4823 skb_put(skb, len);
4824 cmd = skb->data;
4825
4826 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
4827 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);
4828
4829 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
4830 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);
4831
4832 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none);
4833 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station);
4834 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap);
4835 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap);
4836 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc);
4837 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master);
4838 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh);
4839 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter);
4840 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device);
4841 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc);
4842 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
4843 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);
4844
4845 for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
4846 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset);
4847 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset);
4848 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset);
4849 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset);
4850 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset);
4851 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset);
4852 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset);
4853 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset);
4854 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset);
4855 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset);
4856 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset);
4857 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset);
4858 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset);
4859 }
4860
4861 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4862 H2C_CAT_OUTSRC, BTFC_SET,
4863 SET_DRV_INFO, 0, 0,
4864 len);
4865
4866 ret = rtw89_h2c_tx(rtwdev, skb, false);
4867 if (ret) {
4868 rtw89_err(rtwdev, "failed to send h2c\n");
4869 goto fail;
4870 }
4871
4872 return 0;
4873 fail:
4874 dev_kfree_skb_any(skb);
4875
4876 return ret;
4877 }
4878
4879 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(max_role_num) \
4880 (4 + 16 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR)
4881
4882 int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev, u8 type)
4883 {
4884 struct rtw89_btc *btc = &rtwdev->btc;
4885 const struct rtw89_btc_ver *ver = btc->ver;
4886 struct rtw89_btc_wl_info *wl = &btc->cx.wl;
4887 struct rtw89_btc_wl_role_info_v1 *role_info = &wl->role_info_v1;
4888 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
4889 struct rtw89_btc_wl_active_role_v1 *active = role_info->active_role_v1;
4890 struct sk_buff *skb;
4891 u32 len;
4892 u8 *cmd, offset;
4893 int ret;
4894 int i;
4895
4896 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(ver->max_role_num);
4897
4898 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4899 if (!skb) {
4900 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
4901 return -ENOMEM;
4902 }
4903 skb_put(skb, len);
4904 cmd = skb->data;
4905
4906 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
4907 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);
4908
4909 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
4910 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);
4911
4912 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none);
4913 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station);
4914 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap);
4915 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap);
4916 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc);
4917 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master);
4918 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh);
4919 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter);
4920 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device);
4921 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc);
4922 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
4923 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);
4924
4925 offset = PORT_DATA_OFFSET;
4926 for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
4927 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset);
4928 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset);
4929 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset);
4930 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset);
4931 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset);
4932 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset);
4933 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset);
4934 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset);
4935 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset);
4936 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset);
4937 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset);
4938 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset);
4939 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset);
4940 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR(cmd, active->noa_duration, i, offset);
4941 }
4942
4943 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN;
4944 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset);
4945 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset);
4946 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset);
4947 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset);
4948 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset);
4949 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset);
4950
4951 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4952 H2C_CAT_OUTSRC, BTFC_SET,
4953 SET_DRV_INFO, 0, 0,
4954 len);
4955
4956 ret = rtw89_h2c_tx(rtwdev, skb, false);
4957 if (ret) {
4958 rtw89_err(rtwdev, "failed to send h2c\n");
4959 goto fail;
4960 }
4961
4962 return 0;
4963 fail:
4964 dev_kfree_skb_any(skb);
4965
4966 return ret;
4967 }
4968
4969 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(max_role_num) \
4970 (4 + 8 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR)
4971
4972 int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev, u8 type)
4973 {
4974 struct rtw89_btc *btc = &rtwdev->btc;
4975 const struct rtw89_btc_ver *ver = btc->ver;
4976 struct rtw89_btc_wl_info *wl = &btc->cx.wl;
4977 struct rtw89_btc_wl_role_info_v2 *role_info = &wl->role_info_v2;
4978 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
4979 struct rtw89_btc_wl_active_role_v2 *active = role_info->active_role_v2;
4980 struct sk_buff *skb;
4981 u32 len;
4982 u8 *cmd, offset;
4983 int ret;
4984 int i;
4985
4986 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(ver->max_role_num);
4987
4988 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4989 if (!skb) {
4990 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
4991 return -ENOMEM;
4992 }
4993 skb_put(skb, len);
4994 cmd = skb->data;
4995
4996 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
4997 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);
4998
4999 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
5000 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);
5001
5002 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none);
5003 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station);
5004 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap);
5005 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap);
5006 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc);
5007 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master);
5008 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh);
5009 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter);
5010 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device);
5011 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc);
5012 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
5013 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);
5014
5015 offset = PORT_DATA_OFFSET;
5016 for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
5017 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED_V2(cmd, active->connected, i, offset);
5018 RTW89_SET_FWCMD_CXROLE_ACT_PID_V2(cmd, active->pid, i, offset);
5019 RTW89_SET_FWCMD_CXROLE_ACT_PHY_V2(cmd, active->phy, i, offset);
5020 RTW89_SET_FWCMD_CXROLE_ACT_NOA_V2(cmd, active->noa, i, offset);
5021 RTW89_SET_FWCMD_CXROLE_ACT_BAND_V2(cmd, active->band, i, offset);
5022 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS_V2(cmd, active->client_ps, i, offset);
5023 RTW89_SET_FWCMD_CXROLE_ACT_BW_V2(cmd, active->bw, i, offset);
5024 RTW89_SET_FWCMD_CXROLE_ACT_ROLE_V2(cmd, active->role, i, offset);
5025 RTW89_SET_FWCMD_CXROLE_ACT_CH_V2(cmd, active->ch, i, offset);
5026 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR_V2(cmd, active->noa_duration, i, offset);
5027 }
5028
5029 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN;
5030 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset);
5031 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset);
5032 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset);
5033 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset);
5034 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset);
5035 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset);
5036
5037 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5038 H2C_CAT_OUTSRC, BTFC_SET,
5039 SET_DRV_INFO, 0, 0,
5040 len);
5041
5042 ret = rtw89_h2c_tx(rtwdev, skb, false);
5043 if (ret) {
5044 rtw89_err(rtwdev, "failed to send h2c\n");
5045 goto fail;
5046 }
5047
5048 return 0;
5049 fail:
5050 dev_kfree_skb_any(skb);
5051
5052 return ret;
5053 }
5054
5055 int rtw89_fw_h2c_cxdrv_role_v7(struct rtw89_dev *rtwdev, u8 type)
5056 {
5057 struct rtw89_btc *btc = &rtwdev->btc;
5058 struct rtw89_btc_wl_role_info_v7 *role = &btc->cx.wl.role_info_v7;
5059 struct rtw89_h2c_cxrole_v7 *h2c;
5060 u32 len = sizeof(*h2c);
5061 struct sk_buff *skb;
5062 int ret;
5063
5064 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
5065 if (!skb) {
5066 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role_v7\n");
5067 return -ENOMEM;
5068 }
5069 skb_put(skb, len);
5070 h2c = (struct rtw89_h2c_cxrole_v7 *)skb->data;
5071
5072 h2c->hdr.type = type;
5073 h2c->hdr.ver = btc->ver->fwlrole;
5074 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7;
5075 memcpy(&h2c->_u8, role, sizeof(h2c->_u8));
5076 h2c->_u32.role_map = cpu_to_le32(role->role_map);
5077 h2c->_u32.mrole_type = cpu_to_le32(role->mrole_type);
5078 h2c->_u32.mrole_noa_duration = cpu_to_le32(role->mrole_noa_duration);
5079 h2c->_u32.dbcc_en = cpu_to_le32(role->dbcc_en);
5080 h2c->_u32.dbcc_chg = cpu_to_le32(role->dbcc_chg);
5081 h2c->_u32.dbcc_2g_phy = cpu_to_le32(role->dbcc_2g_phy);
5082
5083 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5084 H2C_CAT_OUTSRC, BTFC_SET,
5085 SET_DRV_INFO, 0, 0,
5086 len);
5087
5088 ret = rtw89_h2c_tx(rtwdev, skb, false);
5089 if (ret) {
5090 rtw89_err(rtwdev, "failed to send h2c\n");
5091 goto fail;
5092 }
5093
5094 return 0;
5095 fail:
5096 dev_kfree_skb_any(skb);
5097
5098 return ret;
5099 }
5100
5101 int rtw89_fw_h2c_cxdrv_role_v8(struct rtw89_dev *rtwdev, u8 type)
5102 {
5103 struct rtw89_btc *btc = &rtwdev->btc;
5104 struct rtw89_btc_wl_role_info_v8 *role = &btc->cx.wl.role_info_v8;
5105 struct rtw89_h2c_cxrole_v8 *h2c;
5106 u32 len = sizeof(*h2c);
5107 struct sk_buff *skb;
5108 int ret;
5109
5110 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
5111 if (!skb) {
5112 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role_v8\n");
5113 return -ENOMEM;
5114 }
5115 skb_put(skb, len);
5116 h2c = (struct rtw89_h2c_cxrole_v8 *)skb->data;
5117
5118 h2c->hdr.type = type;
5119 h2c->hdr.ver = btc->ver->fwlrole;
5120 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7;
5121 memcpy(&h2c->_u8, role, sizeof(h2c->_u8));
5122 h2c->_u32.role_map = cpu_to_le32(role->role_map);
5123 h2c->_u32.mrole_type = cpu_to_le32(role->mrole_type);
5124 h2c->_u32.mrole_noa_duration = cpu_to_le32(role->mrole_noa_duration);
5125
5126 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5127 H2C_CAT_OUTSRC, BTFC_SET,
5128 SET_DRV_INFO, 0, 0,
5129 len);
5130
5131 ret = rtw89_h2c_tx(rtwdev, skb, false);
5132 if (ret) {
5133 rtw89_err(rtwdev, "failed to send h2c\n");
5134 goto fail;
5135 }
5136
5137 return 0;
5138 fail:
5139 dev_kfree_skb_any(skb);
5140
5141 return ret;
5142 }
5143
5144 int rtw89_fw_h2c_cxdrv_osi_info(struct rtw89_dev *rtwdev, u8 type)
5145 {
5146 struct rtw89_btc *btc = &rtwdev->btc;
5147 struct rtw89_btc_fbtc_outsrc_set_info *osi = &btc->dm.ost_info;
5148 struct rtw89_h2c_cxosi *h2c;
5149 u32 len = sizeof(*h2c);
5150 struct sk_buff *skb;
5151 int ret;
5152
5153 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
5154 if (!skb) {
5155 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_osi\n");
5156 return -ENOMEM;
5157 }
5158 skb_put(skb, len);
5159 h2c = (struct rtw89_h2c_cxosi *)skb->data;
5160
5161 h2c->hdr.type = type;
5162 h2c->hdr.ver = btc->ver->fcxosi;
5163 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7;
5164 h2c->osi = *osi;
5165
5166 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5167 H2C_CAT_OUTSRC, BTFC_SET,
5168 SET_DRV_INFO, 0, 0,
5169 len);
5170
5171 ret = rtw89_h2c_tx(rtwdev, skb, false);
5172 if (ret) {
5173 rtw89_err(rtwdev, "failed to send h2c\n");
5174 goto fail;
5175 }
5176
5177 return 0;
5178 fail:
5179 dev_kfree_skb_any(skb);
5180
5181 return ret;
5182 }
5183
5184 #define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR)
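/* BTC driver control flags: manual mode, "ignore BT" and always-freerun.
 * The trace_step field only exists in the fcxctrl == 0 layout, hence the
 * version check before filling it.
 */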
5185 int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev, u8 type)
5186 {
5187 struct rtw89_btc *btc = &rtwdev->btc;
5188 const struct rtw89_btc_ver *ver = btc->ver;
5189 struct rtw89_btc_ctrl *ctrl = &btc->ctrl.ctrl;
5190 struct sk_buff *skb;
5191 u8 *cmd;
5192 int ret;
5193
5194 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_CTRL);
5195 if (!skb) {
5196 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n");
5197 return -ENOMEM;
5198 }
5199 skb_put(skb, H2C_LEN_CXDRVINFO_CTRL);
5200 cmd = skb->data;
5201
5202 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
5203 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_CTRL - H2C_LEN_CXDRVHDR);
5204
5205 RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual);
5206 RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, ctrl->igno_bt);
5207 RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, ctrl->always_freerun);
5208 if (ver->fcxctrl == 0)
5209 RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step);
5210
5211 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5212 H2C_CAT_OUTSRC, BTFC_SET,
5213 SET_DRV_INFO, 0, 0,
5214 H2C_LEN_CXDRVINFO_CTRL);
5215
5216 ret = rtw89_h2c_tx(rtwdev, skb, false);
5217 if (ret) {
5218 rtw89_err(rtwdev, "failed to send h2c\n");
5219 goto fail;
5220 }
5221
5222 return 0;
5223 fail:
5224 dev_kfree_skb_any(skb);
5225
5226 return ret;
5227 }
5228
5229 int rtw89_fw_h2c_cxdrv_ctrl_v7(struct rtw89_dev *rtwdev, u8 type)
5230 {
5231 struct rtw89_btc *btc = &rtwdev->btc;
5232 struct rtw89_btc_ctrl_v7 *ctrl = &btc->ctrl.ctrl_v7;
5233 struct rtw89_h2c_cxctrl_v7 *h2c;
5234 u32 len = sizeof(*h2c);
5235 struct sk_buff *skb;
5236 int ret;
5237
5238 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
5239 if (!skb) {
5240 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl_v7\n");
5241 return -ENOMEM;
5242 }
5243 skb_put(skb, len);
5244 h2c = (struct rtw89_h2c_cxctrl_v7 *)skb->data;
5245
5246 h2c->hdr.type = type;
5247 h2c->hdr.ver = btc->ver->fcxctrl;
5248 h2c->hdr.len = sizeof(*h2c) - H2C_LEN_CXDRVHDR_V7;
5249 h2c->ctrl = *ctrl;
5250
5251 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5252 H2C_CAT_OUTSRC, BTFC_SET,
5253 SET_DRV_INFO, 0, 0, len);
5254
5255 ret = rtw89_h2c_tx(rtwdev, skb, false);
5256 if (ret) {
5257 rtw89_err(rtwdev, "failed to send h2c\n");
5258 goto fail;
5259 }
5260
5261 return 0;
5262 fail:
5263 dev_kfree_skb_any(skb);
5264
5265 return ret;
5266 }
5267
5268 #define H2C_LEN_CXDRVINFO_TRX (28 + H2C_LEN_CXDRVHDR)
5269 int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev, u8 type)
5270 {
5271 struct rtw89_btc *btc = &rtwdev->btc;
5272 struct rtw89_btc_trx_info *trx = &btc->dm.trx_info;
5273 struct sk_buff *skb;
5274 u8 *cmd;
5275 int ret;
5276
5277 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_TRX);
5278 if (!skb) {
5279 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_trx\n");
5280 return -ENOMEM;
5281 }
5282 skb_put(skb, H2C_LEN_CXDRVINFO_TRX);
5283 cmd = skb->data;
5284
5285 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
5286 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_TRX - H2C_LEN_CXDRVHDR);
5287
5288 RTW89_SET_FWCMD_CXTRX_TXLV(cmd, trx->tx_lvl);
5289 RTW89_SET_FWCMD_CXTRX_RXLV(cmd, trx->rx_lvl);
5290 RTW89_SET_FWCMD_CXTRX_WLRSSI(cmd, trx->wl_rssi);
5291 RTW89_SET_FWCMD_CXTRX_BTRSSI(cmd, trx->bt_rssi);
5292 RTW89_SET_FWCMD_CXTRX_TXPWR(cmd, trx->tx_power);
5293 RTW89_SET_FWCMD_CXTRX_RXGAIN(cmd, trx->rx_gain);
5294 RTW89_SET_FWCMD_CXTRX_BTTXPWR(cmd, trx->bt_tx_power);
5295 RTW89_SET_FWCMD_CXTRX_BTRXGAIN(cmd, trx->bt_rx_gain);
5296 RTW89_SET_FWCMD_CXTRX_CN(cmd, trx->cn);
5297 RTW89_SET_FWCMD_CXTRX_NHM(cmd, trx->nhm);
5298 RTW89_SET_FWCMD_CXTRX_BTPROFILE(cmd, trx->bt_profile);
5299 RTW89_SET_FWCMD_CXTRX_RSVD2(cmd, trx->rsvd2);
5300 RTW89_SET_FWCMD_CXTRX_TXRATE(cmd, trx->tx_rate);
5301 RTW89_SET_FWCMD_CXTRX_RXRATE(cmd, trx->rx_rate);
5302 RTW89_SET_FWCMD_CXTRX_TXTP(cmd, trx->tx_tp);
5303 RTW89_SET_FWCMD_CXTRX_RXTP(cmd, trx->rx_tp);
5304 RTW89_SET_FWCMD_CXTRX_RXERRRA(cmd, trx->rx_err_ratio);
5305
5306 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5307 H2C_CAT_OUTSRC, BTFC_SET,
5308 SET_DRV_INFO, 0, 0,
5309 H2C_LEN_CXDRVINFO_TRX);
5310
5311 ret = rtw89_h2c_tx(rtwdev, skb, false);
5312 if (ret) {
5313 rtw89_err(rtwdev, "failed to send h2c\n");
5314 goto fail;
5315 }
5316
5317 return 0;
5318 fail:
5319 dev_kfree_skb_any(skb);
5320
5321 return ret;
5322 }
5323
5324 #define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR)
5325 int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev, u8 type)
5326 {
5327 struct rtw89_btc *btc = &rtwdev->btc;
5328 struct rtw89_btc_wl_info *wl = &btc->cx.wl;
5329 struct rtw89_btc_wl_rfk_info *rfk_info = &wl->rfk_info;
5330 struct sk_buff *skb;
5331 u8 *cmd;
5332 int ret;
5333
5334 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_RFK);
5335 if (!skb) {
5336 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_rfk\n");
5337 return -ENOMEM;
5338 }
5339 skb_put(skb, H2C_LEN_CXDRVINFO_RFK);
5340 cmd = skb->data;
5341
5342 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
5343 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_RFK - H2C_LEN_CXDRVHDR);
5344
5345 RTW89_SET_FWCMD_CXRFK_STATE(cmd, rfk_info->state);
5346 RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, rfk_info->path_map);
5347 RTW89_SET_FWCMD_CXRFK_PHY_MAP(cmd, rfk_info->phy_map);
5348 RTW89_SET_FWCMD_CXRFK_BAND(cmd, rfk_info->band);
5349 RTW89_SET_FWCMD_CXRFK_TYPE(cmd, rfk_info->type);
5350
5351 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5352 H2C_CAT_OUTSRC, BTFC_SET,
5353 SET_DRV_INFO, 0, 0,
5354 H2C_LEN_CXDRVINFO_RFK);
5355
5356 ret = rtw89_h2c_tx(rtwdev, skb, false);
5357 if (ret) {
5358 rtw89_err(rtwdev, "failed to send h2c\n");
5359 goto fail;
5360 }
5361
5362 return 0;
5363 fail:
5364 dev_kfree_skb_any(skb);
5365
5366 return ret;
5367 }
5368
5369 #define H2C_LEN_PKT_OFLD 4
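/* Packet offload: template packets are handed to firmware and later
 * referenced by the id allocated from the pkt_offload bitmap. Both the ADD
 * and DEL operations are acknowledged via fw_ofld_wait, so the helpers
 * below block until the matching C2H arrives.
 *
 * Typical flow (sketch):
 *	u8 id;
 *	ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &id, skb_tmpl);
 *	...refer to "id" in later scan/WoWLAN commands...
 *	rtw89_fw_h2c_del_pkt_offload(rtwdev, id);
 */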
5370 int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id)
5371 {
5372 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
5373 struct sk_buff *skb;
5374 unsigned int cond;
5375 u8 *cmd;
5376 int ret;
5377
5378 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD);
5379 if (!skb) {
5380 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n");
5381 return -ENOMEM;
5382 }
5383 skb_put(skb, H2C_LEN_PKT_OFLD);
5384 cmd = skb->data;
5385
5386 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, id);
5387 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_DEL);
5388
5389 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5390 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
5391 H2C_FUNC_PACKET_OFLD, 1, 1,
5392 H2C_LEN_PKT_OFLD);
5393
5394 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(id, RTW89_PKT_OFLD_OP_DEL);
5395
5396 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
5397 if (ret < 0) {
5398 rtw89_debug(rtwdev, RTW89_DBG_FW,
5399 "failed to del pkt ofld: id %d, ret %d\n",
5400 id, ret);
5401 return ret;
5402 }
5403
5404 rtw89_core_release_bit_map(rtwdev->pkt_offload, id);
5405 return 0;
5406 }
5407
5408 int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
5409 struct sk_buff *skb_ofld)
5410 {
5411 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
5412 struct sk_buff *skb;
5413 unsigned int cond;
5414 u8 *cmd;
5415 u8 alloc_id;
5416 int ret;
5417
5418 alloc_id = rtw89_core_acquire_bit_map(rtwdev->pkt_offload,
5419 RTW89_MAX_PKT_OFLD_NUM);
5420 if (alloc_id == RTW89_MAX_PKT_OFLD_NUM)
5421 return -ENOSPC;
5422
5423 *id = alloc_id;
5424
5425 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD + skb_ofld->len);
5426 if (!skb) {
5427 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n");
5428 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id);
5429 return -ENOMEM;
5430 }
5431 skb_put(skb, H2C_LEN_PKT_OFLD);
5432 cmd = skb->data;
5433
5434 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, alloc_id);
5435 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_ADD);
5436 RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(cmd, skb_ofld->len);
5437 skb_put_data(skb, skb_ofld->data, skb_ofld->len);
5438
5439 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5440 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
5441 H2C_FUNC_PACKET_OFLD, 1, 1,
5442 H2C_LEN_PKT_OFLD + skb_ofld->len);
5443
5444 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(alloc_id, RTW89_PKT_OFLD_OP_ADD);
5445
5446 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
5447 if (ret < 0) {
5448 rtw89_debug(rtwdev, RTW89_DBG_FW,
5449 "failed to add pkt ofld: id %d, ret %d\n",
5450 alloc_id, ret);
5451 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id);
5452 return ret;
5453 }
5454
5455 return 0;
5456 }
5457
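/* Download the scan channel list for AX-generation chips. Each
 * rtw89_mac_chinfo_ax entry is packed into a fixed-size chinfo element
 * appended after the header; completion is signalled through
 * RTW89_SCANOFLD_WAIT_COND_ADD_CH.
 */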
5458 static
5459 int rtw89_fw_h2c_scan_list_offload_ax(struct rtw89_dev *rtwdev, int ch_num,
5460 struct list_head *chan_list)
5461 {
5462 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
5463 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
5464 struct rtw89_h2c_chinfo_elem *elem;
5465 struct rtw89_mac_chinfo_ax *ch_info;
5466 struct rtw89_h2c_chinfo *h2c;
5467 struct sk_buff *skb;
5468 unsigned int cond;
5469 int skb_len;
5470 int ret;
5471
5472 static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE);
5473
5474 skb_len = struct_size(h2c, elem, ch_num);
5475 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len);
5476 if (!skb) {
5477 rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n");
5478 return -ENOMEM;
5479 }
5480 skb_put(skb, sizeof(*h2c));
5481 h2c = (struct rtw89_h2c_chinfo *)skb->data;
5482
5483 h2c->ch_num = ch_num;
5484 h2c->elem_size = sizeof(*elem) / 4; /* in unit of 4 bytes */
5485
5486 list_for_each_entry(ch_info, chan_list, list) {
5487 elem = (struct rtw89_h2c_chinfo_elem *)skb_put(skb, sizeof(*elem));
5488
5489 elem->w0 = le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_W0_PERIOD) |
5490 le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_W0_DWELL) |
5491 le32_encode_bits(ch_info->central_ch, RTW89_H2C_CHINFO_W0_CENTER_CH) |
5492 le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_W0_PRI_CH);
5493
5494 elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_W1_BW) |
5495 le32_encode_bits(ch_info->notify_action, RTW89_H2C_CHINFO_W1_ACTION) |
5496 le32_encode_bits(ch_info->num_pkt, RTW89_H2C_CHINFO_W1_NUM_PKT) |
5497 le32_encode_bits(ch_info->tx_pkt, RTW89_H2C_CHINFO_W1_TX) |
5498 le32_encode_bits(ch_info->pause_data, RTW89_H2C_CHINFO_W1_PAUSE_DATA) |
5499 le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_W1_BAND) |
5500 le32_encode_bits(ch_info->probe_id, RTW89_H2C_CHINFO_W1_PKT_ID) |
5501 le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_W1_DFS) |
5502 le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_W1_TX_NULL) |
5503 le32_encode_bits(ch_info->rand_seq_num, RTW89_H2C_CHINFO_W1_RANDOM);
5504
5505 if (scan_info->extra_op.set)
5506 elem->w1 |= le32_encode_bits(ch_info->macid_tx,
5507 RTW89_H2C_CHINFO_W1_MACID_TX);
5508
5509 elem->w2 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_W2_PKT0) |
5510 le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_W2_PKT1) |
5511 le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_W2_PKT2) |
5512 le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_W2_PKT3);
5513
5514 elem->w3 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_W3_PKT4) |
5515 le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_W3_PKT5) |
5516 le32_encode_bits(ch_info->pkt_id[6], RTW89_H2C_CHINFO_W3_PKT6) |
5517 le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_W3_PKT7);
5518 }
5519
5520 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5521 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
5522 H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len);
5523
5524 cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH;
5525
5526 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
5527 if (ret) {
5528 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n");
5529 return ret;
5530 }
5531
5532 return 0;
5533 }
5534
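/* BE-generation variant of the scan channel list download. The element
 * layout differs (probe/early-leave/checkpoint fields), and the channel
 * period lives in w0 for CH_INFO_BE_V0 firmware but in w7 otherwise.
 */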
5535 static
5536 int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num,
5537 struct list_head *chan_list,
5538 struct rtw89_vif_link *rtwvif_link)
5539 {
5540 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
5541 struct rtw89_h2c_chinfo_elem_be *elem;
5542 struct rtw89_mac_chinfo_be *ch_info;
5543 struct rtw89_h2c_chinfo_be *h2c;
5544 struct sk_buff *skb;
5545 unsigned int cond;
5546 u8 ver = U8_MAX;
5547 int skb_len;
5548 int ret;
5549
5550 static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE_BE);
5551
5552 skb_len = struct_size(h2c, elem, ch_num);
5553 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len);
5554 if (!skb) {
5555 rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n");
5556 return -ENOMEM;
5557 }
5558
5559 if (RTW89_CHK_FW_FEATURE(CH_INFO_BE_V0, &rtwdev->fw))
5560 ver = 0;
5561
5562 skb_put(skb, sizeof(*h2c));
5563 h2c = (struct rtw89_h2c_chinfo_be *)skb->data;
5564
5565 h2c->ch_num = ch_num;
5566 h2c->elem_size = sizeof(*elem) / 4; /* in unit of 4 bytes */
5567 h2c->arg = u8_encode_bits(rtwvif_link->mac_idx,
5568 RTW89_H2C_CHINFO_ARG_MAC_IDX_MASK);
5569
5570 list_for_each_entry(ch_info, chan_list, list) {
5571 elem = (struct rtw89_h2c_chinfo_elem_be *)skb_put(skb, sizeof(*elem));
5572
5573 elem->w0 = le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_BE_W0_DWELL) |
5574 le32_encode_bits(ch_info->central_ch,
5575 RTW89_H2C_CHINFO_BE_W0_CENTER_CH) |
5576 le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_BE_W0_PRI_CH);
5577
5578 elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_BE_W1_BW) |
5579 le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_BE_W1_CH_BAND) |
5580 le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_BE_W1_DFS) |
5581 le32_encode_bits(ch_info->pause_data,
5582 RTW89_H2C_CHINFO_BE_W1_PAUSE_DATA) |
5583 le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_BE_W1_TX_NULL) |
5584 le32_encode_bits(ch_info->rand_seq_num,
5585 RTW89_H2C_CHINFO_BE_W1_RANDOM) |
5586 le32_encode_bits(ch_info->notify_action,
5587 RTW89_H2C_CHINFO_BE_W1_NOTIFY) |
5588 le32_encode_bits(ch_info->probe_id != 0xff ? 1 : 0,
5589 RTW89_H2C_CHINFO_BE_W1_PROBE) |
5590 le32_encode_bits(ch_info->leave_crit,
5591 RTW89_H2C_CHINFO_BE_W1_EARLY_LEAVE_CRIT) |
5592 le32_encode_bits(ch_info->chkpt_timer,
5593 RTW89_H2C_CHINFO_BE_W1_CHKPT_TIMER);
5594
5595 elem->w2 = le32_encode_bits(ch_info->leave_time,
5596 RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TIME) |
5597 le32_encode_bits(ch_info->leave_th,
5598 RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TH) |
5599 le32_encode_bits(ch_info->tx_pkt_ctrl,
5600 RTW89_H2C_CHINFO_BE_W2_TX_PKT_CTRL);
5601
5602 elem->w3 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_BE_W3_PKT0) |
5603 le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_BE_W3_PKT1) |
5604 le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_BE_W3_PKT2) |
5605 le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_BE_W3_PKT3);
5606
5607 elem->w4 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_BE_W4_PKT4) |
5608 le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_BE_W4_PKT5) |
5609 le32_encode_bits(ch_info->pkt_id[6], RTW89_H2C_CHINFO_BE_W4_PKT6) |
5610 le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_BE_W4_PKT7);
5611
5612 elem->w5 = le32_encode_bits(ch_info->sw_def, RTW89_H2C_CHINFO_BE_W5_SW_DEF) |
5613 le32_encode_bits(ch_info->fw_probe0_ssids,
5614 RTW89_H2C_CHINFO_BE_W5_FW_PROBE0_SSIDS);
5615
5616 elem->w6 = le32_encode_bits(ch_info->fw_probe0_shortssids,
5617 RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_SHORTSSIDS) |
5618 le32_encode_bits(ch_info->fw_probe0_bssids,
5619 RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_BSSIDS);
5620 if (ver == 0)
5621 elem->w0 |=
5622 le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_BE_W0_PERIOD);
5623 else
5624 elem->w7 = le32_encode_bits(ch_info->period,
5625 RTW89_H2C_CHINFO_BE_W7_PERIOD_V1);
5626 }
5627
5628 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5629 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
5630 H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len);
5631
5632 cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH;
5633
5634 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
5635 if (ret) {
5636 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n");
5637 return ret;
5638 }
5639
5640 return 0;
5641 }
5642
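/* Start or stop hardware scan on AX-generation chips. When a delayed start
 * is requested, the port TSF is read and firmware is told to begin at a
 * future TSF; otherwise the scan starts immediately. The call waits for the
 * matching start/stop acknowledgement before returning.
 */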
5643 int rtw89_fw_h2c_scan_offload_ax(struct rtw89_dev *rtwdev,
5644 struct rtw89_scan_option *option,
5645 struct rtw89_vif_link *rtwvif_link,
5646 bool wowlan)
5647 {
5648 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
5649 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
5650 struct rtw89_chan *op = &rtwdev->scan_info.op_chan;
5651 enum rtw89_scan_mode scan_mode = RTW89_SCAN_IMMEDIATE;
5652 struct rtw89_h2c_scanofld *h2c;
5653 u32 len = sizeof(*h2c);
5654 struct sk_buff *skb;
5655 unsigned int cond;
5656 u64 tsf = 0;
5657 int ret;
5658
5659 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
5660 if (!skb) {
5661 rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n");
5662 return -ENOMEM;
5663 }
5664 skb_put(skb, len);
5665 h2c = (struct rtw89_h2c_scanofld *)skb->data;
5666
5667 if (option->delay) {
5668 ret = rtw89_mac_port_get_tsf(rtwdev, rtwvif_link, &tsf);
5669 if (ret) {
5670 rtw89_warn(rtwdev, "NLO failed to get port tsf: %d\n", ret);
5671 scan_mode = RTW89_SCAN_IMMEDIATE;
5672 } else {
5673 scan_mode = RTW89_SCAN_DELAY;
5674 tsf += (u64)option->delay * 1000;
5675 }
5676 }
5677
5678 h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_SCANOFLD_W0_MACID) |
5679 le32_encode_bits(rtwvif_link->port, RTW89_H2C_SCANOFLD_W0_PORT_ID) |
5680 le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_SCANOFLD_W0_BAND) |
5681 le32_encode_bits(option->enable, RTW89_H2C_SCANOFLD_W0_OPERATION);
5682
5683 h2c->w1 = le32_encode_bits(true, RTW89_H2C_SCANOFLD_W1_NOTIFY_END) |
5684 le32_encode_bits(option->target_ch_mode,
5685 RTW89_H2C_SCANOFLD_W1_TARGET_CH_MODE) |
5686 le32_encode_bits(scan_mode, RTW89_H2C_SCANOFLD_W1_START_MODE) |
5687 le32_encode_bits(option->repeat, RTW89_H2C_SCANOFLD_W1_SCAN_TYPE);
5688
5689 h2c->w2 = le32_encode_bits(option->norm_pd, RTW89_H2C_SCANOFLD_W2_NORM_PD) |
5690 le32_encode_bits(option->slow_pd, RTW89_H2C_SCANOFLD_W2_SLOW_PD);
5691
5692 if (option->target_ch_mode) {
5693 h2c->w1 |= le32_encode_bits(op->band_width,
5694 RTW89_H2C_SCANOFLD_W1_TARGET_CH_BW) |
5695 le32_encode_bits(op->primary_channel,
5696 RTW89_H2C_SCANOFLD_W1_TARGET_PRI_CH) |
5697 le32_encode_bits(op->channel,
5698 RTW89_H2C_SCANOFLD_W1_TARGET_CENTRAL_CH);
5699 h2c->w0 |= le32_encode_bits(op->band_type,
5700 RTW89_H2C_SCANOFLD_W0_TARGET_CH_BAND);
5701 }
5702
5703 h2c->tsf_high = le32_encode_bits(upper_32_bits(tsf),
5704 RTW89_H2C_SCANOFLD_W3_TSF_HIGH);
5705 h2c->tsf_low = le32_encode_bits(lower_32_bits(tsf),
5706 RTW89_H2C_SCANOFLD_W4_TSF_LOW);
5707
5708 if (scan_info->extra_op.set)
5709 h2c->w6 = le32_encode_bits(scan_info->extra_op.macid,
5710 RTW89_H2C_SCANOFLD_W6_SECOND_MACID);
5711
5712 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5713 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
5714 H2C_FUNC_SCANOFLD, 1, 1,
5715 len);
5716
5717 if (option->enable)
5718 cond = RTW89_SCANOFLD_WAIT_COND_START;
5719 else
5720 cond = RTW89_SCANOFLD_WAIT_COND_STOP;
5721
5722 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
5723 if (ret) {
5724 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan ofld\n");
5725 return ret;
5726 }
5727
5728 return 0;
5729 }
5730
5731 static void rtw89_scan_get_6g_disabled_chan(struct rtw89_dev *rtwdev,
5732 struct rtw89_scan_option *option)
5733 {
5734 struct ieee80211_supported_band *sband;
5735 struct ieee80211_channel *chan;
5736 u8 i, idx;
5737
5738 sband = rtwdev->hw->wiphy->bands[NL80211_BAND_6GHZ];
5739 if (!sband) {
5740 option->prohib_chan = U64_MAX;
5741 return;
5742 }
5743
5744 for (i = 0; i < sband->n_channels; i++) {
5745 chan = &sband->channels[i];
5746 if (chan->flags & IEEE80211_CHAN_DISABLED) {
5747 idx = (chan->hw_value - 1) / 4;
5748 option->prohib_chan |= BIT(idx);
5749 }
5750 }
5751 }
5752
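/* BE-generation scan offload. The command consists of a fixed config header
 * followed by flexible arrays of MAC-role and operating-channel entries;
 * the per-block sizes are announced in w9 unless the firmware only supports
 * the shorter SCAN_OFFLOAD_BE_V0 header. Operating channels that belong to
 * an AP link use the TBTT policy with beacon TX enabled, while the others
 * use the interval policy derived from RTW89_OFF_CHAN_TIME.
 */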
5753 int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
5754 struct rtw89_scan_option *option,
5755 struct rtw89_vif_link *rtwvif_link,
5756 bool wowlan)
5757 {
5758 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
5759 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
5760 const struct rtw89_hw_scan_extra_op *ext = &scan_info->extra_op;
5761 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
5762 struct cfg80211_scan_request *req = rtwvif->scan_req;
5763 struct rtw89_h2c_scanofld_be_macc_role *macc_role;
5764 struct rtw89_hw_scan_extra_op scan_op[2] = {};
5765 struct rtw89_chan *op = &scan_info->op_chan;
5766 struct rtw89_h2c_scanofld_be_opch *opch;
5767 struct rtw89_pktofld_info *pkt_info;
5768 struct rtw89_h2c_scanofld_be *h2c;
5769 struct ieee80211_vif *vif;
5770 struct sk_buff *skb;
5771 u8 macc_role_size = sizeof(*macc_role) * option->num_macc_role;
5772 u8 opch_size = sizeof(*opch) * option->num_opch;
5773 enum rtw89_scan_be_opmode opmode;
5774 u8 probe_id[NUM_NL80211_BANDS];
5775 u8 scan_offload_ver = U8_MAX;
5776 u8 cfg_len = sizeof(*h2c);
5777 unsigned int cond;
5778 u8 ap_idx = U8_MAX;
5779 u8 ver = U8_MAX;
5780 u8 policy_val;
5781 void *ptr;
5782 u8 txbcn;
5783 int ret;
5784 u32 len;
5785 u8 i;
5786
5787 scan_op[0].macid = rtwvif_link->mac_id;
5788 scan_op[0].port = rtwvif_link->port;
5789 scan_op[0].chan = *op;
5790 vif = rtwvif_to_vif(rtwvif_link->rtwvif);
5791 if (vif->type == NL80211_IFTYPE_AP)
5792 ap_idx = 0;
5793
5794 if (ext->set) {
5795 scan_op[1] = *ext;
5796 vif = rtwvif_to_vif(ext->rtwvif_link->rtwvif);
5797 if (vif->type == NL80211_IFTYPE_AP)
5798 ap_idx = 1;
5799 }
5800
5801 rtw89_scan_get_6g_disabled_chan(rtwdev, option);
5802
5803 if (RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD_BE_V0, &rtwdev->fw)) {
5804 cfg_len = offsetofend(typeof(*h2c), w8);
5805 scan_offload_ver = 0;
5806 }
5807
5808 len = cfg_len + macc_role_size + opch_size;
5809 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
5810 if (!skb) {
5811 rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n");
5812 return -ENOMEM;
5813 }
5814
5815 skb_put(skb, len);
5816 h2c = (struct rtw89_h2c_scanofld_be *)skb->data;
5817 ptr = skb->data;
5818
5819 memset(probe_id, RTW89_SCANOFLD_PKT_NONE, sizeof(probe_id));
5820
5821 if (RTW89_CHK_FW_FEATURE(CH_INFO_BE_V0, &rtwdev->fw))
5822 ver = 0;
5823
5824 if (!wowlan) {
5825 list_for_each_entry(pkt_info, &scan_info->pkt_list[NL80211_BAND_6GHZ], list) {
5826 if (pkt_info->wildcard_6ghz) {
5827 /* Provide wildcard as template */
5828 probe_id[NL80211_BAND_6GHZ] = pkt_info->id;
5829 break;
5830 }
5831 }
5832 }
5833
5834 h2c->w0 = le32_encode_bits(option->operation, RTW89_H2C_SCANOFLD_BE_W0_OP) |
5835 le32_encode_bits(option->scan_mode,
5836 RTW89_H2C_SCANOFLD_BE_W0_SCAN_MODE) |
5837 le32_encode_bits(option->repeat, RTW89_H2C_SCANOFLD_BE_W0_REPEAT) |
5838 le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_NOTIFY_END) |
5839 le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_LEARN_CH) |
5840 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_SCANOFLD_BE_W0_MACID) |
5841 le32_encode_bits(rtwvif_link->port, RTW89_H2C_SCANOFLD_BE_W0_PORT) |
5842 le32_encode_bits(option->band, RTW89_H2C_SCANOFLD_BE_W0_BAND);
5843
5844 h2c->w1 = le32_encode_bits(option->num_macc_role, RTW89_H2C_SCANOFLD_BE_W1_NUM_MACC_ROLE) |
5845 le32_encode_bits(option->num_opch, RTW89_H2C_SCANOFLD_BE_W1_NUM_OP) |
5846 le32_encode_bits(option->norm_pd, RTW89_H2C_SCANOFLD_BE_W1_NORM_PD);
5847
5848 h2c->w2 = le32_encode_bits(option->slow_pd, RTW89_H2C_SCANOFLD_BE_W2_SLOW_PD) |
5849 le32_encode_bits(option->norm_cy, RTW89_H2C_SCANOFLD_BE_W2_NORM_CY) |
5850 le32_encode_bits(option->opch_end, RTW89_H2C_SCANOFLD_BE_W2_OPCH_END);
5851
5852 h2c->w3 = le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_SSID) |
5853 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_SHORT_SSID) |
5854 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_BSSID) |
5855 le32_encode_bits(probe_id[NL80211_BAND_2GHZ], RTW89_H2C_SCANOFLD_BE_W3_PROBEID);
5856
5857 h2c->w4 = le32_encode_bits(probe_id[NL80211_BAND_5GHZ],
5858 RTW89_H2C_SCANOFLD_BE_W4_PROBE_5G) |
5859 le32_encode_bits(probe_id[NL80211_BAND_6GHZ],
5860 RTW89_H2C_SCANOFLD_BE_W4_PROBE_6G) |
5861 le32_encode_bits(option->delay / 1000, RTW89_H2C_SCANOFLD_BE_W4_DELAY_START);
5862
5863 h2c->w5 = le32_encode_bits(option->mlo_mode, RTW89_H2C_SCANOFLD_BE_W5_MLO_MODE);
5864
5865 h2c->w6 = le32_encode_bits(option->prohib_chan,
5866 RTW89_H2C_SCANOFLD_BE_W6_CHAN_PROHIB_LOW);
5867 h2c->w7 = le32_encode_bits(option->prohib_chan >> 32,
5868 RTW89_H2C_SCANOFLD_BE_W7_CHAN_PROHIB_HIGH);
5869 if (!wowlan && req->no_cck) {
5870 h2c->w0 |= le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_PROBE_WITH_RATE);
5871 h2c->w8 = le32_encode_bits(RTW89_HW_RATE_OFDM6,
5872 RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_2GHZ) |
5873 le32_encode_bits(RTW89_HW_RATE_OFDM6,
5874 RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_5GHZ) |
5875 le32_encode_bits(RTW89_HW_RATE_OFDM6,
5876 RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_6GHZ);
5877 }
5878
5879 if (scan_offload_ver == 0)
5880 goto flex_member;
5881
5882 h2c->w9 = le32_encode_bits(sizeof(*h2c) / sizeof(h2c->w0),
5883 RTW89_H2C_SCANOFLD_BE_W9_SIZE_CFG) |
5884 le32_encode_bits(sizeof(*macc_role) / sizeof(macc_role->w0),
5885 RTW89_H2C_SCANOFLD_BE_W9_SIZE_MACC) |
5886 le32_encode_bits(sizeof(*opch) / sizeof(opch->w0),
5887 RTW89_H2C_SCANOFLD_BE_W9_SIZE_OP);
5888
5889 flex_member:
5890 ptr += cfg_len;
5891
5892 for (i = 0; i < option->num_macc_role; i++) {
5893 macc_role = ptr;
5894 macc_role->w0 =
5895 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_BAND) |
5896 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_PORT) |
5897 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_MACID) |
5898 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_OPCH_END);
5899 ptr += sizeof(*macc_role);
5900 }
5901
5902 for (i = 0; i < option->num_opch; i++) {
5903 bool is_ap_idx = i == ap_idx;
5904
5905 opmode = is_ap_idx ? RTW89_SCAN_OPMODE_TBTT : RTW89_SCAN_OPMODE_INTV;
5906 policy_val = is_ap_idx ? 2 : RTW89_OFF_CHAN_TIME / 10;
5907 txbcn = is_ap_idx ? 1 : 0;
5908
5909 opch = ptr;
5910 opch->w0 = le32_encode_bits(scan_op[i].macid,
5911 RTW89_H2C_SCANOFLD_BE_OPCH_W0_MACID) |
5912 le32_encode_bits(option->band,
5913 RTW89_H2C_SCANOFLD_BE_OPCH_W0_BAND) |
5914 le32_encode_bits(scan_op[i].port,
5915 RTW89_H2C_SCANOFLD_BE_OPCH_W0_PORT) |
5916 le32_encode_bits(opmode,
5917 RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY) |
5918 le32_encode_bits(true,
5919 RTW89_H2C_SCANOFLD_BE_OPCH_W0_TXNULL) |
5920 le32_encode_bits(policy_val,
5921 RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY_VAL);
5922
5923 opch->w1 = le32_encode_bits(scan_op[i].chan.band_type,
5924 RTW89_H2C_SCANOFLD_BE_OPCH_W1_CH_BAND) |
5925 le32_encode_bits(scan_op[i].chan.band_width,
5926 RTW89_H2C_SCANOFLD_BE_OPCH_W1_BW) |
5927 le32_encode_bits(0x3,
5928 RTW89_H2C_SCANOFLD_BE_OPCH_W1_NOTIFY) |
5929 le32_encode_bits(scan_op[i].chan.primary_channel,
5930 RTW89_H2C_SCANOFLD_BE_OPCH_W1_PRI_CH) |
5931 le32_encode_bits(scan_op[i].chan.channel,
5932 RTW89_H2C_SCANOFLD_BE_OPCH_W1_CENTRAL_CH);
5933
5934 opch->w2 = le32_encode_bits(0,
5935 RTW89_H2C_SCANOFLD_BE_OPCH_W2_PKTS_CTRL) |
5936 le32_encode_bits(0,
5937 RTW89_H2C_SCANOFLD_BE_OPCH_W2_SW_DEF) |
5938 le32_encode_bits(rtw89_is_mlo_1_1(rtwdev) ? 1 : 2,
5939 RTW89_H2C_SCANOFLD_BE_OPCH_W2_SS) |
5940 le32_encode_bits(txbcn,
5941 RTW89_H2C_SCANOFLD_BE_OPCH_W2_TXBCN);
5942
5943 opch->w3 = le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
5944 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT0) |
5945 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
5946 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT1) |
5947 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
5948 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT2) |
5949 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
5950 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT3);
5951
5952 if (ver == 0)
5953 opch->w1 |= le32_encode_bits(RTW89_CHANNEL_TIME,
5954 RTW89_H2C_SCANOFLD_BE_OPCH_W1_DURATION);
5955 else
5956 opch->w4 = le32_encode_bits(RTW89_CHANNEL_TIME,
5957 RTW89_H2C_SCANOFLD_BE_OPCH_W4_DURATION_V1);
5958 ptr += sizeof(*opch);
5959 }
5960
5961 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5962 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
5963 H2C_FUNC_SCANOFLD_BE, 1, 1,
5964 len);
5965
5966 if (option->enable)
5967 cond = RTW89_SCANOFLD_BE_WAIT_COND_START;
5968 else
5969 cond = RTW89_SCANOFLD_BE_WAIT_COND_STOP;
5970
5971 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
5972 if (ret) {
5973 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan be ofld\n");
5974 return ret;
5975 }
5976
5977 return 0;
5978 }
5979
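/* Download one page of RF register values collected in
 * info->rtw89_phy_config_rf_h2c[]; the H2C class selects RF path A or B.
 */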
5980 int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
5981 struct rtw89_fw_h2c_rf_reg_info *info,
5982 u16 len, u8 page)
5983 {
5984 struct sk_buff *skb;
5985 u8 class = info->rf_path == RF_PATH_A ?
5986 H2C_CL_OUTSRC_RF_REG_A : H2C_CL_OUTSRC_RF_REG_B;
5987 int ret;
5988
5989 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
5990 if (!skb) {
5991 rtw89_err(rtwdev, "failed to alloc skb for h2c rf reg\n");
5992 return -ENOMEM;
5993 }
5994 skb_put_data(skb, info->rtw89_phy_config_rf_h2c[page], len);
5995
5996 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5997 H2C_CAT_OUTSRC, class, page, 0, 0,
5998 len);
5999
6000 ret = rtw89_h2c_tx(rtwdev, skb, false);
6001 if (ret) {
6002 rtw89_err(rtwdev, "failed to send h2c\n");
6003 goto fail;
6004 }
6005
6006 return 0;
6007 fail:
6008 dev_kfree_skb_any(skb);
6009
6010 return ret;
6011 }
6012
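/* Notify RF firmware about the MCC channel pair. The RFK_NTFY_MCC_V0 layout
 * carries band/channel per slot plus the currently selected one, while the
 * newer layout carries per-table channel entries only.
 */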
6013 int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev)
6014 {
6015 struct rtw89_rfk_mcc_info_data *rfk_mcc = rtwdev->rfk_mcc.data;
6016 struct rtw89_fw_h2c_rf_get_mccch_v0 *mccch_v0;
6017 struct rtw89_fw_h2c_rf_get_mccch *mccch;
6018 u32 len = sizeof(*mccch);
6019 struct sk_buff *skb;
6020 u8 ver = U8_MAX;
6021 int ret;
6022 u8 idx;
6023
6024 if (RTW89_CHK_FW_FEATURE(RFK_NTFY_MCC_V0, &rtwdev->fw)) {
6025 len = sizeof(*mccch_v0);
6026 ver = 0;
6027 }
6028
6029 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
6030 if (!skb) {
6031 rtw89_err(rtwdev, "failed to alloc skb for h2c rf_ntfy_mcc\n");
6032 return -ENOMEM;
6033 }
6034 skb_put(skb, len);
6035
6036 idx = rfk_mcc->table_idx;
6037 if (ver == 0) {
6038 mccch_v0 = (struct rtw89_fw_h2c_rf_get_mccch_v0 *)skb->data;
6039 mccch_v0->ch_0 = cpu_to_le32(rfk_mcc->ch[0]);
6040 mccch_v0->ch_1 = cpu_to_le32(rfk_mcc->ch[1]);
6041 mccch_v0->band_0 = cpu_to_le32(rfk_mcc->band[0]);
6042 mccch_v0->band_1 = cpu_to_le32(rfk_mcc->band[1]);
6043 mccch_v0->current_band_type = cpu_to_le32(rfk_mcc->band[idx]);
6044 mccch_v0->current_channel = cpu_to_le32(rfk_mcc->ch[idx]);
6045 } else {
6046 mccch = (struct rtw89_fw_h2c_rf_get_mccch *)skb->data;
6047 mccch->ch_0_0 = cpu_to_le32(rfk_mcc->ch[0]);
6048 mccch->ch_0_1 = cpu_to_le32(rfk_mcc->ch[0]);
6049 mccch->ch_1_0 = cpu_to_le32(rfk_mcc->ch[1]);
6050 mccch->ch_1_1 = cpu_to_le32(rfk_mcc->ch[1]);
6051 mccch->current_channel = cpu_to_le32(rfk_mcc->ch[idx]);
6052 }
6053
6054 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
6055 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY,
6056 H2C_FUNC_OUTSRC_RF_GET_MCCCH, 0, 0,
6057 len);
6058
6059 ret = rtw89_h2c_tx(rtwdev, skb, false);
6060 if (ret) {
6061 rtw89_err(rtwdev, "failed to send h2c\n");
6062 goto fail;
6063 }
6064
6065 return 0;
6066 fail:
6067 dev_kfree_skb_any(skb);
6068
6069 return ret;
6070 }
6071 EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc);
6072
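/* Program the DIG packet-detection lower bound for one MCC role; the PD
 * register address and bitmask are split into LSB/MSB fields of the
 * command.
 */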
6073 int rtw89_fw_h2c_mcc_dig(struct rtw89_dev *rtwdev,
6074 enum rtw89_chanctx_idx chanctx_idx,
6075 u8 mcc_role_idx, u8 pd_val, bool en)
6076 {
6077 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
6078 const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
6079 struct rtw89_h2c_mcc_dig *h2c;
6080 u32 len = sizeof(*h2c);
6081 struct sk_buff *skb;
6082 int ret;
6083
6084 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
6085 if (!skb) {
6086 rtw89_err(rtwdev, "failed to alloc skb for h2c mcc_dig\n");
6087 return -ENOMEM;
6088 }
6089 skb_put(skb, len);
6090 h2c = (struct rtw89_h2c_mcc_dig *)skb->data;
6091
6092 h2c->w0 = le32_encode_bits(1, RTW89_H2C_MCC_DIG_W0_REG_CNT) |
6093 le32_encode_bits(en, RTW89_H2C_MCC_DIG_W0_DM_EN) |
6094 le32_encode_bits(mcc_role_idx, RTW89_H2C_MCC_DIG_W0_IDX) |
6095 le32_encode_bits(1, RTW89_H2C_MCC_DIG_W0_SET) |
6096 le32_encode_bits(1, RTW89_H2C_MCC_DIG_W0_PHY0_EN) |
6097 le32_encode_bits(chan->channel, RTW89_H2C_MCC_DIG_W0_CENTER_CH) |
6098 le32_encode_bits(chan->band_type, RTW89_H2C_MCC_DIG_W0_BAND_TYPE);
6099 h2c->w1 = le32_encode_bits(dig_regs->seg0_pd_reg,
6100 RTW89_H2C_MCC_DIG_W1_ADDR_LSB) |
6101 le32_encode_bits(dig_regs->seg0_pd_reg >> 8,
6102 RTW89_H2C_MCC_DIG_W1_ADDR_MSB) |
6103 le32_encode_bits(dig_regs->pd_lower_bound_mask,
6104 RTW89_H2C_MCC_DIG_W1_BMASK_LSB) |
6105 le32_encode_bits(dig_regs->pd_lower_bound_mask >> 8,
6106 RTW89_H2C_MCC_DIG_W1_BMASK_MSB);
6107 h2c->w2 = le32_encode_bits(pd_val, RTW89_H2C_MCC_DIG_W2_VAL_LSB);
6108
6109 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
6110 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM,
6111 H2C_FUNC_FW_MCC_DIG, 0, 0, len);
6112
6113 ret = rtw89_h2c_tx(rtwdev, skb, false);
6114 if (ret) {
6115 rtw89_err(rtwdev, "failed to send h2c\n");
6116 goto fail;
6117 }
6118
6119 return 0;
6120 fail:
6121 dev_kfree_skb_any(skb);
6122
6123 return ret;
6124 }
6125
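/* BE-only: collect the RF18 (channel) value and primary channel of every
 * active link, indexed by the synthesizer path serving that link, and pass
 * them to RF firmware together with the current MLO/DBCC mode.
 */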
6126 int rtw89_fw_h2c_rf_ps_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
6127 {
6128 const struct rtw89_chip_info *chip = rtwdev->chip;
6129 struct rtw89_vif_link *rtwvif_link;
6130 struct rtw89_h2c_rf_ps_info *h2c;
6131 const struct rtw89_chan *chan;
6132 u32 len = sizeof(*h2c);
6133 unsigned int link_id;
6134 struct sk_buff *skb;
6135 int ret;
6136 u8 path;
6137 u32 val;
6138
6139 if (chip->chip_gen != RTW89_CHIP_BE)
6140 return 0;
6141
6142 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
6143 if (!skb) {
6144 rtw89_err(rtwdev, "failed to alloc skb for h2c rf ps info\n");
6145 return -ENOMEM;
6146 }
6147 skb_put(skb, len);
6148 h2c = (struct rtw89_h2c_rf_ps_info *)skb->data;
6149 h2c->mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);
6150
6151 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
6152 chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
6153 path = rtw89_phy_get_syn_sel(rtwdev, rtwvif_link->phy_idx);
6154 val = rtw89_chip_chan_to_rf18_val(rtwdev, chan);
6155
6156 if (path >= chip->rf_path_num || path >= NUM_OF_RTW89_FW_RFK_PATH) {
6157 rtw89_err(rtwdev, "unsupported rf path (%d)\n", path);
6158 ret = -ENOENT;
6159 goto fail;
6160 }
6161
6162 h2c->rf18[path] = cpu_to_le32(val);
6163 h2c->pri_ch[path] = chan->primary_channel;
6164 }
6165
6166 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
6167 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY,
6168 H2C_FUNC_OUTSRC_RF_PS_INFO, 0, 0,
6169 sizeof(*h2c));
6170
6171 ret = rtw89_h2c_tx(rtwdev, skb, false);
6172 if (ret) {
6173 rtw89_err(rtwdev, "failed to send h2c\n");
6174 goto fail;
6175 }
6176
6177 return 0;
6178 fail:
6179 dev_kfree_skb_any(skb);
6180
6181 return ret;
6182 }
6183 EXPORT_SYMBOL(rtw89_fw_h2c_rf_ps_info);
6184
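/* Pre-RFK notification: hand the per-path RFK MCC tables (channel, band
 * and, in the latest layout, bandwidth) to RF firmware before calibration.
 * Three layouts exist, selected by the RFK_PRE_NOTIFY_V0/V1 features.
 */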
6185 int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
6186 enum rtw89_phy_idx phy_idx)
6187 {
6188 struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
6189 struct rtw89_fw_h2c_rfk_pre_info_common *common;
6190 struct rtw89_fw_h2c_rfk_pre_info_v0 *h2c_v0;
6191 struct rtw89_fw_h2c_rfk_pre_info_v1 *h2c_v1;
6192 struct rtw89_fw_h2c_rfk_pre_info *h2c;
6193 u8 tbl_sel[NUM_OF_RTW89_FW_RFK_PATH];
6194 u32 len = sizeof(*h2c);
6195 struct sk_buff *skb;
6196 u8 ver = U8_MAX;
6197 u8 tbl, path;
6198 u32 val32;
6199 int ret;
6200
6201 if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V1, &rtwdev->fw)) {
6202 len = sizeof(*h2c_v1);
6203 ver = 1;
6204 } else if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V0, &rtwdev->fw)) {
6205 len = sizeof(*h2c_v0);
6206 ver = 0;
6207 }
6208
6209 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
6210 if (!skb) {
6211 rtw89_err(rtwdev, "failed to alloc skb for h2c rfk_pre_ntfy\n");
6212 return -ENOMEM;
6213 }
6214 skb_put(skb, len);
6215 h2c = (struct rtw89_fw_h2c_rfk_pre_info *)skb->data;
6216 common = &h2c->base_v1.common;
6217
6218 common->mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);
6219
6220 BUILD_BUG_ON(NUM_OF_RTW89_FW_RFK_TBL > RTW89_RFK_CHS_NR);
6221 BUILD_BUG_ON(ARRAY_SIZE(rfk_mcc->data) < NUM_OF_RTW89_FW_RFK_PATH);
6222
6223 for (tbl = 0; tbl < NUM_OF_RTW89_FW_RFK_TBL; tbl++) {
6224 for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) {
6225 common->dbcc.ch[path][tbl] =
6226 cpu_to_le32(rfk_mcc->data[path].ch[tbl]);
6227 common->dbcc.band[path][tbl] =
6228 cpu_to_le32(rfk_mcc->data[path].band[tbl]);
6229 }
6230 }
6231
6232 for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) {
6233 tbl_sel[path] = rfk_mcc->data[path].table_idx;
6234
6235 common->tbl.cur_ch[path] =
6236 cpu_to_le32(rfk_mcc->data[path].ch[tbl_sel[path]]);
6237 common->tbl.cur_band[path] =
6238 cpu_to_le32(rfk_mcc->data[path].band[tbl_sel[path]]);
6239
6240 if (ver <= 1)
6241 continue;
6242
6243 h2c->cur_bandwidth[path] =
6244 cpu_to_le32(rfk_mcc->data[path].bw[tbl_sel[path]]);
6245 }
6246
6247 common->phy_idx = cpu_to_le32(phy_idx);
6248
6249 if (ver == 0) { /* RFK_PRE_NOTIFY_V0 */
6250 h2c_v0 = (struct rtw89_fw_h2c_rfk_pre_info_v0 *)skb->data;
6251
6252 h2c_v0->cur_band = cpu_to_le32(rfk_mcc->data[0].band[tbl_sel[0]]);
6253 h2c_v0->cur_bw = cpu_to_le32(rfk_mcc->data[0].bw[tbl_sel[0]]);
6254 h2c_v0->cur_center_ch = cpu_to_le32(rfk_mcc->data[0].ch[tbl_sel[0]]);
6255
6256 val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL, B_COEF_SEL_IQC_V1);
6257 h2c_v0->ktbl_sel0 = cpu_to_le32(val32);
6258 val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL_C1, B_COEF_SEL_IQC_V1);
6259 h2c_v0->ktbl_sel1 = cpu_to_le32(val32);
6260 val32 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
6261 h2c_v0->rfmod0 = cpu_to_le32(val32);
6262 val32 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK);
6263 h2c_v0->rfmod1 = cpu_to_le32(val32);
6264
6265 if (rtw89_is_mlo_1_1(rtwdev))
6266 h2c_v0->mlo_1_1 = cpu_to_le32(1);
6267
6268 h2c_v0->rfe_type = cpu_to_le32(rtwdev->efuse.rfe_type);
6269
6270 goto done;
6271 }
6272
6273 if (rtw89_is_mlo_1_1(rtwdev)) {
6274 h2c_v1 = &h2c->base_v1;
6275 h2c_v1->mlo_1_1 = cpu_to_le32(1);
6276 }
6277 done:
6278 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
6279 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
6280 H2C_FUNC_RFK_PRE_NOTIFY, 0, 0,
6281 len);
6282
6283 ret = rtw89_h2c_tx(rtwdev, skb, false);
6284 if (ret) {
6285 rtw89_err(rtwdev, "failed to send h2c\n");
6286 goto fail;
6287 }
6288
6289 return 0;
6290 fail:
6291 dev_kfree_skb_any(skb);
6292
6293 return ret;
6294 }
6295
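/* The RFK offload commands below (TSSI, IQK, DPK, TXGAPK, DACK, RX DCK)
 * follow the same pattern: fill a small per-calibration descriptor with the
 * target channel (and, where needed, the chip CV), then send it on
 * H2C_CL_OUTSRC_RF_FW_RFK.
 */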
6296 int rtw89_fw_h2c_rf_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
6297 const struct rtw89_chan *chan, enum rtw89_tssi_mode tssi_mode)
6298 {
6299 struct rtw89_efuse *efuse = &rtwdev->efuse;
6300 struct rtw89_hal *hal = &rtwdev->hal;
6301 struct rtw89_h2c_rf_tssi *h2c;
6302 u32 len = sizeof(*h2c);
6303 struct sk_buff *skb;
6304 int ret;
6305
6306 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
6307 if (!skb) {
6308 rtw89_err(rtwdev, "failed to alloc skb for h2c RF TSSI\n");
6309 return -ENOMEM;
6310 }
6311 skb_put(skb, len);
6312 h2c = (struct rtw89_h2c_rf_tssi *)skb->data;
6313
6314 h2c->len = cpu_to_le16(len);
6315 h2c->phy = phy_idx;
6316 h2c->ch = chan->channel;
6317 h2c->bw = chan->band_width;
6318 h2c->band = chan->band_type;
6319 h2c->hwtx_en = true;
6320 h2c->cv = hal->cv;
6321 h2c->tssi_mode = tssi_mode;
6322 h2c->rfe_type = efuse->rfe_type;
6323
6324 rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(rtwdev, phy_idx, chan, h2c);
6325 rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(rtwdev, phy_idx, chan, h2c);
6326
6327 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
6328 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
6329 H2C_FUNC_RFK_TSSI_OFFLOAD, 0, 0, len);
6330
6331 ret = rtw89_h2c_tx(rtwdev, skb, false);
6332 if (ret) {
6333 rtw89_err(rtwdev, "failed to send h2c\n");
6334 goto fail;
6335 }
6336
6337 return 0;
6338 fail:
6339 dev_kfree_skb_any(skb);
6340
6341 return ret;
6342 }
6343
6344 int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
6345 const struct rtw89_chan *chan)
6346 {
6347 struct rtw89_hal *hal = &rtwdev->hal;
6348 struct rtw89_h2c_rf_iqk_v0 *h2c_v0;
6349 struct rtw89_h2c_rf_iqk *h2c;
6350 u32 len = sizeof(*h2c);
6351 struct sk_buff *skb;
6352 u8 ver = U8_MAX;
6353 int ret;
6354
6355 if (RTW89_CHK_FW_FEATURE(RFK_IQK_V0, &rtwdev->fw)) {
6356 len = sizeof(*h2c_v0);
6357 ver = 0;
6358 }
6359
6360 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
6361 if (!skb) {
6362 rtw89_err(rtwdev, "failed to alloc skb for h2c RF IQK\n");
6363 return -ENOMEM;
6364 }
6365 skb_put(skb, len);
6366
6367 if (ver == 0) {
6368 h2c_v0 = (struct rtw89_h2c_rf_iqk_v0 *)skb->data;
6369
6370 h2c_v0->phy_idx = cpu_to_le32(phy_idx);
6371 h2c_v0->dbcc = cpu_to_le32(rtwdev->dbcc_en);
6372
6373 goto done;
6374 }
6375
6376 h2c = (struct rtw89_h2c_rf_iqk *)skb->data;
6377
6378 h2c->len = sizeof(*h2c);
6379 h2c->ktype = 0;
6380 h2c->phy = phy_idx;
6381 h2c->kpath = rtw89_phy_get_kpath(rtwdev, phy_idx);
6382 h2c->band = chan->band_type;
6383 h2c->bw = chan->band_width;
6384 h2c->ch = chan->channel;
6385 h2c->cv = hal->cv;
6386
6387 done:
6388 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
6389 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
6390 H2C_FUNC_RFK_IQK_OFFLOAD, 0, 0, len);
6391
6392 ret = rtw89_h2c_tx(rtwdev, skb, false);
6393 if (ret) {
6394 rtw89_err(rtwdev, "failed to send h2c\n");
6395 goto fail;
6396 }
6397
6398 return 0;
6399 fail:
6400 dev_kfree_skb_any(skb);
6401
6402 return ret;
6403 }
6404
6405 int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
6406 const struct rtw89_chan *chan)
6407 {
6408 struct rtw89_h2c_rf_dpk *h2c;
6409 u32 len = sizeof(*h2c);
6410 struct sk_buff *skb;
6411 int ret;
6412
6413 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
6414 if (!skb) {
6415 rtw89_err(rtwdev, "failed to alloc skb for h2c RF DPK\n");
6416 return -ENOMEM;
6417 }
6418 skb_put(skb, len);
6419 h2c = (struct rtw89_h2c_rf_dpk *)skb->data;
6420
6421 h2c->len = len;
6422 h2c->phy = phy_idx;
6423 h2c->dpk_enable = true;
6424 h2c->kpath = RF_AB;
6425 h2c->cur_band = chan->band_type;
6426 h2c->cur_bw = chan->band_width;
6427 h2c->cur_ch = chan->channel;
6428 h2c->dpk_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK);
6429
6430 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
6431 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
6432 H2C_FUNC_RFK_DPK_OFFLOAD, 0, 0, len);
6433
6434 ret = rtw89_h2c_tx(rtwdev, skb, false);
6435 if (ret) {
6436 rtw89_err(rtwdev, "failed to send h2c\n");
6437 goto fail;
6438 }
6439
6440 return 0;
6441 fail:
6442 dev_kfree_skb_any(skb);
6443
6444 return ret;
6445 }
6446
6447 int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
6448 const struct rtw89_chan *chan)
6449 {
6450 struct rtw89_hal *hal = &rtwdev->hal;
6451 struct rtw89_h2c_rf_txgapk *h2c;
6452 u32 len = sizeof(*h2c);
6453 struct sk_buff *skb;
6454 int ret;
6455
6456 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
6457 if (!skb) {
6458 rtw89_err(rtwdev, "failed to alloc skb for h2c RF TXGAPK\n");
6459 return -ENOMEM;
6460 }
6461 skb_put(skb, len);
6462 h2c = (struct rtw89_h2c_rf_txgapk *)skb->data;
6463
6464 h2c->len = len;
6465 h2c->ktype = 2;
6466 h2c->phy = phy_idx;
6467 h2c->kpath = RF_AB;
6468 h2c->band = chan->band_type;
6469 h2c->bw = chan->band_width;
6470 h2c->ch = chan->channel;
6471 h2c->cv = hal->cv;
6472
6473 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
6474 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
6475 H2C_FUNC_RFK_TXGAPK_OFFLOAD, 0, 0, len);
6476
6477 ret = rtw89_h2c_tx(rtwdev, skb, false);
6478 if (ret) {
6479 rtw89_err(rtwdev, "failed to send h2c\n");
6480 goto fail;
6481 }
6482
6483 return 0;
6484 fail:
6485 dev_kfree_skb_any(skb);
6486
6487 return ret;
6488 }
6489
6490 int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
6491 const struct rtw89_chan *chan)
6492 {
6493 struct rtw89_h2c_rf_dack *h2c;
6494 u32 len = sizeof(*h2c);
6495 struct sk_buff *skb;
6496 int ret;
6497
6498 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
6499 if (!skb) {
6500 rtw89_err(rtwdev, "failed to alloc skb for h2c RF DACK\n");
6501 return -ENOMEM;
6502 }
6503 skb_put(skb, len);
6504 h2c = (struct rtw89_h2c_rf_dack *)skb->data;
6505
6506 h2c->len = cpu_to_le32(len);
6507 h2c->phy = cpu_to_le32(phy_idx);
6508 h2c->type = cpu_to_le32(0);
6509
6510 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
6511 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
6512 H2C_FUNC_RFK_DACK_OFFLOAD, 0, 0, len);
6513
6514 ret = rtw89_h2c_tx(rtwdev, skb, false);
6515 if (ret) {
6516 rtw89_err(rtwdev, "failed to send h2c\n");
6517 goto fail;
6518 }
6519
6520 return 0;
6521 fail:
6522 dev_kfree_skb_any(skb);
6523
6524 return ret;
6525 }
6526
6527 int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
6528 const struct rtw89_chan *chan, bool is_chl_k)
6529 {
6530 struct rtw89_h2c_rf_rxdck_v0 *v0;
6531 struct rtw89_h2c_rf_rxdck *h2c;
6532 u32 len = sizeof(*h2c);
6533 struct sk_buff *skb;
6534 int ver = -1;
6535 int ret;
6536
6537 if (RTW89_CHK_FW_FEATURE(RFK_RXDCK_V0, &rtwdev->fw)) {
6538 len = sizeof(*v0);
6539 ver = 0;
6540 }
6541
6542 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
6543 if (!skb) {
6544 rtw89_err(rtwdev, "failed to alloc skb for h2c RF RXDCK\n");
6545 return -ENOMEM;
6546 }
6547 skb_put(skb, len);
6548 v0 = (struct rtw89_h2c_rf_rxdck_v0 *)skb->data;
6549
6550 v0->len = len;
6551 v0->phy = phy_idx;
6552 v0->is_afe = false;
6553 v0->kpath = RF_AB;
6554 v0->cur_band = chan->band_type;
6555 v0->cur_bw = chan->band_width;
6556 v0->cur_ch = chan->channel;
6557 v0->rxdck_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK);
6558
6559 if (ver == 0)
6560 goto hdr;
6561
6562 h2c = (struct rtw89_h2c_rf_rxdck *)skb->data;
6563 h2c->is_chl_k = is_chl_k;
6564
6565 hdr:
6566 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
6567 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
6568 H2C_FUNC_RFK_RXDCK_OFFLOAD, 0, 0, len);
6569
6570 ret = rtw89_h2c_tx(rtwdev, skb, false);
6571 if (ret) {
6572 rtw89_err(rtwdev, "failed to send h2c\n");
6573 goto fail;
6574 }
6575
6576 return 0;
6577 fail:
6578 dev_kfree_skb_any(skb);
6579
6580 return ret;
6581 }
6582
6583 int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
6584 u8 h2c_class, u8 h2c_func, u8 *buf, u16 len,
6585 bool rack, bool dack)
6586 {
6587 struct sk_buff *skb;
6588 int ret;
6589
6590 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
6591 if (!skb) {
6592 rtw89_err(rtwdev, "failed to alloc skb for raw with hdr\n");
6593 return -ENOMEM;
6594 }
6595 skb_put_data(skb, buf, len);
6596
6597 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
6598 H2C_CAT_OUTSRC, h2c_class, h2c_func, rack, dack,
6599 len);
6600
6601 ret = rtw89_h2c_tx(rtwdev, skb, false);
6602 if (ret) {
6603 rtw89_err(rtwdev, "failed to send h2c\n");
6604 goto fail;
6605 }
6606
6607 return 0;
6608 fail:
6609 dev_kfree_skb_any(skb);
6610
6611 return ret;
6612 }
6613
6614 int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len)
6615 {
6616 struct sk_buff *skb;
6617 int ret;
6618
6619 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, len);
6620 if (!skb) {
6621 rtw89_err(rtwdev, "failed to alloc skb for h2c raw\n");
6622 return -ENOMEM;
6623 }
6624 skb_put_data(skb, buf, len);
6625
6626 ret = rtw89_h2c_tx(rtwdev, skb, false);
6627 if (ret) {
6628 rtw89_err(rtwdev, "failed to send h2c\n");
6629 goto fail;
6630 }
6631
6632 return 0;
6633 fail:
6634 dev_kfree_skb_any(skb);
6635
6636 return ret;
6637 }
6638
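/* Raw H2Cs queued on early_h2c_list before firmware was ready are replayed
 * here once it is running, and freed again on teardown.
 */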
6639 void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev)
6640 {
6641 struct rtw89_early_h2c *early_h2c;
6642
6643 lockdep_assert_wiphy(rtwdev->hw->wiphy);
6644
6645 list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) {
6646 rtw89_fw_h2c_raw(rtwdev, early_h2c->h2c, early_h2c->h2c_len);
6647 }
6648 }
6649
6650 void __rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev)
6651 {
6652 struct rtw89_early_h2c *early_h2c, *tmp;
6653
6654 list_for_each_entry_safe(early_h2c, tmp, &rtwdev->early_h2c_list, list) {
6655 list_del(&early_h2c->list);
6656 kfree(early_h2c->h2c);
6657 kfree(early_h2c);
6658 }
6659 }
6660
6661 void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev)
6662 {
6663 lockdep_assert_wiphy(rtwdev->hw->wiphy);
6664
6665 __rtw89_fw_free_all_early_h2c(rtwdev);
6666 }
6667
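/* C2H (firmware-to-driver) delivery: events whose handlers are safe in
 * atomic context are dispatched directly from the receive path; everything
 * else is queued to c2h_work and handled under the wiphy lock.
 */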
6668 static void rtw89_fw_c2h_parse_attr(struct sk_buff *c2h)
6669 {
6670 const struct rtw89_c2h_hdr *hdr = (const struct rtw89_c2h_hdr *)c2h->data;
6671 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h);
6672
6673 attr->category = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CATEGORY);
6674 attr->class = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CLASS);
6675 attr->func = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_FUNC);
6676 attr->len = le32_get_bits(hdr->w1, RTW89_C2H_HDR_W1_LEN);
6677 }
6678
6679 static bool rtw89_fw_c2h_chk_atomic(struct rtw89_dev *rtwdev,
6680 struct sk_buff *c2h)
6681 {
6682 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h);
6683 u8 category = attr->category;
6684 u8 class = attr->class;
6685 u8 func = attr->func;
6686
6687 switch (category) {
6688 default:
6689 return false;
6690 case RTW89_C2H_CAT_MAC:
6691 return rtw89_mac_c2h_chk_atomic(rtwdev, c2h, class, func);
6692 case RTW89_C2H_CAT_OUTSRC:
6693 return rtw89_phy_c2h_chk_atomic(rtwdev, class, func);
6694 }
6695 }
6696
6697 void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h)
6698 {
6699 rtw89_fw_c2h_parse_attr(c2h);
6700 if (!rtw89_fw_c2h_chk_atomic(rtwdev, c2h))
6701 goto enqueue;
6702
6703 rtw89_fw_c2h_cmd_handle(rtwdev, c2h);
6704 dev_kfree_skb_any(c2h);
6705 return;
6706
6707 enqueue:
6708 skb_queue_tail(&rtwdev->c2h_queue, c2h);
6709 wiphy_work_queue(rtwdev->hw->wiphy, &rtwdev->c2h_work);
6710 }
6711
6712 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
6713 struct sk_buff *skb)
6714 {
6715 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(skb);
6716 u8 category = attr->category;
6717 u8 class = attr->class;
6718 u8 func = attr->func;
6719 u16 len = attr->len;
6720 bool dump = true;
6721
6722 if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags))
6723 return;
6724
6725 switch (category) {
6726 case RTW89_C2H_CAT_TEST:
6727 break;
6728 case RTW89_C2H_CAT_MAC:
6729 rtw89_mac_c2h_handle(rtwdev, skb, len, class, func);
6730 if (class == RTW89_MAC_C2H_CLASS_INFO &&
6731 func == RTW89_MAC_C2H_FUNC_C2H_LOG)
6732 dump = false;
6733 break;
6734 case RTW89_C2H_CAT_OUTSRC:
6735 if (class >= RTW89_PHY_C2H_CLASS_BTC_MIN &&
6736 class <= RTW89_PHY_C2H_CLASS_BTC_MAX)
6737 rtw89_btc_c2h_handle(rtwdev, skb, len, class, func);
6738 else
6739 rtw89_phy_c2h_handle(rtwdev, skb, len, class, func);
6740 break;
6741 }
6742
6743 if (dump)
6744 rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "C2H: ", skb->data, skb->len);
6745 }
6746
6747 void rtw89_fw_c2h_work(struct wiphy *wiphy, struct wiphy_work *work)
6748 {
6749 struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
6750 c2h_work);
6751 struct sk_buff *skb, *tmp;
6752
6753 lockdep_assert_wiphy(rtwdev->hw->wiphy);
6754
6755 skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) {
6756 skb_unlink(skb, &rtwdev->c2h_queue);
6757 rtw89_fw_c2h_cmd_handle(rtwdev, skb);
6758 dev_kfree_skb_any(skb);
6759 }
6760 }
6761
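/* Drop queued scan events whose sequence number no longer matches the
 * current scan. The queue length sampled at entry bounds the walk.
 */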
6762 void rtw89_fw_c2h_purge_obsoleted_scan_events(struct rtw89_dev *rtwdev)
6763 {
6764 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
6765 struct sk_buff *skb, *tmp;
6766 int limit;
6767
6768 lockdep_assert_wiphy(rtwdev->hw->wiphy);
6769
6770 limit = skb_queue_len(&rtwdev->c2h_queue);
6771
6772 skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) {
6773 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(skb);
6774
6775 if (--limit < 0)
6776 return;
6777
6778 if (!attr->is_scan_event || attr->scan_seq == scan_info->seq)
6779 continue;
6780
6781 rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
6782 "purge obsoleted scan event with seq=%d (cur=%d)\n",
6783 attr->scan_seq, scan_info->seq);
6784
6785 skb_unlink(skb, &rtwdev->c2h_queue);
6786 dev_kfree_skb_any(skb);
6787 }
6788 }
6789
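/* Send an H2C message through the I/O registers: wait for firmware to
 * consume the previous one (control register reads back as zero), encode
 * the function ID and word count into the header, write all H2CREG words,
 * advance the H2C sequence counter and finally kick B_AX_H2CREG_TRIGGER.
 */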
6790 static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev,
6791 struct rtw89_mac_h2c_info *info)
6792 {
6793 const struct rtw89_chip_info *chip = rtwdev->chip;
6794 struct rtw89_fw_info *fw_info = &rtwdev->fw;
6795 const u32 *h2c_reg = chip->h2c_regs;
6796 u8 i, val, len;
6797 int ret;
6798
6799 ret = read_poll_timeout(rtw89_read8, val, val == 0, 1000, 5000, false,
6800 rtwdev, chip->h2c_ctrl_reg);
6801 if (ret) {
6802 rtw89_warn(rtwdev, "FW does not process h2c registers\n");
6803 return ret;
6804 }
6805
6806 len = DIV_ROUND_UP(info->content_len + RTW89_H2CREG_HDR_LEN,
6807 sizeof(info->u.h2creg[0]));
6808
6809 u32p_replace_bits(&info->u.hdr.w0, info->id, RTW89_H2CREG_HDR_FUNC_MASK);
6810 u32p_replace_bits(&info->u.hdr.w0, len, RTW89_H2CREG_HDR_LEN_MASK);
6811
6812 for (i = 0; i < RTW89_H2CREG_MAX; i++)
6813 rtw89_write32(rtwdev, h2c_reg[i], info->u.h2creg[i]);
6814
6815 fw_info->h2c_counter++;
6816 rtw89_write8_mask(rtwdev, chip->h2c_counter_reg.addr,
6817 chip->h2c_counter_reg.mask, fw_info->h2c_counter);
6818 rtw89_write8(rtwdev, chip->h2c_ctrl_reg, B_AX_H2CREG_TRIGGER);
6819
6820 return 0;
6821 }
6822
6823 static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev,
6824 struct rtw89_mac_c2h_info *info)
6825 {
6826 const struct rtw89_chip_info *chip = rtwdev->chip;
6827 struct rtw89_fw_info *fw_info = &rtwdev->fw;
6828 const u32 *c2h_reg = chip->c2h_regs;
6829 u32 ret, timeout;
6830 u8 i, val;
6831
6832 info->id = RTW89_FWCMD_C2HREG_FUNC_NULL;
6833
6834 if (rtwdev->hci.type == RTW89_HCI_TYPE_USB)
6835 timeout = RTW89_C2H_TIMEOUT_USB;
6836 else
6837 timeout = RTW89_C2H_TIMEOUT;
6838
6839 ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1,
6840 timeout, false, rtwdev,
6841 chip->c2h_ctrl_reg);
6842 if (ret) {
6843 rtw89_warn(rtwdev, "c2h reg timeout\n");
6844 return ret;
6845 }
6846
6847 for (i = 0; i < RTW89_C2HREG_MAX; i++)
6848 info->u.c2hreg[i] = rtw89_read32(rtwdev, c2h_reg[i]);
6849
6850 rtw89_write8(rtwdev, chip->c2h_ctrl_reg, 0);
6851
6852 info->id = u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_FUNC_MASK);
6853 info->content_len =
6854 (u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_LEN_MASK) << 2) -
6855 RTW89_C2HREG_HDR_LEN;
6856
6857 fw_info->c2h_counter++;
6858 rtw89_write8_mask(rtwdev, chip->c2h_counter_reg.addr,
6859 chip->c2h_counter_reg.mask, fw_info->c2h_counter);
6860
6861 return 0;
6862 }
6863
6864 int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev,
6865 struct rtw89_mac_h2c_info *h2c_info,
6866 struct rtw89_mac_c2h_info *c2h_info)
6867 {
6868 u32 ret;
6869
6870 if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE)
6871 lockdep_assert_wiphy(rtwdev->hw->wiphy);
6872
6873 if (!h2c_info && !c2h_info)
6874 return -EINVAL;
6875
6876 if (!h2c_info)
6877 goto recv_c2h;
6878
6879 ret = rtw89_fw_write_h2c_reg(rtwdev, h2c_info);
6880 if (ret)
6881 return ret;
6882
6883 recv_c2h:
6884 if (!c2h_info)
6885 return 0;
6886
6887 ret = rtw89_fw_read_c2h_reg(rtwdev, c2h_info);
6888 if (ret)
6889 return ret;
6890
6891 return 0;
6892 }
6893
6894 void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev)
6895 {
6896 if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) {
6897 rtw89_err(rtwdev, "[ERR]pwr is off\n");
6898 return;
6899 }
6900
6901 rtw89_info(rtwdev, "FW status = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM0));
6902 rtw89_info(rtwdev, "FW BADADDR = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM1));
6903 rtw89_info(rtwdev, "FW EPC/RA = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM2));
6904 rtw89_info(rtwdev, "FW MISC = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM3));
6905 rtw89_info(rtwdev, "R_AX_HALT_C2H = 0x%x\n",
6906 rtw89_read32(rtwdev, R_AX_HALT_C2H));
6907 rtw89_info(rtwdev, "R_AX_SER_DBG_INFO = 0x%x\n",
6908 rtw89_read32(rtwdev, R_AX_SER_DBG_INFO));
6909
6910 rtw89_fw_prog_cnt_dump(rtwdev);
6911 }
6912
6913 static void rtw89_hw_scan_release_pkt_list(struct rtw89_dev *rtwdev)
6914 {
6915 struct list_head *pkt_list = rtwdev->scan_info.pkt_list;
6916 struct rtw89_pktofld_info *info, *tmp;
6917 u8 idx;
6918
6919 for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) {
6920 if (!(rtwdev->chip->support_bands & BIT(idx)))
6921 continue;
6922
6923 list_for_each_entry_safe(info, tmp, &pkt_list[idx], list) {
6924 if (test_bit(info->id, rtwdev->pkt_offload))
6925 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
6926 list_del(&info->list);
6927 kfree(info);
6928 }
6929 }
6930 }
6931
6932 static void rtw89_hw_scan_cleanup(struct rtw89_dev *rtwdev,
6933 struct rtw89_vif_link *rtwvif_link)
6934 {
6935 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
6936 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
6937 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
6938
6939 mac->free_chan_list(rtwdev);
6940 rtw89_hw_scan_release_pkt_list(rtwdev);
6941
6942 rtwvif->scan_req = NULL;
6943 rtwvif->scan_ies = NULL;
6944 scan_info->scanning_vif = NULL;
6945 scan_info->abort = false;
6946 scan_info->connected = false;
6947 scan_info->delay = 0;
6948 }
6949
6950 static bool rtw89_is_6ghz_wildcard_probe_req(struct rtw89_dev *rtwdev,
6951 struct cfg80211_scan_request *req,
6952 struct rtw89_pktofld_info *info,
6953 enum nl80211_band band, u8 ssid_idx)
6954 {
6955 if (band != NL80211_BAND_6GHZ)
6956 return false;
6957
6958 if (req->ssids[ssid_idx].ssid_len) {
6959 memcpy(info->ssid, req->ssids[ssid_idx].ssid,
6960 req->ssids[ssid_idx].ssid_len);
6961 info->ssid_len = req->ssids[ssid_idx].ssid_len;
6962 return false;
6963 } else {
6964 info->wildcard_6ghz = true;
6965 return true;
6966 }
6967 }
6968
6969 static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev,
6970 struct rtw89_vif_link *rtwvif_link,
6971 struct sk_buff *skb, u8 ssid_idx)
6972 {
6973 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
6974 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
6975 struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
6976 struct cfg80211_scan_request *req = rtwvif->scan_req;
6977 struct rtw89_pktofld_info *info;
6978 struct sk_buff *new;
6979 int ret = 0;
6980 u8 band;
6981
6982 for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
6983 if (!(rtwdev->chip->support_bands & BIT(band)))
6984 continue;
6985
6986 new = skb_copy(skb, GFP_KERNEL);
6987 if (!new) {
6988 ret = -ENOMEM;
6989 goto out;
6990 }
6991 skb_put_data(new, ies->ies[band], ies->len[band]);
6992 skb_put_data(new, ies->common_ies, ies->common_ie_len);
6993
6994 info = kzalloc(sizeof(*info), GFP_KERNEL);
6995 if (!info) {
6996 ret = -ENOMEM;
6997 kfree_skb(new);
6998 goto out;
6999 }
7000
7001 rtw89_is_6ghz_wildcard_probe_req(rtwdev, req, info, band, ssid_idx);
7002
7003 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new);
7004 if (ret) {
7005 kfree_skb(new);
7006 kfree(info);
7007 goto out;
7008 }
7009
7010 list_add_tail(&info->list, &scan_info->pkt_list[band]);
7011 kfree_skb(new);
7012 }
7013 out:
7014 return ret;
7015 }
7016
7017 static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev,
7018 struct rtw89_vif_link *rtwvif_link,
7019 const u8 *mac_addr)
7020 {
7021 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
7022 struct cfg80211_scan_request *req = rtwvif->scan_req;
7023 struct sk_buff *skb;
7024 u8 num = req->n_ssids, i;
7025 int ret;
7026
7027 for (i = 0; i < num; i++) {
7028 skb = ieee80211_probereq_get(rtwdev->hw, mac_addr,
7029 req->ssids[i].ssid,
7030 req->ssids[i].ssid_len,
7031 req->ie_len);
7032 if (!skb)
7033 return -ENOMEM;
7034
7035 ret = rtw89_append_probe_req_ie(rtwdev, rtwvif_link, skb, i);
7036 kfree_skb(skb);
7037
7038 if (ret)
7039 return ret;
7040 }
7041
7042 return 0;
7043 }
7044
7045 static int rtw89_update_6ghz_rnr_chan_ax(struct rtw89_dev *rtwdev,
7046 struct ieee80211_scan_ies *ies,
7047 struct cfg80211_scan_request *req,
7048 struct rtw89_mac_chinfo_ax *ch_info)
7049 {
7050 struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif;
7051 struct list_head *pkt_list = rtwdev->scan_info.pkt_list;
7052 struct cfg80211_scan_6ghz_params *params;
7053 struct rtw89_pktofld_info *info, *tmp;
7054 struct ieee80211_hdr *hdr;
7055 struct sk_buff *skb;
7056 bool found;
7057 int ret = 0;
7058 u8 i;
7059
7060 if (!req->n_6ghz_params)
7061 return 0;
7062
7063 for (i = 0; i < req->n_6ghz_params; i++) {
7064 params = &req->scan_6ghz_params[i];
7065
7066 if (req->channels[params->channel_idx]->hw_value !=
7067 ch_info->pri_ch)
7068 continue;
7069
7070 found = false;
7071 list_for_each_entry(tmp, &pkt_list[NL80211_BAND_6GHZ], list) {
7072 if (ether_addr_equal(tmp->bssid, params->bssid)) {
7073 found = true;
7074 break;
7075 }
7076 }
7077 if (found)
7078 continue;
7079
7080 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif_link->mac_addr,
7081 NULL, 0, req->ie_len);
7082 if (!skb)
7083 return -ENOMEM;
7084
7085 skb_put_data(skb, ies->ies[NL80211_BAND_6GHZ], ies->len[NL80211_BAND_6GHZ]);
7086 skb_put_data(skb, ies->common_ies, ies->common_ie_len);
7087 hdr = (struct ieee80211_hdr *)skb->data;
7088 ether_addr_copy(hdr->addr3, params->bssid);
7089
7090 info = kzalloc(sizeof(*info), GFP_KERNEL);
7091 if (!info) {
7092 ret = -ENOMEM;
7093 kfree_skb(skb);
7094 goto out;
7095 }
7096
7097 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb);
7098 if (ret) {
7099 kfree_skb(skb);
7100 kfree(info);
7101 goto out;
7102 }
7103
7104 ether_addr_copy(info->bssid, params->bssid);
7105 info->channel_6ghz = req->channels[params->channel_idx]->hw_value;
7106 list_add_tail(&info->list, &rtwdev->scan_info.pkt_list[NL80211_BAND_6GHZ]);
7107
7108 ch_info->tx_pkt = true;
7109 ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G;
7110
7111 kfree_skb(skb);
7112 }
7113
7114 out:
7115 return ret;
7116 }
7117
7118 static void rtw89_pno_scan_add_chan_ax(struct rtw89_dev *rtwdev,
7119 int chan_type, int ssid_num,
7120 struct rtw89_mac_chinfo_ax *ch_info)
7121 {
7122 struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
7123 struct rtw89_pktofld_info *info;
7124 u8 probe_count = 0;
7125
7126 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
7127 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
7128 ch_info->bw = RTW89_SCAN_WIDTH;
7129 ch_info->tx_pkt = true;
7130 ch_info->cfg_tx_pwr = false;
7131 ch_info->tx_pwr_idx = 0;
7132 ch_info->tx_null = false;
7133 ch_info->pause_data = false;
7134 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
7135
7136 if (ssid_num) {
7137 list_for_each_entry(info, &rtw_wow->pno_pkt_list, list) {
7138 if (info->channel_6ghz &&
7139 ch_info->pri_ch != info->channel_6ghz)
7140 continue;
7141 else if (info->channel_6ghz && probe_count != 0)
7142 ch_info->period += RTW89_CHANNEL_TIME_6G;
7143
7144 if (info->wildcard_6ghz)
7145 continue;
7146
7147 ch_info->pkt_id[probe_count++] = info->id;
7148 if (probe_count >= RTW89_SCANOFLD_MAX_SSID)
7149 break;
7150 }
7151 ch_info->num_pkt = probe_count;
7152 }
7153
7154 switch (chan_type) {
7155 case RTW89_CHAN_DFS:
7156 if (ch_info->ch_band != RTW89_BAND_6G)
7157 ch_info->period = max_t(u8, ch_info->period,
7158 RTW89_DFS_CHAN_TIME);
7159 ch_info->dwell_time = RTW89_DWELL_TIME;
7160 break;
7161 case RTW89_CHAN_ACTIVE:
7162 break;
7163 default:
7164 rtw89_err(rtwdev, "Channel type out of bounds\n");
7165 }
7166 }
7167
7168 static void rtw89_hw_scan_add_chan_ax(struct rtw89_dev *rtwdev, int chan_type,
7169 int ssid_num,
7170 struct rtw89_mac_chinfo_ax *ch_info)
7171 {
7172 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
7173 struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif;
7174 const struct rtw89_hw_scan_extra_op *ext = &scan_info->extra_op;
7175 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
7176 struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
7177 struct cfg80211_scan_request *req = rtwvif->scan_req;
7178 struct rtw89_chan *op = &rtwdev->scan_info.op_chan;
7179 struct rtw89_pktofld_info *info;
7180 u8 band, probe_count = 0;
7181 int ret;
7182
7183 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
7184 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
7185 ch_info->bw = RTW89_SCAN_WIDTH;
7186 ch_info->tx_pkt = true;
7187 ch_info->cfg_tx_pwr = false;
7188 ch_info->tx_pwr_idx = 0;
7189 ch_info->tx_null = false;
7190 ch_info->pause_data = false;
7191 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
7192
7193 if (ch_info->ch_band == RTW89_BAND_6G) {
7194 if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) ||
7195 !ch_info->is_psc) {
7196 ch_info->tx_pkt = false;
7197 if (!req->duration_mandatory)
7198 ch_info->period -= RTW89_DWELL_TIME_6G;
7199 }
7200 }
7201
7202 ret = rtw89_update_6ghz_rnr_chan_ax(rtwdev, ies, req, ch_info);
7203 if (ret)
7204 rtw89_warn(rtwdev, "RNR fails: %d\n", ret);
7205
7206 if (ssid_num) {
7207 band = rtw89_hw_to_nl80211_band(ch_info->ch_band);
7208
7209 list_for_each_entry(info, &scan_info->pkt_list[band], list) {
7210 if (info->channel_6ghz &&
7211 ch_info->pri_ch != info->channel_6ghz)
7212 continue;
7213 else if (info->channel_6ghz && probe_count != 0)
7214 ch_info->period += RTW89_CHANNEL_TIME_6G;
7215
7216 if (info->wildcard_6ghz)
7217 continue;
7218
7219 ch_info->pkt_id[probe_count++] = info->id;
7220 if (probe_count >= RTW89_SCANOFLD_MAX_SSID)
7221 break;
7222 }
7223 ch_info->num_pkt = probe_count;
7224 }
7225
7226 switch (chan_type) {
7227 case RTW89_CHAN_OPERATE:
7228 ch_info->central_ch = op->channel;
7229 ch_info->pri_ch = op->primary_channel;
7230 ch_info->ch_band = op->band_type;
7231 ch_info->bw = op->band_width;
7232 ch_info->tx_null = true;
7233 ch_info->num_pkt = 0;
7234 break;
7235 case RTW89_CHAN_DFS:
7236 if (ch_info->ch_band != RTW89_BAND_6G)
7237 ch_info->period = max_t(u8, ch_info->period,
7238 RTW89_DFS_CHAN_TIME);
7239 ch_info->dwell_time = RTW89_DWELL_TIME;
7240 ch_info->pause_data = true;
7241 break;
7242 case RTW89_CHAN_ACTIVE:
7243 ch_info->pause_data = true;
7244 break;
7245 case RTW89_CHAN_EXTRA_OP:
7246 ch_info->central_ch = ext->chan.channel;
7247 ch_info->pri_ch = ext->chan.primary_channel;
7248 ch_info->ch_band = ext->chan.band_type;
7249 ch_info->bw = ext->chan.band_width;
7250 ch_info->tx_null = true;
7251 ch_info->num_pkt = 0;
7252 ch_info->macid_tx = true;
7253 break;
7254 default:
7255 rtw89_err(rtwdev, "Channel type out of bounds\n");
7256 }
7257 }
7258
7259 static void rtw89_pno_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type,
7260 int ssid_num,
7261 struct rtw89_mac_chinfo_be *ch_info)
7262 {
7263 struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
7264 struct rtw89_pktofld_info *info;
7265 u8 probe_count = 0, i;
7266
7267 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
7268 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
7269 ch_info->bw = RTW89_SCAN_WIDTH;
7270 ch_info->tx_null = false;
7271 ch_info->pause_data = false;
7272 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
7273
7274 if (ssid_num) {
7275 list_for_each_entry(info, &rtw_wow->pno_pkt_list, list) {
7276 ch_info->pkt_id[probe_count++] = info->id;
7277 if (probe_count >= RTW89_SCANOFLD_MAX_SSID)
7278 break;
7279 }
7280 }
7281
7282 for (i = probe_count; i < RTW89_SCANOFLD_MAX_SSID; i++)
7283 ch_info->pkt_id[i] = RTW89_SCANOFLD_PKT_NONE;
7284
7285 switch (chan_type) {
7286 case RTW89_CHAN_DFS:
7287 ch_info->period = max_t(u8, ch_info->period, RTW89_DFS_CHAN_TIME);
7288 ch_info->dwell_time = RTW89_DWELL_TIME;
7289 break;
7290 case RTW89_CHAN_ACTIVE:
7291 break;
7292 default:
7293 rtw89_warn(rtwdev, "Channel type out of bounds\n");
7294 break;
7295 }
7296 }
7297
7298 static void rtw89_hw_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type,
7299 int ssid_num,
7300 struct rtw89_mac_chinfo_be *ch_info)
7301 {
7302 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
7303 struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif;
7304 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
7305 struct cfg80211_scan_request *req = rtwvif->scan_req;
7306 struct rtw89_pktofld_info *info;
7307 u8 band, probe_count = 0, i;
7308
7309 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
7310 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
7311 ch_info->bw = RTW89_SCAN_WIDTH;
7312 ch_info->tx_null = false;
7313 ch_info->pause_data = false;
7314 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
7315
7316 if (ssid_num) {
7317 band = rtw89_hw_to_nl80211_band(ch_info->ch_band);
7318
7319 list_for_each_entry(info, &scan_info->pkt_list[band], list) {
7320 if (info->channel_6ghz &&
7321 ch_info->pri_ch != info->channel_6ghz)
7322 continue;
7323
7324 if (info->wildcard_6ghz)
7325 continue;
7326
7327 ch_info->pkt_id[probe_count++] = info->id;
7328 if (probe_count >= RTW89_SCANOFLD_MAX_SSID)
7329 break;
7330 }
7331 }
7332
7333 if (ch_info->ch_band == RTW89_BAND_6G) {
7334 if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) ||
7335 !ch_info->is_psc) {
7336 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
7337 if (!req->duration_mandatory)
7338 ch_info->period -= RTW89_DWELL_TIME_6G;
7339 }
7340 }
7341
7342 for (i = probe_count; i < RTW89_SCANOFLD_MAX_SSID; i++)
7343 ch_info->pkt_id[i] = RTW89_SCANOFLD_PKT_NONE;
7344
7345 switch (chan_type) {
7346 case RTW89_CHAN_DFS:
7347 if (ch_info->ch_band != RTW89_BAND_6G)
7348 ch_info->period =
7349 max_t(u8, ch_info->period, RTW89_DFS_CHAN_TIME);
7350 ch_info->dwell_time = RTW89_DWELL_TIME;
7351 ch_info->pause_data = true;
7352 break;
7353 case RTW89_CHAN_ACTIVE:
7354 ch_info->pause_data = true;
7355 break;
7356 default:
7357 rtw89_warn(rtwdev, "Channel type out of bounds\n");
7358 break;
7359 }
7360 }
7361
7362 int rtw89_pno_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
7363 struct rtw89_vif_link *rtwvif_link)
7364 {
7365 struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
7366 struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config;
7367 struct rtw89_mac_chinfo_ax *ch_info, *tmp;
7368 struct ieee80211_channel *channel;
7369 struct list_head chan_list;
7370 int list_len;
7371 enum rtw89_chan_type type;
7372 int ret = 0;
7373 u32 idx;
7374
7375 INIT_LIST_HEAD(&chan_list);
7376 for (idx = 0, list_len = 0;
7377 idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_AX;
7378 idx++, list_len++) {
7379 channel = nd_config->channels[idx];
7380 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
7381 if (!ch_info) {
7382 ret = -ENOMEM;
7383 goto out;
7384 }
7385
7386 ch_info->period = RTW89_CHANNEL_TIME;
7387 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band);
7388 ch_info->central_ch = channel->hw_value;
7389 ch_info->pri_ch = channel->hw_value;
7390 ch_info->is_psc = cfg80211_channel_is_psc(channel);
7391
7392 if (channel->flags &
7393 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
7394 type = RTW89_CHAN_DFS;
7395 else
7396 type = RTW89_CHAN_ACTIVE;
7397
7398 rtw89_pno_scan_add_chan_ax(rtwdev, type, nd_config->n_match_sets, ch_info);
7399 list_add_tail(&ch_info->list, &chan_list);
7400 }
7401 ret = rtw89_fw_h2c_scan_list_offload_ax(rtwdev, list_len, &chan_list);
7402
7403 out:
7404 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
7405 list_del(&ch_info->list);
7406 kfree(ch_info);
7407 }
7408
7409 return ret;
7410 }
7411
7412 static int rtw89_hw_scan_add_op_types_ax(struct rtw89_dev *rtwdev,
7413 enum rtw89_chan_type type,
7414 struct list_head *chan_list,
7415 struct cfg80211_scan_request *req,
7416 int *off_chan_time)
7417 {
7418 struct rtw89_mac_chinfo_ax *tmp;
7419
7420 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
7421 if (!tmp)
7422 return -ENOMEM;
7423
7424 switch (type) {
7425 case RTW89_CHAN_OPERATE:
7426 tmp->period = req->duration_mandatory ?
7427 req->duration : RTW89_CHANNEL_TIME;
7428 *off_chan_time = 0;
7429 break;
7430 case RTW89_CHAN_EXTRA_OP:
7431 tmp->period = RTW89_CHANNEL_TIME_EXTRA_OP;
7432 /* still count toward @off_chan_time for the scan op */
7433 *off_chan_time += tmp->period;
7434 break;
7435 default:
7436 kfree(tmp);
7437 return -EINVAL;
7438 }
7439
7440 rtw89_hw_scan_add_chan_ax(rtwdev, type, 0, tmp);
7441 list_add_tail(&tmp->list, chan_list);
7442
7443 return 0;
7444 }
7445
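/* Translate the cfg80211 scan request into firmware channel entries. While
 * connected, an operating-channel entry (plus an extra-op entry when one is
 * set) is interleaved whenever the accumulated off-channel time would exceed
 * RTW89_OFF_CHAN_TIME, so the firmware periodically returns to the operating
 * channel during the scan.
 */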
7446 int rtw89_hw_scan_prep_chan_list_ax(struct rtw89_dev *rtwdev,
7447 struct rtw89_vif_link *rtwvif_link)
7448 {
7449 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
7450 const struct rtw89_hw_scan_extra_op *ext = &scan_info->extra_op;
7451 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
7452 struct cfg80211_scan_request *req = rtwvif->scan_req;
7453 struct rtw89_mac_chinfo_ax *ch_info, *tmp;
7454 struct ieee80211_channel *channel;
7455 struct list_head chan_list;
7456 bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN;
7457 enum rtw89_chan_type type;
7458 int off_chan_time = 0;
7459 int ret;
7460 u32 idx;
7461
7462 INIT_LIST_HEAD(&chan_list);
7463
7464 for (idx = 0; idx < req->n_channels; idx++) {
7465 channel = req->channels[idx];
7466 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
7467 if (!ch_info) {
7468 ret = -ENOMEM;
7469 goto out;
7470 }
7471
7472 if (req->duration)
7473 ch_info->period = req->duration;
7474 else if (channel->band == NL80211_BAND_6GHZ)
7475 ch_info->period = RTW89_CHANNEL_TIME_6G +
7476 RTW89_DWELL_TIME_6G;
7477 else if (rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT)
7478 ch_info->period = RTW89_P2P_CHAN_TIME;
7479 else
7480 ch_info->period = RTW89_CHANNEL_TIME;
7481
7482 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band);
7483 ch_info->central_ch = channel->hw_value;
7484 ch_info->pri_ch = channel->hw_value;
7485 ch_info->rand_seq_num = random_seq;
7486 ch_info->is_psc = cfg80211_channel_is_psc(channel);
7487
7488 if (channel->flags &
7489 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
7490 type = RTW89_CHAN_DFS;
7491 else
7492 type = RTW89_CHAN_ACTIVE;
7493 rtw89_hw_scan_add_chan_ax(rtwdev, type, req->n_ssids, ch_info);
7494
7495 if (!(scan_info->connected &&
7496 off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME))
7497 goto next;
7498
7499 ret = rtw89_hw_scan_add_op_types_ax(rtwdev, RTW89_CHAN_OPERATE,
7500 &chan_list, req, &off_chan_time);
7501 if (ret) {
7502 kfree(ch_info);
7503 goto out;
7504 }
7505
7506 if (!ext->set)
7507 goto next;
7508
7509 ret = rtw89_hw_scan_add_op_types_ax(rtwdev, RTW89_CHAN_EXTRA_OP,
7510 &chan_list, req, &off_chan_time);
7511 if (ret) {
7512 kfree(ch_info);
7513 goto out;
7514 }
7515
7516 next:
7517 list_add_tail(&ch_info->list, &chan_list);
7518 off_chan_time += ch_info->period;
7519 }
7520
7521 list_splice_tail(&chan_list, &scan_info->chan_list);
7522 return 0;
7523
7524 out:
7525 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
7526 list_del(&ch_info->list);
7527 kfree(ch_info);
7528 }
7529
7530 return ret;
7531 }
7532
7533 void rtw89_hw_scan_free_chan_list_ax(struct rtw89_dev *rtwdev)
7534 {
7535 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
7536 struct rtw89_mac_chinfo_ax *ch_info, *tmp;
7537
7538 list_for_each_entry_safe(ch_info, tmp, &scan_info->chan_list, list) {
7539 list_del(&ch_info->list);
7540 kfree(ch_info);
7541 }
7542 }
7543
7544 int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
7545 struct rtw89_vif_link *rtwvif_link)
7546 {
7547 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
7548 struct rtw89_mac_chinfo_ax *ch_info, *tmp;
7549 unsigned int list_len = 0;
7550 struct list_head list;
7551 int ret;
7552
7553 INIT_LIST_HEAD(&list);
7554
7555 list_for_each_entry_safe(ch_info, tmp, &scan_info->chan_list, list) {
7556 list_move_tail(&ch_info->list, &list);
7557
7558 list_len++;
7559 if (list_len == RTW89_SCAN_LIST_LIMIT_AX)
7560 break;
7561 }
7562
7563 ret = rtw89_fw_h2c_scan_list_offload_ax(rtwdev, list_len, &list);
7564
7565 list_for_each_entry_safe(ch_info, tmp, &list, list) {
7566 list_del(&ch_info->list);
7567 kfree(ch_info);
7568 }
7569
7570 return ret;
7571 }
7572
7573 int rtw89_pno_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
7574 struct rtw89_vif_link *rtwvif_link)
7575 {
7576 struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
7577 struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config;
7578 struct rtw89_mac_chinfo_be *ch_info, *tmp;
7579 struct ieee80211_channel *channel;
7580 struct list_head chan_list;
7581 enum rtw89_chan_type type;
7582 int list_len, ret;
7583 u32 idx;
7584
7585 INIT_LIST_HEAD(&chan_list);
7586
7587 for (idx = 0, list_len = 0;
7588 idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_BE;
7589 idx++, list_len++) {
7590 channel = nd_config->channels[idx];
7591 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
7592 if (!ch_info) {
7593 ret = -ENOMEM;
7594 goto out;
7595 }
7596
7597 ch_info->period = RTW89_CHANNEL_TIME;
7598 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band);
7599 ch_info->central_ch = channel->hw_value;
7600 ch_info->pri_ch = channel->hw_value;
7601 ch_info->is_psc = cfg80211_channel_is_psc(channel);
7602
7603 if (channel->flags &
7604 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
7605 type = RTW89_CHAN_DFS;
7606 else
7607 type = RTW89_CHAN_ACTIVE;
7608
7609 rtw89_pno_scan_add_chan_be(rtwdev, type,
7610 nd_config->n_match_sets, ch_info);
7611 list_add_tail(&ch_info->list, &chan_list);
7612 }
7613
7614 ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &chan_list,
7615 rtwvif_link);
7616
7617 out:
7618 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
7619 list_del(&ch_info->list);
7620 kfree(ch_info);
7621 }
7622
7623 return ret;
7624 }
7625
7626 int rtw89_hw_scan_prep_chan_list_be(struct rtw89_dev *rtwdev,
7627 struct rtw89_vif_link *rtwvif_link)
7628 {
7629 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
7630 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
7631 struct cfg80211_scan_request *req = rtwvif->scan_req;
7632 struct rtw89_mac_chinfo_be *ch_info, *tmp;
7633 struct ieee80211_channel *channel;
7634 struct list_head chan_list;
7635 enum rtw89_chan_type type;
7636 bool random_seq;
7637 int ret;
7638 u32 idx;
7639
7640 random_seq = !!(req->flags & NL80211_SCAN_FLAG_RANDOM_SN);
7641 INIT_LIST_HEAD(&chan_list);
7642
7643 for (idx = 0; idx < req->n_channels; idx++) {
7644 channel = req->channels[idx];
7645 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
7646 if (!ch_info) {
7647 ret = -ENOMEM;
7648 goto out;
7649 }
7650
7651 if (req->duration)
7652 ch_info->period = req->duration;
7653 else if (channel->band == NL80211_BAND_6GHZ)
7654 ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G;
7655 else if (rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT)
7656 ch_info->period = RTW89_P2P_CHAN_TIME;
7657 else
7658 ch_info->period = RTW89_CHANNEL_TIME;
7659
7660 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band);
7661 ch_info->central_ch = channel->hw_value;
7662 ch_info->pri_ch = channel->hw_value;
7663 ch_info->rand_seq_num = random_seq;
7664 ch_info->is_psc = cfg80211_channel_is_psc(channel);
7665
7666 if (channel->flags & (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
7667 type = RTW89_CHAN_DFS;
7668 else
7669 type = RTW89_CHAN_ACTIVE;
7670 rtw89_hw_scan_add_chan_be(rtwdev, type, req->n_ssids, ch_info);
7671
7672 list_add_tail(&ch_info->list, &chan_list);
7673 }
7674
7675 list_splice_tail(&chan_list, &scan_info->chan_list);
7676 return 0;
7677
7678 out:
7679 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
7680 list_del(&ch_info->list);
7681 kfree(ch_info);
7682 }
7683
7684 return ret;
7685 }
7686
7687 void rtw89_hw_scan_free_chan_list_be(struct rtw89_dev *rtwdev)
7688 {
7689 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
7690 struct rtw89_mac_chinfo_be *ch_info, *tmp;
7691
7692 list_for_each_entry_safe(ch_info, tmp, &scan_info->chan_list, list) {
7693 list_del(&ch_info->list);
7694 kfree(ch_info);
7695 }
7696 }
7697
7698 int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
7699 struct rtw89_vif_link *rtwvif_link)
7700 {
7701 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
7702 struct rtw89_mac_chinfo_be *ch_info, *tmp;
7703 unsigned int list_len = 0;
7704 struct list_head list;
7705 int ret;
7706
7707 INIT_LIST_HEAD(&list);
7708
7709 list_for_each_entry_safe(ch_info, tmp, &scan_info->chan_list, list) {
7710 list_move_tail(&ch_info->list, &list);
7711
7712 list_len++;
7713 if (list_len == RTW89_SCAN_LIST_LIMIT_BE)
7714 break;
7715 }
7716
7717 ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &list,
7718 rtwvif_link);
7719
7720 list_for_each_entry_safe(ch_info, tmp, &list, list) {
7721 list_del(&ch_info->list);
7722 kfree(ch_info);
7723 }
7724
7725 return ret;
7726 }
7727
7728 static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev,
7729 struct rtw89_vif_link *rtwvif_link,
7730 const u8 *mac_addr)
7731 {
7732 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
7733 int ret;
7734
7735 ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif_link, mac_addr);
7736 if (ret) {
7737 rtw89_err(rtwdev, "Update probe request failed\n");
7738 goto out;
7739 }
7740 ret = mac->prep_chan_list(rtwdev, rtwvif_link);
7741 out:
7742 return ret;
7743 }
7744
7745 static void rtw89_hw_scan_update_link_beacon_noa(struct rtw89_dev *rtwdev,
7746 struct rtw89_vif_link *rtwvif_link,
7747 u16 tu, bool scan)
7748 {
7749 struct ieee80211_p2p_noa_desc noa_desc = {};
7750 struct ieee80211_bss_conf *bss_conf;
7751 u16 beacon_int;
7752 u64 tsf;
7753 int ret;
7754
7755 rcu_read_lock();
7756
7757 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
7758 beacon_int = bss_conf->beacon_int;
7759
7760 rcu_read_unlock();
7761
7762 tu += beacon_int * 3;
7763 if (rtwdev->chip->chip_gen == RTW89_CHIP_AX)
7764 rtwdev->scan_info.delay = ieee80211_tu_to_usec(beacon_int * 3) / 1000;
7765
7766 ret = rtw89_mac_port_get_tsf(rtwdev, rtwvif_link, &tsf);
7767 if (ret) {
7768 rtw89_warn(rtwdev, "%s: failed to get tsf\n", __func__);
7769 return;
7770 }
7771
7772 noa_desc.start_time = cpu_to_le32(tsf);
7773 if (rtwdev->chip->chip_gen == RTW89_CHIP_AX) {
7774 noa_desc.interval = cpu_to_le32(ieee80211_tu_to_usec(tu));
7775 noa_desc.duration = cpu_to_le32(ieee80211_tu_to_usec(tu));
7776 noa_desc.count = 1;
7777 } else {
7778 noa_desc.duration = cpu_to_le32(ieee80211_tu_to_usec(20000));
7779 noa_desc.interval = cpu_to_le32(ieee80211_tu_to_usec(20000));
7780 noa_desc.count = 255;
7781 }
7782
7783 rtw89_p2p_noa_renew(rtwvif_link);
7784 if (scan)
7785 rtw89_p2p_noa_append(rtwvif_link, &noa_desc);
7786
7787 rtw89_chip_h2c_update_beacon(rtwdev, rtwvif_link);
7788 }
7789
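/* Estimate the total scan time in TU by summing the dwell periods of the
 * prepared channel list, then renew the NoA attribute (and beacon) of every
 * active P2P GO link so peers are told the GO will be absent while scanning.
 */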
7790 static void rtw89_hw_scan_update_beacon_noa(struct rtw89_dev *rtwdev, bool scan)
7791 {
7792 const struct rtw89_entity_mgnt *mgnt = &rtwdev->hal.entity_mgnt;
7793 const struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
7794 const struct rtw89_chip_info *chip = rtwdev->chip;
7795 struct rtw89_mac_chinfo_ax *chinfo_ax;
7796 struct rtw89_mac_chinfo_be *chinfo_be;
7797 struct rtw89_vif_link *rtwvif_link;
7798 struct list_head *pos, *tmp;
7799 struct ieee80211_vif *vif;
7800 struct rtw89_vif *rtwvif;
7801 u16 tu = 0;
7802
7803 lockdep_assert_wiphy(rtwdev->hw->wiphy);
7804
7805 if (!scan)
7806 goto update;
7807
7808 list_for_each_safe(pos, tmp, &scan_info->chan_list) {
7809 switch (chip->chip_gen) {
7810 case RTW89_CHIP_AX:
7811 chinfo_ax = list_entry(pos, typeof(*chinfo_ax), list);
7812 tu += chinfo_ax->period;
7813 break;
7814 case RTW89_CHIP_BE:
7815 chinfo_be = list_entry(pos, typeof(*chinfo_be), list);
7816 tu += chinfo_be->period;
7817 break;
7818 default:
7819 rtw89_warn(rtwdev, "%s: invalid chip gen %d\n",
7820 __func__, chip->chip_gen);
7821 return;
7822 }
7823 }
7824
7825 if (unlikely(tu == 0)) {
7826 rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
7827 "%s: cannot estimate needed TU\n", __func__);
7828 return;
7829 }
7830
7831 update:
7832 list_for_each_entry(rtwvif, &mgnt->active_list, mgnt_entry) {
7833 unsigned int link_id;
7834
7835 vif = rtwvif_to_vif(rtwvif);
7836 if (vif->type != NL80211_IFTYPE_AP || !vif->p2p)
7837 continue;
7838
7839 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
7840 rtw89_hw_scan_update_link_beacon_noa(rtwdev, rtwvif_link,
7841 tu, scan);
7842 }
7843 }
7844
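/* When firmware supports SCAN_OFFLOAD_EXTRA_OP, remember the HW-0 link of
 * another active vif so its operating channel can also be serviced as an
 * extra op channel while this vif scans.
 */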
7845 static void rtw89_hw_scan_set_extra_op_info(struct rtw89_dev *rtwdev,
7846 struct rtw89_vif *scan_rtwvif,
7847 const struct rtw89_chan *scan_op)
7848 {
7849 struct rtw89_entity_mgnt *mgnt = &rtwdev->hal.entity_mgnt;
7850 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
7851 struct rtw89_hw_scan_extra_op *ext = &scan_info->extra_op;
7852 struct rtw89_vif *tmp;
7853
7854 ext->set = false;
7855 if (!RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD_EXTRA_OP, &rtwdev->fw))
7856 return;
7857
7858 list_for_each_entry(tmp, &mgnt->active_list, mgnt_entry) {
7859 const struct rtw89_chan *tmp_chan;
7860 struct rtw89_vif_link *tmp_link;
7861
7862 if (tmp == scan_rtwvif)
7863 continue;
7864
7865 tmp_link = rtw89_vif_get_link_inst(tmp, 0);
7866 if (unlikely(!tmp_link)) {
7867 rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
7868 "hw scan: no HW-0 link for extra op\n");
7869 continue;
7870 }
7871
7872 tmp_chan = rtw89_chan_get(rtwdev, tmp_link->chanctx_idx);
7873 *ext = (struct rtw89_hw_scan_extra_op){
7874 .set = true,
7875 .macid = tmp_link->mac_id,
7876 .port = tmp_link->port,
7877 .chan = *tmp_chan,
7878 .rtwvif_link = tmp_link,
7879 };
7880
7881 rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
7882 "hw scan: extra op: center %d primary %d\n",
7883 ext->chan.channel, ext->chan.primary_channel);
7884 break;
7885 }
7886 }
7887
7888 int rtw89_hw_scan_start(struct rtw89_dev *rtwdev,
7889 struct rtw89_vif_link *rtwvif_link,
7890 struct ieee80211_scan_request *scan_req)
7891 {
7892 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
7893 enum rtw89_entity_mode mode = rtw89_get_entity_mode(rtwdev);
7894 struct cfg80211_scan_request *req = &scan_req->req;
7895 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
7896 rtwvif_link->chanctx_idx);
7897 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
7898 struct rtw89_chanctx_pause_parm pause_parm = {
7899 .rsn = RTW89_CHANCTX_PAUSE_REASON_HW_SCAN,
7900 .trigger = rtwvif_link,
7901 };
7902 u32 rx_fltr = rtwdev->hal.rx_fltr;
7903 u8 mac_addr[ETH_ALEN];
7904 u32 reg;
7905 int ret;
7906
7907 /* clone op and keep it during scan */
7908 rtwdev->scan_info.op_chan = *chan;
7909
7910 rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
7911 "hw scan: op: center %d primary %d\n",
7912 chan->channel, chan->primary_channel);
7913
7914 rtw89_hw_scan_set_extra_op_info(rtwdev, rtwvif, chan);
7915
7916 rtwdev->scan_info.connected = rtw89_is_any_vif_connected_or_connecting(rtwdev);
7917 rtwdev->scan_info.scanning_vif = rtwvif_link;
7918 rtwdev->scan_info.abort = false;
7919 rtwdev->scan_info.delay = 0;
7920 rtwvif->scan_ies = &scan_req->ies;
7921 rtwvif->scan_req = req;
7922
7923 if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
7924 get_random_mask_addr(mac_addr, req->mac_addr,
7925 req->mac_addr_mask);
7926 else
7927 ether_addr_copy(mac_addr, rtwvif_link->mac_addr);
7928
7929 ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif_link, mac_addr);
7930 if (ret) {
7931 rtw89_hw_scan_cleanup(rtwdev, rtwvif_link);
7932 return ret;
7933 }
7934
7935 ieee80211_stop_queues(rtwdev->hw);
7936 rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif_link, false);
7937
7938 rtw89_core_scan_start(rtwdev, rtwvif_link, mac_addr, true);
7939
7940 rx_fltr &= ~B_AX_A_BCN_CHK_EN;
7941 rx_fltr &= ~B_AX_A_BC;
7942 rx_fltr &= ~B_AX_A_A1_MATCH;
7943
7944 reg = rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, rtwvif_link->mac_idx);
7945 rtw89_write32_mask(rtwdev, reg, B_AX_RX_FLTR_CFG_MASK, rx_fltr);
7946
7947 rtw89_chanctx_pause(rtwdev, &pause_parm);
7948 rtw89_phy_dig_suspend(rtwdev);
7949
7950 if (mode == RTW89_ENTITY_MODE_MCC)
7951 rtw89_hw_scan_update_beacon_noa(rtwdev, true);
7952
7953 return 0;
7954 }
7955
7956 struct rtw89_hw_scan_complete_cb_data {
7957 struct rtw89_vif_link *rtwvif_link;
7958 bool aborted;
7959 };
7960
7961 static int rtw89_hw_scan_complete_cb(struct rtw89_dev *rtwdev, void *data)
7962 {
7963 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
7964 enum rtw89_entity_mode mode = rtw89_get_entity_mode(rtwdev);
7965 struct rtw89_hw_scan_complete_cb_data *cb_data = data;
7966 struct rtw89_vif_link *rtwvif_link = cb_data->rtwvif_link;
7967 struct cfg80211_scan_info info = {
7968 .aborted = cb_data->aborted,
7969 };
7970 u32 reg;
7971
7972 if (!rtwvif_link)
7973 return -EINVAL;
7974
7975 reg = rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, rtwvif_link->mac_idx);
7976 rtw89_write32_mask(rtwdev, reg, B_AX_RX_FLTR_CFG_MASK, rtwdev->hal.rx_fltr);
7977
7978 rtw89_core_scan_complete(rtwdev, rtwvif_link, true);
7979 ieee80211_scan_completed(rtwdev->hw, &info);
7980 ieee80211_wake_queues(rtwdev->hw);
7981 rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif_link, true);
7982 rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, true);
7983 rtw89_phy_dig_resume(rtwdev, true);
7984
7985 rtw89_hw_scan_cleanup(rtwdev, rtwvif_link);
7986
7987 if (mode == RTW89_ENTITY_MODE_MCC)
7988 rtw89_hw_scan_update_beacon_noa(rtwdev, false);
7989
7990 return 0;
7991 }
7992
7993 void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev,
7994 struct rtw89_vif_link *rtwvif_link,
7995 bool aborted)
7996 {
7997 struct rtw89_hw_scan_complete_cb_data cb_data = {
7998 .rtwvif_link = rtwvif_link,
7999 .aborted = aborted,
8000 };
8001 const struct rtw89_chanctx_cb_parm cb_parm = {
8002 .cb = rtw89_hw_scan_complete_cb,
8003 .data = &cb_data,
8004 .caller = __func__,
8005 };
8006
8007 /* The things here need to be done after setting the channel (for coex)
8008 * and before proceeding with the entity mode (for MCC). So, pass them as
8009 * a callback to run in the right sequence rather than doing them directly.
8010 */
8011 rtw89_chanctx_proceed(rtwdev, &cb_parm);
8012 }
8013
8014 void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev,
8015 struct rtw89_vif_link *rtwvif_link)
8016 {
8017 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
8018 int ret;
8019
8020 scan_info->abort = true;
8021
8022 ret = rtw89_hw_scan_offload(rtwdev, rtwvif_link, false);
8023 if (ret)
8024 rtw89_warn(rtwdev, "rtw89_hw_scan_offload failed ret %d\n", ret);
8025
8026 /* Indicate ieee80211_scan_completed() before returning, which is safe
8027 * because the scan abort command always waits for completion of
8028 * RTW89_SCAN_END_SCAN_NOTIFY, so that ieee80211_stop() can flush scan
8029 * work properly.
8030 */
8031 rtw89_hw_scan_complete(rtwdev, rtwvif_link, true);
8032 }
8033
8034 static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev)
8035 {
8036 struct rtw89_vif_link *rtwvif_link;
8037 struct rtw89_vif *rtwvif;
8038 unsigned int link_id;
8039
8040 rtw89_for_each_rtwvif(rtwdev, rtwvif) {
8041 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
8042 /* A non-zero BSSID implies the link is connected or attempting to connect */
8043 if (!is_zero_ether_addr(rtwvif_link->bssid))
8044 return true;
8045 }
8046 }
8047
8048 return false;
8049 }
8050
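/* Enable or disable the firmware scan offload. When enabling, the prepared
 * channel list is pushed to firmware first; BE chips additionally carry the
 * start/stop operation, scan mode, band, MLO mode and the number of
 * operating channels (one when connected, plus one when an extra op channel
 * is set).
 */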
8051 int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev,
8052 struct rtw89_vif_link *rtwvif_link,
8053 bool enable)
8054 {
8055 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
8056 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
8057 const struct rtw89_hw_scan_extra_op *ext = &scan_info->extra_op;
8058 struct rtw89_scan_option opt = {0};
8059 bool connected;
8060 int ret = 0;
8061
8062 if (!rtwvif_link)
8063 return -EINVAL;
8064
8065 connected = rtwdev->scan_info.connected;
8066 opt.enable = enable;
8067 opt.target_ch_mode = connected;
8068 opt.delay = rtwdev->scan_info.delay;
8069 if (enable) {
8070 ret = mac->add_chan_list(rtwdev, rtwvif_link);
8071 if (ret)
8072 goto out;
8073 }
8074
8075 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) {
8076 opt.operation = enable ? RTW89_SCAN_OP_START : RTW89_SCAN_OP_STOP;
8077 opt.scan_mode = RTW89_SCAN_MODE_SA;
8078 opt.band = rtwvif_link->mac_idx;
8079 opt.num_macc_role = 0;
8080 opt.mlo_mode = rtwdev->mlo_dbcc_mode;
8081 opt.num_opch = connected ? 1 : 0;
8082 if (connected && ext->set)
8083 opt.num_opch++;
8084
8085 opt.opch_end = connected ? 0 : RTW89_CHAN_INVALID;
8086 }
8087
8088 ret = rtw89_mac_scan_offload(rtwdev, &opt, rtwvif_link, false);
8089
8090 out:
8091 return ret;
8092 }
8093
8094 #define H2C_FW_CPU_EXCEPTION_TYPE_0 0x5566
8095 #define H2C_FW_CPU_EXCEPTION_TYPE_1 0x0
8096 int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev)
8097 {
8098 struct rtw89_h2c_trig_cpu_except *h2c;
8099 u32 cpu_exception_type_def;
8100 u32 len = sizeof(*h2c);
8101 struct sk_buff *skb;
8102 int ret;
8103
8104 if (RTW89_CHK_FW_FEATURE(CRASH_TRIGGER_TYPE_1, &rtwdev->fw))
8105 cpu_exception_type_def = H2C_FW_CPU_EXCEPTION_TYPE_1;
8106 else if (RTW89_CHK_FW_FEATURE(CRASH_TRIGGER_TYPE_0, &rtwdev->fw))
8107 cpu_exception_type_def = H2C_FW_CPU_EXCEPTION_TYPE_0;
8108 else
8109 return -EOPNOTSUPP;
8110
8111 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
8112 if (!skb) {
8113 rtw89_err(rtwdev,
8114 "failed to alloc skb for fw cpu exception\n");
8115 return -ENOMEM;
8116 }
8117
8118 skb_put(skb, len);
8119 h2c = (struct rtw89_h2c_trig_cpu_except *)skb->data;
8120
8121 h2c->w0 = le32_encode_bits(cpu_exception_type_def,
8122 RTW89_H2C_CPU_EXCEPTION_TYPE);
8123
8124 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8125 H2C_CAT_TEST,
8126 H2C_CL_FW_STATUS_TEST,
8127 H2C_FUNC_CPU_EXCEPTION, 0, 0,
8128 len);
8129
8130 ret = rtw89_h2c_tx(rtwdev, skb, false);
8131 if (ret) {
8132 rtw89_err(rtwdev, "failed to send h2c\n");
8133 dev_kfree_skb_any(skb);
8134 return ret;
8135 }
8136
8137 return 0;
8138 }
8139
8140 #define H2C_PKT_DROP_LEN 24
8141 int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev,
8142 const struct rtw89_pkt_drop_params *params)
8143 {
8144 struct sk_buff *skb;
8145 int ret;
8146
8147 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_PKT_DROP_LEN);
8148 if (!skb) {
8149 rtw89_err(rtwdev,
8150 "failed to alloc skb for packet drop\n");
8151 return -ENOMEM;
8152 }
8153
8154 switch (params->sel) {
8155 case RTW89_PKT_DROP_SEL_MACID_BE_ONCE:
8156 case RTW89_PKT_DROP_SEL_MACID_BK_ONCE:
8157 case RTW89_PKT_DROP_SEL_MACID_VI_ONCE:
8158 case RTW89_PKT_DROP_SEL_MACID_VO_ONCE:
8159 case RTW89_PKT_DROP_SEL_BAND_ONCE:
8160 break;
8161 default:
8162 rtw89_debug(rtwdev, RTW89_DBG_FW,
8163 "H2C of pkt drop might not fully support sel: %d yet\n",
8164 params->sel);
8165 break;
8166 }
8167
8168 skb_put(skb, H2C_PKT_DROP_LEN);
8169 RTW89_SET_FWCMD_PKT_DROP_SEL(skb->data, params->sel);
8170 RTW89_SET_FWCMD_PKT_DROP_MACID(skb->data, params->macid);
8171 RTW89_SET_FWCMD_PKT_DROP_BAND(skb->data, params->mac_band);
8172 RTW89_SET_FWCMD_PKT_DROP_PORT(skb->data, params->port);
8173 RTW89_SET_FWCMD_PKT_DROP_MBSSID(skb->data, params->mbssid);
8174 RTW89_SET_FWCMD_PKT_DROP_ROLE_A_INFO_TF_TRS(skb->data, params->tf_trs);
8175 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_0(skb->data,
8176 params->macid_band_sel[0]);
8177 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_1(skb->data,
8178 params->macid_band_sel[1]);
8179 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_2(skb->data,
8180 params->macid_band_sel[2]);
8181 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_3(skb->data,
8182 params->macid_band_sel[3]);
8183
8184 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8185 H2C_CAT_MAC,
8186 H2C_CL_MAC_FW_OFLD,
8187 H2C_FUNC_PKT_DROP, 0, 0,
8188 H2C_PKT_DROP_LEN);
8189
8190 ret = rtw89_h2c_tx(rtwdev, skb, false);
8191 if (ret) {
8192 rtw89_err(rtwdev, "failed to send h2c\n");
8193 goto fail;
8194 }
8195
8196 return 0;
8197
8198 fail:
8199 dev_kfree_skb_any(skb);
8200 return ret;
8201 }
8202
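/* Firmware keep-alive: when enabling, a NULL-data frame template is added to
 * the packet offload table first and its packet ID is handed to firmware
 * together with the check period.
 */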
8203 #define H2C_KEEP_ALIVE_LEN 4
8204 int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
8205 bool enable)
8206 {
8207 struct sk_buff *skb;
8208 u8 pkt_id = 0;
8209 int ret;
8210
8211 if (enable) {
8212 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
8213 RTW89_PKT_OFLD_TYPE_NULL_DATA,
8214 &pkt_id);
8215 if (ret)
8216 return -EPERM;
8217 }
8218
8219 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_KEEP_ALIVE_LEN);
8220 if (!skb) {
8221 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n");
8222 return -ENOMEM;
8223 }
8224
8225 skb_put(skb, H2C_KEEP_ALIVE_LEN);
8226
8227 RTW89_SET_KEEP_ALIVE_ENABLE(skb->data, enable);
8228 RTW89_SET_KEEP_ALIVE_PKT_NULL_ID(skb->data, pkt_id);
8229 RTW89_SET_KEEP_ALIVE_PERIOD(skb->data, 5);
8230 RTW89_SET_KEEP_ALIVE_MACID(skb->data, rtwvif_link->mac_id);
8231
8232 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8233 H2C_CAT_MAC,
8234 H2C_CL_MAC_WOW,
8235 H2C_FUNC_KEEP_ALIVE, 0, 1,
8236 H2C_KEEP_ALIVE_LEN);
8237
8238 ret = rtw89_h2c_tx(rtwdev, skb, false);
8239 if (ret) {
8240 rtw89_err(rtwdev, "failed to send h2c\n");
8241 goto fail;
8242 }
8243
8244 return 0;
8245
8246 fail:
8247 dev_kfree_skb_any(skb);
8248
8249 return ret;
8250 }
8251
8252 int rtw89_fw_h2c_arp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
8253 bool enable)
8254 {
8255 struct rtw89_h2c_arp_offload *h2c;
8256 u32 len = sizeof(*h2c);
8257 struct sk_buff *skb;
8258 u8 pkt_id = 0;
8259 int ret;
8260
8261 if (enable) {
8262 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
8263 RTW89_PKT_OFLD_TYPE_ARP_RSP,
8264 &pkt_id);
8265 if (ret)
8266 return ret;
8267 }
8268
8269 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
8270 if (!skb) {
8271 rtw89_err(rtwdev, "failed to alloc skb for arp offload\n");
8272 return -ENOMEM;
8273 }
8274
8275 skb_put(skb, len);
8276 h2c = (struct rtw89_h2c_arp_offload *)skb->data;
8277
8278 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_ARP_OFFLOAD_W0_ENABLE) |
8279 le32_encode_bits(0, RTW89_H2C_ARP_OFFLOAD_W0_ACTION) |
8280 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_ARP_OFFLOAD_W0_MACID) |
8281 le32_encode_bits(pkt_id, RTW89_H2C_ARP_OFFLOAD_W0_PKT_ID);
8282
8283 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8284 H2C_CAT_MAC,
8285 H2C_CL_MAC_WOW,
8286 H2C_FUNC_ARP_OFLD, 0, 1,
8287 len);
8288
8289 ret = rtw89_h2c_tx(rtwdev, skb, false);
8290 if (ret) {
8291 rtw89_err(rtwdev, "failed to send h2c\n");
8292 goto fail;
8293 }
8294
8295 return 0;
8296
8297 fail:
8298 dev_kfree_skb_any(skb);
8299
8300 return ret;
8301 }
8302
8303 #define H2C_DISCONNECT_DETECT_LEN 8
8304 int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev,
8305 struct rtw89_vif_link *rtwvif_link, bool enable)
8306 {
8307 struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
8308 struct sk_buff *skb;
8309 u8 macid = rtwvif_link->mac_id;
8310 int ret;
8311
8312 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DISCONNECT_DETECT_LEN);
8313 if (!skb) {
8314 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n");
8315 return -ENOMEM;
8316 }
8317
8318 skb_put(skb, H2C_DISCONNECT_DETECT_LEN);
8319
8320 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) {
8321 RTW89_SET_DISCONNECT_DETECT_ENABLE(skb->data, enable);
8322 RTW89_SET_DISCONNECT_DETECT_DISCONNECT(skb->data, !enable);
8323 RTW89_SET_DISCONNECT_DETECT_MAC_ID(skb->data, macid);
8324 RTW89_SET_DISCONNECT_DETECT_CHECK_PERIOD(skb->data, 100);
8325 RTW89_SET_DISCONNECT_DETECT_TRY_PKT_COUNT(skb->data, 5);
8326 }
8327
8328 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8329 H2C_CAT_MAC,
8330 H2C_CL_MAC_WOW,
8331 H2C_FUNC_DISCONNECT_DETECT, 0, 1,
8332 H2C_DISCONNECT_DETECT_LEN);
8333
8334 ret = rtw89_h2c_tx(rtwdev, skb, false);
8335 if (ret) {
8336 rtw89_err(rtwdev, "failed to send h2c\n");
8337 goto fail;
8338 }
8339
8340 return 0;
8341
8342 fail:
8343 dev_kfree_skb_any(skb);
8344
8345 return ret;
8346 }
8347
8348 int rtw89_fw_h2c_cfg_pno(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
8349 bool enable)
8350 {
8351 struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
8352 struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config;
8353 struct rtw89_h2c_cfg_nlo *h2c;
8354 u32 len = sizeof(*h2c);
8355 struct sk_buff *skb;
8356 int ret, i;
8357
8358 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
8359 if (!skb) {
8360 rtw89_err(rtwdev, "failed to alloc skb for nlo\n");
8361 return -ENOMEM;
8362 }
8363
8364 skb_put(skb, len);
8365 h2c = (struct rtw89_h2c_cfg_nlo *)skb->data;
8366
8367 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_NLO_W0_ENABLE) |
8368 le32_encode_bits(enable, RTW89_H2C_NLO_W0_IGNORE_CIPHER) |
8369 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_NLO_W0_MACID);
8370
8371 if (enable) {
8372 h2c->nlo_cnt = nd_config->n_match_sets;
8373 for (i = 0 ; i < nd_config->n_match_sets; i++) {
8374 h2c->ssid_len[i] = nd_config->match_sets[i].ssid.ssid_len;
8375 memcpy(h2c->ssid[i], nd_config->match_sets[i].ssid.ssid,
8376 nd_config->match_sets[i].ssid.ssid_len);
8377 }
8378 }
8379
8380 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8381 H2C_CAT_MAC,
8382 H2C_CL_MAC_WOW,
8383 H2C_FUNC_NLO, 0, 1,
8384 len);
8385
8386 ret = rtw89_h2c_tx(rtwdev, skb, false);
8387 if (ret) {
8388 rtw89_err(rtwdev, "failed to send h2c\n");
8389 goto fail;
8390 }
8391
8392 return 0;
8393
8394 fail:
8395 dev_kfree_skb_any(skb);
8396 return ret;
8397 }
8398
8399 int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
8400 bool enable)
8401 {
8402 struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
8403 struct rtw89_h2c_wow_global *h2c;
8404 u8 macid = rtwvif_link->mac_id;
8405 u32 len = sizeof(*h2c);
8406 struct sk_buff *skb;
8407 int ret;
8408
8409 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
8410 if (!skb) {
8411 rtw89_err(rtwdev, "failed to alloc skb for wow global\n");
8412 return -ENOMEM;
8413 }
8414
8415 skb_put(skb, len);
8416 h2c = (struct rtw89_h2c_wow_global *)skb->data;
8417
8418 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_WOW_GLOBAL_W0_ENABLE) |
8419 le32_encode_bits(macid, RTW89_H2C_WOW_GLOBAL_W0_MAC_ID) |
8420 le32_encode_bits(rtw_wow->ptk_alg,
8421 RTW89_H2C_WOW_GLOBAL_W0_PAIRWISE_SEC_ALGO) |
8422 le32_encode_bits(rtw_wow->gtk_alg,
8423 RTW89_H2C_WOW_GLOBAL_W0_GROUP_SEC_ALGO);
8424 h2c->key_info = rtw_wow->key_info;
8425
8426 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8427 H2C_CAT_MAC,
8428 H2C_CL_MAC_WOW,
8429 H2C_FUNC_WOW_GLOBAL, 0, 1,
8430 len);
8431
8432 ret = rtw89_h2c_tx(rtwdev, skb, false);
8433 if (ret) {
8434 rtw89_err(rtwdev, "failed to send h2c\n");
8435 goto fail;
8436 }
8437
8438 return 0;
8439
8440 fail:
8441 dev_kfree_skb_any(skb);
8442
8443 return ret;
8444 }
8445
8446 #define H2C_WAKEUP_CTRL_LEN 4
8447 int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev,
8448 struct rtw89_vif_link *rtwvif_link,
8449 bool enable)
8450 {
8451 struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
8452 struct sk_buff *skb;
8453 u8 macid = rtwvif_link->mac_id;
8454 int ret;
8455
8456 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WAKEUP_CTRL_LEN);
8457 if (!skb) {
8458 rtw89_err(rtwdev, "failed to alloc skb for wakeup ctrl\n");
8459 return -ENOMEM;
8460 }
8461
8462 skb_put(skb, H2C_WAKEUP_CTRL_LEN);
8463
8464 if (rtw_wow->pattern_cnt)
8465 RTW89_SET_WOW_WAKEUP_CTRL_PATTERN_MATCH_ENABLE(skb->data, enable);
8466 if (test_bit(RTW89_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags))
8467 RTW89_SET_WOW_WAKEUP_CTRL_MAGIC_ENABLE(skb->data, enable);
8468 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags))
8469 RTW89_SET_WOW_WAKEUP_CTRL_DEAUTH_ENABLE(skb->data, enable);
8470
8471 RTW89_SET_WOW_WAKEUP_CTRL_MAC_ID(skb->data, macid);
8472
8473 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8474 H2C_CAT_MAC,
8475 H2C_CL_MAC_WOW,
8476 H2C_FUNC_WAKEUP_CTRL, 0, 1,
8477 H2C_WAKEUP_CTRL_LEN);
8478
8479 ret = rtw89_h2c_tx(rtwdev, skb, false);
8480 if (ret) {
8481 rtw89_err(rtwdev, "failed to send h2c\n");
8482 goto fail;
8483 }
8484
8485 return 0;
8486
8487 fail:
8488 dev_kfree_skb_any(skb);
8489
8490 return ret;
8491 }
8492
8493 #define H2C_WOW_CAM_UPD_LEN 24
8494 int rtw89_fw_wow_cam_update(struct rtw89_dev *rtwdev,
8495 struct rtw89_wow_cam_info *cam_info)
8496 {
8497 struct sk_buff *skb;
8498 int ret;
8499
8500 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_CAM_UPD_LEN);
8501 if (!skb) {
8502 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n");
8503 return -ENOMEM;
8504 }
8505
8506 skb_put(skb, H2C_WOW_CAM_UPD_LEN);
8507
8508 RTW89_SET_WOW_CAM_UPD_R_W(skb->data, cam_info->r_w);
8509 RTW89_SET_WOW_CAM_UPD_IDX(skb->data, cam_info->idx);
8510 if (cam_info->valid) {
8511 RTW89_SET_WOW_CAM_UPD_WKFM1(skb->data, cam_info->mask[0]);
8512 RTW89_SET_WOW_CAM_UPD_WKFM2(skb->data, cam_info->mask[1]);
8513 RTW89_SET_WOW_CAM_UPD_WKFM3(skb->data, cam_info->mask[2]);
8514 RTW89_SET_WOW_CAM_UPD_WKFM4(skb->data, cam_info->mask[3]);
8515 RTW89_SET_WOW_CAM_UPD_CRC(skb->data, cam_info->crc);
8516 RTW89_SET_WOW_CAM_UPD_NEGATIVE_PATTERN_MATCH(skb->data,
8517 cam_info->negative_pattern_match);
8518 RTW89_SET_WOW_CAM_UPD_SKIP_MAC_HDR(skb->data,
8519 cam_info->skip_mac_hdr);
8520 RTW89_SET_WOW_CAM_UPD_UC(skb->data, cam_info->uc);
8521 RTW89_SET_WOW_CAM_UPD_MC(skb->data, cam_info->mc);
8522 RTW89_SET_WOW_CAM_UPD_BC(skb->data, cam_info->bc);
8523 }
8524 RTW89_SET_WOW_CAM_UPD_VALID(skb->data, cam_info->valid);
8525
8526 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8527 H2C_CAT_MAC,
8528 H2C_CL_MAC_WOW,
8529 H2C_FUNC_WOW_CAM_UPD, 0, 1,
8530 H2C_WOW_CAM_UPD_LEN);
8531
8532 ret = rtw89_h2c_tx(rtwdev, skb, false);
8533 if (ret) {
8534 rtw89_err(rtwdev, "failed to send h2c\n");
8535 goto fail;
8536 }
8537
8538 return 0;
8539 fail:
8540 dev_kfree_skb_any(skb);
8541
8542 return ret;
8543 }
8544
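/* GTK rekey offload: upload an EAPOL 2/2 reply template (plus an SA Query
 * template when an IGTK is in use) as general packet offloads, then hand
 * their packet IDs together with the AKM/cipher selection to firmware.
 */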
8545 int rtw89_fw_h2c_wow_gtk_ofld(struct rtw89_dev *rtwdev,
8546 struct rtw89_vif_link *rtwvif_link,
8547 bool enable)
8548 {
8549 struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
8550 struct rtw89_wow_gtk_info *gtk_info = &rtw_wow->gtk_info;
8551 struct rtw89_h2c_wow_gtk_ofld *h2c;
8552 u8 macid = rtwvif_link->mac_id;
8553 u32 len = sizeof(*h2c);
8554 u8 pkt_id_sa_query = 0;
8555 struct sk_buff *skb;
8556 u8 pkt_id_eapol = 0;
8557 int ret;
8558
8559 if (!rtw_wow->gtk_alg)
8560 return 0;
8561
8562 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
8563 if (!skb) {
8564 rtw89_err(rtwdev, "failed to alloc skb for gtk ofld\n");
8565 return -ENOMEM;
8566 }
8567
8568 skb_put(skb, len);
8569 h2c = (struct rtw89_h2c_wow_gtk_ofld *)skb->data;
8570
8571 if (!enable)
8572 goto hdr;
8573
8574 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
8575 RTW89_PKT_OFLD_TYPE_EAPOL_KEY,
8576 &pkt_id_eapol);
8577 if (ret)
8578 goto fail;
8579
8580 if (gtk_info->igtk_keyid) {
8581 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
8582 RTW89_PKT_OFLD_TYPE_SA_QUERY,
8583 &pkt_id_sa_query);
8584 if (ret)
8585 goto fail;
8586 }
8587
8588 	/* TKIP is not supported yet */
8589 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_WOW_GTK_OFLD_W0_EN) |
8590 le32_encode_bits(0, RTW89_H2C_WOW_GTK_OFLD_W0_TKIP_EN) |
8591 le32_encode_bits(gtk_info->igtk_keyid ? 1 : 0,
8592 RTW89_H2C_WOW_GTK_OFLD_W0_IEEE80211W_EN) |
8593 le32_encode_bits(macid, RTW89_H2C_WOW_GTK_OFLD_W0_MAC_ID) |
8594 le32_encode_bits(pkt_id_eapol, RTW89_H2C_WOW_GTK_OFLD_W0_GTK_RSP_ID);
8595 h2c->w1 = le32_encode_bits(gtk_info->igtk_keyid ? pkt_id_sa_query : 0,
8596 RTW89_H2C_WOW_GTK_OFLD_W1_PMF_SA_QUERY_ID) |
8597 le32_encode_bits(rtw_wow->akm, RTW89_H2C_WOW_GTK_OFLD_W1_ALGO_AKM_SUIT);
8598 h2c->gtk_info = rtw_wow->gtk_info;
8599
8600 hdr:
8601 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8602 H2C_CAT_MAC,
8603 H2C_CL_MAC_WOW,
8604 H2C_FUNC_GTK_OFLD, 0, 1,
8605 len);
8606
8607 ret = rtw89_h2c_tx(rtwdev, skb, false);
8608 if (ret) {
8609 rtw89_err(rtwdev, "failed to send h2c\n");
8610 goto fail;
8611 }
8612 return 0;
8613 fail:
8614 dev_kfree_skb_any(skb);
8615
8616 return ret;
8617 }
8618
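/* Enable or disable firmware IPS for the link's mac_id and wait for the
 * RTW89_PS_WAIT_COND_IPS_CFG completion reported by firmware.
 */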
8619 int rtw89_fw_h2c_fwips(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
8620 bool enable)
8621 {
8622 struct rtw89_wait_info *wait = &rtwdev->mac.ps_wait;
8623 struct rtw89_h2c_fwips *h2c;
8624 u32 len = sizeof(*h2c);
8625 struct sk_buff *skb;
8626
8627 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
8628 if (!skb) {
8629 rtw89_err(rtwdev, "failed to alloc skb for fw ips\n");
8630 return -ENOMEM;
8631 }
8632 skb_put(skb, len);
8633 h2c = (struct rtw89_h2c_fwips *)skb->data;
8634
8635 h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_FW_IPS_W0_MACID) |
8636 le32_encode_bits(enable, RTW89_H2C_FW_IPS_W0_ENABLE);
8637
8638 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8639 H2C_CAT_MAC,
8640 H2C_CL_MAC_PS,
8641 H2C_FUNC_IPS_CFG, 0, 1,
8642 len);
8643
8644 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_PS_WAIT_COND_IPS_CFG);
8645 }
8646
8647 int rtw89_fw_h2c_wow_request_aoac(struct rtw89_dev *rtwdev)
8648 {
8649 struct rtw89_wait_info *wait = &rtwdev->wow.wait;
8650 struct rtw89_h2c_wow_aoac *h2c;
8651 u32 len = sizeof(*h2c);
8652 struct sk_buff *skb;
8653
8654 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
8655 if (!skb) {
8656 rtw89_err(rtwdev, "failed to alloc skb for aoac\n");
8657 return -ENOMEM;
8658 }
8659
8660 skb_put(skb, len);
8661
8662 	/* This H2C only notifies the firmware to generate an AOAC report C2H;
8663 	 * it needs no parameters.
8664 	 */
8665 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8666 H2C_CAT_MAC,
8667 H2C_CL_MAC_WOW,
8668 H2C_FUNC_AOAC_REPORT_REQ, 1, 0,
8669 len);
8670
8671 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_WOW_WAIT_COND_AOAC);
8672 }
8673
8674 /* Return < 0 if a failure happens while waiting for the condition.
8675  * Return 0 when waiting for the condition succeeds.
8676  * Return > 0 if the wait is considered unreachable due to driver/FW design,
8677  * where 1 means the H2C was sent during SER handling.
8678  */
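/* A minimal caller sketch of the pattern used by the MCC/MRC helpers below
 * (illustration only; the skb is owned by this helper, which frees it itself
 * on TX failure):
 *
 *	cond = RTW89_MCC_WAIT_COND(group, func);
 *	ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
 *	if (ret)
 *		return ret;
 *	(ret == 0: the matching C2H arrived; wait->data.buf holds any report)
 */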
8679 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
8680 struct rtw89_wait_info *wait, unsigned int cond)
8681 {
8682 int ret;
8683
8684 ret = rtw89_h2c_tx(rtwdev, skb, false);
8685 if (ret) {
8686 rtw89_err(rtwdev, "failed to send h2c\n");
8687 dev_kfree_skb_any(skb);
8688 return -EBUSY;
8689 }
8690
8691 if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags))
8692 return 1;
8693
8694 return rtw89_wait_for_cond(wait, cond);
8695 }
8696
8697 #define H2C_ADD_MCC_LEN 16
8698 int rtw89_fw_h2c_add_mcc(struct rtw89_dev *rtwdev,
8699 const struct rtw89_fw_mcc_add_req *p)
8700 {
8701 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
8702 struct sk_buff *skb;
8703 unsigned int cond;
8704
8705 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ADD_MCC_LEN);
8706 if (!skb) {
8707 rtw89_err(rtwdev,
8708 "failed to alloc skb for add mcc\n");
8709 return -ENOMEM;
8710 }
8711
8712 skb_put(skb, H2C_ADD_MCC_LEN);
8713 RTW89_SET_FWCMD_ADD_MCC_MACID(skb->data, p->macid);
8714 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG0(skb->data, p->central_ch_seg0);
8715 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG1(skb->data, p->central_ch_seg1);
8716 RTW89_SET_FWCMD_ADD_MCC_PRIMARY_CH(skb->data, p->primary_ch);
8717 RTW89_SET_FWCMD_ADD_MCC_BANDWIDTH(skb->data, p->bandwidth);
8718 RTW89_SET_FWCMD_ADD_MCC_GROUP(skb->data, p->group);
8719 RTW89_SET_FWCMD_ADD_MCC_C2H_RPT(skb->data, p->c2h_rpt);
8720 RTW89_SET_FWCMD_ADD_MCC_DIS_TX_NULL(skb->data, p->dis_tx_null);
8721 RTW89_SET_FWCMD_ADD_MCC_DIS_SW_RETRY(skb->data, p->dis_sw_retry);
8722 RTW89_SET_FWCMD_ADD_MCC_IN_CURR_CH(skb->data, p->in_curr_ch);
8723 RTW89_SET_FWCMD_ADD_MCC_SW_RETRY_COUNT(skb->data, p->sw_retry_count);
8724 RTW89_SET_FWCMD_ADD_MCC_TX_NULL_EARLY(skb->data, p->tx_null_early);
8725 RTW89_SET_FWCMD_ADD_MCC_BTC_IN_2G(skb->data, p->btc_in_2g);
8726 RTW89_SET_FWCMD_ADD_MCC_PTA_EN(skb->data, p->pta_en);
8727 RTW89_SET_FWCMD_ADD_MCC_RFK_BY_PASS(skb->data, p->rfk_by_pass);
8728 RTW89_SET_FWCMD_ADD_MCC_CH_BAND_TYPE(skb->data, p->ch_band_type);
8729 RTW89_SET_FWCMD_ADD_MCC_DURATION(skb->data, p->duration);
8730 RTW89_SET_FWCMD_ADD_MCC_COURTESY_EN(skb->data, p->courtesy_en);
8731 RTW89_SET_FWCMD_ADD_MCC_COURTESY_NUM(skb->data, p->courtesy_num);
8732 RTW89_SET_FWCMD_ADD_MCC_COURTESY_TARGET(skb->data, p->courtesy_target);
8733
8734 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8735 H2C_CAT_MAC,
8736 H2C_CL_MCC,
8737 H2C_FUNC_ADD_MCC, 0, 0,
8738 H2C_ADD_MCC_LEN);
8739
8740 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_ADD_MCC);
8741 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
8742 }
8743
8744 #define H2C_START_MCC_LEN 12
8745 int rtw89_fw_h2c_start_mcc(struct rtw89_dev *rtwdev,
8746 const struct rtw89_fw_mcc_start_req *p)
8747 {
8748 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
8749 struct sk_buff *skb;
8750 unsigned int cond;
8751
8752 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_START_MCC_LEN);
8753 if (!skb) {
8754 rtw89_err(rtwdev,
8755 "failed to alloc skb for start mcc\n");
8756 return -ENOMEM;
8757 }
8758
8759 skb_put(skb, H2C_START_MCC_LEN);
8760 RTW89_SET_FWCMD_START_MCC_GROUP(skb->data, p->group);
8761 RTW89_SET_FWCMD_START_MCC_BTC_IN_GROUP(skb->data, p->btc_in_group);
8762 RTW89_SET_FWCMD_START_MCC_OLD_GROUP_ACTION(skb->data, p->old_group_action);
8763 RTW89_SET_FWCMD_START_MCC_OLD_GROUP(skb->data, p->old_group);
8764 RTW89_SET_FWCMD_START_MCC_NOTIFY_CNT(skb->data, p->notify_cnt);
8765 RTW89_SET_FWCMD_START_MCC_NOTIFY_RXDBG_EN(skb->data, p->notify_rxdbg_en);
8766 RTW89_SET_FWCMD_START_MCC_MACID(skb->data, p->macid);
8767 RTW89_SET_FWCMD_START_MCC_TSF_LOW(skb->data, p->tsf_low);
8768 RTW89_SET_FWCMD_START_MCC_TSF_HIGH(skb->data, p->tsf_high);
8769
8770 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8771 H2C_CAT_MAC,
8772 H2C_CL_MCC,
8773 H2C_FUNC_START_MCC, 0, 0,
8774 H2C_START_MCC_LEN);
8775
8776 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_START_MCC);
8777 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
8778 }
8779
8780 #define H2C_STOP_MCC_LEN 4
8781 int rtw89_fw_h2c_stop_mcc(struct rtw89_dev *rtwdev, u8 group, u8 macid,
8782 bool prev_groups)
8783 {
8784 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
8785 struct sk_buff *skb;
8786 unsigned int cond;
8787
8788 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_STOP_MCC_LEN);
8789 if (!skb) {
8790 rtw89_err(rtwdev,
8791 "failed to alloc skb for stop mcc\n");
8792 return -ENOMEM;
8793 }
8794
8795 skb_put(skb, H2C_STOP_MCC_LEN);
8796 RTW89_SET_FWCMD_STOP_MCC_MACID(skb->data, macid);
8797 RTW89_SET_FWCMD_STOP_MCC_GROUP(skb->data, group);
8798 RTW89_SET_FWCMD_STOP_MCC_PREV_GROUPS(skb->data, prev_groups);
8799
8800 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8801 H2C_CAT_MAC,
8802 H2C_CL_MCC,
8803 H2C_FUNC_STOP_MCC, 0, 0,
8804 H2C_STOP_MCC_LEN);
8805
8806 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_STOP_MCC);
8807 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
8808 }
8809
8810 #define H2C_DEL_MCC_GROUP_LEN 4
8811 int rtw89_fw_h2c_del_mcc_group(struct rtw89_dev *rtwdev, u8 group,
8812 bool prev_groups)
8813 {
8814 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
8815 struct sk_buff *skb;
8816 unsigned int cond;
8817
8818 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DEL_MCC_GROUP_LEN);
8819 if (!skb) {
8820 rtw89_err(rtwdev,
8821 "failed to alloc skb for del mcc group\n");
8822 return -ENOMEM;
8823 }
8824
8825 skb_put(skb, H2C_DEL_MCC_GROUP_LEN);
8826 RTW89_SET_FWCMD_DEL_MCC_GROUP_GROUP(skb->data, group);
8827 RTW89_SET_FWCMD_DEL_MCC_GROUP_PREV_GROUPS(skb->data, prev_groups);
8828
8829 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8830 H2C_CAT_MAC,
8831 H2C_CL_MCC,
8832 H2C_FUNC_DEL_MCC_GROUP, 0, 0,
8833 H2C_DEL_MCC_GROUP_LEN);
8834
8835 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_DEL_MCC_GROUP);
8836 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
8837 }
8838
8839 #define H2C_RESET_MCC_GROUP_LEN 4
8840 int rtw89_fw_h2c_reset_mcc_group(struct rtw89_dev *rtwdev, u8 group)
8841 {
8842 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
8843 struct sk_buff *skb;
8844 unsigned int cond;
8845
8846 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RESET_MCC_GROUP_LEN);
8847 if (!skb) {
8848 rtw89_err(rtwdev,
8849 "failed to alloc skb for reset mcc group\n");
8850 return -ENOMEM;
8851 }
8852
8853 skb_put(skb, H2C_RESET_MCC_GROUP_LEN);
8854 RTW89_SET_FWCMD_RESET_MCC_GROUP_GROUP(skb->data, group);
8855
8856 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8857 H2C_CAT_MAC,
8858 H2C_CL_MCC,
8859 H2C_FUNC_RESET_MCC_GROUP, 0, 0,
8860 H2C_RESET_MCC_GROUP_LEN);
8861
8862 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_RESET_MCC_GROUP);
8863 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
8864 }
8865
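/* Request the TSF values of the two MCC roles (macid_x/macid_y). The C2H
 * completion fills wait->data.buf, which is then copied out into @rpt.
 */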
8866 #define H2C_MCC_REQ_TSF_LEN 4
8867 int rtw89_fw_h2c_mcc_req_tsf(struct rtw89_dev *rtwdev,
8868 const struct rtw89_fw_mcc_tsf_req *req,
8869 struct rtw89_mac_mcc_tsf_rpt *rpt)
8870 {
8871 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
8872 struct rtw89_mac_mcc_tsf_rpt *tmp;
8873 struct sk_buff *skb;
8874 unsigned int cond;
8875 int ret;
8876
8877 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_REQ_TSF_LEN);
8878 if (!skb) {
8879 rtw89_err(rtwdev,
8880 "failed to alloc skb for mcc req tsf\n");
8881 return -ENOMEM;
8882 }
8883
8884 skb_put(skb, H2C_MCC_REQ_TSF_LEN);
8885 RTW89_SET_FWCMD_MCC_REQ_TSF_GROUP(skb->data, req->group);
8886 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_X(skb->data, req->macid_x);
8887 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_Y(skb->data, req->macid_y);
8888
8889 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8890 H2C_CAT_MAC,
8891 H2C_CL_MCC,
8892 H2C_FUNC_MCC_REQ_TSF, 0, 0,
8893 H2C_MCC_REQ_TSF_LEN);
8894
8895 cond = RTW89_MCC_WAIT_COND(req->group, H2C_FUNC_MCC_REQ_TSF);
8896 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
8897 if (ret)
8898 return ret;
8899
8900 tmp = (struct rtw89_mac_mcc_tsf_rpt *)wait->data.buf;
8901 *rpt = *tmp;
8902
8903 return 0;
8904 }
8905
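/* Variable-length H2C: a fixed descriptor followed by RTW89_MAX_MAC_ID_NUM / 8
 * bytes of mac_id bitmap; the BUILD_BUG_ON() below guarantees the bitmap
 * length is a whole number of bytes.
 */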
8906 #define H2C_MCC_MACID_BITMAP_DSC_LEN 4
8907 int rtw89_fw_h2c_mcc_macid_bitmap(struct rtw89_dev *rtwdev, u8 group, u8 macid,
8908 u8 *bitmap)
8909 {
8910 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
8911 struct sk_buff *skb;
8912 unsigned int cond;
8913 u8 map_len;
8914 u8 h2c_len;
8915
8916 BUILD_BUG_ON(RTW89_MAX_MAC_ID_NUM % 8);
8917 map_len = RTW89_MAX_MAC_ID_NUM / 8;
8918 h2c_len = H2C_MCC_MACID_BITMAP_DSC_LEN + map_len;
8919 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, h2c_len);
8920 if (!skb) {
8921 rtw89_err(rtwdev,
8922 "failed to alloc skb for mcc macid bitmap\n");
8923 return -ENOMEM;
8924 }
8925
8926 skb_put(skb, h2c_len);
8927 RTW89_SET_FWCMD_MCC_MACID_BITMAP_GROUP(skb->data, group);
8928 RTW89_SET_FWCMD_MCC_MACID_BITMAP_MACID(skb->data, macid);
8929 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP_LENGTH(skb->data, map_len);
8930 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP(skb->data, bitmap, map_len);
8931
8932 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8933 H2C_CAT_MAC,
8934 H2C_CL_MCC,
8935 H2C_FUNC_MCC_MACID_BITMAP, 0, 0,
8936 h2c_len);
8937
8938 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_MACID_BITMAP);
8939 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
8940 }
8941
8942 #define H2C_MCC_SYNC_LEN 4
8943 int rtw89_fw_h2c_mcc_sync(struct rtw89_dev *rtwdev, u8 group, u8 source,
8944 u8 target, u8 offset)
8945 {
8946 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
8947 struct sk_buff *skb;
8948 unsigned int cond;
8949
8950 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SYNC_LEN);
8951 if (!skb) {
8952 rtw89_err(rtwdev,
8953 "failed to alloc skb for mcc sync\n");
8954 return -ENOMEM;
8955 }
8956
8957 skb_put(skb, H2C_MCC_SYNC_LEN);
8958 RTW89_SET_FWCMD_MCC_SYNC_GROUP(skb->data, group);
8959 RTW89_SET_FWCMD_MCC_SYNC_MACID_SOURCE(skb->data, source);
8960 RTW89_SET_FWCMD_MCC_SYNC_MACID_TARGET(skb->data, target);
8961 RTW89_SET_FWCMD_MCC_SYNC_SYNC_OFFSET(skb->data, offset);
8962
8963 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
8964 H2C_CAT_MAC,
8965 H2C_CL_MCC,
8966 H2C_FUNC_MCC_SYNC, 0, 0,
8967 H2C_MCC_SYNC_LEN);
8968
8969 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_SYNC);
8970 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
8971 }
8972
8973 #define H2C_MCC_SET_DURATION_LEN 20
8974 int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev,
8975 const struct rtw89_fw_mcc_duration *p)
8976 {
8977 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
8978 struct sk_buff *skb;
8979 unsigned int cond;
8980
8981 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SET_DURATION_LEN);
8982 if (!skb) {
8983 rtw89_err(rtwdev,
8984 "failed to alloc skb for mcc set duration\n");
8985 return -ENOMEM;
8986 }
8987
8988 skb_put(skb, H2C_MCC_SET_DURATION_LEN);
8989 RTW89_SET_FWCMD_MCC_SET_DURATION_GROUP(skb->data, p->group);
8990 RTW89_SET_FWCMD_MCC_SET_DURATION_BTC_IN_GROUP(skb->data, p->btc_in_group);
8991 RTW89_SET_FWCMD_MCC_SET_DURATION_START_MACID(skb->data, p->start_macid);
8992 RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_X(skb->data, p->macid_x);
8993 RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_Y(skb->data, p->macid_y);
8994 RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_LOW(skb->data,
8995 p->start_tsf_low);
8996 RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_HIGH(skb->data,
8997 p->start_tsf_high);
8998 RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_X(skb->data, p->duration_x);
8999 RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_Y(skb->data, p->duration_y);
9000
9001 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
9002 H2C_CAT_MAC,
9003 H2C_CL_MCC,
9004 H2C_FUNC_MCC_SET_DURATION, 0, 0,
9005 H2C_MCC_SET_DURATION_LEN);
9006
9007 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_MCC_SET_DURATION);
9008 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
9009 }
9010
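/* Serialize one MRC slot and its roles into @slot_h2c. When called with
 * @slot_h2c == NULL it only returns the struct_size() of the slot, so callers
 * can size the whole H2C before filling it (see rtw89_fw_h2c_mrc_add()).
 */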
9011 static
9012 u32 rtw89_fw_h2c_mrc_add_slot(struct rtw89_dev *rtwdev,
9013 const struct rtw89_fw_mrc_add_slot_arg *slot_arg,
9014 struct rtw89_h2c_mrc_add_slot *slot_h2c)
9015 {
9016 bool fill_h2c = !!slot_h2c;
9017 unsigned int i;
9018
9019 if (!fill_h2c)
9020 goto calc_len;
9021
9022 slot_h2c->w0 = le32_encode_bits(slot_arg->duration,
9023 RTW89_H2C_MRC_ADD_SLOT_W0_DURATION) |
9024 le32_encode_bits(slot_arg->courtesy_en,
9025 RTW89_H2C_MRC_ADD_SLOT_W0_COURTESY_EN) |
9026 le32_encode_bits(slot_arg->role_num,
9027 RTW89_H2C_MRC_ADD_SLOT_W0_ROLE_NUM);
9028 slot_h2c->w1 = le32_encode_bits(slot_arg->courtesy_period,
9029 RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_PERIOD) |
9030 le32_encode_bits(slot_arg->courtesy_target,
9031 RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_TARGET);
9032
9033 for (i = 0; i < slot_arg->role_num; i++) {
9034 slot_h2c->roles[i].w0 =
9035 le32_encode_bits(slot_arg->roles[i].macid,
9036 RTW89_H2C_MRC_ADD_ROLE_W0_MACID) |
9037 le32_encode_bits(slot_arg->roles[i].role_type,
9038 RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_TYPE) |
9039 le32_encode_bits(slot_arg->roles[i].is_master,
9040 RTW89_H2C_MRC_ADD_ROLE_W0_IS_MASTER) |
9041 le32_encode_bits(slot_arg->roles[i].en_tx_null,
9042 RTW89_H2C_MRC_ADD_ROLE_W0_TX_NULL_EN) |
9043 le32_encode_bits(false,
9044 RTW89_H2C_MRC_ADD_ROLE_W0_IS_ALT_ROLE) |
9045 le32_encode_bits(false,
9046 RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_ALT_EN);
9047 slot_h2c->roles[i].w1 =
9048 le32_encode_bits(slot_arg->roles[i].central_ch,
9049 RTW89_H2C_MRC_ADD_ROLE_W1_CENTRAL_CH_SEG) |
9050 le32_encode_bits(slot_arg->roles[i].primary_ch,
9051 RTW89_H2C_MRC_ADD_ROLE_W1_PRI_CH) |
9052 le32_encode_bits(slot_arg->roles[i].bw,
9053 RTW89_H2C_MRC_ADD_ROLE_W1_BW) |
9054 le32_encode_bits(slot_arg->roles[i].band,
9055 RTW89_H2C_MRC_ADD_ROLE_W1_CH_BAND_TYPE) |
9056 le32_encode_bits(slot_arg->roles[i].null_early,
9057 RTW89_H2C_MRC_ADD_ROLE_W1_NULL_EARLY) |
9058 le32_encode_bits(false,
9059 RTW89_H2C_MRC_ADD_ROLE_W1_RFK_BY_PASS) |
9060 le32_encode_bits(true,
9061 RTW89_H2C_MRC_ADD_ROLE_W1_CAN_BTC);
9062 slot_h2c->roles[i].macid_main_bitmap =
9063 cpu_to_le32(slot_arg->roles[i].macid_main_bitmap);
9064 slot_h2c->roles[i].macid_paired_bitmap =
9065 cpu_to_le32(slot_arg->roles[i].macid_paired_bitmap);
9066 }
9067
9068 calc_len:
9069 return struct_size(slot_h2c, roles, slot_arg->role_num);
9070 }
9071
9072 int rtw89_fw_h2c_mrc_add(struct rtw89_dev *rtwdev,
9073 const struct rtw89_fw_mrc_add_arg *arg)
9074 {
9075 struct rtw89_h2c_mrc_add *h2c_head;
9076 struct sk_buff *skb;
9077 unsigned int i;
9078 void *tmp;
9079 u32 len;
9080 int ret;
9081
9082 len = sizeof(*h2c_head);
9083 for (i = 0; i < arg->slot_num; i++)
9084 len += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], NULL);
9085
9086 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
9087 if (!skb) {
9088 rtw89_err(rtwdev, "failed to alloc skb for mrc add\n");
9089 return -ENOMEM;
9090 }
9091
9092 skb_put(skb, len);
9093 tmp = skb->data;
9094
9095 h2c_head = tmp;
9096 h2c_head->w0 = le32_encode_bits(arg->sch_idx,
9097 RTW89_H2C_MRC_ADD_W0_SCH_IDX) |
9098 le32_encode_bits(arg->sch_type,
9099 RTW89_H2C_MRC_ADD_W0_SCH_TYPE) |
9100 le32_encode_bits(arg->slot_num,
9101 RTW89_H2C_MRC_ADD_W0_SLOT_NUM) |
9102 le32_encode_bits(arg->btc_in_sch,
9103 RTW89_H2C_MRC_ADD_W0_BTC_IN_SCH);
9104
9105 tmp += sizeof(*h2c_head);
9106 for (i = 0; i < arg->slot_num; i++)
9107 tmp += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], tmp);
9108
9109 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
9110 H2C_CAT_MAC,
9111 H2C_CL_MRC,
9112 H2C_FUNC_ADD_MRC, 0, 0,
9113 len);
9114
9115 ret = rtw89_h2c_tx(rtwdev, skb, false);
9116 if (ret) {
9117 rtw89_err(rtwdev, "failed to send h2c\n");
9118 dev_kfree_skb_any(skb);
9119 return -EBUSY;
9120 }
9121
9122 return 0;
9123 }
9124
9125 int rtw89_fw_h2c_mrc_start(struct rtw89_dev *rtwdev,
9126 const struct rtw89_fw_mrc_start_arg *arg)
9127 {
9128 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
9129 struct rtw89_h2c_mrc_start *h2c;
9130 u32 len = sizeof(*h2c);
9131 struct sk_buff *skb;
9132 unsigned int cond;
9133
9134 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
9135 if (!skb) {
9136 rtw89_err(rtwdev, "failed to alloc skb for mrc start\n");
9137 return -ENOMEM;
9138 }
9139
9140 skb_put(skb, len);
9141 h2c = (struct rtw89_h2c_mrc_start *)skb->data;
9142
9143 h2c->w0 = le32_encode_bits(arg->sch_idx,
9144 RTW89_H2C_MRC_START_W0_SCH_IDX) |
9145 le32_encode_bits(arg->old_sch_idx,
9146 RTW89_H2C_MRC_START_W0_OLD_SCH_IDX) |
9147 le32_encode_bits(arg->action,
9148 RTW89_H2C_MRC_START_W0_ACTION);
9149
9150 h2c->start_tsf_high = cpu_to_le32(arg->start_tsf >> 32);
9151 h2c->start_tsf_low = cpu_to_le32(arg->start_tsf);
9152
9153 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
9154 H2C_CAT_MAC,
9155 H2C_CL_MRC,
9156 H2C_FUNC_START_MRC, 0, 0,
9157 len);
9158
9159 cond = RTW89_MRC_WAIT_COND(arg->sch_idx, H2C_FUNC_START_MRC);
9160 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
9161 }
9162
9163 int rtw89_fw_h2c_mrc_del(struct rtw89_dev *rtwdev, u8 sch_idx, u8 slot_idx)
9164 {
9165 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
9166 struct rtw89_h2c_mrc_del *h2c;
9167 u32 len = sizeof(*h2c);
9168 struct sk_buff *skb;
9169 unsigned int cond;
9170
9171 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
9172 if (!skb) {
9173 rtw89_err(rtwdev, "failed to alloc skb for mrc del\n");
9174 return -ENOMEM;
9175 }
9176
9177 skb_put(skb, len);
9178 h2c = (struct rtw89_h2c_mrc_del *)skb->data;
9179
9180 h2c->w0 = le32_encode_bits(sch_idx, RTW89_H2C_MRC_DEL_W0_SCH_IDX) |
9181 le32_encode_bits(slot_idx, RTW89_H2C_MRC_DEL_W0_STOP_SLOT_IDX);
9182
9183 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
9184 H2C_CAT_MAC,
9185 H2C_CL_MRC,
9186 H2C_FUNC_DEL_MRC, 0, 0,
9187 len);
9188
9189 cond = RTW89_MRC_WAIT_COND(sch_idx, H2C_FUNC_DEL_MRC);
9190 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
9191 }
9192
9193 int rtw89_fw_h2c_mrc_req_tsf(struct rtw89_dev *rtwdev,
9194 const struct rtw89_fw_mrc_req_tsf_arg *arg,
9195 struct rtw89_mac_mrc_tsf_rpt *rpt)
9196 {
9197 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
9198 struct rtw89_h2c_mrc_req_tsf *h2c;
9199 struct rtw89_mac_mrc_tsf_rpt *tmp;
9200 struct sk_buff *skb;
9201 unsigned int i;
9202 u32 len;
9203 int ret;
9204
9205 len = struct_size(h2c, infos, arg->num);
9206 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
9207 if (!skb) {
9208 rtw89_err(rtwdev, "failed to alloc skb for mrc req tsf\n");
9209 return -ENOMEM;
9210 }
9211
9212 skb_put(skb, len);
9213 h2c = (struct rtw89_h2c_mrc_req_tsf *)skb->data;
9214
9215 h2c->req_tsf_num = arg->num;
9216 for (i = 0; i < arg->num; i++)
9217 h2c->infos[i] =
9218 u8_encode_bits(arg->infos[i].band,
9219 RTW89_H2C_MRC_REQ_TSF_INFO_BAND) |
9220 u8_encode_bits(arg->infos[i].port,
9221 RTW89_H2C_MRC_REQ_TSF_INFO_PORT);
9222
9223 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
9224 H2C_CAT_MAC,
9225 H2C_CL_MRC,
9226 H2C_FUNC_MRC_REQ_TSF, 0, 0,
9227 len);
9228
9229 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_MRC_WAIT_COND_REQ_TSF);
9230 if (ret)
9231 return ret;
9232
9233 tmp = (struct rtw89_mac_mrc_tsf_rpt *)wait->data.buf;
9234 *rpt = *tmp;
9235
9236 return 0;
9237 }
9238
9239 int rtw89_fw_h2c_mrc_upd_bitmap(struct rtw89_dev *rtwdev,
9240 const struct rtw89_fw_mrc_upd_bitmap_arg *arg)
9241 {
9242 struct rtw89_h2c_mrc_upd_bitmap *h2c;
9243 u32 len = sizeof(*h2c);
9244 struct sk_buff *skb;
9245 int ret;
9246
9247 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
9248 if (!skb) {
9249 rtw89_err(rtwdev, "failed to alloc skb for mrc upd bitmap\n");
9250 return -ENOMEM;
9251 }
9252
9253 skb_put(skb, len);
9254 h2c = (struct rtw89_h2c_mrc_upd_bitmap *)skb->data;
9255
9256 h2c->w0 = le32_encode_bits(arg->sch_idx,
9257 RTW89_H2C_MRC_UPD_BITMAP_W0_SCH_IDX) |
9258 le32_encode_bits(arg->action,
9259 RTW89_H2C_MRC_UPD_BITMAP_W0_ACTION) |
9260 le32_encode_bits(arg->macid,
9261 RTW89_H2C_MRC_UPD_BITMAP_W0_MACID);
9262 h2c->w1 = le32_encode_bits(arg->client_macid,
9263 RTW89_H2C_MRC_UPD_BITMAP_W1_CLIENT_MACID);
9264
9265 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
9266 H2C_CAT_MAC,
9267 H2C_CL_MRC,
9268 H2C_FUNC_MRC_UPD_BITMAP, 0, 0,
9269 len);
9270
9271 ret = rtw89_h2c_tx(rtwdev, skb, false);
9272 if (ret) {
9273 rtw89_err(rtwdev, "failed to send h2c\n");
9274 dev_kfree_skb_any(skb);
9275 return -EBUSY;
9276 }
9277
9278 return 0;
9279 }
9280
9281 int rtw89_fw_h2c_mrc_sync(struct rtw89_dev *rtwdev,
9282 const struct rtw89_fw_mrc_sync_arg *arg)
9283 {
9284 struct rtw89_h2c_mrc_sync *h2c;
9285 u32 len = sizeof(*h2c);
9286 struct sk_buff *skb;
9287 int ret;
9288
9289 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
9290 if (!skb) {
9291 rtw89_err(rtwdev, "failed to alloc skb for mrc sync\n");
9292 return -ENOMEM;
9293 }
9294
9295 skb_put(skb, len);
9296 h2c = (struct rtw89_h2c_mrc_sync *)skb->data;
9297
9298 h2c->w0 = le32_encode_bits(true, RTW89_H2C_MRC_SYNC_W0_SYNC_EN) |
9299 le32_encode_bits(arg->src.port,
9300 RTW89_H2C_MRC_SYNC_W0_SRC_PORT) |
9301 le32_encode_bits(arg->src.band,
9302 RTW89_H2C_MRC_SYNC_W0_SRC_BAND) |
9303 le32_encode_bits(arg->dest.port,
9304 RTW89_H2C_MRC_SYNC_W0_DEST_PORT) |
9305 le32_encode_bits(arg->dest.band,
9306 RTW89_H2C_MRC_SYNC_W0_DEST_BAND);
9307 h2c->w1 = le32_encode_bits(arg->offset, RTW89_H2C_MRC_SYNC_W1_OFFSET);
9308
9309 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
9310 H2C_CAT_MAC,
9311 H2C_CL_MRC,
9312 H2C_FUNC_MRC_SYNC, 0, 0,
9313 len);
9314
9315 ret = rtw89_h2c_tx(rtwdev, skb, false);
9316 if (ret) {
9317 rtw89_err(rtwdev, "failed to send h2c\n");
9318 dev_kfree_skb_any(skb);
9319 return -EBUSY;
9320 }
9321
9322 return 0;
9323 }
9324
9325 int rtw89_fw_h2c_mrc_upd_duration(struct rtw89_dev *rtwdev,
9326 const struct rtw89_fw_mrc_upd_duration_arg *arg)
9327 {
9328 struct rtw89_h2c_mrc_upd_duration *h2c;
9329 struct sk_buff *skb;
9330 unsigned int i;
9331 u32 len;
9332 int ret;
9333
9334 len = struct_size(h2c, slots, arg->slot_num);
9335 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
9336 if (!skb) {
9337 rtw89_err(rtwdev, "failed to alloc skb for mrc upd duration\n");
9338 return -ENOMEM;
9339 }
9340
9341 skb_put(skb, len);
9342 h2c = (struct rtw89_h2c_mrc_upd_duration *)skb->data;
9343
9344 h2c->w0 = le32_encode_bits(arg->sch_idx,
9345 RTW89_H2C_MRC_UPD_DURATION_W0_SCH_IDX) |
9346 le32_encode_bits(arg->slot_num,
9347 RTW89_H2C_MRC_UPD_DURATION_W0_SLOT_NUM) |
9348 le32_encode_bits(false,
9349 RTW89_H2C_MRC_UPD_DURATION_W0_BTC_IN_SCH);
9350
9351 h2c->start_tsf_high = cpu_to_le32(arg->start_tsf >> 32);
9352 h2c->start_tsf_low = cpu_to_le32(arg->start_tsf);
9353
9354 for (i = 0; i < arg->slot_num; i++) {
9355 h2c->slots[i] =
9356 le32_encode_bits(arg->slots[i].slot_idx,
9357 RTW89_H2C_MRC_UPD_DURATION_SLOT_SLOT_IDX) |
9358 le32_encode_bits(arg->slots[i].duration,
9359 RTW89_H2C_MRC_UPD_DURATION_SLOT_DURATION);
9360 }
9361
9362 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
9363 H2C_CAT_MAC,
9364 H2C_CL_MRC,
9365 H2C_FUNC_MRC_UPD_DURATION, 0, 0,
9366 len);
9367
9368 ret = rtw89_h2c_tx(rtwdev, skb, false);
9369 if (ret) {
9370 rtw89_err(rtwdev, "failed to send h2c\n");
9371 dev_kfree_skb_any(skb);
9372 return -EBUSY;
9373 }
9374
9375 return 0;
9376 }
9377
9378 static int rtw89_fw_h2c_ap_info(struct rtw89_dev *rtwdev, bool en)
9379 {
9380 struct rtw89_h2c_ap_info *h2c;
9381 u32 len = sizeof(*h2c);
9382 struct sk_buff *skb;
9383 int ret;
9384
9385 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
9386 if (!skb) {
9387 rtw89_err(rtwdev, "failed to alloc skb for ap info\n");
9388 return -ENOMEM;
9389 }
9390
9391 skb_put(skb, len);
9392 h2c = (struct rtw89_h2c_ap_info *)skb->data;
9393
9394 h2c->w0 = le32_encode_bits(en, RTW89_H2C_AP_INFO_W0_PWR_INT_EN);
9395
9396 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
9397 H2C_CAT_MAC,
9398 H2C_CL_AP,
9399 H2C_FUNC_AP_INFO, 0, 0,
9400 len);
9401
9402 ret = rtw89_h2c_tx(rtwdev, skb, false);
9403 if (ret) {
9404 rtw89_err(rtwdev, "failed to send h2c\n");
9405 dev_kfree_skb_any(skb);
9406 return -EBUSY;
9407 }
9408
9409 return 0;
9410 }
9411
9412 int rtw89_fw_h2c_ap_info_refcount(struct rtw89_dev *rtwdev, bool en)
9413 {
9414 int ret;
9415
9416 if (en) {
9417 if (refcount_inc_not_zero(&rtwdev->refcount_ap_info))
9418 return 0;
9419 } else {
9420 if (!refcount_dec_and_test(&rtwdev->refcount_ap_info))
9421 return 0;
9422 }
9423
9424 ret = rtw89_fw_h2c_ap_info(rtwdev, en);
9425 if (ret) {
9426 if (!test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags))
9427 return ret;
9428
9429 		/* During recovery, neither the driver nor the stack has full
9430 		 * error handling, so show a warning but return 0 with the
9431 		 * refcount increased normally. This avoids an underflow when
9432 		 * this is called with @en == false later.
9433 		 */
9434 rtw89_warn(rtwdev, "h2c ap_info failed during SER\n");
9435 }
9436
9437 if (en)
9438 refcount_set(&rtwdev->refcount_ap_info, 1);
9439
9440 return 0;
9441 }
9442
9443 int rtw89_fw_h2c_mlo_link_cfg(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
9444 bool enable)
9445 {
9446 struct rtw89_wait_info *wait = &rtwdev->mlo.wait;
9447 struct rtw89_h2c_mlo_link_cfg *h2c;
9448 u8 mac_id = rtwvif_link->mac_id;
9449 u32 len = sizeof(*h2c);
9450 struct sk_buff *skb;
9451 unsigned int cond;
9452 int ret;
9453
9454 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
9455 if (!skb) {
9456 rtw89_err(rtwdev, "failed to alloc skb for mlo link cfg\n");
9457 return -ENOMEM;
9458 }
9459
9460 skb_put(skb, len);
9461 h2c = (struct rtw89_h2c_mlo_link_cfg *)skb->data;
9462
9463 h2c->w0 = le32_encode_bits(mac_id, RTW89_H2C_MLO_LINK_CFG_W0_MACID) |
9464 le32_encode_bits(enable, RTW89_H2C_MLO_LINK_CFG_W0_OPTION);
9465
9466 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
9467 H2C_CAT_MAC,
9468 H2C_CL_MLO,
9469 H2C_FUNC_MLO_LINK_CFG, 0, 0,
9470 len);
9471
9472 cond = RTW89_MLO_WAIT_COND(mac_id, H2C_FUNC_MLO_LINK_CFG);
9473
9474 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
9475 if (ret) {
9476 rtw89_err(rtwdev, "mlo link cfg (%s link id %u) failed: %d\n",
9477 str_enable_disable(enable), rtwvif_link->link_id, ret);
9478 return ret;
9479 }
9480
9481 return 0;
9482 }
9483
9484 static bool __fw_txpwr_entry_zero_ext(const void *ext_ptr, u8 ext_len)
9485 {
9486 static const u8 zeros[U8_MAX] = {};
9487
9488 return memcmp(ext_ptr, zeros, ext_len) == 0;
9489 }
9490
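/* Accept a firmware TX power entry whose on-file size (ent_sz) exceeds the
 * struct this driver knows only if all of the unknown trailing bytes are
 * zero; otherwise the entry is treated as incompatible and skipped.
 */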
9491 #define __fw_txpwr_entry_acceptable(e, cursor, ent_sz) \
9492 ({ \
9493 u8 __var_sz = sizeof(*(e)); \
9494 bool __accept; \
9495 if (__var_sz >= (ent_sz)) \
9496 __accept = true; \
9497 else \
9498 __accept = __fw_txpwr_entry_zero_ext((cursor) + __var_sz,\
9499 (ent_sz) - __var_sz);\
9500 __accept; \
9501 })
9502
9503 static bool
9504 fw_txpwr_byrate_entry_valid(const struct rtw89_fw_txpwr_byrate_entry *e,
9505 const void *cursor,
9506 const struct rtw89_txpwr_conf *conf)
9507 {
9508 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
9509 return false;
9510
9511 if (e->band >= RTW89_BAND_NUM || e->bw >= RTW89_BYR_BW_NUM)
9512 return false;
9513
9514 switch (e->rs) {
9515 case RTW89_RS_CCK:
9516 if (e->shf + e->len > RTW89_RATE_CCK_NUM)
9517 return false;
9518 break;
9519 case RTW89_RS_OFDM:
9520 if (e->shf + e->len > RTW89_RATE_OFDM_NUM)
9521 return false;
9522 break;
9523 case RTW89_RS_MCS:
9524 if (e->shf + e->len > __RTW89_RATE_MCS_NUM ||
9525 e->nss >= RTW89_NSS_NUM ||
9526 e->ofdma >= RTW89_OFDMA_NUM)
9527 return false;
9528 break;
9529 case RTW89_RS_HEDCM:
9530 if (e->shf + e->len > RTW89_RATE_HEDCM_NUM ||
9531 e->nss >= RTW89_NSS_HEDCM_NUM ||
9532 e->ofdma >= RTW89_OFDMA_NUM)
9533 return false;
9534 break;
9535 case RTW89_RS_OFFSET:
9536 if (e->shf + e->len > __RTW89_RATE_OFFSET_NUM)
9537 return false;
9538 break;
9539 default:
9540 return false;
9541 }
9542
9543 return true;
9544 }
9545
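/* Unpack firmware by-rate TX power entries into rtwdev->byr[band][bw]. Each
 * entry packs entry.len one-byte power values in entry.data, starting at rate
 * index entry.shf of the (rs, nss, ofdma) rate descriptor.
 */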
9546 static
9547 void rtw89_fw_load_txpwr_byrate(struct rtw89_dev *rtwdev,
9548 const struct rtw89_txpwr_table *tbl)
9549 {
9550 const struct rtw89_txpwr_conf *conf = tbl->data;
9551 struct rtw89_fw_txpwr_byrate_entry entry = {};
9552 struct rtw89_txpwr_byrate *byr_head;
9553 struct rtw89_rate_desc desc = {};
9554 const void *cursor;
9555 u32 data;
9556 s8 *byr;
9557 int i;
9558
9559 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
9560 if (!fw_txpwr_byrate_entry_valid(&entry, cursor, conf))
9561 continue;
9562
9563 byr_head = &rtwdev->byr[entry.band][entry.bw];
9564 data = le32_to_cpu(entry.data);
9565 desc.ofdma = entry.ofdma;
9566 desc.nss = entry.nss;
9567 desc.rs = entry.rs;
9568
9569 for (i = 0; i < entry.len; i++, data >>= 8) {
9570 desc.idx = entry.shf + i;
9571 byr = rtw89_phy_raw_byr_seek(rtwdev, byr_head, &desc);
9572 *byr = data & 0xff;
9573 }
9574 }
9575 }
9576
9577 static bool
9578 fw_txpwr_lmt_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_2ghz_entry *e,
9579 const void *cursor,
9580 const struct rtw89_txpwr_conf *conf)
9581 {
9582 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
9583 return false;
9584
9585 if (e->bw >= RTW89_2G_BW_NUM)
9586 return false;
9587 if (e->nt >= RTW89_NTX_NUM)
9588 return false;
9589 if (e->rs >= RTW89_RS_LMT_NUM)
9590 return false;
9591 if (e->bf >= RTW89_BF_NUM)
9592 return false;
9593 if (e->regd >= RTW89_REGD_NUM)
9594 return false;
9595 if (e->ch_idx >= RTW89_2G_CH_NUM)
9596 return false;
9597
9598 return true;
9599 }
9600
9601 static
9602 void rtw89_fw_load_txpwr_lmt_2ghz(struct rtw89_txpwr_lmt_2ghz_data *data)
9603 {
9604 const struct rtw89_txpwr_conf *conf = &data->conf;
9605 struct rtw89_fw_txpwr_lmt_2ghz_entry entry = {};
9606 const void *cursor;
9607
9608 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
9609 if (!fw_txpwr_lmt_2ghz_entry_valid(&entry, cursor, conf))
9610 continue;
9611
9612 data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd]
9613 [entry.ch_idx] = entry.v;
9614 }
9615 }
9616
9617 static bool
9618 fw_txpwr_lmt_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_5ghz_entry *e,
9619 const void *cursor,
9620 const struct rtw89_txpwr_conf *conf)
9621 {
9622 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
9623 return false;
9624
9625 if (e->bw >= RTW89_5G_BW_NUM)
9626 return false;
9627 if (e->nt >= RTW89_NTX_NUM)
9628 return false;
9629 if (e->rs >= RTW89_RS_LMT_NUM)
9630 return false;
9631 if (e->bf >= RTW89_BF_NUM)
9632 return false;
9633 if (e->regd >= RTW89_REGD_NUM)
9634 return false;
9635 if (e->ch_idx >= RTW89_5G_CH_NUM)
9636 return false;
9637
9638 return true;
9639 }
9640
9641 static
9642 void rtw89_fw_load_txpwr_lmt_5ghz(struct rtw89_txpwr_lmt_5ghz_data *data)
9643 {
9644 const struct rtw89_txpwr_conf *conf = &data->conf;
9645 struct rtw89_fw_txpwr_lmt_5ghz_entry entry = {};
9646 const void *cursor;
9647
9648 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
9649 if (!fw_txpwr_lmt_5ghz_entry_valid(&entry, cursor, conf))
9650 continue;
9651
9652 data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd]
9653 [entry.ch_idx] = entry.v;
9654 }
9655 }
9656
9657 static bool
9658 fw_txpwr_lmt_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_6ghz_entry *e,
9659 const void *cursor,
9660 const struct rtw89_txpwr_conf *conf)
9661 {
9662 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
9663 return false;
9664
9665 if (e->bw >= RTW89_6G_BW_NUM)
9666 return false;
9667 if (e->nt >= RTW89_NTX_NUM)
9668 return false;
9669 if (e->rs >= RTW89_RS_LMT_NUM)
9670 return false;
9671 if (e->bf >= RTW89_BF_NUM)
9672 return false;
9673 if (e->regd >= RTW89_REGD_NUM)
9674 return false;
9675 if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER)
9676 return false;
9677 if (e->ch_idx >= RTW89_6G_CH_NUM)
9678 return false;
9679
9680 return true;
9681 }
9682
9683 static
9684 void rtw89_fw_load_txpwr_lmt_6ghz(struct rtw89_txpwr_lmt_6ghz_data *data)
9685 {
9686 const struct rtw89_txpwr_conf *conf = &data->conf;
9687 struct rtw89_fw_txpwr_lmt_6ghz_entry entry = {};
9688 const void *cursor;
9689
9690 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
9691 if (!fw_txpwr_lmt_6ghz_entry_valid(&entry, cursor, conf))
9692 continue;
9693
9694 data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd]
9695 [entry.reg_6ghz_power][entry.ch_idx] = entry.v;
9696 }
9697 }
9698
9699 static bool
9700 fw_txpwr_lmt_ru_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_2ghz_entry *e,
9701 const void *cursor,
9702 const struct rtw89_txpwr_conf *conf)
9703 {
9704 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
9705 return false;
9706
9707 if (e->ru >= RTW89_RU_NUM)
9708 return false;
9709 if (e->nt >= RTW89_NTX_NUM)
9710 return false;
9711 if (e->regd >= RTW89_REGD_NUM)
9712 return false;
9713 if (e->ch_idx >= RTW89_2G_CH_NUM)
9714 return false;
9715
9716 return true;
9717 }
9718
9719 static
9720 void rtw89_fw_load_txpwr_lmt_ru_2ghz(struct rtw89_txpwr_lmt_ru_2ghz_data *data)
9721 {
9722 const struct rtw89_txpwr_conf *conf = &data->conf;
9723 struct rtw89_fw_txpwr_lmt_ru_2ghz_entry entry = {};
9724 const void *cursor;
9725
9726 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
9727 if (!fw_txpwr_lmt_ru_2ghz_entry_valid(&entry, cursor, conf))
9728 continue;
9729
9730 data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v;
9731 }
9732 }
9733
9734 static bool
9735 fw_txpwr_lmt_ru_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_5ghz_entry *e,
9736 const void *cursor,
9737 const struct rtw89_txpwr_conf *conf)
9738 {
9739 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
9740 return false;
9741
9742 if (e->ru >= RTW89_RU_NUM)
9743 return false;
9744 if (e->nt >= RTW89_NTX_NUM)
9745 return false;
9746 if (e->regd >= RTW89_REGD_NUM)
9747 return false;
9748 if (e->ch_idx >= RTW89_5G_CH_NUM)
9749 return false;
9750
9751 return true;
9752 }
9753
9754 static
9755 void rtw89_fw_load_txpwr_lmt_ru_5ghz(struct rtw89_txpwr_lmt_ru_5ghz_data *data)
9756 {
9757 const struct rtw89_txpwr_conf *conf = &data->conf;
9758 struct rtw89_fw_txpwr_lmt_ru_5ghz_entry entry = {};
9759 const void *cursor;
9760
9761 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
9762 if (!fw_txpwr_lmt_ru_5ghz_entry_valid(&entry, cursor, conf))
9763 continue;
9764
9765 data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v;
9766 }
9767 }
9768
9769 static bool
9770 fw_txpwr_lmt_ru_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_6ghz_entry *e,
9771 const void *cursor,
9772 const struct rtw89_txpwr_conf *conf)
9773 {
9774 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
9775 return false;
9776
9777 if (e->ru >= RTW89_RU_NUM)
9778 return false;
9779 if (e->nt >= RTW89_NTX_NUM)
9780 return false;
9781 if (e->regd >= RTW89_REGD_NUM)
9782 return false;
9783 if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER)
9784 return false;
9785 if (e->ch_idx >= RTW89_6G_CH_NUM)
9786 return false;
9787
9788 return true;
9789 }
9790
9791 static
9792 void rtw89_fw_load_txpwr_lmt_ru_6ghz(struct rtw89_txpwr_lmt_ru_6ghz_data *data)
9793 {
9794 const struct rtw89_txpwr_conf *conf = &data->conf;
9795 struct rtw89_fw_txpwr_lmt_ru_6ghz_entry entry = {};
9796 const void *cursor;
9797
9798 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
9799 if (!fw_txpwr_lmt_ru_6ghz_entry_valid(&entry, cursor, conf))
9800 continue;
9801
9802 data->v[entry.ru][entry.nt][entry.regd][entry.reg_6ghz_power]
9803 [entry.ch_idx] = entry.v;
9804 }
9805 }
9806
9807 static bool
9808 fw_tx_shape_lmt_entry_valid(const struct rtw89_fw_tx_shape_lmt_entry *e,
9809 const void *cursor,
9810 const struct rtw89_txpwr_conf *conf)
9811 {
9812 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
9813 return false;
9814
9815 if (e->band >= RTW89_BAND_NUM)
9816 return false;
9817 if (e->tx_shape_rs >= RTW89_RS_TX_SHAPE_NUM)
9818 return false;
9819 if (e->regd >= RTW89_REGD_NUM)
9820 return false;
9821
9822 return true;
9823 }
9824
9825 static
9826 void rtw89_fw_load_tx_shape_lmt(struct rtw89_tx_shape_lmt_data *data)
9827 {
9828 const struct rtw89_txpwr_conf *conf = &data->conf;
9829 struct rtw89_fw_tx_shape_lmt_entry entry = {};
9830 const void *cursor;
9831
9832 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
9833 if (!fw_tx_shape_lmt_entry_valid(&entry, cursor, conf))
9834 continue;
9835
9836 data->v[entry.band][entry.tx_shape_rs][entry.regd] = entry.v;
9837 }
9838 }
9839
9840 static bool
9841 fw_tx_shape_lmt_ru_entry_valid(const struct rtw89_fw_tx_shape_lmt_ru_entry *e,
9842 const void *cursor,
9843 const struct rtw89_txpwr_conf *conf)
9844 {
9845 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
9846 return false;
9847
9848 if (e->band >= RTW89_BAND_NUM)
9849 return false;
9850 if (e->regd >= RTW89_REGD_NUM)
9851 return false;
9852
9853 return true;
9854 }
9855
9856 static
9857 void rtw89_fw_load_tx_shape_lmt_ru(struct rtw89_tx_shape_lmt_ru_data *data)
9858 {
9859 const struct rtw89_txpwr_conf *conf = &data->conf;
9860 struct rtw89_fw_tx_shape_lmt_ru_entry entry = {};
9861 const void *cursor;
9862
9863 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
9864 if (!fw_tx_shape_lmt_ru_entry_valid(&entry, cursor, conf))
9865 continue;
9866
9867 data->v[entry.band][entry.regd] = entry.v;
9868 }
9869 }
9870
9871 static bool rtw89_fw_has_da_txpwr_table(struct rtw89_dev *rtwdev,
9872 const struct rtw89_rfe_parms *parms)
9873 {
9874 const struct rtw89_chip_info *chip = rtwdev->chip;
9875
9876 if (chip->support_bands & BIT(NL80211_BAND_2GHZ) &&
9877 !(parms->rule_da_2ghz.lmt && parms->rule_da_2ghz.lmt_ru))
9878 return false;
9879
9880 if (chip->support_bands & BIT(NL80211_BAND_5GHZ) &&
9881 !(parms->rule_da_5ghz.lmt && parms->rule_da_5ghz.lmt_ru))
9882 return false;
9883
9884 if (chip->support_bands & BIT(NL80211_BAND_6GHZ) &&
9885 !(parms->rule_da_6ghz.lmt && parms->rule_da_6ghz.lmt_ru))
9886 return false;
9887
9888 return true;
9889 }
9890
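/* Start from @init (when given) and override each TX power table pointer for
 * which the loaded firmware file carries a valid conf; has_da is set only if
 * a complete set of da_* limit tables exists for every supported band.
 */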
9891 const struct rtw89_rfe_parms *
9892 rtw89_load_rfe_data_from_fw(struct rtw89_dev *rtwdev,
9893 const struct rtw89_rfe_parms *init)
9894 {
9895 struct rtw89_rfe_data *rfe_data = rtwdev->rfe_data;
9896 struct rtw89_rfe_parms *parms;
9897
9898 if (!rfe_data)
9899 return init;
9900
9901 parms = &rfe_data->rfe_parms;
9902 if (init)
9903 *parms = *init;
9904
9905 if (rtw89_txpwr_conf_valid(&rfe_data->byrate.conf)) {
9906 rfe_data->byrate.tbl.data = &rfe_data->byrate.conf;
9907 rfe_data->byrate.tbl.size = 0; /* don't care here */
9908 rfe_data->byrate.tbl.load = rtw89_fw_load_txpwr_byrate;
9909 parms->byr_tbl = &rfe_data->byrate.tbl;
9910 }
9911
9912 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_2ghz.conf)) {
9913 rtw89_fw_load_txpwr_lmt_2ghz(&rfe_data->lmt_2ghz);
9914 parms->rule_2ghz.lmt = &rfe_data->lmt_2ghz.v;
9915 }
9916
9917 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_5ghz.conf)) {
9918 rtw89_fw_load_txpwr_lmt_5ghz(&rfe_data->lmt_5ghz);
9919 parms->rule_5ghz.lmt = &rfe_data->lmt_5ghz.v;
9920 }
9921
9922 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_6ghz.conf)) {
9923 rtw89_fw_load_txpwr_lmt_6ghz(&rfe_data->lmt_6ghz);
9924 parms->rule_6ghz.lmt = &rfe_data->lmt_6ghz.v;
9925 }
9926
9927 if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_2ghz.conf)) {
9928 rtw89_fw_load_txpwr_lmt_2ghz(&rfe_data->da_lmt_2ghz);
9929 parms->rule_da_2ghz.lmt = &rfe_data->da_lmt_2ghz.v;
9930 }
9931
9932 if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_5ghz.conf)) {
9933 rtw89_fw_load_txpwr_lmt_5ghz(&rfe_data->da_lmt_5ghz);
9934 parms->rule_da_5ghz.lmt = &rfe_data->da_lmt_5ghz.v;
9935 }
9936
9937 if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_6ghz.conf)) {
9938 rtw89_fw_load_txpwr_lmt_6ghz(&rfe_data->da_lmt_6ghz);
9939 parms->rule_da_6ghz.lmt = &rfe_data->da_lmt_6ghz.v;
9940 }
9941
9942 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_2ghz.conf)) {
9943 rtw89_fw_load_txpwr_lmt_ru_2ghz(&rfe_data->lmt_ru_2ghz);
9944 parms->rule_2ghz.lmt_ru = &rfe_data->lmt_ru_2ghz.v;
9945 }
9946
9947 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_5ghz.conf)) {
9948 rtw89_fw_load_txpwr_lmt_ru_5ghz(&rfe_data->lmt_ru_5ghz);
9949 parms->rule_5ghz.lmt_ru = &rfe_data->lmt_ru_5ghz.v;
9950 }
9951
9952 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_6ghz.conf)) {
9953 rtw89_fw_load_txpwr_lmt_ru_6ghz(&rfe_data->lmt_ru_6ghz);
9954 parms->rule_6ghz.lmt_ru = &rfe_data->lmt_ru_6ghz.v;
9955 }
9956
9957 if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_ru_2ghz.conf)) {
9958 rtw89_fw_load_txpwr_lmt_ru_2ghz(&rfe_data->da_lmt_ru_2ghz);
9959 parms->rule_da_2ghz.lmt_ru = &rfe_data->da_lmt_ru_2ghz.v;
9960 }
9961
9962 if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_ru_5ghz.conf)) {
9963 rtw89_fw_load_txpwr_lmt_ru_5ghz(&rfe_data->da_lmt_ru_5ghz);
9964 parms->rule_da_5ghz.lmt_ru = &rfe_data->da_lmt_ru_5ghz.v;
9965 }
9966
9967 if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_ru_6ghz.conf)) {
9968 rtw89_fw_load_txpwr_lmt_ru_6ghz(&rfe_data->da_lmt_ru_6ghz);
9969 parms->rule_da_6ghz.lmt_ru = &rfe_data->da_lmt_ru_6ghz.v;
9970 }
9971
9972 if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt.conf)) {
9973 rtw89_fw_load_tx_shape_lmt(&rfe_data->tx_shape_lmt);
9974 parms->tx_shape.lmt = &rfe_data->tx_shape_lmt.v;
9975 }
9976
9977 if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt_ru.conf)) {
9978 rtw89_fw_load_tx_shape_lmt_ru(&rfe_data->tx_shape_lmt_ru);
9979 parms->tx_shape.lmt_ru = &rfe_data->tx_shape_lmt_ru.v;
9980 }
9981
9982 parms->has_da = rtw89_fw_has_da_txpwr_table(rtwdev, parms);
9983
9984 return parms;
9985 }
9986