1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright(c) 2019-2020 Realtek Corporation
3 */
4
5 #include <linux/if_arp.h>
6 #include "cam.h"
7 #include "chan.h"
8 #include "coex.h"
9 #include "debug.h"
10 #include "fw.h"
11 #include "mac.h"
12 #include "phy.h"
13 #include "ps.h"
14 #include "reg.h"
15 #include "util.h"
16 #include "wow.h"
17
18 static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev);
19
/* EAPOL-Key message 2-of-2 body template, padded to the fixed size the
 * firmware expects.
 * NOTE(review): role inferred from the name — presumably used by the
 * WoWLAN GTK rekey offload path; confirm against its users.
 */
struct rtw89_eapol_2_of_2 {
	u8 gtkbody[14];
	u8 key_des_ver;	/* EAPOL key descriptor version */
	u8 rsvd[92];	/* reserved/padding bytes */
} __packed;
25
/* Minimal 802.11 SA Query action frame body: category and action code. */
struct rtw89_sa_query {
	u8 category;
	u8 action;
} __packed;
30
/* ARP response payload as carried over 802.11: RFC 1042 LLC/SNAP header,
 * EtherType, then a standard ARP header with the MAC/IPv4 sender and
 * target address pairs.
 */
struct rtw89_arp_rsp {
	u8 llc_hdr[sizeof(rfc1042_header)];
	__be16 llc_type;		/* EtherType following the SNAP header */
	struct arphdr arp_hdr;
	u8 sender_hw[ETH_ALEN];
	__be32 sender_ip;
	u8 target_hw[ETH_ALEN];
	__be32 target_ip;
} __packed;
40
/* ASCII "MSSKPOOL" - magic marker at the head of an MSS key pool header */
static const u8 mss_signature[] = {0x4D, 0x53, 0x53, 0x4B, 0x50, 0x4F, 0x4F, 0x4C};
42
/* Default firmware blacklist: version 0 with no bits set, for chips that
 * support blacklist checking but have no blacklisted firmware yet.
 */
const struct rtw89_fw_blacklist rtw89_fw_blacklist_default = {
	.ver = 0x00,
	.list = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
		 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
		 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
		 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
	},
};
EXPORT_SYMBOL(rtw89_fw_blacklist_default);
52
/* Fixed argument attached to a firmware-element handler; which member is
 * meaningful depends on the handler (see struct rtw89_fw_element_handler).
 */
union rtw89_fw_element_arg {
	size_t offset;
	enum rtw89_rf_path rf_path;
	enum rtw89_fw_type fw_type;
};
58
/* Dispatch entry for one firmware-file element type: parser callback, its
 * fixed argument, and a printable name for diagnostics.
 */
struct rtw89_fw_element_handler {
	int (*fn)(struct rtw89_dev *rtwdev,
		  const struct rtw89_fw_element_hdr *elm,
		  const union rtw89_fw_element_arg arg);
	const union rtw89_fw_element_arg arg;
	const char *name;
};
66
67 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
68 struct sk_buff *skb);
69 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
70 struct rtw89_wait_info *wait, unsigned int cond);
71 static int __parse_security_section(struct rtw89_dev *rtwdev,
72 struct rtw89_fw_bin_info *info,
73 struct rtw89_fw_hdr_section_info *section_info,
74 const void *content,
75 u32 *mssc_len);
76
/* Allocate a zeroed skb for an H2C command of @len payload bytes,
 * reserving headroom for the chip's H2C descriptor and, when @header is
 * true, the H2C header as well.  Returns NULL on allocation failure.
 */
static struct sk_buff *rtw89_fw_h2c_alloc_skb(struct rtw89_dev *rtwdev, u32 len,
					      bool header)
{
	u32 headroom = rtwdev->chip->h2c_desc_size;
	struct sk_buff *skb;

	if (header)
		headroom += H2C_HEADER_LEN;

	skb = dev_alloc_skb(len + headroom);
	if (!skb)
		return NULL;

	skb_reserve(skb, headroom);
	memset(skb->data, 0, len);

	return skb;
}
95
/* Allocate an H2C skb with headroom for both descriptor and H2C header. */
struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len)
{
	return rtw89_fw_h2c_alloc_skb(rtwdev, len, true);
}
100
/* Allocate an H2C skb with headroom for the descriptor only (no header). */
struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len)
{
	return rtw89_fw_h2c_alloc_skb(rtwdev, len, false);
}
105
rtw89_fw_check_rdy(struct rtw89_dev * rtwdev,enum rtw89_fwdl_check_type type)106 int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev, enum rtw89_fwdl_check_type type)
107 {
108 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
109 u8 val;
110 int ret;
111
112 ret = read_poll_timeout_atomic(mac->fwdl_get_status, val,
113 val == RTW89_FWDL_WCPU_FW_INIT_RDY,
114 1, FWDL_WAIT_CNT, false, rtwdev, type);
115 if (ret) {
116 switch (val) {
117 case RTW89_FWDL_CHECKSUM_FAIL:
118 rtw89_err(rtwdev, "fw checksum fail\n");
119 return -EINVAL;
120
121 case RTW89_FWDL_SECURITY_FAIL:
122 rtw89_err(rtwdev, "fw security fail\n");
123 return -EINVAL;
124
125 case RTW89_FWDL_CV_NOT_MATCH:
126 rtw89_err(rtwdev, "fw cv not match\n");
127 return -EINVAL;
128
129 default:
130 rtw89_err(rtwdev, "fw unexpected status %d\n", val);
131 return -EBUSY;
132 }
133 }
134
135 set_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);
136
137 return 0;
138 }
139
/* Parse a v0 firmware header.
 *
 * Extracts the global layout (section count, optional dynamic header,
 * download partition size) into @info and fills one section_info entry
 * per section, accounting for per-section checksums and trailing MSS
 * (security) data.  Fails with -EINVAL when the dynamic header length is
 * inconsistent or the walked sections do not end exactly at @fw + @len.
 */
static int rtw89_fw_hdr_parser_v0(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
				  struct rtw89_fw_bin_info *info)
{
	const struct rtw89_fw_hdr *fw_hdr = (const struct rtw89_fw_hdr *)fw;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_fw_hdr_section_info *section_info;
	struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
	const struct rtw89_fw_dynhdr_hdr *fwdynhdr;
	const struct rtw89_fw_hdr_section *section;
	const u8 *fw_end = fw + len;
	const u8 *bin;
	u32 base_hdr_len;
	u32 mssc_len;
	int ret;
	u32 i;

	if (!info)
		return -EINVAL;

	info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_W6_SEC_NUM);
	base_hdr_len = struct_size(fw_hdr, sections, info->section_num);
	info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_W7_DYN_HDR);
	info->idmem_share_mode = le32_get_bits(fw_hdr->w7, FW_HDR_W7_IDMEM_SHARE_MODE);

	/* AX-generation chips download in fixed-size chunks */
	if (chip->chip_gen == RTW89_CHIP_AX)
		info->part_size = FWDL_SECTION_PER_PKT_LEN;
	else
		info->part_size = le32_get_bits(fw_hdr->w7, FW_HDR_W7_PART_SIZE);

	if (info->dynamic_hdr_en) {
		/* header length field covers base + dynamic part; cross-check
		 * against the dynamic header's own length field
		 */
		info->hdr_len = le32_get_bits(fw_hdr->w3, FW_HDR_W3_LEN);
		info->dynamic_hdr_len = info->hdr_len - base_hdr_len;
		fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len);
		if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) {
			rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n");
			return -EINVAL;
		}
	} else {
		info->hdr_len = base_hdr_len;
		info->dynamic_hdr_len = 0;
	}

	bin = fw + info->hdr_len;

	/* jump to section header */
	section_info = info->section_info;
	for (i = 0; i < info->section_num; i++) {
		section = &fw_hdr->sections[i];
		section_info->type =
			le32_get_bits(section->w1, FWSECTION_HDR_W1_SECTIONTYPE);
		section_info->len = le32_get_bits(section->w1, FWSECTION_HDR_W1_SEC_SIZE);

		/* checksummed sections carry extra trailing bytes */
		if (le32_get_bits(section->w1, FWSECTION_HDR_W1_CHECKSUM))
			section_info->len += FWDL_SECTION_CHKSUM_LEN;
		section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_W1_REDL);
		section_info->dladdr =
			le32_get_bits(section->w0, FWSECTION_HDR_W0_DL_ADDR) & 0x1fffffff;
		section_info->addr = bin;

		if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
			section_info->mssc =
				le32_get_bits(section->w2, FWSECTION_HDR_W2_MSSC);

			ret = __parse_security_section(rtwdev, info, section_info,
						       bin, &mssc_len);
			if (ret)
				return ret;

			/* NOTE(review): fixed download length for secure-boot
			 * 8852B — confirm the 960-byte override against the
			 * download path that consumes len_override.
			 */
			if (sec->secure_boot && chip->chip_id == RTL8852B)
				section_info->len_override = 960;
		} else {
			section_info->mssc = 0;
			mssc_len = 0;
		}

		rtw89_debug(rtwdev, RTW89_DBG_FW,
			    "section[%d] type=%d len=0x%-6x mssc=%d mssc_len=%d addr=%tx\n",
			    i, section_info->type, section_info->len,
			    section_info->mssc, mssc_len, bin - fw);
		rtw89_debug(rtwdev, RTW89_DBG_FW,
			    "           ignore=%d key_addr=%p (0x%tx) key_len=%d key_idx=%d\n",
			    section_info->ignore, section_info->key_addr,
			    section_info->key_addr ?
			    section_info->key_addr - section_info->addr : 0,
			    section_info->key_len, section_info->key_idx);

		bin += section_info->len + mssc_len;
		section_info++;
	}

	/* all sections (plus MSS data) must consume the binary exactly */
	if (fw_end != bin) {
		rtw89_err(rtwdev, "[ERR]fw bin size\n");
		return -EINVAL;
	}

	return 0;
}
237
/* Resolve which key blob inside the MSS key pool belongs to this device.
 *
 * Builds a flat selection index from the device's secure identity
 * (mss_dev_type / mss_cust_idx / mss_key_num), locates the matching bit in
 * the pool's remap bitmap, and counts the set bits that precede it to
 * yield the physical key index (*key_idx).
 *
 * Returns 0 on success, -ENOENT when no key exists for this identity,
 * -EFAULT when the selection index falls outside the remap table.
 */
static int __get_mssc_key_idx(struct rtw89_dev *rtwdev,
			      const struct rtw89_fw_mss_pool_hdr *mss_hdr,
			      u32 rmp_tbl_size, u32 *key_idx)
{
	struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
	u32 sel_byte_idx;
	u32 mss_sel_idx;
	u8 sel_bit_idx;
	int i;

	if (sec->mss_dev_type == RTW89_FW_MSS_DEV_TYPE_FWSEC_DEF) {
		/* default-key devices require the pool to ship default keys */
		if (!mss_hdr->defen)
			return -ENOENT;

		mss_sel_idx = sec->mss_cust_idx * le16_to_cpu(mss_hdr->msskey_num_max) +
			      sec->mss_key_num;
	} else {
		/* non-default keys start after the default key sets (in bits) */
		if (mss_hdr->defen)
			mss_sel_idx = FWDL_MSS_POOL_DEFKEYSETS_SIZE << 3;
		else
			mss_sel_idx = 0;
		mss_sel_idx += sec->mss_dev_type * le16_to_cpu(mss_hdr->msskey_num_max) *
					       le16_to_cpu(mss_hdr->msscust_max) +
			       sec->mss_cust_idx * le16_to_cpu(mss_hdr->msskey_num_max) +
			       sec->mss_key_num;
	}

	/* byte and bit position of the selection index inside the bitmap */
	sel_byte_idx = mss_sel_idx >> 3;
	sel_bit_idx = mss_sel_idx & 0x7;

	if (sel_byte_idx >= rmp_tbl_size)
		return -EFAULT;

	if (!(mss_hdr->rmp_tbl[sel_byte_idx] & BIT(sel_bit_idx)))
		return -ENOENT;

	/* physical index = number of set bits before ours in the bitmap */
	*key_idx = hweight8(mss_hdr->rmp_tbl[sel_byte_idx] & (BIT(sel_bit_idx) - 1));

	for (i = 0; i < sel_byte_idx; i++)
		*key_idx += hweight8(mss_hdr->rmp_tbl[i]);

	return 0;
}
281
/* Parse a "formatted" MSS key pool that trails a security section.
 *
 * Validates the pool header (signature, remap-table format, raw-key
 * offset), computes the total pool length into *mssc_len, and, when
 * secure boot is enabled, resolves the key blob this device should use
 * via __get_mssc_key_idx().  Only the first matching security section
 * claims info->secure_section_exist; later or non-matching ones are
 * flagged ->ignore.
 *
 * Returns 0 on success (possibly with ->ignore set), negative error code
 * on malformed pool data.
 *
 * Fix: the '&' in the sb_sel_ver read had been corrupted to '§'
 * (mis-encoded "&sect;"), which is not valid C; restore the address-of
 * operator.
 */
static int __parse_formatted_mssc(struct rtw89_dev *rtwdev,
				  struct rtw89_fw_bin_info *info,
				  struct rtw89_fw_hdr_section_info *section_info,
				  const void *content,
				  u32 *mssc_len)
{
	const struct rtw89_fw_mss_pool_hdr *mss_hdr = content + section_info->len;
	const union rtw89_fw_section_mssc_content *section_content = content;
	struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
	u32 rmp_tbl_size;
	u32 key_sign_len;
	u32 real_key_idx;
	u32 sb_sel_ver;
	int ret;

	if (memcmp(mss_signature, mss_hdr->signature, sizeof(mss_signature)) != 0) {
		rtw89_err(rtwdev, "[ERR] wrong MSS signature\n");
		return -ENOENT;
	}

	if (mss_hdr->rmpfmt == MSS_POOL_RMP_TBL_BITMASK) {
		/* one bit per key set; counts are multiplied, then >> 3 for bytes */
		rmp_tbl_size = (le16_to_cpu(mss_hdr->msskey_num_max) *
				le16_to_cpu(mss_hdr->msscust_max) *
				mss_hdr->mssdev_max) >> 3;
		if (mss_hdr->defen)
			rmp_tbl_size += FWDL_MSS_POOL_DEFKEYSETS_SIZE;
	} else {
		rtw89_err(rtwdev, "[ERR] MSS Key Pool Remap Table Format Unsupport:%X\n",
			  mss_hdr->rmpfmt);
		return -EINVAL;
	}

	/* the raw key area must start right after header + remap table */
	if (rmp_tbl_size + sizeof(*mss_hdr) != le32_to_cpu(mss_hdr->key_raw_offset)) {
		rtw89_err(rtwdev, "[ERR] MSS Key Pool Format Error:0x%X + 0x%X != 0x%X\n",
			  rmp_tbl_size, (int)sizeof(*mss_hdr),
			  le32_to_cpu(mss_hdr->key_raw_offset));
		return -EINVAL;
	}

	key_sign_len = le16_to_cpu(section_content->key_sign_len.v) >> 2;
	if (!key_sign_len)
		key_sign_len = 512;

	if (info->dsp_checksum)
		key_sign_len += FWDL_SECURITY_CHKSUM_LEN;

	*mssc_len = sizeof(*mss_hdr) + rmp_tbl_size +
		    le16_to_cpu(mss_hdr->keypair_num) * key_sign_len;

	if (!sec->secure_boot)
		goto out;

	sb_sel_ver = get_unaligned_le32(&section_content->sb_sel_ver.v);
	if (sb_sel_ver && sb_sel_ver != sec->sb_sel_mgn)
		goto ignore;

	ret = __get_mssc_key_idx(rtwdev, mss_hdr, rmp_tbl_size, &real_key_idx);
	if (ret)
		goto ignore;

	section_info->key_addr = content + section_info->len +
				le32_to_cpu(mss_hdr->key_raw_offset) +
				key_sign_len * real_key_idx;
	section_info->key_len = key_sign_len;
	section_info->key_idx = real_key_idx;

out:
	if (info->secure_section_exist) {
		section_info->ignore = true;
		return 0;
	}

	info->secure_section_exist = true;

	return 0;

ignore:
	section_info->ignore = true;

	return 0;
}
363
__check_secure_blacklist(struct rtw89_dev * rtwdev,struct rtw89_fw_bin_info * info,struct rtw89_fw_hdr_section_info * section_info,const void * content)364 static int __check_secure_blacklist(struct rtw89_dev *rtwdev,
365 struct rtw89_fw_bin_info *info,
366 struct rtw89_fw_hdr_section_info *section_info,
367 const void *content)
368 {
369 const struct rtw89_fw_blacklist *chip_blacklist = rtwdev->chip->fw_blacklist;
370 const union rtw89_fw_section_mssc_content *section_content = content;
371 struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
372 u8 byte_idx;
373 u8 bit_mask;
374
375 if (!sec->secure_boot)
376 return 0;
377
378 if (!info->secure_section_exist || section_info->ignore)
379 return 0;
380
381 if (!chip_blacklist) {
382 rtw89_warn(rtwdev, "chip no blacklist for secure firmware\n");
383 return -ENOENT;
384 }
385
386 byte_idx = section_content->blacklist.bit_in_chip_list >> 3;
387 bit_mask = BIT(section_content->blacklist.bit_in_chip_list & 0x7);
388
389 if (section_content->blacklist.ver > chip_blacklist->ver) {
390 rtw89_warn(rtwdev, "chip blacklist out of date (%u, %u)\n",
391 section_content->blacklist.ver, chip_blacklist->ver);
392 return -EINVAL;
393 }
394
395 if (chip_blacklist->list[byte_idx] & bit_mask) {
396 rtw89_warn(rtwdev, "firmware %u in chip blacklist\n",
397 section_content->blacklist.ver);
398 return -EPERM;
399 }
400
401 return 0;
402 }
403
/* Parse the security trailer of a firmware section.
 *
 * Formatted MSS pools are delegated to __parse_formatted_mssc(); the
 * legacy layout is a flat array of section_info->mssc signatures (plus
 * optional per-signature DSP checksums), from which the key for
 * sec->mss_idx is located when secure boot is active.
 *
 * *mssc_len returns how many trailing bytes belong to this section beyond
 * section_info->len.  A blacklist hit is warned about (WARN_ONCE) but
 * deliberately not treated as fatal — the function still returns 0.
 */
static int __parse_security_section(struct rtw89_dev *rtwdev,
				    struct rtw89_fw_bin_info *info,
				    struct rtw89_fw_hdr_section_info *section_info,
				    const void *content,
				    u32 *mssc_len)
{
	struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
	int ret;

	if ((section_info->mssc & FORMATTED_MSSC_MASK) == FORMATTED_MSSC) {
		/* any formatted-pool parse error is normalized to -EINVAL */
		ret = __parse_formatted_mssc(rtwdev, info, section_info,
					     content, mssc_len);
		if (ret)
			return -EINVAL;
	} else {
		*mssc_len = section_info->mssc * FWDL_SECURITY_SIGLEN;
		if (info->dsp_checksum)
			*mssc_len += section_info->mssc * FWDL_SECURITY_CHKSUM_LEN;

		if (sec->secure_boot) {
			if (sec->mss_idx >= section_info->mssc) {
				rtw89_err(rtwdev, "unexpected MSS %d >= %d\n",
					  sec->mss_idx, section_info->mssc);
				return -EFAULT;
			}
			/* key blobs follow the section body back to back */
			section_info->key_addr = content + section_info->len +
						sec->mss_idx * FWDL_SECURITY_SIGLEN;
			section_info->key_len = FWDL_SECURITY_SIGLEN;
		}

		info->secure_section_exist = true;
	}

	ret = __check_secure_blacklist(rtwdev, info, section_info, content);
	WARN_ONCE(ret, "Current firmware in blacklist. Please update firmware.\n");

	return 0;
}
442
/* Parse a v1 firmware header.
 *
 * Same overall flow as rtw89_fw_hdr_parser_v0() but with the v1 field
 * layout: header size lives in w5, a DSP-checksum flag is present, and
 * the download address is not masked.  Also warns when no security
 * section was found at all.
 */
static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
				  struct rtw89_fw_bin_info *info)
{
	const struct rtw89_fw_hdr_v1 *fw_hdr = (const struct rtw89_fw_hdr_v1 *)fw;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_fw_hdr_section_info *section_info;
	const struct rtw89_fw_dynhdr_hdr *fwdynhdr;
	const struct rtw89_fw_hdr_section_v1 *section;
	const u8 *fw_end = fw + len;
	const u8 *bin;
	u32 base_hdr_len;
	u32 mssc_len;
	int ret;
	u32 i;

	info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_SEC_NUM);
	info->dsp_checksum = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_DSP_CHKSUM);
	base_hdr_len = struct_size(fw_hdr, sections, info->section_num);
	info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_DYN_HDR);
	info->idmem_share_mode = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_IDMEM_SHARE_MODE);

	/* AX-generation chips download in fixed-size chunks */
	if (chip->chip_gen == RTW89_CHIP_AX)
		info->part_size = FWDL_SECTION_PER_PKT_LEN;
	else
		info->part_size = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_PART_SIZE);

	if (info->dynamic_hdr_en) {
		/* header size covers base + dynamic part; cross-check it
		 * against the dynamic header's own length field
		 */
		info->hdr_len = le32_get_bits(fw_hdr->w5, FW_HDR_V1_W5_HDR_SIZE);
		info->dynamic_hdr_len = info->hdr_len - base_hdr_len;
		fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len);
		if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) {
			rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n");
			return -EINVAL;
		}
	} else {
		info->hdr_len = base_hdr_len;
		info->dynamic_hdr_len = 0;
	}

	bin = fw + info->hdr_len;

	/* jump to section header */
	section_info = info->section_info;
	for (i = 0; i < info->section_num; i++) {
		section = &fw_hdr->sections[i];

		section_info->type =
			le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SECTIONTYPE);
		section_info->len =
			le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SEC_SIZE);
		/* checksummed sections carry extra trailing bytes */
		if (le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_CHECKSUM))
			section_info->len += FWDL_SECTION_CHKSUM_LEN;
		section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_REDL);
		section_info->dladdr =
			le32_get_bits(section->w0, FWSECTION_HDR_V1_W0_DL_ADDR);
		section_info->addr = bin;

		if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
			section_info->mssc =
				le32_get_bits(section->w2, FWSECTION_HDR_V1_W2_MSSC);

			ret = __parse_security_section(rtwdev, info, section_info,
						       bin, &mssc_len);
			if (ret)
				return ret;
		} else {
			section_info->mssc = 0;
			mssc_len = 0;
		}

		rtw89_debug(rtwdev, RTW89_DBG_FW,
			    "section[%d] type=%d len=0x%-6x mssc=%d mssc_len=%d addr=%tx\n",
			    i, section_info->type, section_info->len,
			    section_info->mssc, mssc_len, bin - fw);
		rtw89_debug(rtwdev, RTW89_DBG_FW,
			    "           ignore=%d key_addr=%p (0x%tx) key_len=%d key_idx=%d\n",
			    section_info->ignore, section_info->key_addr,
			    section_info->key_addr ?
			    section_info->key_addr - section_info->addr : 0,
			    section_info->key_len, section_info->key_idx);

		bin += section_info->len + mssc_len;
		section_info++;
	}

	/* all sections (plus MSS data) must consume the binary exactly */
	if (fw_end != bin) {
		rtw89_err(rtwdev, "[ERR]fw bin size\n");
		return -EINVAL;
	}

	if (!info->secure_section_exist)
		rtw89_warn(rtwdev, "no firmware secure section\n");

	return 0;
}
538
rtw89_fw_hdr_parser(struct rtw89_dev * rtwdev,const struct rtw89_fw_suit * fw_suit,struct rtw89_fw_bin_info * info)539 static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev,
540 const struct rtw89_fw_suit *fw_suit,
541 struct rtw89_fw_bin_info *info)
542 {
543 const u8 *fw = fw_suit->data;
544 u32 len = fw_suit->size;
545
546 if (!fw || !len) {
547 rtw89_err(rtwdev, "fw type %d isn't recognized\n", fw_suit->type);
548 return -ENOENT;
549 }
550
551 switch (fw_suit->hdr_ver) {
552 case 0:
553 return rtw89_fw_hdr_parser_v0(rtwdev, fw, len, info);
554 case 1:
555 return rtw89_fw_hdr_parser_v1(rtwdev, fw, len, info);
556 default:
557 return -ENOENT;
558 }
559 }
560
561 static
rtw89_mfw_get_hdr_ptr(struct rtw89_dev * rtwdev,const struct firmware * firmware)562 const struct rtw89_mfw_hdr *rtw89_mfw_get_hdr_ptr(struct rtw89_dev *rtwdev,
563 const struct firmware *firmware)
564 {
565 const struct rtw89_mfw_hdr *mfw_hdr;
566
567 if (sizeof(*mfw_hdr) > firmware->size)
568 return NULL;
569
570 mfw_hdr = (const struct rtw89_mfw_hdr *)&firmware->data[0];
571
572 if (mfw_hdr->sig != RTW89_MFW_SIG)
573 return NULL;
574
575 return mfw_hdr;
576 }
577
rtw89_mfw_validate_hdr(struct rtw89_dev * rtwdev,const struct firmware * firmware,const struct rtw89_mfw_hdr * mfw_hdr)578 static int rtw89_mfw_validate_hdr(struct rtw89_dev *rtwdev,
579 const struct firmware *firmware,
580 const struct rtw89_mfw_hdr *mfw_hdr)
581 {
582 const void *mfw = firmware->data;
583 u32 mfw_len = firmware->size;
584 u8 fw_nr = mfw_hdr->fw_nr;
585 const void *ptr;
586
587 if (fw_nr == 0) {
588 rtw89_err(rtwdev, "mfw header has no fw entry\n");
589 return -ENOENT;
590 }
591
592 ptr = &mfw_hdr->info[fw_nr];
593
594 if (ptr > mfw + mfw_len) {
595 rtw89_err(rtwdev, "mfw header out of address\n");
596 return -EFAULT;
597 }
598
599 return 0;
600 }
601
/* Locate the sub-firmware of @type inside the loaded firmware file and
 * fill @fw_suit with its data/size.
 *
 * A file without an mfw header is treated as a legacy single firmware
 * (normal type only).  For WiFi firmware the entries are not version
 * ordered, so all entries are scanned for the closest cut version that is
 * equal or less than the chip's; LOGFMT entries match directly.
 *
 * Returns 0 on success, -EINVAL / -ENOENT / -EFAULT on failure; @nowarn
 * suppresses only the "not found" error message.
 */
static
int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
			struct rtw89_fw_suit *fw_suit, bool nowarn)
{
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	const struct firmware *firmware = fw_info->req.firmware;
	const struct rtw89_mfw_info *mfw_info = NULL, *tmp;
	const struct rtw89_mfw_hdr *mfw_hdr;
	const u8 *mfw = firmware->data;
	u32 mfw_len = firmware->size;
	int ret;
	int i;

	mfw_hdr = rtw89_mfw_get_hdr_ptr(rtwdev, firmware);
	if (!mfw_hdr) {
		rtw89_debug(rtwdev, RTW89_DBG_FW, "use legacy firmware\n");
		/* legacy firmware support normal type only */
		if (type != RTW89_FW_NORMAL)
			return -EINVAL;
		fw_suit->data = mfw;
		fw_suit->size = mfw_len;
		return 0;
	}

	ret = rtw89_mfw_validate_hdr(rtwdev, firmware, mfw_hdr);
	if (ret)
		return ret;

	for (i = 0; i < mfw_hdr->fw_nr; i++) {
		tmp = &mfw_hdr->info[i];
		if (tmp->type != type)
			continue;

		/* log-format entries need no version matching */
		if (type == RTW89_FW_LOGFMT) {
			mfw_info = tmp;
			goto found;
		}

		/* Version order of WiFi firmware in firmware file are not in order,
		 * pass all firmware to find the equal or less but closest version.
		 */
		if (tmp->cv <= rtwdev->hal.cv && !tmp->mp) {
			if (!mfw_info || mfw_info->cv < tmp->cv)
				mfw_info = tmp;
		}
	}

	if (mfw_info)
		goto found;

	if (!nowarn)
		rtw89_err(rtwdev, "no suitable firmware found\n");
	return -ENOENT;

found:
	fw_suit->data = mfw + le32_to_cpu(mfw_info->shift);
	fw_suit->size = le32_to_cpu(mfw_info->size);

	/* the selected sub-firmware must lie entirely within the file */
	if (fw_suit->data + fw_suit->size > mfw + mfw_len) {
		rtw89_err(rtwdev, "fw_suit %d out of address\n", type);
		return -EFAULT;
	}

	return 0;
}
667
rtw89_mfw_get_size(struct rtw89_dev * rtwdev)668 static u32 rtw89_mfw_get_size(struct rtw89_dev *rtwdev)
669 {
670 struct rtw89_fw_info *fw_info = &rtwdev->fw;
671 const struct firmware *firmware = fw_info->req.firmware;
672 const struct rtw89_mfw_info *mfw_info;
673 const struct rtw89_mfw_hdr *mfw_hdr;
674 u32 size;
675 int ret;
676
677 mfw_hdr = rtw89_mfw_get_hdr_ptr(rtwdev, firmware);
678 if (!mfw_hdr) {
679 rtw89_warn(rtwdev, "not mfw format\n");
680 return 0;
681 }
682
683 ret = rtw89_mfw_validate_hdr(rtwdev, firmware, mfw_hdr);
684 if (ret)
685 return ret;
686
687 mfw_info = &mfw_hdr->info[mfw_hdr->fw_nr - 1];
688 size = le32_to_cpu(mfw_info->shift) + le32_to_cpu(mfw_info->size);
689
690 return size;
691 }
692
/* Extract version, commit id, build timestamp and command version from a
 * v0 firmware header into @fw_suit.
 */
static void rtw89_fw_update_ver_v0(struct rtw89_dev *rtwdev,
				   struct rtw89_fw_suit *fw_suit,
				   const struct rtw89_fw_hdr *hdr)
{
	fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MAJOR_VERSION);
	fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MINOR_VERSION);
	fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_W1_SUBVERSION);
	fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_W1_SUBINDEX);
	fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_W2_COMMITID);
	fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_W5_YEAR);
	fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_W4_MONTH);
	fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_W4_DATE);
	fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_W4_HOUR);
	fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_W4_MIN);
	fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_W7_CMD_VERSERION);
}
709
/* Extract version, commit id, build timestamp and command version from a
 * v1 firmware header into @fw_suit.
 */
static void rtw89_fw_update_ver_v1(struct rtw89_dev *rtwdev,
				   struct rtw89_fw_suit *fw_suit,
				   const struct rtw89_fw_hdr_v1 *hdr)
{
	fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MAJOR_VERSION);
	fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MINOR_VERSION);
	fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBVERSION);
	fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBINDEX);
	fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_V1_W2_COMMITID);
	fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_V1_W5_YEAR);
	fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MONTH);
	fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_V1_W4_DATE);
	fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_V1_W4_HOUR);
	fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MIN);
	/* NOTE(review): a W3-named mask is applied to w7 here — looks
	 * suspicious; confirm against the FW_HDR_V1 field layout in fw.h
	 * before changing.
	 */
	fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_V1_W3_CMD_VERSERION);
}
726
rtw89_fw_update_ver(struct rtw89_dev * rtwdev,enum rtw89_fw_type type,struct rtw89_fw_suit * fw_suit)727 static int rtw89_fw_update_ver(struct rtw89_dev *rtwdev,
728 enum rtw89_fw_type type,
729 struct rtw89_fw_suit *fw_suit)
730 {
731 const struct rtw89_fw_hdr *v0 = (const struct rtw89_fw_hdr *)fw_suit->data;
732 const struct rtw89_fw_hdr_v1 *v1 = (const struct rtw89_fw_hdr_v1 *)fw_suit->data;
733
734 if (type == RTW89_FW_LOGFMT)
735 return 0;
736
737 fw_suit->type = type;
738 fw_suit->hdr_ver = le32_get_bits(v0->w3, FW_HDR_W3_HDR_VER);
739
740 switch (fw_suit->hdr_ver) {
741 case 0:
742 rtw89_fw_update_ver_v0(rtwdev, fw_suit, v0);
743 break;
744 case 1:
745 rtw89_fw_update_ver_v1(rtwdev, fw_suit, v1);
746 break;
747 default:
748 rtw89_err(rtwdev, "Unknown firmware header version %u\n",
749 fw_suit->hdr_ver);
750 return -ENOENT;
751 }
752
753 rtw89_info(rtwdev,
754 "Firmware version %u.%u.%u.%u (%08x), cmd version %u, type %u\n",
755 fw_suit->major_ver, fw_suit->minor_ver, fw_suit->sub_ver,
756 fw_suit->sub_idex, fw_suit->commitid, fw_suit->cmd_ver, type);
757
758 return 0;
759 }
760
761 static
__rtw89_fw_recognize(struct rtw89_dev * rtwdev,enum rtw89_fw_type type,bool nowarn)762 int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
763 bool nowarn)
764 {
765 struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
766 int ret;
767
768 ret = rtw89_mfw_recognize(rtwdev, type, fw_suit, nowarn);
769 if (ret)
770 return ret;
771
772 return rtw89_fw_update_ver(rtwdev, type, fw_suit);
773 }
774
/* Firmware-element handler: adopt a BB MCU firmware element as the suit
 * for arg.fw_type.
 *
 * Returns 1 when the element is skipped (cut version too new, or a suit
 * was already taken), otherwise the result of version parsing.
 */
static
int __rtw89_fw_recognize_from_elm(struct rtw89_dev *rtwdev,
				  const struct rtw89_fw_element_hdr *elm,
				  const union rtw89_fw_element_arg arg)
{
	enum rtw89_fw_type type = arg.fw_type;
	struct rtw89_hal *hal = &rtwdev->hal;
	struct rtw89_fw_suit *fw_suit;

	/* Version of BB MCU is in decreasing order in firmware file, so take
	 * first equal or less version, which is equal or less but closest version.
	 */
	if (hal->cv < elm->u.bbmcu.cv)
		return 1; /* ignore this element */

	fw_suit = rtw89_fw_suit_get(rtwdev, type);
	if (fw_suit->data)
		return 1; /* ignore this element (a firmware is taken already) */

	fw_suit->data = elm->u.bbmcu.contents;
	fw_suit->size = le32_to_cpu(elm->size);

	return rtw89_fw_update_ver(rtwdev, type, fw_suit);
}
799
/* Generate a comparison helper __fw_feat_cond_<name> that compares a
 * firmware suit version code against a feature-table threshold with
 * operator __op.
 */
#define __DEF_FW_FEAT_COND(__cond, __op) \
static bool __fw_feat_cond_ ## __cond(u32 suit_ver_code, u32 comp_ver_code) \
{ \
	return suit_ver_code __op comp_ver_code; \
}

__DEF_FW_FEAT_COND(ge, >=); /* greater or equal */
__DEF_FW_FEAT_COND(le, <=); /* less or equal */
__DEF_FW_FEAT_COND(lt, <); /* less than */
809
/* One firmware-feature rule: when .cond(fw_ver, .ver_code) holds for
 * .chip_id, either set .feature (.disable == false) or clear .size
 * consecutive features starting at .feature (.disable == true).
 */
struct __fw_feat_cfg {
	enum rtw89_core_chip_id chip_id;
	enum rtw89_fw_feature feature;
	u32 ver_code;
	bool (*cond)(u32 suit_ver_code, u32 comp_ver_code);
	bool disable;
	int size;
};
818
/* Enable a single feature when the version condition matches. */
#define __CFG_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat) \
	{ \
		.chip_id = _chip, \
		.feature = RTW89_FW_FEATURE_ ## _feat, \
		.ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \
		.cond = __fw_feat_cond_ ## _cond, \
	}

/* Disable a single feature when the version condition matches (S = single). */
#define __S_DIS_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat) \
	{ \
		.chip_id = _chip, \
		.feature = RTW89_FW_FEATURE_ ## _feat, \
		.ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \
		.cond = __fw_feat_cond_ ## _cond, \
		.disable = true, \
		.size = 1, \
	}

/* Disable the whole feature group _grp_MIN.._grp_MAX (G = group). */
#define __G_DIS_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _grp) \
	{ \
		.chip_id = _chip, \
		.feature = RTW89_FW_FEATURE_ ## _grp ## _MIN, \
		.ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \
		.cond = __fw_feat_cond_ ## _cond, \
		.disable = true, \
		.size = RTW89_FW_FEATURE_ ## _grp ## _MAX - \
			RTW89_FW_FEATURE_ ## _grp ## _MIN + 1, \
	}

/* Dispatch to the single (S) or group (G) disable variant. */
#define __DIS_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat, _type) \
	__##_type##_DIS_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat)
850
/* Per-chip firmware feature rules, applied in table order by
 * rtw89_fw_iterate_feature_cfg(); later entries can override earlier
 * ones for the same chip (set then clear).
 */
static const struct __fw_feat_cfg fw_feat_tbl[] = {
	__CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, TX_WAKE),
	__CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8851B, ge, 0, 29, 41, 0, CRASH_TRIGGER_TYPE_0),
	__CFG_FW_FEAT(RTL8851B, ge, 0, 29, 127, 0, SER_L1_BY_EVENT),
	__CFG_FW_FEAT(RTL8851B, ge, 0, 29, 130, 0, SIM_SER_L0L1_BY_HALT_H2C),
	__CFG_FW_FEAT(RTL8852A, le, 0, 13, 29, 0, OLD_HT_RA_FORMAT),
	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE),
	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER_TYPE_0),
	__CFG_FW_FEAT(RTL8852A, lt, 0, 13, 37, 0, NO_WOW_CPU_IO_RX),
	__CFG_FW_FEAT(RTL8852A, lt, 0, 13, 38, 0, NO_PACKET_DROP),
	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, NO_LPS_PG),
	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, TX_WAKE),
	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, CRASH_TRIGGER_TYPE_0),
	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 7, BEACON_FILTER),
	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 15, BEACON_LOSS_COUNT_V1),
	__CFG_FW_FEAT(RTL8852B, lt, 0, 29, 30, 0, NO_WOW_CPU_IO_RX),
	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 127, 0, LPS_DACK_BY_C2H_REG),
	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 127, 0, SER_L1_BY_EVENT),
	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 128, 0, CRASH_TRIGGER_TYPE_1),
	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 128, 0, SCAN_OFFLOAD_EXTRA_OP),
	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 128, 0, BEACON_TRACKING),
	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 130, 0, SIM_SER_L0L1_BY_HALT_H2C),
	__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, NO_LPS_PG),
	__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, TX_WAKE),
	__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 90, 0, CRASH_TRIGGER_TYPE_0),
	__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 91, 0, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 110, 0, BEACON_FILTER),
	__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 122, 0, BEACON_TRACKING),
	__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 127, 0, SCAN_OFFLOAD_EXTRA_OP),
	__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 127, 0, LPS_DACK_BY_C2H_REG),
	__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 127, 0, CRASH_TRIGGER_TYPE_1),
	__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 127, 0, SER_L1_BY_EVENT),
	__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 130, 0, SIM_SER_L0L1_BY_HALT_H2C),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 0, 0, 0, RFK_NTFY_MCC_V0),
	__CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER_TYPE_0),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 56, 10, BEACON_FILTER),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 80, 0, WOW_REASON_V1),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 128, 0, BEACON_LOSS_COUNT_V1),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 128, 0, LPS_DACK_BY_C2H_REG),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 128, 0, CRASH_TRIGGER_TYPE_1),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 129, 1, BEACON_TRACKING),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 29, 94, 0, SER_L1_BY_EVENT),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 29, 130, 0, SIM_SER_L0L1_BY_HALT_H2C),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 0, 0, 0, RFK_PRE_NOTIFY_V0),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 11, 0, MACID_PAUSE_SLEEP),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 30, 0, CRASH_TRIGGER_TYPE_0),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 35, 0, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 35, 0, SCAN_OFFLOAD_EXTRA_OP),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 12, 0, BEACON_FILTER),
	__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 21, 0, SCAN_OFFLOAD_BE_V0),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 22, 0, WOW_REASON_V1),
	__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 28, 0, RFK_IQK_V0),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 31, 0, RFK_PRE_NOTIFY_V1),
	__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 31, 0, LPS_CH_INFO),
	__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 42, 0, RFK_RXDCK_V0),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 46, 0, NOTIFY_AP_INFO),
	__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 47, 0, CH_INFO_BE_V0),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 49, 0, RFK_PRE_NOTIFY_V2),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 49, 0, RFK_PRE_NOTIFY_MCC_V0),
	__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 51, 0, NO_PHYCAP_P1),
	__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 64, 0, NO_POWER_DIFFERENCE),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 71, 0, BEACON_LOSS_COUNT_V1),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 76, 0, LPS_DACK_BY_C2H_REG),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 79, 0, CRASH_TRIGGER_TYPE_1),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 80, 0, BEACON_TRACKING),
	__DIS_FW_FEAT(RTL8922A, ge, 0, 35, 84, 0, WITH_RFK_PRE_NOTIFY, G),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 84, 0, RFK_PRE_NOTIFY_MCC_V1),
	__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 84, 0, ADDR_CAM_V0),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 97, 0, SIM_SER_L0L1_BY_HALT_H2C),
};
927
/* Apply every fw_feat_tbl entry matching @chip and @ver_code to @fw:
 * set the listed feature, or clear a contiguous range for disable entries.
 */
static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw,
					 const struct rtw89_chip_info *chip,
					 u32 ver_code)
{
	const struct __fw_feat_cfg *ent = fw_feat_tbl;
	const struct __fw_feat_cfg *end = fw_feat_tbl + ARRAY_SIZE(fw_feat_tbl);

	for (; ent < end; ent++) {
		if (ent->chip_id != chip->chip_id ||
		    !ent->cond(ver_code, ent->ver_code))
			continue;

		if (ent->disable) {
			/* disable entries clear @size consecutive features */
			for (int n = 0; n < ent->size; n++)
				RTW89_CLR_FW_FEATURE(ent->feature + n, fw);
		} else {
			RTW89_SET_FW_FEATURE(ent->feature, fw);
		}
	}
}
952
rtw89_fw_recognize_features(struct rtw89_dev * rtwdev)953 static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev)
954 {
955 const struct rtw89_chip_info *chip = rtwdev->chip;
956 const struct rtw89_fw_suit *fw_suit;
957 u32 suit_ver_code;
958
959 fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL);
960 suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit);
961
962 rtw89_fw_iterate_feature_cfg(&rtwdev->fw, chip, suit_ver_code);
963 }
964
965 const struct firmware *
rtw89_early_fw_feature_recognize(struct device * device,const struct rtw89_chip_info * chip,struct rtw89_fw_info * early_fw,int * used_fw_format)966 rtw89_early_fw_feature_recognize(struct device *device,
967 const struct rtw89_chip_info *chip,
968 struct rtw89_fw_info *early_fw,
969 int *used_fw_format)
970 {
971 const struct firmware *firmware;
972 char fw_name[64];
973 int fw_format;
974 u32 ver_code;
975 int ret;
976
977 for (fw_format = chip->fw_format_max; fw_format >= 0; fw_format--) {
978 rtw89_fw_get_filename(fw_name, sizeof(fw_name),
979 chip->fw_basename, fw_format);
980
981 ret = request_firmware(&firmware, fw_name, device);
982 if (!ret) {
983 dev_info(device, "loaded firmware %s\n", fw_name);
984 *used_fw_format = fw_format;
985 break;
986 }
987 }
988
989 if (ret) {
990 dev_err(device, "failed to early request firmware: %d\n", ret);
991 return NULL;
992 }
993
994 ver_code = rtw89_compat_fw_hdr_ver_code(firmware->data);
995
996 if (!ver_code)
997 goto out;
998
999 rtw89_fw_iterate_feature_cfg(early_fw, chip, ver_code);
1000
1001 out:
1002 return firmware;
1003 }
1004
rtw89_fw_validate_ver_required(struct rtw89_dev * rtwdev)1005 static int rtw89_fw_validate_ver_required(struct rtw89_dev *rtwdev)
1006 {
1007 const struct rtw89_chip_variant *variant = rtwdev->variant;
1008 const struct rtw89_fw_suit *fw_suit;
1009 u32 suit_ver_code;
1010
1011 if (!variant)
1012 return 0;
1013
1014 fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL);
1015 suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit);
1016
1017 if (variant->fw_min_ver_code > suit_ver_code) {
1018 rtw89_err(rtwdev, "minimum required firmware version is 0x%x\n",
1019 variant->fw_min_ver_code);
1020 return -ENOENT;
1021 }
1022
1023 return 0;
1024 }
1025
rtw89_fw_recognize(struct rtw89_dev * rtwdev)1026 int rtw89_fw_recognize(struct rtw89_dev *rtwdev)
1027 {
1028 const struct rtw89_chip_info *chip = rtwdev->chip;
1029 int ret;
1030
1031 if (chip->try_ce_fw) {
1032 ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL_CE, true);
1033 if (!ret)
1034 goto normal_done;
1035 }
1036
1037 ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL, false);
1038 if (ret)
1039 return ret;
1040
1041 normal_done:
1042 ret = rtw89_fw_validate_ver_required(rtwdev);
1043 if (ret)
1044 return ret;
1045
1046 /* It still works if wowlan firmware isn't existing. */
1047 __rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN, false);
1048
1049 /* It still works if log format file isn't existing. */
1050 __rtw89_fw_recognize(rtwdev, RTW89_FW_LOGFMT, true);
1051
1052 rtw89_fw_recognize_features(rtwdev);
1053
1054 rtw89_coex_recognize_ver(rtwdev);
1055
1056 return 0;
1057 }
1058
1059 static
rtw89_build_phy_tbl_from_elm(struct rtw89_dev * rtwdev,const struct rtw89_fw_element_hdr * elm,const union rtw89_fw_element_arg arg)1060 int rtw89_build_phy_tbl_from_elm(struct rtw89_dev *rtwdev,
1061 const struct rtw89_fw_element_hdr *elm,
1062 const union rtw89_fw_element_arg arg)
1063 {
1064 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
1065 struct rtw89_hal *hal = &rtwdev->hal;
1066 struct rtw89_phy_table *tbl, **pp;
1067 struct rtw89_reg2_def *regs;
1068 bool radio = false;
1069 u32 n_regs, i;
1070 u16 aid;
1071 u8 idx;
1072
1073 switch (le32_to_cpu(elm->id)) {
1074 case RTW89_FW_ELEMENT_ID_BB_REG:
1075 pp = &elm_info->bb_tbl;
1076 break;
1077 case RTW89_FW_ELEMENT_ID_BB_GAIN:
1078 pp = &elm_info->bb_gain;
1079 break;
1080 case RTW89_FW_ELEMENT_ID_RADIO_A:
1081 case RTW89_FW_ELEMENT_ID_RADIO_B:
1082 case RTW89_FW_ELEMENT_ID_RADIO_C:
1083 case RTW89_FW_ELEMENT_ID_RADIO_D:
1084 idx = elm->u.reg2.idx;
1085 pp = &elm_info->rf_radio[idx];
1086
1087 radio = true;
1088 break;
1089 case RTW89_FW_ELEMENT_ID_RF_NCTL:
1090 pp = &elm_info->rf_nctl;
1091 break;
1092 default:
1093 return -ENOENT;
1094 }
1095
1096 aid = le16_to_cpu(elm->aid);
1097 if (aid && aid != hal->aid)
1098 return 1; /* ignore if aid not matched */
1099 else if (*pp)
1100 return 1; /* ignore if an element is existing */
1101
1102 tbl = kzalloc_obj(*tbl);
1103 if (!tbl)
1104 return -ENOMEM;
1105
1106 n_regs = le32_to_cpu(elm->size) / sizeof(tbl->regs[0]);
1107 regs = kzalloc_objs(*regs, n_regs);
1108 if (!regs)
1109 goto out;
1110
1111 for (i = 0; i < n_regs; i++) {
1112 regs[i].addr = le32_to_cpu(elm->u.reg2.regs[i].addr);
1113 regs[i].data = le32_to_cpu(elm->u.reg2.regs[i].data);
1114 }
1115
1116 tbl->n_regs = n_regs;
1117 tbl->regs = regs;
1118
1119 if (radio) {
1120 tbl->rf_path = arg.rf_path;
1121 tbl->config = rtw89_phy_config_rf_reg_v1;
1122 }
1123
1124 *pp = tbl;
1125
1126 return 0;
1127
1128 out:
1129 kfree(tbl);
1130 return -ENOMEM;
1131 }
1132
1133 static
rtw89_fw_recognize_txpwr_from_elm(struct rtw89_dev * rtwdev,const struct rtw89_fw_element_hdr * elm,const union rtw89_fw_element_arg arg)1134 int rtw89_fw_recognize_txpwr_from_elm(struct rtw89_dev *rtwdev,
1135 const struct rtw89_fw_element_hdr *elm,
1136 const union rtw89_fw_element_arg arg)
1137 {
1138 const struct __rtw89_fw_txpwr_element *txpwr_elm = &elm->u.txpwr;
1139 const unsigned long offset = arg.offset;
1140 struct rtw89_efuse *efuse = &rtwdev->efuse;
1141 struct rtw89_txpwr_conf *conf;
1142
1143 if (!rtwdev->rfe_data) {
1144 rtwdev->rfe_data = kzalloc_obj(*rtwdev->rfe_data);
1145 if (!rtwdev->rfe_data)
1146 return -ENOMEM;
1147 }
1148
1149 conf = (void *)rtwdev->rfe_data + offset;
1150
1151 /* if multiple matched, take the last eventually */
1152 if (txpwr_elm->rfe_type == efuse->rfe_type)
1153 goto setup;
1154
1155 /* without one is matched, accept default */
1156 if (txpwr_elm->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE &&
1157 (!rtw89_txpwr_conf_valid(conf) ||
1158 conf->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE))
1159 goto setup;
1160
1161 rtw89_debug(rtwdev, RTW89_DBG_FW, "skip txpwr element ID %u RFE %u\n",
1162 elm->id, txpwr_elm->rfe_type);
1163 return 0;
1164
1165 setup:
1166 rtw89_debug(rtwdev, RTW89_DBG_FW, "take txpwr element ID %u RFE %u\n",
1167 elm->id, txpwr_elm->rfe_type);
1168
1169 conf->rfe_type = txpwr_elm->rfe_type;
1170 conf->ent_sz = txpwr_elm->ent_sz;
1171 conf->num_ents = le32_to_cpu(txpwr_elm->num_ents);
1172 conf->data = txpwr_elm->content;
1173 return 0;
1174 }
1175
/* Validate and index a TX power tracking element. The element's bitmap
 * declares which per-band tracking tables are present; each present table
 * consumes a band-dependent number of subband entries in the contents.
 * Returns 0 on success, -ENOENT if a needed table is missing, -ENOMEM or
 * -EFAULT (contents shorter than the bitmap implies) on failure.
 */
static
int rtw89_build_txpwr_trk_tbl_from_elm(struct rtw89_dev *rtwdev,
				       const struct rtw89_fw_element_hdr *elm,
				       const union rtw89_fw_element_arg arg)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 needed_bitmap = 0;
	u32 offset = 0;
	int subband;
	u32 bitmap;
	int type;

	/* only demand the tables for bands this chip actually supports */
	if (chip->support_bands & BIT(NL80211_BAND_6GHZ))
		needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_6GHZ;
	if (chip->support_bands & BIT(NL80211_BAND_5GHZ))
		needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_5GHZ;
	if (chip->support_bands & BIT(NL80211_BAND_2GHZ))
		needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_2GHZ;

	bitmap = le32_to_cpu(elm->u.txpwr_trk.bitmap);

	if ((bitmap & needed_bitmap) != needed_bitmap) {
		rtw89_warn(rtwdev, "needed txpwr trk bitmap %08x but %08x\n",
			   needed_bitmap, bitmap);
		return -ENOENT;
	}

	elm_info->txpwr_trk = kzalloc_obj(*elm_info->txpwr_trk);
	if (!elm_info->txpwr_trk)
		return -ENOMEM;

	/* walk set bits in ascending type order; offset tracks how many
	 * subband entries of the contents have been consumed so far
	 */
	for (type = 0; bitmap; type++, bitmap >>= 1) {
		if (!(bitmap & BIT(0)))
			continue;

		if (type >= __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_START &&
		    type <= __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_MAX)
			subband = 4;
		else if (type >= __RTW89_FW_TXPWR_TRK_TYPE_5GHZ_START &&
			 type <= __RTW89_FW_TXPWR_TRK_TYPE_5GHZ_MAX)
			subband = 3;
		else if (type >= __RTW89_FW_TXPWR_TRK_TYPE_2GHZ_START &&
			 type <= __RTW89_FW_TXPWR_TRK_TYPE_2GHZ_MAX)
			subband = 1;
		else
			break;

		elm_info->txpwr_trk->delta[type] = &elm->u.txpwr_trk.contents[offset];

		offset += subband;
		/* check the entry just recorded still fits in the element */
		if (offset * DELTA_SWINGIDX_SIZE > le32_to_cpu(elm->size))
			goto err;
	}

	return 0;

err:
	rtw89_warn(rtwdev, "unexpected txpwr trk offset %d over size %d\n",
		   offset, le32_to_cpu(elm->size));
	kfree(elm_info->txpwr_trk);
	elm_info->txpwr_trk = NULL;

	return -EFAULT;
}
1241
1242 static
rtw89_build_rfk_log_fmt_from_elm(struct rtw89_dev * rtwdev,const struct rtw89_fw_element_hdr * elm,const union rtw89_fw_element_arg arg)1243 int rtw89_build_rfk_log_fmt_from_elm(struct rtw89_dev *rtwdev,
1244 const struct rtw89_fw_element_hdr *elm,
1245 const union rtw89_fw_element_arg arg)
1246 {
1247 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
1248 u8 rfk_id;
1249
1250 if (elm_info->rfk_log_fmt)
1251 goto allocated;
1252
1253 elm_info->rfk_log_fmt = kzalloc_obj(*elm_info->rfk_log_fmt);
1254 if (!elm_info->rfk_log_fmt)
1255 return 1; /* this is an optional element, so just ignore this */
1256
1257 allocated:
1258 rfk_id = elm->u.rfk_log_fmt.rfk_id;
1259 if (rfk_id >= RTW89_PHY_C2H_RFK_LOG_FUNC_NUM)
1260 return 1;
1261
1262 elm_info->rfk_log_fmt->elm[rfk_id] = elm;
1263
1264 return 0;
1265 }
1266
rtw89_regd_entcpy(struct rtw89_regd * regd,const void * cursor,u8 cursor_size)1267 static bool rtw89_regd_entcpy(struct rtw89_regd *regd, const void *cursor,
1268 u8 cursor_size)
1269 {
1270 /* fill default values if needed for backward compatibility */
1271 struct rtw89_fw_regd_entry entry = {
1272 .rule_2ghz = RTW89_NA,
1273 .rule_5ghz = RTW89_NA,
1274 .rule_6ghz = RTW89_NA,
1275 .fmap = cpu_to_le32(0x0),
1276 };
1277 u8 valid_size = min_t(u8, sizeof(entry), cursor_size);
1278 unsigned int i;
1279 u32 fmap;
1280
1281 memcpy(&entry, cursor, valid_size);
1282 memset(regd, 0, sizeof(*regd));
1283
1284 regd->alpha2[0] = entry.alpha2_0;
1285 regd->alpha2[1] = entry.alpha2_1;
1286 regd->alpha2[2] = '\0';
1287
1288 /* also need to consider forward compatibility */
1289 regd->txpwr_regd[RTW89_BAND_2G] = entry.rule_2ghz < RTW89_REGD_NUM ?
1290 entry.rule_2ghz : RTW89_NA;
1291 regd->txpwr_regd[RTW89_BAND_5G] = entry.rule_5ghz < RTW89_REGD_NUM ?
1292 entry.rule_5ghz : RTW89_NA;
1293 regd->txpwr_regd[RTW89_BAND_6G] = entry.rule_6ghz < RTW89_REGD_NUM ?
1294 entry.rule_6ghz : RTW89_NA;
1295
1296 BUILD_BUG_ON(sizeof(fmap) != sizeof(entry.fmap));
1297 BUILD_BUG_ON(sizeof(fmap) * 8 < NUM_OF_RTW89_REGD_FUNC);
1298
1299 fmap = le32_to_cpu(entry.fmap);
1300 for (i = 0; i < NUM_OF_RTW89_REGD_FUNC; i++) {
1301 if (fmap & BIT(i))
1302 set_bit(i, regd->func_bitmap);
1303 }
1304
1305 return true;
1306 }
1307
/* Iterate the fixed-size entries of a regd element, decoding each into
 * *(regd); the trailing if runs the loop body only for entries accepted by
 * rtw89_regd_entcpy() (currently it accepts all of them).
 */
#define rtw89_for_each_in_regd_element(regd, element) \
	for (const void *cursor = (element)->content, \
	     *end = (element)->content + \
		    le32_to_cpu((element)->num_ents) * (element)->ent_sz; \
	     cursor < end; cursor += (element)->ent_sz) \
		if (rtw89_regd_entcpy(regd, cursor, (element)->ent_sz))
1314
1315 static
rtw89_recognize_regd_from_elm(struct rtw89_dev * rtwdev,const struct rtw89_fw_element_hdr * elm,const union rtw89_fw_element_arg arg)1316 int rtw89_recognize_regd_from_elm(struct rtw89_dev *rtwdev,
1317 const struct rtw89_fw_element_hdr *elm,
1318 const union rtw89_fw_element_arg arg)
1319 {
1320 const struct __rtw89_fw_regd_element *regd_elm = &elm->u.regd;
1321 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
1322 u32 num_ents = le32_to_cpu(regd_elm->num_ents);
1323 struct rtw89_regd_data *p;
1324 struct rtw89_regd regd;
1325 u32 i = 0;
1326
1327 if (num_ents > RTW89_REGD_MAX_COUNTRY_NUM) {
1328 rtw89_warn(rtwdev,
1329 "regd element ents (%d) are over max num (%d)\n",
1330 num_ents, RTW89_REGD_MAX_COUNTRY_NUM);
1331 rtw89_warn(rtwdev,
1332 "regd element ignore and take another/common\n");
1333 return 1;
1334 }
1335
1336 if (elm_info->regd) {
1337 rtw89_debug(rtwdev, RTW89_DBG_REGD,
1338 "regd element take the latter\n");
1339 devm_kfree(rtwdev->dev, elm_info->regd);
1340 elm_info->regd = NULL;
1341 }
1342
1343 p = devm_kzalloc(rtwdev->dev, struct_size(p, map, num_ents), GFP_KERNEL);
1344 if (!p)
1345 return -ENOMEM;
1346
1347 p->nr = num_ents;
1348 rtw89_for_each_in_regd_element(®d, regd_elm)
1349 p->map[i++] = regd;
1350
1351 if (i != num_ents) {
1352 rtw89_err(rtwdev, "regd element has %d invalid ents\n",
1353 num_ents - i);
1354 devm_kfree(rtwdev->dev, p);
1355 return -EINVAL;
1356 }
1357
1358 elm_info->regd = p;
1359 return 0;
1360 }
1361
1362 static
rtw89_build_afe_pwr_seq_from_elm(struct rtw89_dev * rtwdev,const struct rtw89_fw_element_hdr * elm,const union rtw89_fw_element_arg arg)1363 int rtw89_build_afe_pwr_seq_from_elm(struct rtw89_dev *rtwdev,
1364 const struct rtw89_fw_element_hdr *elm,
1365 const union rtw89_fw_element_arg arg)
1366 {
1367 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
1368
1369 elm_info->afe = elm;
1370
1371 return 0;
1372 }
1373
1374 static
rtw89_recognize_diag_mac_from_elm(struct rtw89_dev * rtwdev,const struct rtw89_fw_element_hdr * elm,const union rtw89_fw_element_arg arg)1375 int rtw89_recognize_diag_mac_from_elm(struct rtw89_dev *rtwdev,
1376 const struct rtw89_fw_element_hdr *elm,
1377 const union rtw89_fw_element_arg arg)
1378 {
1379 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
1380
1381 elm_info->diag_mac = elm;
1382
1383 return 0;
1384 }
1385
1386 static
rtw89_build_tx_comp_from_elm(struct rtw89_dev * rtwdev,const struct rtw89_fw_element_hdr * elm,const union rtw89_fw_element_arg arg)1387 int rtw89_build_tx_comp_from_elm(struct rtw89_dev *rtwdev,
1388 const struct rtw89_fw_element_hdr *elm,
1389 const union rtw89_fw_element_arg arg)
1390 {
1391 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
1392 struct rtw89_hal *hal = &rtwdev->hal;
1393 u16 aid;
1394
1395 aid = le16_to_cpu(elm->aid);
1396 if (aid && aid != hal->aid)
1397 return 1; /* ignore if aid not matched */
1398 else if (elm_info->tx_comp)
1399 return 1; /* ignore if an element is existing */
1400
1401 elm_info->tx_comp = elm;
1402
1403 return 0;
1404 }
1405
/* Dispatch table indexed by firmware element ID. Each entry pairs a handler
 * with a handler-specific argument (fw type, RF path, or an offset into
 * struct rtw89_rfe_data) and an optional name used only for the version log
 * line in rtw89_fw_recognize_elements(); IDs with no entry or a NULL fn are
 * skipped there.
 */
static const struct rtw89_fw_element_handler __fw_element_handlers[] = {
	[RTW89_FW_ELEMENT_ID_BBMCU0] = {__rtw89_fw_recognize_from_elm,
					{ .fw_type = RTW89_FW_BBMCU0 }, NULL},
	[RTW89_FW_ELEMENT_ID_BBMCU1] = {__rtw89_fw_recognize_from_elm,
					{ .fw_type = RTW89_FW_BBMCU1 }, NULL},
	[RTW89_FW_ELEMENT_ID_BB_REG] = {rtw89_build_phy_tbl_from_elm, {}, "BB"},
	[RTW89_FW_ELEMENT_ID_BB_GAIN] = {rtw89_build_phy_tbl_from_elm, {}, NULL},
	[RTW89_FW_ELEMENT_ID_RADIO_A] = {rtw89_build_phy_tbl_from_elm,
					 { .rf_path =  RF_PATH_A }, "radio A"},
	[RTW89_FW_ELEMENT_ID_RADIO_B] = {rtw89_build_phy_tbl_from_elm,
					 { .rf_path =  RF_PATH_B }, NULL},
	[RTW89_FW_ELEMENT_ID_RADIO_C] = {rtw89_build_phy_tbl_from_elm,
					 { .rf_path =  RF_PATH_C }, NULL},
	[RTW89_FW_ELEMENT_ID_RADIO_D] = {rtw89_build_phy_tbl_from_elm,
					 { .rf_path =  RF_PATH_D }, NULL},
	[RTW89_FW_ELEMENT_ID_RF_NCTL] = {rtw89_build_phy_tbl_from_elm, {}, "NCTL"},
	[RTW89_FW_ELEMENT_ID_TXPWR_BYRATE] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, byrate.conf) }, "TXPWR",
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_2GHZ] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, lmt_2ghz.conf) }, NULL,
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_5GHZ] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, lmt_5ghz.conf) }, NULL,
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_6GHZ] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, lmt_6ghz.conf) }, NULL,
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_2GHZ] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, da_lmt_2ghz.conf) }, NULL,
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_5GHZ] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, da_lmt_5ghz.conf) }, NULL,
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_6GHZ] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, da_lmt_6ghz.conf) }, NULL,
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_2GHZ] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, lmt_ru_2ghz.conf) }, NULL,
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_5GHZ] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, lmt_ru_5ghz.conf) }, NULL,
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_6GHZ] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, lmt_ru_6ghz.conf) }, NULL,
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_2GHZ] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, da_lmt_ru_2ghz.conf) }, NULL,
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_5GHZ] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, da_lmt_ru_5ghz.conf) }, NULL,
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_6GHZ] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, da_lmt_ru_6ghz.conf) }, NULL,
	},
	[RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt.conf) }, NULL,
	},
	[RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT_RU] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt_ru.conf) }, NULL,
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_TRK] = {
		rtw89_build_txpwr_trk_tbl_from_elm, {}, "PWR_TRK",
	},
	[RTW89_FW_ELEMENT_ID_RFKLOG_FMT] = {
		rtw89_build_rfk_log_fmt_from_elm, {}, NULL,
	},
	[RTW89_FW_ELEMENT_ID_REGD] = {
		rtw89_recognize_regd_from_elm, {}, "REGD",
	},
	[RTW89_FW_ELEMENT_ID_AFE_PWR_SEQ] = {
		rtw89_build_afe_pwr_seq_from_elm, {}, "AFE",
	},
	[RTW89_FW_ELEMENT_ID_DIAG_MAC] = {
		rtw89_recognize_diag_mac_from_elm, {}, NULL,
	},
	[RTW89_FW_ELEMENT_ID_TX_COMP] = {
		rtw89_build_tx_comp_from_elm, {}, NULL,
	},
};
1501
/* Walk the element area that follows the multi-firmware blob, dispatching
 * each element header to __fw_element_handlers. Each chip declares a bitmap
 * of required element IDs (needed_fw_elms); if any of those is not
 * successfully handled the whole firmware file is rejected with -ENOENT.
 */
int rtw89_fw_recognize_elements(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	const struct firmware *firmware = fw_info->req.firmware;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 unrecognized_elements = chip->needed_fw_elms;
	const struct rtw89_fw_element_handler *handler;
	const struct rtw89_fw_element_hdr *hdr;
	u32 elm_size;
	u32 elem_id;
	u32 offset;
	int ret;

	BUILD_BUG_ON(sizeof(chip->needed_fw_elms) * 8 < RTW89_FW_ELEMENT_ID_NUM);

	/* elements start right after the (aligned) multi-firmware section */
	offset = rtw89_mfw_get_size(rtwdev);
	offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN);
	if (offset == 0)
		return -EINVAL;

	while (offset + sizeof(*hdr) < firmware->size) {
		hdr = (const struct rtw89_fw_element_hdr *)(firmware->data + offset);

		elm_size = le32_to_cpu(hdr->size);
		if (offset + elm_size >= firmware->size) {
			rtw89_warn(rtwdev, "firmware element size exceeds\n");
			break;
		}

		/* unknown or unhandled IDs are silently skipped */
		elem_id = le32_to_cpu(hdr->id);
		if (elem_id >= ARRAY_SIZE(__fw_element_handlers))
			goto next;

		handler = &__fw_element_handlers[elem_id];
		if (!handler->fn)
			goto next;

		ret = handler->fn(rtwdev, hdr, handler->arg);
		if (ret == 1) /* ignore this element */
			goto next;
		if (ret)
			return ret;

		if (handler->name)
			rtw89_info(rtwdev, "Firmware element %s version: %4ph\n",
				   handler->name, hdr->ver);

		/* mark this required element as seen */
		unrecognized_elements &= ~BIT(elem_id);
next:
		offset += sizeof(*hdr) + elm_size;
		offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN);
	}

	if (unrecognized_elements) {
		rtw89_err(rtwdev, "Firmware elements 0x%08x are unrecognized\n",
			  unrecognized_elements);
		return -ENOENT;
	}

	return 0;
}
1563
/* Prepend the 8-byte H2C command header to @skb and advance the H2C
 * sequence counter. @len is the payload length (header added on top).
 */
void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb,
			   u8 type, u8 cat, u8 class, u8 func,
			   bool rack, bool dack, u32 len)
{
	struct rtw89_fw_info *fw = &rtwdev->fw;
	struct fwcmd_hdr *hdr;

	hdr = (struct fwcmd_hdr *)skb_push(skb, 8);

	/* on AX chips, force a REC_ACK request every fourth sequence number */
	if (rtwdev->chip->chip_gen == RTW89_CHIP_AX && !(fw->h2c_seq % 4))
		rack = true;

	hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
				FIELD_PREP(H2C_HDR_CAT, cat) |
				FIELD_PREP(H2C_HDR_CLASS, class) |
				FIELD_PREP(H2C_HDR_FUNC, func) |
				FIELD_PREP(H2C_HDR_H2C_SEQ, fw->h2c_seq));

	hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
					   len + H2C_HEADER_LEN) |
				(rack ? H2C_HDR_REC_ACK : 0) |
				(dack ? H2C_HDR_DONE_ACK : 0));

	fw->h2c_seq++;
}
1588
/* Prepend the 8-byte H2C header used during firmware download: like
 * rtw89_h2c_pkt_set_hdr() but without ACK bits and without advancing the
 * sequence counter.
 */
static void rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev *rtwdev,
				       struct sk_buff *skb,
				       u8 type, u8 cat, u8 class, u8 func,
				       u32 len)
{
	struct fwcmd_hdr *cmd = (struct fwcmd_hdr *)skb_push(skb, 8);

	cmd->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
				FIELD_PREP(H2C_HDR_CAT, cat) |
				FIELD_PREP(H2C_HDR_CLASS, class) |
				FIELD_PREP(H2C_HDR_FUNC, func) |
				FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));

	cmd->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
					   len + H2C_HEADER_LEN));
}
1607
__rtw89_fw_download_tweak_hdr_v0(struct rtw89_dev * rtwdev,struct rtw89_fw_bin_info * info,struct rtw89_fw_hdr * fw_hdr)1608 static u32 __rtw89_fw_download_tweak_hdr_v0(struct rtw89_dev *rtwdev,
1609 struct rtw89_fw_bin_info *info,
1610 struct rtw89_fw_hdr *fw_hdr)
1611 {
1612 struct rtw89_fw_hdr_section_info *section_info;
1613 struct rtw89_fw_hdr_section *section;
1614 int i;
1615
1616 le32p_replace_bits(&fw_hdr->w7, info->part_size, FW_HDR_W7_PART_SIZE);
1617
1618 for (i = 0; i < info->section_num; i++) {
1619 section_info = &info->section_info[i];
1620
1621 if (!section_info->len_override)
1622 continue;
1623
1624 section = &fw_hdr->sections[i];
1625 le32p_replace_bits(§ion->w1, section_info->len_override,
1626 FWSECTION_HDR_W1_SEC_SIZE);
1627 }
1628
1629 return 0;
1630 }
1631
__rtw89_fw_download_tweak_hdr_v1(struct rtw89_dev * rtwdev,struct rtw89_fw_bin_info * info,struct rtw89_fw_hdr_v1 * fw_hdr)1632 static u32 __rtw89_fw_download_tweak_hdr_v1(struct rtw89_dev *rtwdev,
1633 struct rtw89_fw_bin_info *info,
1634 struct rtw89_fw_hdr_v1 *fw_hdr)
1635 {
1636 struct rtw89_fw_hdr_section_info *section_info;
1637 struct rtw89_fw_hdr_section_v1 *section;
1638 u8 dst_sec_idx = 0;
1639 u8 sec_idx;
1640
1641 le32p_replace_bits(&fw_hdr->w7, info->part_size, FW_HDR_V1_W7_PART_SIZE);
1642
1643 for (sec_idx = 0; sec_idx < info->section_num; sec_idx++) {
1644 section_info = &info->section_info[sec_idx];
1645 section = &fw_hdr->sections[sec_idx];
1646
1647 if (section_info->ignore)
1648 continue;
1649
1650 if (dst_sec_idx != sec_idx)
1651 fw_hdr->sections[dst_sec_idx] = *section;
1652
1653 dst_sec_idx++;
1654 }
1655
1656 le32p_replace_bits(&fw_hdr->w6, dst_sec_idx, FW_HDR_V1_W6_SEC_NUM);
1657
1658 return (info->section_num - dst_sec_idx) * sizeof(*section);
1659 }
1660
/* Send the (possibly tweaked) firmware header to the device as an FWDL H2C.
 * The header bytes are copied into an skb, patched per header version, and
 * trimmed if the tweak dropped sections. On successful rtw89_h2c_tx() the
 * skb is owned by the TX path; on any failure it is freed here.
 */
static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev,
				   const struct rtw89_fw_suit *fw_suit,
				   struct rtw89_fw_bin_info *info)
{
	u32 len = info->hdr_len - info->dynamic_hdr_len;
	struct rtw89_fw_hdr_v1 *fw_hdr_v1;
	const u8 *fw = fw_suit->data;
	struct rtw89_fw_hdr *fw_hdr;
	struct sk_buff *skb;
	u32 truncated;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw hdr dl\n");
		return -ENOMEM;
	}

	skb_put_data(skb, fw, len);

	/* tweak the copied header in the skb, not the firmware blob */
	switch (fw_suit->hdr_ver) {
	case 0:
		fw_hdr = (struct rtw89_fw_hdr *)skb->data;
		truncated = __rtw89_fw_download_tweak_hdr_v0(rtwdev, info, fw_hdr);
		break;
	case 1:
		fw_hdr_v1 = (struct rtw89_fw_hdr_v1 *)skb->data;
		truncated = __rtw89_fw_download_tweak_hdr_v1(rtwdev, info, fw_hdr_v1);
		break;
	default:
		ret = -EOPNOTSUPP;
		goto fail;
	}

	/* drop header bytes made redundant by the tweak (e.g. ignored
	 * sections compacted out of a v1 header)
	 */
	if (truncated) {
		len -= truncated;
		skb_trim(skb, len);
	}

	rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C,
				   H2C_CAT_MAC, H2C_CL_MAC_FWDL,
				   H2C_FUNC_MAC_FWHDR_DL, len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
1716
/* Download the firmware header and prepare for section download: send the
 * header H2C, wait for the FWDL path to become ready, then clear the halt
 * H2C/C2H control registers. Register write order follows the hardware's
 * expected FWDL sequence and must not be rearranged.
 */
static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev,
				 const struct rtw89_fw_suit *fw_suit,
				 struct rtw89_fw_bin_info *info)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	int ret;

	ret = __rtw89_fw_download_hdr(rtwdev, fw_suit, info);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]FW header download\n");
		return ret;
	}

	ret = mac->fwdl_check_path_ready(rtwdev, false);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]FWDL path ready\n");
		return ret;
	}

	rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0);
	rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0);

	return 0;
}
1741
/* Download one firmware section in chunks of at most @part_size bytes.
 * Sections marked ignore are skipped entirely; a len_override (when not
 * larger than the real length) shortens how much is sent. When the section
 * carries key data (key_addr/key_len), the key bytes overwrite the tail of
 * the section content inside the skb — only safe when the whole (possibly
 * overridden) section fits in a single part and the key fits the section.
 */
static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev,
				    struct rtw89_fw_hdr_section_info *info,
				    u32 part_size)
{
	struct sk_buff *skb;
	const u8 *section = info->addr;
	u32 residue_len = info->len;
	bool copy_key = false;
	u32 pkt_len;
	int ret;

	if (info->ignore)
		return 0;

	if (info->len_override) {
		/* an override larger than the section is only warned about,
		 * and the original length is used instead
		 */
		if (info->len_override > info->len)
			rtw89_warn(rtwdev, "override length %u larger than original %u\n",
				   info->len_override, info->len);
		else
			residue_len = info->len_override;
	}

	if (info->key_addr && info->key_len) {
		/* multi-part sections or undersized sections cannot take the
		 * key overwrite; send them unmodified with a warning
		 */
		if (residue_len > part_size || info->len < info->key_len)
			rtw89_warn(rtwdev,
				   "ignore to copy key data because of len %d, %d, %d, %d\n",
				   info->len, part_size,
				   info->key_len, residue_len);
		else
			copy_key = true;
	}

	while (residue_len) {
		pkt_len = min(residue_len, part_size);

		skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, pkt_len);
		if (!skb) {
			rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
			return -ENOMEM;
		}
		skb_put_data(skb, section, pkt_len);

		/* replace the tail of the (single) part with the key data */
		if (copy_key)
			memcpy(skb->data + pkt_len - info->key_len,
			       info->key_addr, info->key_len);

		ret = rtw89_h2c_tx(rtwdev, skb, true);
		if (ret) {
			rtw89_err(rtwdev, "failed to send h2c\n");
			goto fail;
		}

		section += pkt_len;
		residue_len -= pkt_len;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
1804
1805 static enum rtw89_fwdl_check_type
rtw89_fw_get_fwdl_chk_type_from_suit(struct rtw89_dev * rtwdev,const struct rtw89_fw_suit * fw_suit)1806 rtw89_fw_get_fwdl_chk_type_from_suit(struct rtw89_dev *rtwdev,
1807 const struct rtw89_fw_suit *fw_suit)
1808 {
1809 switch (fw_suit->type) {
1810 case RTW89_FW_BBMCU0:
1811 return RTW89_FWDL_CHECK_BB0_FWDL_DONE;
1812 case RTW89_FW_BBMCU1:
1813 return RTW89_FWDL_CHECK_BB1_FWDL_DONE;
1814 default:
1815 return RTW89_FWDL_CHECK_WCPU_FWDL_DONE;
1816 }
1817 }
1818
rtw89_fw_download_main(struct rtw89_dev * rtwdev,const struct rtw89_fw_suit * fw_suit,struct rtw89_fw_bin_info * info)1819 static int rtw89_fw_download_main(struct rtw89_dev *rtwdev,
1820 const struct rtw89_fw_suit *fw_suit,
1821 struct rtw89_fw_bin_info *info)
1822 {
1823 struct rtw89_fw_hdr_section_info *section_info = info->section_info;
1824 const struct rtw89_chip_info *chip = rtwdev->chip;
1825 enum rtw89_fwdl_check_type chk_type;
1826 u8 section_num = info->section_num;
1827 int ret;
1828
1829 while (section_num--) {
1830 ret = __rtw89_fw_download_main(rtwdev, section_info, info->part_size);
1831 if (ret)
1832 return ret;
1833 section_info++;
1834 }
1835
1836 if (chip->chip_gen == RTW89_CHIP_AX)
1837 return 0;
1838
1839 chk_type = rtw89_fw_get_fwdl_chk_type_from_suit(rtwdev, fw_suit);
1840 ret = rtw89_fw_check_rdy(rtwdev, chk_type);
1841 if (ret) {
1842 rtw89_warn(rtwdev, "failed to download firmware type %u\n",
1843 fw_suit->type);
1844 return ret;
1845 }
1846
1847 return 0;
1848 }
1849
/* Repeatedly sample and log the firmware CPU's program counter so a
 * stuck or looping WCPU can be identified after a download failure.
 */
static void rtw89_fw_prog_cnt_dump(struct rtw89_dev *rtwdev)
{
	enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
	u32 addr = R_AX_DBG_PORT_SEL;
	u32 val32;
	u16 index;

	/* BE chips expose the WCPU PC at a dedicated register; no debug
	 * port muxing is needed.
	 */
	if (chip_gen == RTW89_CHIP_BE) {
		addr = R_BE_WLCPU_PORT_PC;
		goto dump;
	}

	/* AX chips: route the firmware program counter onto the debug
	 * port before sampling it.
	 */
	rtw89_write32(rtwdev, R_AX_DBG_CTRL,
		      FIELD_PREP(B_AX_DBG_SEL0, FW_PROG_CNTR_DBG_SEL) |
		      FIELD_PREP(B_AX_DBG_SEL1, FW_PROG_CNTR_DBG_SEL));
	rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_SEL_0XC0_MASK, MAC_DBG_SEL);

dump:
	/* 15 samples, 10us apart, so the log shows whether the PC moves. */
	for (index = 0; index < 15; index++) {
		val32 = rtw89_read32(rtwdev, addr);
		rtw89_err(rtwdev, "[ERR]fw PC = 0x%x\n", val32);
		fsleep(10);
	}
}
1874
/* Post-mortem dump after a failed firmware download: log the FWDL control
 * and boot-debug registers, then the firmware program counter trace.
 */
static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev)
{
	u32 val32;

	val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL);
	rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32);

	val32 = rtw89_read32(rtwdev, R_AX_BOOT_DBG);
	rtw89_err(rtwdev, "[ERR]fwdl 0x83F0 = 0x%x\n", val32);

	rtw89_fw_prog_cnt_dump(rtwdev);
}
1887
/* Download one firmware suit: parse its header, prepare the download
 * path, then push the header followed by all sections to the device.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int rtw89_fw_download_suit(struct rtw89_dev *rtwdev,
				  struct rtw89_fw_suit *fw_suit)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	struct rtw89_fw_bin_info info = {};
	int ret;

	ret = rtw89_fw_hdr_parser(rtwdev, fw_suit, &info);
	if (ret) {
		rtw89_err(rtwdev, "parse fw header fail\n");
		return ret;
	}

	rtw89_fwdl_secure_idmem_share_mode(rtwdev, info.idmem_share_mode);

	/* RTL8922A: program the secure-boot malloc info register before a
	 * NORMAL/WOWLAN image download (vendor-provided magic value).
	 */
	if (rtwdev->chip->chip_id == RTL8922A &&
	    (fw_suit->type == RTW89_FW_NORMAL || fw_suit->type == RTW89_FW_WOWLAN))
		rtw89_write32(rtwdev, R_BE_SECURE_BOOT_MALLOC_INFO, 0x20248000);

	ret = mac->fwdl_check_path_ready(rtwdev, true);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]H2C path ready\n");
		return ret;
	}

	ret = rtw89_fw_download_hdr(rtwdev, fw_suit, &info);
	if (ret)
		return ret;

	ret = rtw89_fw_download_main(rtwdev, fw_suit, &info);
	if (ret)
		return ret;

	return 0;
}
1923
/* One full firmware download attempt: reset and re-enable the WCPU,
 * download the requested suit (plus all BB MCU images when @include_bb),
 * reset host<->firmware sequence bookkeeping, and wait until firmware
 * reports FreeRTOS is running.  Any failure after the WCPU is enabled
 * triggers a diagnostic register/PC dump.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static
int __rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
			bool include_bb)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
	u8 bbmcu_nr = rtwdev->chip->bbmcu_nr;
	int ret;
	int i;

	mac->disable_cpu(rtwdev);
	ret = mac->fwdl_enable_wcpu(rtwdev, 0, true, include_bb);
	if (ret)
		return ret;

	ret = rtw89_fw_download_suit(rtwdev, fw_suit);
	if (ret)
		goto fwdl_err;

	/* Optionally download each BB MCU image the chip carries. */
	for (i = 0; i < bbmcu_nr && include_bb; i++) {
		fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_BBMCU0 + i);

		ret = rtw89_fw_download_suit(rtwdev, fw_suit);
		if (ret)
			goto fwdl_err;
	}

	/* Fresh firmware: restart all H2C/C2H and power-mode sequence
	 * counters so host and firmware agree on the numbering.
	 */
	fw_info->h2c_seq = 0;
	fw_info->rec_seq = 0;
	fw_info->h2c_counter = 0;
	fw_info->c2h_counter = 0;
	rtwdev->mac.rpwm_seq_num = RPWM_SEQ_NUM_MAX;
	rtwdev->mac.cpwm_seq_num = CPWM_SEQ_NUM_MAX;

	/* brief settle time before polling firmware readiness */
	mdelay(5);

	ret = rtw89_fw_check_rdy(rtwdev, RTW89_FWDL_CHECK_FREERTOS_DONE);
	if (ret) {
		rtw89_warn(rtwdev, "download firmware fail\n");
		goto fwdl_err;
	}

	return ret;

fwdl_err:
	rtw89_fw_dl_fail_dump(rtwdev);
	return ret;
}
1973
/* Download firmware of @type, retrying a full download up to five times.
 *
 * Return: 0 on success, the last attempt's error code otherwise.
 */
int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
		      bool include_bb)
{
	int attempts_left = 5;
	int ret;

	do {
		ret = __rtw89_fw_download(rtwdev, type, include_bb);
	} while (ret && --attempts_left);

	return ret;
}
1988
rtw89_wait_firmware_completion(struct rtw89_dev * rtwdev)1989 int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev)
1990 {
1991 struct rtw89_fw_info *fw = &rtwdev->fw;
1992
1993 wait_for_completion(&fw->req.completion);
1994 if (!fw->req.firmware)
1995 return -EINVAL;
1996
1997 return 0;
1998 }
1999
/* Request the firmware blob named @fw_name into @req, unless an early
 * request already populated it.  The completion is always signalled, so
 * rtw89_wait_firmware_completion() never blocks forever, even on failure.
 *
 * @nowarn: suppress the firmware loader's warning when the file is
 *          missing (used when absence is an expected outcome).
 *
 * Return: 0 on success (or when already requested), firmware-loader errno
 * otherwise.
 */
static int rtw89_load_firmware_req(struct rtw89_dev *rtwdev,
				   struct rtw89_fw_req_info *req,
				   const char *fw_name, bool nowarn)
{
	int ret;

	if (req->firmware) {
		rtw89_debug(rtwdev, RTW89_DBG_FW,
			    "full firmware has been early requested\n");
		complete_all(&req->completion);
		return 0;
	}

	if (nowarn)
		ret = firmware_request_nowarn(&req->firmware, fw_name, rtwdev->dev);
	else
		ret = request_firmware(&req->firmware, fw_name, rtwdev->dev);

	complete_all(&req->completion);

	return ret;
}
2022
/* Deferred-work entry point: build the firmware filename for this chip
 * and its preferred format, then request the blob from userspace.
 */
void rtw89_load_firmware_work(struct work_struct *work)
{
	struct rtw89_dev *rtwdev =
		container_of(work, struct rtw89_dev, load_firmware_work);
	const struct rtw89_chip_info *chip = rtwdev->chip;
	char fw_name[64];

	rtw89_fw_get_filename(fw_name, sizeof(fw_name),
			      chip->fw_basename, rtwdev->fw.fw_format);

	rtw89_load_firmware_req(rtwdev, &rtwdev->fw.req, fw_name, false);
}
2035
rtw89_free_phy_tbl_from_elm(struct rtw89_phy_table * tbl)2036 static void rtw89_free_phy_tbl_from_elm(struct rtw89_phy_table *tbl)
2037 {
2038 if (!tbl)
2039 return;
2040
2041 kfree(tbl->regs);
2042 kfree(tbl);
2043 }
2044
rtw89_unload_firmware_elements(struct rtw89_dev * rtwdev)2045 static void rtw89_unload_firmware_elements(struct rtw89_dev *rtwdev)
2046 {
2047 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
2048 int i;
2049
2050 rtw89_free_phy_tbl_from_elm(elm_info->bb_tbl);
2051 rtw89_free_phy_tbl_from_elm(elm_info->bb_gain);
2052 for (i = 0; i < ARRAY_SIZE(elm_info->rf_radio); i++)
2053 rtw89_free_phy_tbl_from_elm(elm_info->rf_radio[i]);
2054 rtw89_free_phy_tbl_from_elm(elm_info->rf_nctl);
2055
2056 kfree(elm_info->txpwr_trk);
2057 kfree(elm_info->rfk_log_fmt);
2058 }
2059
/* Tear down all firmware state: cancel the asynchronous load work,
 * release the firmware blob, and free log-format and element allocations.
 */
void rtw89_unload_firmware(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_info *fw = &rtwdev->fw;

	cancel_work_sync(&rtwdev->load_firmware_work);

	if (fw->req.firmware) {
		release_firmware(fw->req.firmware);

		/* assign NULL back in case rtw89_free_ieee80211_hw()
		 * try to release the same one again.
		 */
		fw->req.firmware = NULL;
	}

	kfree(fw->log.fmts);
	rtw89_unload_firmware_elements(rtwdev);
}
2078
/* Look up @fmt_id in the firmware log format dictionary.
 *
 * Return: the dictionary index of @fmt_id, or 0 when the id is unknown or
 * exceeds the highest id seen while building the dictionary.
 */
static u32 rtw89_fw_log_get_fmt_idx(struct rtw89_dev *rtwdev, u32 fmt_id)
{
	struct rtw89_fw_log *fw_log = &rtwdev->fw.log;
	u32 idx;

	if (fmt_id > fw_log->last_fmt_id)
		return 0;

	for (idx = 0; idx < fw_log->fmt_count; idx++)
		if (le32_to_cpu(fw_log->fmt_ids[idx]) == fmt_id)
			return idx;

	return 0;
}
2093
/* Build the firmware log format dictionary from the log-format suit blob.
 *
 * The blob starts with a header carrying a count, followed by an array of
 * format ids, followed by the NUL-separated format strings.  Pointers into
 * the blob are stored directly (the blob outlives the dictionary), so
 * only the pointer array itself is allocated here.
 *
 * Return: 0 on success, -ENOMEM if the pointer array cannot be allocated.
 */
static int rtw89_fw_log_create_fmts_dict(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_log *log = &rtwdev->fw.log;
	const struct rtw89_fw_logsuit_hdr *suit_hdr;
	struct rtw89_fw_suit *suit = &log->suit;
	const void *fmts_ptr, *fmts_end_ptr;
	u32 fmt_count;
	int i;

	suit_hdr = (const struct rtw89_fw_logsuit_hdr *)suit->data;
	fmt_count = le32_to_cpu(suit_hdr->count);
	log->fmt_ids = suit_hdr->ids;
	/* format strings begin right after the id array */
	fmts_ptr = &suit_hdr->ids[fmt_count];
	fmts_end_ptr = suit->data + suit->size;
	log->fmts = kcalloc(fmt_count, sizeof(char *), GFP_KERNEL);
	if (!log->fmts)
		return -ENOMEM;

	for (i = 0; i < fmt_count; i++) {
		/* Skip NUL padding to reach the next string; an all-NUL
		 * tail means fewer strings than the declared count.
		 */
		fmts_ptr = memchr_inv(fmts_ptr, 0, fmts_end_ptr - fmts_ptr);
		if (!fmts_ptr)
			break;

		(*log->fmts)[i] = fmts_ptr;
		log->last_fmt_id = le32_to_cpu(log->fmt_ids[i]);
		log->fmt_count++;
		fmts_ptr += strlen(fmts_ptr);
	}

	return 0;
}
2125
rtw89_fw_log_prepare(struct rtw89_dev * rtwdev)2126 int rtw89_fw_log_prepare(struct rtw89_dev *rtwdev)
2127 {
2128 struct rtw89_fw_log *log = &rtwdev->fw.log;
2129 struct rtw89_fw_suit *suit = &log->suit;
2130
2131 if (!suit || !suit->data) {
2132 rtw89_debug(rtwdev, RTW89_DBG_FW, "no log format file\n");
2133 return -EINVAL;
2134 }
2135 if (log->fmts)
2136 return 0;
2137
2138 return rtw89_fw_log_create_fmts_dict(rtwdev);
2139 }
2140
/* Render one formatted C2H firmware log entry.
 *
 * @para_int: non-zero when the payload is an array of __le32 arguments,
 *            zero when it is a raw string.
 * @raw_data: when true, emit an undecoded "fw_enc(...)" line (used when
 *            the format id is unknown); when false, format through the
 *            dictionary entry at @fmt_idx.
 */
static void rtw89_fw_log_dump_data(struct rtw89_dev *rtwdev,
				   const struct rtw89_fw_c2h_log_fmt *log_fmt,
				   u32 fmt_idx, u8 para_int, bool raw_data)
{
	const char *(*fmts)[] = rtwdev->fw.log.fmts;
	char str_buf[RTW89_C2H_FW_LOG_STR_BUF_SIZE];
	u32 args[RTW89_C2H_FW_LOG_MAX_PARA_NUM] = {0};
	int i;

	if (log_fmt->argc > RTW89_C2H_FW_LOG_MAX_PARA_NUM) {
		rtw89_warn(rtwdev, "C2H log: Arg count is unexpected %d\n",
			   log_fmt->argc);
		return;
	}

	if (para_int)
		for (i = 0 ; i < log_fmt->argc; i++)
			args[i] = le32_to_cpu(log_fmt->u.argv[i]);

	if (raw_data) {
		if (para_int)
			snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE,
				 "fw_enc(%d, %d, %d) %*ph", le32_to_cpu(log_fmt->fmt_id),
				 para_int, log_fmt->argc, (int)sizeof(args), args);
		else
			snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE,
				 "fw_enc(%d, %d, %d, %s)", le32_to_cpu(log_fmt->fmt_id),
				 para_int, log_fmt->argc, log_fmt->u.raw);
	} else {
		/* Dictionary format string may consume up to
		 * RTW89_C2H_FW_LOG_MAX_PARA_NUM args; unused slots are 0.
		 */
		snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, (*fmts)[fmt_idx],
			 args[0x0], args[0x1], args[0x2], args[0x3], args[0x4],
			 args[0x5], args[0x6], args[0x7], args[0x8], args[0x9],
			 args[0xa], args[0xb], args[0xc], args[0xd], args[0xe],
			 args[0xf]);
	}

	rtw89_info(rtwdev, "C2H log: %s", str_buf);
}
2179
/* Dispatch a C2H firmware log buffer: print as plain text when the
 * payload is too short or lacks the formatted-log signature, otherwise
 * decode it via the format dictionary.
 */
void rtw89_fw_log_dump(struct rtw89_dev *rtwdev, u8 *buf, u32 len)
{
	const struct rtw89_fw_c2h_log_fmt *log_fmt;
	u8 para_int;
	u32 fmt_idx;

	if (len < RTW89_C2H_HEADER_LEN) {
		rtw89_err(rtwdev, "c2h log length is wrong!\n");
		return;
	}

	/* strip the generic C2H header before inspecting the log payload */
	buf += RTW89_C2H_HEADER_LEN;
	len -= RTW89_C2H_HEADER_LEN;
	log_fmt = (const struct rtw89_fw_c2h_log_fmt *)buf;

	if (len < RTW89_C2H_FW_FORMATTED_LOG_MIN_LEN)
		goto plain_log;

	if (log_fmt->signature != cpu_to_le16(RTW89_C2H_FW_LOG_SIGNATURE))
		goto plain_log;

	/* Formatted entry but no dictionary loaded: nothing we can print. */
	if (!rtwdev->fw.log.fmts)
		return;

	para_int = u8_get_bits(log_fmt->feature, RTW89_C2H_FW_LOG_FEATURE_PARA_INT);
	fmt_idx = rtw89_fw_log_get_fmt_idx(rtwdev, le32_to_cpu(log_fmt->fmt_id));

	/* Known format with a string argument: print it directly. */
	if (!para_int && log_fmt->argc != 0 && fmt_idx != 0)
		rtw89_info(rtwdev, "C2H log: %s%s",
			   (*rtwdev->fw.log.fmts)[fmt_idx], log_fmt->u.raw);
	else if (fmt_idx != 0 && para_int)
		rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, false);
	else
		rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, true);
	return;

plain_log:
	rtw89_info(rtwdev, "C2H log: %.*s", len, buf);

}
2220
/* Send the ADDR_CAM/BSSID CAM update H2C for a vif/sta link.
 *
 * When the ADDR_CAM_V0 firmware feature is set, or on any AX-generation
 * chip, the shorter v0 payload layout is used; otherwise the larger
 * layout is sent with w15 carrying @upd_mode.  The CAM fill helpers write
 * the common (v0) portion in both cases.
 */
int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
		     struct rtw89_sta_link *rtwsta_link, const u8 *scan_mac_addr,
		     enum rtw89_upd_mode upd_mode)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_h2c_addr_cam_v0 *h2c_v0;
	struct rtw89_h2c_addr_cam *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	u8 ver = U8_MAX;
	int ret;

	if (RTW89_CHK_FW_FEATURE(ADDR_CAM_V0, &rtwdev->fw) ||
	    chip->chip_gen == RTW89_CHIP_AX) {
		len = sizeof(*h2c_v0);
		ver = 0;
	}

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c_v0 = (struct rtw89_h2c_addr_cam_v0 *)skb->data;

	rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif_link, rtwsta_link,
				     scan_mac_addr, h2c_v0);
	rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif_link, rtwsta_link, h2c_v0);

	/* v0 payload carries no upd_mode word */
	if (ver == 0)
		goto hdr;

	h2c = (struct rtw89_h2c_addr_cam *)skb->data;
	h2c->w15 = le32_encode_bits(upd_mode, ADDR_CAM_W15_UPD_MODE);

hdr:
	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_ADDR_CAM_UPDATE,
			      H2C_FUNC_MAC_ADDR_CAM_UPD, 0, 1,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
2276
rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev * rtwdev,struct rtw89_vif_link * rtwvif_link,struct rtw89_sta_link * rtwsta_link)2277 int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev,
2278 struct rtw89_vif_link *rtwvif_link,
2279 struct rtw89_sta_link *rtwsta_link)
2280 {
2281 struct rtw89_h2c_dctlinfo_ud_v1 *h2c;
2282 u32 len = sizeof(*h2c);
2283 struct sk_buff *skb;
2284 int ret;
2285
2286 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2287 if (!skb) {
2288 rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n");
2289 return -ENOMEM;
2290 }
2291 skb_put(skb, len);
2292 h2c = (struct rtw89_h2c_dctlinfo_ud_v1 *)skb->data;
2293
2294 rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif_link, rtwsta_link, h2c);
2295
2296 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2297 H2C_CAT_MAC,
2298 H2C_CL_MAC_FR_EXCHG,
2299 H2C_FUNC_MAC_DCTLINFO_UD_V1, 0, 0,
2300 len);
2301
2302 ret = rtw89_h2c_tx(rtwdev, skb, false);
2303 if (ret) {
2304 rtw89_err(rtwdev, "failed to send h2c\n");
2305 goto fail;
2306 }
2307
2308 return 0;
2309 fail:
2310 dev_kfree_skb_any(skb);
2311
2312 return ret;
2313 }
2314 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1);
2315
rtw89_fw_h2c_dctl_sec_cam_v2(struct rtw89_dev * rtwdev,struct rtw89_vif_link * rtwvif_link,struct rtw89_sta_link * rtwsta_link)2316 int rtw89_fw_h2c_dctl_sec_cam_v2(struct rtw89_dev *rtwdev,
2317 struct rtw89_vif_link *rtwvif_link,
2318 struct rtw89_sta_link *rtwsta_link)
2319 {
2320 struct rtw89_h2c_dctlinfo_ud_v2 *h2c;
2321 u32 len = sizeof(*h2c);
2322 struct sk_buff *skb;
2323 int ret;
2324
2325 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2326 if (!skb) {
2327 rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n");
2328 return -ENOMEM;
2329 }
2330 skb_put(skb, len);
2331 h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data;
2332
2333 rtw89_cam_fill_dctl_sec_cam_info_v2(rtwdev, rtwvif_link, rtwsta_link, h2c);
2334
2335 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2336 H2C_CAT_MAC,
2337 H2C_CL_MAC_FR_EXCHG,
2338 H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0,
2339 len);
2340
2341 ret = rtw89_h2c_tx(rtwdev, skb, false);
2342 if (ret) {
2343 rtw89_err(rtwdev, "failed to send h2c\n");
2344 goto fail;
2345 }
2346
2347 return 0;
2348 fail:
2349 dev_kfree_skb_any(skb);
2350
2351 return ret;
2352 }
2353 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v2);
2354
rtw89_fw_h2c_dctl_sec_cam_v3(struct rtw89_dev * rtwdev,struct rtw89_vif_link * rtwvif_link,struct rtw89_sta_link * rtwsta_link)2355 int rtw89_fw_h2c_dctl_sec_cam_v3(struct rtw89_dev *rtwdev,
2356 struct rtw89_vif_link *rtwvif_link,
2357 struct rtw89_sta_link *rtwsta_link)
2358 {
2359 struct rtw89_h2c_dctlinfo_ud_v3 *h2c;
2360 u32 len = sizeof(*h2c);
2361 struct sk_buff *skb;
2362 int ret;
2363
2364 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2365 if (!skb) {
2366 rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n");
2367 return -ENOMEM;
2368 }
2369 skb_put(skb, len);
2370 h2c = (struct rtw89_h2c_dctlinfo_ud_v3 *)skb->data;
2371
2372 rtw89_cam_fill_dctl_sec_cam_info_v3(rtwdev, rtwvif_link, rtwsta_link, h2c);
2373
2374 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2375 H2C_CAT_MAC,
2376 H2C_CL_MAC_FR_EXCHG,
2377 H2C_FUNC_MAC_DCTLINFO_UD_V3, 0, 0,
2378 len);
2379
2380 ret = rtw89_h2c_tx(rtwdev, skb, false);
2381 if (ret) {
2382 rtw89_err(rtwdev, "failed to send h2c\n");
2383 goto fail;
2384 }
2385
2386 return 0;
2387 fail:
2388 dev_kfree_skb_any(skb);
2389
2390 return ret;
2391 }
2392 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v3);
2393
/* Write the default DMAC control (D-CTL) table entry for a mac_id using
 * the v2 layout: every word's "ALL" mask is set so firmware updates the
 * entire entry, with the data words left as allocated.
 */
int rtw89_fw_h2c_default_dmac_tbl_v2(struct rtw89_dev *rtwdev,
				     struct rtw89_vif_link *rtwvif_link,
				     struct rtw89_sta_link *rtwsta_link)
{
	/* prefer the sta link's mac_id when a sta link is given */
	u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
	struct rtw89_h2c_dctlinfo_ud_v2 *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for dctl v2\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data;

	h2c->c0 = le32_encode_bits(mac_id, DCTLINFO_V2_C0_MACID) |
		  le32_encode_bits(1, DCTLINFO_V2_C0_OP);

	/* select every field of every word so the whole entry is written */
	h2c->m0 = cpu_to_le32(DCTLINFO_V2_W0_ALL);
	h2c->m1 = cpu_to_le32(DCTLINFO_V2_W1_ALL);
	h2c->m2 = cpu_to_le32(DCTLINFO_V2_W2_ALL);
	h2c->m3 = cpu_to_le32(DCTLINFO_V2_W3_ALL);
	h2c->m4 = cpu_to_le32(DCTLINFO_V2_W4_ALL);
	h2c->m5 = cpu_to_le32(DCTLINFO_V2_W5_ALL);
	h2c->m6 = cpu_to_le32(DCTLINFO_V2_W6_ALL);
	h2c->m7 = cpu_to_le32(DCTLINFO_V2_W7_ALL);
	h2c->m8 = cpu_to_le32(DCTLINFO_V2_W8_ALL);
	h2c->m9 = cpu_to_le32(DCTLINFO_V2_W9_ALL);
	h2c->m10 = cpu_to_le32(DCTLINFO_V2_W10_ALL);
	h2c->m11 = cpu_to_le32(DCTLINFO_V2_W11_ALL);
	h2c->m12 = cpu_to_le32(DCTLINFO_V2_W12_ALL);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_default_dmac_tbl_v2);
2448
rtw89_fw_h2c_default_dmac_tbl_v3(struct rtw89_dev * rtwdev,struct rtw89_vif_link * rtwvif_link,struct rtw89_sta_link * rtwsta_link)2449 int rtw89_fw_h2c_default_dmac_tbl_v3(struct rtw89_dev *rtwdev,
2450 struct rtw89_vif_link *rtwvif_link,
2451 struct rtw89_sta_link *rtwsta_link)
2452 {
2453 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
2454 struct rtw89_h2c_dctlinfo_ud_v3 *h2c;
2455 u32 len = sizeof(*h2c);
2456 struct sk_buff *skb;
2457 int ret;
2458
2459 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
2460 if (!skb) {
2461 rtw89_err(rtwdev, "failed to alloc skb for dctl v2\n");
2462 return -ENOMEM;
2463 }
2464 skb_put(skb, len);
2465 h2c = (struct rtw89_h2c_dctlinfo_ud_v3 *)skb->data;
2466
2467 h2c->c0 = le32_encode_bits(mac_id, DCTLINFO_V3_C0_MACID) |
2468 le32_encode_bits(1, DCTLINFO_V3_C0_OP);
2469
2470 h2c->m0 = cpu_to_le32(DCTLINFO_V3_W0_ALL);
2471 h2c->m1 = cpu_to_le32(DCTLINFO_V3_W1_ALL);
2472 h2c->m2 = cpu_to_le32(DCTLINFO_V3_W2_ALL);
2473 h2c->m3 = cpu_to_le32(DCTLINFO_V3_W3_ALL);
2474 h2c->m4 = cpu_to_le32(DCTLINFO_V3_W4_ALL);
2475 h2c->m5 = cpu_to_le32(DCTLINFO_V3_W5_ALL);
2476 h2c->m6 = cpu_to_le32(DCTLINFO_V3_W6_ALL);
2477 h2c->m7 = cpu_to_le32(DCTLINFO_V3_W7_ALL);
2478 h2c->m8 = cpu_to_le32(DCTLINFO_V3_W8_ALL);
2479 h2c->m9 = cpu_to_le32(DCTLINFO_V3_W9_ALL);
2480 h2c->m10 = cpu_to_le32(DCTLINFO_V3_W10_ALL);
2481 h2c->m11 = cpu_to_le32(DCTLINFO_V3_W11_ALL);
2482 h2c->m12 = cpu_to_le32(DCTLINFO_V3_W12_ALL);
2483 h2c->m13 = cpu_to_le32(DCTLINFO_V3_W13_ALL);
2484
2485 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2486 H2C_CAT_MAC,
2487 H2C_CL_MAC_FR_EXCHG,
2488 H2C_FUNC_MAC_DCTLINFO_UD_V3, 0, 0,
2489 len);
2490
2491 ret = rtw89_h2c_tx(rtwdev, skb, false);
2492 if (ret) {
2493 rtw89_err(rtwdev, "failed to send h2c\n");
2494 goto fail;
2495 }
2496
2497 return 0;
2498 fail:
2499 dev_kfree_skb_any(skb);
2500
2501 return ret;
2502 }
2503 EXPORT_SYMBOL(rtw89_fw_h2c_default_dmac_tbl_v3);
2504
/* Program (or tear down, when !@valid) a static BA CAM entry for an
 * aggregation session described by @params.  Failure to obtain a static
 * entry is not an error: the function returns 0 and hardware falls back
 * to dynamic BA CAM.
 */
int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev,
			struct rtw89_vif_link *rtwvif_link,
			struct rtw89_sta_link *rtwsta_link,
			bool valid, struct ieee80211_ampdu_params *params)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_h2c_ba_cam *h2c;
	u8 macid = rtwsta_link->mac_id;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	u8 entry_idx;
	int ret;

	/* acquire a static entry on setup, release it on teardown */
	ret = valid ?
	      rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta_link, params->tid,
					      &entry_idx) :
	      rtw89_core_release_sta_ba_entry(rtwdev, rtwsta_link, params->tid,
					      &entry_idx);
	if (ret) {
		/* it still works even if we don't have static BA CAM, because
		 * hardware can create dynamic BA CAM automatically.
		 */
		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
			    "failed to %s entry tid=%d for h2c ba cam\n",
			    valid ? "alloc" : "free", params->tid);
		return 0;
	}

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_ba_cam *)skb->data;

	h2c->w0 = le32_encode_bits(macid, RTW89_H2C_BA_CAM_W0_MACID);
	/* V0_EXT BA CAM carries the entry index in w1 instead of w0 */
	if (chip->bacam_ver == RTW89_BACAM_V0_EXT)
		h2c->w1 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1);
	else
		h2c->w0 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W0_ENTRY_IDX);
	/* teardown only needs macid + entry index */
	if (!valid)
		goto end;
	h2c->w0 |= le32_encode_bits(valid, RTW89_H2C_BA_CAM_W0_VALID) |
		   le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_W0_TID);
	/* bitmap size code: 4 when buf_size exceeds 64, otherwise 0 */
	if (params->buf_size > 64)
		h2c->w0 |= le32_encode_bits(4, RTW89_H2C_BA_CAM_W0_BMAP_SIZE);
	else
		h2c->w0 |= le32_encode_bits(0, RTW89_H2C_BA_CAM_W0_BMAP_SIZE);
	/* If init req is set, hw will set the ssn */
	h2c->w0 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_INIT_REQ) |
		   le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_W0_SSN);

	if (chip->bacam_ver == RTW89_BACAM_V0_EXT) {
		h2c->w1 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W1_STD_EN) |
			   le32_encode_bits(rtwvif_link->mac_idx,
					    RTW89_H2C_BA_CAM_W1_BAND);
	}

end:
	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_BA_CAM,
			      H2C_FUNC_MAC_BA_CAM, 0, 1,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam);
2584
/* Preload one dynamic BA CAM entry (V0_EXT scheme): mark it valid with
 * the given @entry_idx/@uid pair, band 0, standard-entry mode off.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, or the H2C tx
 * error code.
 */
static int rtw89_fw_h2c_init_ba_cam_v0_ext(struct rtw89_dev *rtwdev,
					   u8 entry_idx, u8 uid)
{
	struct rtw89_h2c_ba_cam *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for dynamic h2c ba cam\n");
		return -ENOMEM;
	}

	h2c = (struct rtw89_h2c_ba_cam *)skb_put(skb, len);
	h2c->w0 = le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_VALID);
	h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1) |
		  le32_encode_bits(uid, RTW89_H2C_BA_CAM_W1_UID) |
		  le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_BAND) |
		  le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_STD_EN);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_BA_CAM,
			      H2C_FUNC_MAC_BA_CAM, 0, 1,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
2625
rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(struct rtw89_dev * rtwdev)2626 void rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(struct rtw89_dev *rtwdev)
2627 {
2628 const struct rtw89_chip_info *chip = rtwdev->chip;
2629 u8 entry_idx = chip->bacam_num;
2630 u8 uid = 0;
2631 int i;
2632
2633 for (i = 0; i < chip->bacam_dynamic_num; i++) {
2634 rtw89_fw_h2c_init_ba_cam_v0_ext(rtwdev, entry_idx, uid);
2635 entry_idx++;
2636 uid++;
2637 }
2638 }
2639
/* Program (or tear down, when !@valid) a static BA CAM entry using the
 * v1 H2C layout.  As with rtw89_fw_h2c_ba_cam(), failing to acquire a
 * static entry is not an error: hardware can fall back to dynamic BA CAM.
 */
int rtw89_fw_h2c_ba_cam_v1(struct rtw89_dev *rtwdev,
			   struct rtw89_vif_link *rtwvif_link,
			   struct rtw89_sta_link *rtwsta_link,
			   bool valid, struct ieee80211_ampdu_params *params)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_h2c_ba_cam_v1 *h2c;
	u8 macid = rtwsta_link->mac_id;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	u8 entry_idx;
	u8 bmap_size;
	int ret;

	ret = valid ?
	      rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta_link, params->tid,
					      &entry_idx) :
	      rtw89_core_release_sta_ba_entry(rtwdev, rtwsta_link, params->tid,
					      &entry_idx);
	if (ret) {
		/* it still works even if we don't have static BA CAM, because
		 * hardware can create dynamic BA CAM automatically.
		 */
		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
			    "failed to %s entry tid=%d for h2c ba cam\n",
			    valid ? "alloc" : "free", params->tid);
		return 0;
	}

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_ba_cam_v1 *)skb->data;

	/* bitmap size code by reorder buffer size:
	 * >512 -> 10, >256 -> 8, >64 -> 4, else 0
	 */
	if (params->buf_size > 512)
		bmap_size = 10;
	else if (params->buf_size > 256)
		bmap_size = 8;
	else if (params->buf_size > 64)
		bmap_size = 4;
	else
		bmap_size = 0;

	h2c->w0 = le32_encode_bits(valid, RTW89_H2C_BA_CAM_V1_W0_VALID) |
		  le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W0_INIT_REQ) |
		  le32_encode_bits(macid, RTW89_H2C_BA_CAM_V1_W0_MACID_MASK) |
		  le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_V1_W0_TID_MASK) |
		  le32_encode_bits(bmap_size, RTW89_H2C_BA_CAM_V1_W0_BMAP_SIZE_MASK) |
		  le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_V1_W0_SSN_MASK);

	entry_idx += chip->bacam_dynamic_num; /* std entry right after dynamic ones */
	h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_V1_W1_ENTRY_IDX_MASK) |
		  le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W1_STD_ENTRY_EN) |
		  le32_encode_bits(!!rtwvif_link->mac_idx,
				   RTW89_H2C_BA_CAM_V1_W1_BAND_SEL);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_BA_CAM,
			      H2C_FUNC_MAC_BA_CAM_V1, 0, 1,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam_v1);
2718
/* Send the BA CAM init H2C carrying @users, @offset and the band select
 * (@mac_idx) fields.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, or the H2C tx
 * error code.
 */
int rtw89_fw_h2c_init_ba_cam_users(struct rtw89_dev *rtwdev, u8 users,
				   u8 offset, u8 mac_idx)
{
	struct rtw89_h2c_ba_cam_init *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam init\n");
		return -ENOMEM;
	}

	h2c = (struct rtw89_h2c_ba_cam_init *)skb_put(skb, len);
	h2c->w0 = le32_encode_bits(users, RTW89_H2C_BA_CAM_INIT_USERS_MASK) |
		  le32_encode_bits(offset, RTW89_H2C_BA_CAM_INIT_OFFSET_MASK) |
		  le32_encode_bits(mac_idx, RTW89_H2C_BA_CAM_INIT_BAND_SEL);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_BA_CAM,
			      H2C_FUNC_MAC_BA_CAM_INIT, 0, 1,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
2757
2758 #define H2C_LOG_CFG_LEN 12
/* Configure firmware log output over C2H.  When @enable, request the
 * INIT/TASK/PS/ERROR/MLO/SCAN components at "loud" level; when disabled,
 * send an empty component mask.
 */
int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable)
{
	struct sk_buff *skb;
	u32 comp = 0;
	int ret;

	if (enable)
		comp = BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) |
		       BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) |
		       BIT(RTW89_FW_LOG_COMP_MLO) | BIT(RTW89_FW_LOG_COMP_SCAN);

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_LOG_CFG_LEN);
	SET_LOG_CFG_LEVEL(skb->data, RTW89_FW_LOG_LEVEL_LOUD);
	/* deliver logs to the host via C2H messages */
	SET_LOG_CFG_PATH(skb->data, BIT(RTW89_FW_LOG_LEVEL_C2H));
	SET_LOG_CFG_COMP(skb->data, comp);
	SET_LOG_CFG_COMP_EXT(skb->data, 0);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_FW_INFO,
			      H2C_FUNC_LOG_CFG, 0, 0,
			      H2C_LOG_CFG_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
2800
/* Build a template EAPOL-Key 2/2 frame for WoWLAN GTK rekey offload,
 * addressed to the current BSS.  The key descriptor version is chosen
 * from the saved WoW crypto parameters (ptk_alg/akm); the mapping mirrors
 * the values set up by the WoW code — NOTE(review): confirm against the
 * 802.11 key-descriptor-version table if this is ever extended.
 *
 * Return: new skb owned by the caller, or NULL on allocation failure.
 */
static struct sk_buff *rtw89_eapol_get(struct rtw89_dev *rtwdev,
				       struct rtw89_vif_link *rtwvif_link)
{
	/* LLC/SNAP header for EAPOL (ethertype 0x888E) followed by the
	 * fixed 802.1X EAPOL-Key prefix of message 2/2.
	 */
	static const u8 gtkbody[] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00, 0x88,
				     0x8E, 0x01, 0x03, 0x00, 0x5F, 0x02, 0x03};
	u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev);
	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
	struct rtw89_eapol_2_of_2 *eapol_pkt;
	struct ieee80211_bss_conf *bss_conf;
	struct ieee80211_hdr_3addr *hdr;
	struct sk_buff *skb;
	u8 key_des_ver;

	if (rtw_wow->ptk_alg == 3)
		key_des_ver = 1;
	else if (rtw_wow->akm == 1 || rtw_wow->akm == 2)
		key_des_ver = 2;
	else if (rtw_wow->akm > 2 && rtw_wow->akm < 7)
		key_des_ver = 3;
	else
		key_des_ver = 0;

	skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*eapol_pkt));
	if (!skb)
		return NULL;

	hdr = skb_put_zero(skb, sizeof(*hdr));
	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
					 IEEE80211_FCTL_TODS |
					 IEEE80211_FCTL_PROTECTED);

	rcu_read_lock();

	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);

	ether_addr_copy(hdr->addr1, bss_conf->bssid);
	ether_addr_copy(hdr->addr2, bss_conf->addr);
	ether_addr_copy(hdr->addr3, bss_conf->bssid);

	rcu_read_unlock();

	/* reserve zeroed room for the cipher's security header */
	skb_put_zero(skb, sec_hdr_len);

	eapol_pkt = skb_put_zero(skb, sizeof(*eapol_pkt));
	memcpy(eapol_pkt->gtkbody, gtkbody, sizeof(gtkbody));
	eapol_pkt->key_des_ver = key_des_ver;

	return skb;
}
2850
/* Build a template protected SA Query response frame (for WoWLAN
 * offload), addressed to the link's BSS.  Zeroed room for the security
 * header is reserved between the 802.11 header and the action body.
 *
 * Return: new skb owned by the caller, or NULL on allocation failure.
 */
static struct sk_buff *rtw89_sa_query_get(struct rtw89_dev *rtwdev,
					  struct rtw89_vif_link *rtwvif_link)
{
	u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev);
	struct ieee80211_bss_conf *bss_conf;
	struct ieee80211_hdr_3addr *hdr;
	struct rtw89_sa_query *sa_query;
	struct sk_buff *skb;

	skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*sa_query));
	if (!skb)
		return NULL;

	hdr = skb_put_zero(skb, sizeof(*hdr));
	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					 IEEE80211_STYPE_ACTION |
					 IEEE80211_FCTL_PROTECTED);

	rcu_read_lock();

	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);

	ether_addr_copy(hdr->addr1, bss_conf->bssid);
	ether_addr_copy(hdr->addr2, bss_conf->addr);
	ether_addr_copy(hdr->addr3, bss_conf->bssid);

	rcu_read_unlock();

	/* reserve zeroed room for the cipher's security header */
	skb_put_zero(skb, sec_hdr_len);

	sa_query = skb_put_zero(skb, sizeof(*sa_query));
	sa_query->category = WLAN_CATEGORY_SA_QUERY;
	sa_query->action = WLAN_ACTION_SA_QUERY_RESPONSE;

	return skb;
}
2887
rtw89_arp_response_get(struct rtw89_dev * rtwdev,struct rtw89_vif_link * rtwvif_link)2888 static struct sk_buff *rtw89_arp_response_get(struct rtw89_dev *rtwdev,
2889 struct rtw89_vif_link *rtwvif_link)
2890 {
2891 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
2892 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev);
2893 struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
2894 struct ieee80211_hdr_3addr *hdr;
2895 struct rtw89_arp_rsp *arp_skb;
2896 struct arphdr *arp_hdr;
2897 struct sk_buff *skb;
2898 __le16 fc;
2899
2900 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*arp_skb));
2901 if (!skb)
2902 return NULL;
2903
2904 hdr = skb_put_zero(skb, sizeof(*hdr));
2905
2906 if (rtw_wow->ptk_alg)
2907 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS |
2908 IEEE80211_FCTL_PROTECTED);
2909 else
2910 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS);
2911
2912 hdr->frame_control = fc;
2913 ether_addr_copy(hdr->addr1, rtwvif_link->bssid);
2914 ether_addr_copy(hdr->addr2, rtwvif_link->mac_addr);
2915 ether_addr_copy(hdr->addr3, rtwvif_link->bssid);
2916
2917 skb_put_zero(skb, sec_hdr_len);
2918
2919 arp_skb = skb_put_zero(skb, sizeof(*arp_skb));
2920 memcpy(arp_skb->llc_hdr, rfc1042_header, sizeof(rfc1042_header));
2921 arp_skb->llc_type = htons(ETH_P_ARP);
2922
2923 arp_hdr = &arp_skb->arp_hdr;
2924 arp_hdr->ar_hrd = htons(ARPHRD_ETHER);
2925 arp_hdr->ar_pro = htons(ETH_P_IP);
2926 arp_hdr->ar_hln = ETH_ALEN;
2927 arp_hdr->ar_pln = 4;
2928 arp_hdr->ar_op = htons(ARPOP_REPLY);
2929
2930 ether_addr_copy(arp_skb->sender_hw, rtwvif_link->mac_addr);
2931 arp_skb->sender_ip = rtwvif->ip_addr;
2932
2933 return skb;
2934 }
2935
/* Build one offload packet template of @type, register it with the FW
 * packet-offload engine and track it on the vif link's general packet
 * list. On success the allocated offload ID is returned through @id.
 *
 * Fix: the error path previously returned -ENOMEM unconditionally,
 * masking the real error from rtw89_fw_h2c_add_pkt_offload() and
 * misreporting an unknown @type; each failure now returns its own code.
 */
static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev,
					struct rtw89_vif_link *rtwvif_link,
					enum rtw89_fw_pkt_ofld_type type,
					u8 *id)
{
	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
	int link_id = ieee80211_vif_is_mld(vif) ? rtwvif_link->link_id : -1;
	struct rtw89_pktofld_info *info;
	struct sk_buff *skb;
	int ret;

	info = kzalloc_obj(*info);
	if (!info)
		return -ENOMEM;

	switch (type) {
	case RTW89_PKT_OFLD_TYPE_PS_POLL:
		skb = ieee80211_pspoll_get(rtwdev->hw, vif);
		break;
	case RTW89_PKT_OFLD_TYPE_PROBE_RSP:
		skb = ieee80211_proberesp_get(rtwdev->hw, vif);
		break;
	case RTW89_PKT_OFLD_TYPE_NULL_DATA:
		skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, false);
		break;
	case RTW89_PKT_OFLD_TYPE_QOS_NULL:
		skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, true);
		break;
	case RTW89_PKT_OFLD_TYPE_EAPOL_KEY:
		skb = rtw89_eapol_get(rtwdev, rtwvif_link);
		break;
	case RTW89_PKT_OFLD_TYPE_SA_QUERY:
		skb = rtw89_sa_query_get(rtwdev, rtwvif_link);
		break;
	case RTW89_PKT_OFLD_TYPE_ARP_RSP:
		skb = rtw89_arp_response_get(rtwdev, rtwvif_link);
		break;
	default:
		ret = -EINVAL;
		goto err;
	}

	if (!skb) {
		ret = -ENOMEM;
		goto err;
	}

	/* FW copies the template; the local skb is freed either way. */
	ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb);
	kfree_skb(skb);
	if (ret)
		goto err;

	list_add_tail(&info->list, &rtwvif_link->general_pkt_list);
	*id = info->id;
	return 0;

err:
	kfree(info);
	return ret;
}
2994
/* Release every offloaded general packet tracked on @rtwvif_link.
 *
 * When @notify_fw is false (e.g. FW already gone), only the local
 * offload-ID bitmap is cleared; otherwise the FW is told to drop each
 * template as well.
 */
void rtw89_fw_release_general_pkt_list_vif(struct rtw89_dev *rtwdev,
					   struct rtw89_vif_link *rtwvif_link,
					   bool notify_fw)
{
	struct list_head *pkt_list = &rtwvif_link->general_pkt_list;
	struct rtw89_pktofld_info *pkt, *next;

	list_for_each_entry_safe(pkt, next, pkt_list, list) {
		if (!notify_fw)
			rtw89_core_release_bit_map(rtwdev->pkt_offload, pkt->id);
		else
			rtw89_fw_h2c_del_pkt_offload(rtwdev, pkt->id);

		list_del(&pkt->list);
		kfree(pkt);
	}
}
3011
/* Release the general packet lists of every link of every vif. */
void rtw89_fw_release_general_pkt_list(struct rtw89_dev *rtwdev, bool notify_fw)
{
	struct rtw89_vif_link *vif_link;
	struct rtw89_vif *vif;
	unsigned int lid;

	rtw89_for_each_rtwvif(rtwdev, vif) {
		rtw89_vif_for_each_link(vif, vif_link, lid) {
			rtw89_fw_release_general_pkt_list_vif(rtwdev, vif_link,
							      notify_fw);
		}
	}
}
3023
3024 #define H2C_GENERAL_PKT_LEN 6
3025 #define H2C_GENERAL_PKT_ID_UND 0xff
/* Offload PS-poll/NULL/QoS-NULL frame templates for @macid and send the
 * GENERAL_PKT H2C command binding their offload IDs to that station.
 * Template registration failures are tolerated: the corresponding ID
 * simply stays H2C_GENERAL_PKT_ID_UND (undefined) in the command.
 * Returns 0 on success or a negative error code.
 */
int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev,
			     struct rtw89_vif_link *rtwvif_link, u8 macid)
{
	u8 pkt_id_ps_poll = H2C_GENERAL_PKT_ID_UND;
	u8 pkt_id_null = H2C_GENERAL_PKT_ID_UND;
	u8 pkt_id_qos_null = H2C_GENERAL_PKT_ID_UND;
	struct sk_buff *skb;
	int ret;

	/* Best effort: on failure the ID keeps its "undefined" value. */
	rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
				     RTW89_PKT_OFLD_TYPE_PS_POLL, &pkt_id_ps_poll);
	rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
				     RTW89_PKT_OFLD_TYPE_NULL_DATA, &pkt_id_null);
	rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
				     RTW89_PKT_OFLD_TYPE_QOS_NULL, &pkt_id_qos_null);

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_GENERAL_PKT_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_GENERAL_PKT_LEN);
	SET_GENERAL_PKT_MACID(skb->data, macid);
	/* Probe-rsp and CTS2SELF templates are not offloaded here. */
	SET_GENERAL_PKT_PROBRSP_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
	SET_GENERAL_PKT_PSPOLL_ID(skb->data, pkt_id_ps_poll);
	SET_GENERAL_PKT_NULL_ID(skb->data, pkt_id_null);
	SET_GENERAL_PKT_QOS_NULL_ID(skb->data, pkt_id_qos_null);
	SET_GENERAL_PKT_CTS2SELF_ID(skb->data, H2C_GENERAL_PKT_ID_UND);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_FW_INFO,
			      H2C_FUNC_MAC_GENERAL_PKT, 0, 1,
			      H2C_GENERAL_PKT_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
3073
3074 #define H2C_LPS_PARM_LEN 8
/* Send the LPS (leisure power save) parameter H2C command.
 *
 * A FW done-ack is requested only when leaving PS mode and the FW does
 * not signal LPS done-ack through a C2H register instead.
 * Returns 0 on success or a negative error code.
 */
int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
			  struct rtw89_lps_parm *lps_param)
{
	struct sk_buff *skb;
	bool done_ack;
	int ret;

	if (RTW89_CHK_FW_FEATURE(LPS_DACK_BY_C2H_REG, &rtwdev->fw))
		done_ack = false;
	else
		done_ack = !lps_param->psmode;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LPS_PARM_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_LPS_PARM_LEN);

	SET_LPS_PARM_MACID(skb->data, lps_param->macid);
	SET_LPS_PARM_PSMODE(skb->data, lps_param->psmode);
	SET_LPS_PARM_LASTRPWM(skb->data, lps_param->lastrpwm);
	/* Fixed policy: RLBM/smart-PS/awake-interval set to 1,
	 * all AC UAPSD disabled.
	 */
	SET_LPS_PARM_RLBM(skb->data, 1);
	SET_LPS_PARM_SMARTPS(skb->data, 1);
	SET_LPS_PARM_AWAKEINTERVAL(skb->data, 1);
	SET_LPS_PARM_VOUAPSD(skb->data, 0);
	SET_LPS_PARM_VIUAPSD(skb->data, 0);
	SET_LPS_PARM_BEUAPSD(skb->data, 0);
	SET_LPS_PARM_BKUAPSD(skb->data, 0);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_PS,
			      H2C_FUNC_MAC_LPS_PARM, 0, done_ack,
			      H2C_LPS_PARM_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
3123
/* Send per-PHY channel info needed by FW for LPS on BE-generation chips.
 *
 * Collects channel/bandwidth for each link of @rtwvif (indexed by PHY),
 * sends the H2C, then polls a BB status bit until the FW acknowledges.
 * No-op (returns 0) on non-BE chips. Returns 0 on success, -ENOENT when
 * no link provided a channel, or a negative error code.
 */
int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_chan *chan;
	struct rtw89_vif_link *rtwvif_link;
	struct rtw89_h2c_lps_ch_info *h2c;
	u32 len = sizeof(*h2c);
	unsigned int link_id;
	struct sk_buff *skb;
	bool no_chan = true;
	u8 phy_idx;
	u32 done;
	int ret;

	if (chip->chip_gen != RTW89_CHIP_BE)
		return 0;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c lps_ch_info\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_lps_ch_info *)skb->data;

	rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
		phy_idx = rtwvif_link->phy_idx;
		/* Skip links mapped beyond the per-PHY info slots. */
		if (phy_idx >= ARRAY_SIZE(h2c->info))
			continue;

		chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
		no_chan = false;

		h2c->info[phy_idx].central_ch = chan->channel;
		h2c->info[phy_idx].pri_ch = chan->primary_channel;
		h2c->info[phy_idx].band = chan->band_type;
		h2c->info[phy_idx].bw = chan->band_width;
	}

	if (no_chan) {
		rtw89_err(rtwdev, "no chan for h2c lps_ch_info\n");
		ret = -ENOENT;
		goto fail;
	}

	h2c->mlo_dbcc_mode_lps = cpu_to_le32(rtwdev->mlo_dbcc_mode);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM,
			      H2C_FUNC_FW_LPS_CH_INFO, 0, 0, len);

	/* Clear the status bit first, then wait for FW to set it. */
	rtw89_phy_write32_mask(rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT, 0);
	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	/* Poll every 50us, up to 5ms; timeout is non-fatal (warn only). */
	ret = read_poll_timeout(rtw89_phy_read32_mask, done, done, 50, 5000,
				true, rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT);
	if (ret)
		rtw89_warn(rtwdev, "h2c_lps_ch_info done polling timeout\n");

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
3193
/* Send MLO common info (per-PHY channel + BW20 RX gain tables) the FW
 * needs for multi-link LPS on BE-generation chips, then poll for the
 * FW done bit. No-op (returns 0) on non-BE chips.
 * Returns 0 on success or a negative error code.
 */
int rtw89_fw_h2c_lps_ml_cmn_info(struct rtw89_dev *rtwdev,
				 struct rtw89_vif *rtwvif)
{
	const struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be;
	struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.cur_pkt_stat;
	/* Beacon duplicate offset per bandwidth index - TODO confirm
	 * units/mapping against FW interface spec.
	 */
	static const u8 bcn_bw_ofst[] = {0, 0, 0, 3, 6, 9, 0, 12};
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_efuse *efuse = &rtwdev->efuse;
	struct rtw89_h2c_lps_ml_cmn_info *h2c;
	struct rtw89_vif_link *rtwvif_link;
	const struct rtw89_chan *chan;
	u8 bw_idx = RTW89_BB_BW_20_40;
	u32 len = sizeof(*h2c);
	unsigned int link_id;
	struct sk_buff *skb;
	u8 beacon_bw_ofst;
	u8 gain_band;
	u32 done;
	u8 path;
	int ret;
	int i;

	if (chip->chip_gen != RTW89_CHIP_BE)
		return 0;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c lps_ml_cmn_info\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_lps_ml_cmn_info *)skb->data;

	h2c->fmt_id = 0x3;

	h2c->mlo_dbcc_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);
	h2c->rfe_type = efuse->rfe_type;

	rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
		/* PHY 1 maps to RF path B, everything else to path A. */
		path = rtwvif_link->phy_idx == RTW89_PHY_1 ? RF_PATH_B : RF_PATH_A;
		chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
		gain_band = rtw89_subband_to_gain_band_be(chan->subband_type);

		h2c->central_ch[rtwvif_link->phy_idx] = chan->channel;
		h2c->pri_ch[rtwvif_link->phy_idx] = chan->primary_channel;
		h2c->band[rtwvif_link->phy_idx] = chan->band_type;
		h2c->bw[rtwvif_link->phy_idx] = chan->band_width;
		/* Type 1 = CCK-rate beacons, type 2 = OFDM or faster. */
		if (pkt_stat->beacon_rate < RTW89_HW_RATE_OFDM6)
			h2c->bcn_rate_type[rtwvif_link->phy_idx] = 0x1;
		else
			h2c->bcn_rate_type[rtwvif_link->phy_idx] = 0x2;

		/* Fill BW20 RX gain table for beacon mode */
		for (i = 0; i < TIA_GAIN_NUM; i++) {
			h2c->tia_gain[rtwvif_link->phy_idx][i] =
				cpu_to_le16(gain->tia_gain[gain_band][bw_idx][path][i]);
		}

		if (rtwvif_link->bcn_bw_idx < ARRAY_SIZE(bcn_bw_ofst)) {
			beacon_bw_ofst = bcn_bw_ofst[rtwvif_link->bcn_bw_idx];
			h2c->dup_bcn_ofst[rtwvif_link->phy_idx] = beacon_bw_ofst;
		}

		memcpy(h2c->lna_gain[rtwvif_link->phy_idx],
		       gain->lna_gain[gain_band][bw_idx][path],
		       LNA_GAIN_NUM);
		memcpy(h2c->tia_lna_op1db[rtwvif_link->phy_idx],
		       gain->tia_lna_op1db[gain_band][bw_idx][path],
		       LNA_GAIN_NUM + 1);
		memcpy(h2c->lna_op1db[rtwvif_link->phy_idx],
		       gain->lna_op1db[gain_band][bw_idx][path],
		       LNA_GAIN_NUM);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM,
			      H2C_FUNC_FW_LPS_ML_CMN_INFO, 0, 0, len);

	/* Clear the status bit, send, then wait for FW to set it. */
	rtw89_phy_write32_mask(rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT, 0);
	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	/* Poll every 50us, up to 5ms; timeout is non-fatal (warn only). */
	ret = read_poll_timeout(rtw89_phy_read32_mask, done, done, 50, 5000,
				true, rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT);
	if (ret)
		rtw89_warn(rtwdev, "h2c_lps_ml_cmn_info done polling timeout\n");

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
3290
/* Fill one link's RX gain tables in the LPS common-info H2C payload.
 *
 * Iterates every gain table slot; odd slots (BIT(0)) are RF path B,
 * even slots path A. Slots with BIT(1) set use the data channel @chan
 * (BW index chosen from its real bandwidth), the others use a synthetic
 * 20 MHz channel centered on the primary channel, i.e. the beacon
 * channel. For each slot, efuse-calculated offsets plus the TIA/LNA
 * gain/op1db tables and per-BW RPL bias tables are copied out.
 */
void rtw89_bb_lps_cmn_info_rx_gain_fill(struct rtw89_dev *rtwdev,
					struct rtw89_bb_link_info_rx_gain *h2c_gain,
					const struct rtw89_chan *chan, u8 phy_idx)
{
	const struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be;
	enum rtw89_bb_link_rx_gain_table_type tab_idx;
	struct rtw89_chan chan_bcn;
	u8 bw = chan->band_width;
	u8 gain_band;
	u8 bw_idx;
	u8 path;
	int i;

	/* 20 MHz channel on the primary channel, used for beacon slots. */
	rtw89_chan_create(&chan_bcn, chan->primary_channel, chan->primary_channel,
			  chan->band_type, RTW89_CHANNEL_WIDTH_20);

	for (tab_idx = RTW89_BB_PS_LINK_RX_GAIN_TAB_BCN_PATH_A;
	     tab_idx < RTW89_BB_PS_LINK_RX_GAIN_TAB_MAX; tab_idx++) {
		struct rtw89_phy_calc_efuse_gain calc = {};

		/* BIT(0): RF path; BIT(1): data channel vs beacon channel. */
		path = (tab_idx & BIT(0)) ? (RF_PATH_B) : (RF_PATH_A);
		if (tab_idx & BIT(1)) {
			rtw89_chip_calc_rx_gain_normal(rtwdev, chan, path, phy_idx,
						       &calc);
			gain_band = rtw89_subband_to_gain_band_be(chan->subband_type);
			if (bw > RTW89_CHANNEL_WIDTH_40)
				bw_idx = RTW89_BB_BW_80_160_320;
			else
				bw_idx = RTW89_BB_BW_20_40;
		} else {
			rtw89_chip_calc_rx_gain_normal(rtwdev, &chan_bcn, path, phy_idx,
						       &calc);
			gain_band = rtw89_subband_to_gain_band_be(chan_bcn.subband_type);
			bw_idx = RTW89_BB_BW_20_40;
		}

		/* efuse ofst and comp */
		h2c_gain->gain_ofst[tab_idx] = calc.rssi_ofst;
		h2c_gain->cck_gain_ofst[tab_idx] = calc.cck_rpl_ofst;
		/* Same bias for both comp slots - TODO confirm whether the
		 * two entries are meant to ever differ.
		 */
		h2c_gain->cck_rpl_bias_comp[tab_idx][0] = calc.cck_mean_gain_bias;
		h2c_gain->cck_rpl_bias_comp[tab_idx][1] = calc.cck_mean_gain_bias;

		for (i = 0; i < TIA_GAIN_NUM; i++) {
			h2c_gain->gain_err_tia[tab_idx][i] =
				cpu_to_le16(gain->tia_gain[gain_band][bw_idx][path][i]);
		}
		memcpy(h2c_gain->gain_err_lna[tab_idx],
		       gain->lna_gain[gain_band][bw_idx][path],
		       LNA_GAIN_NUM);
		memcpy(h2c_gain->op1db_lna[tab_idx],
		       gain->lna_op1db[gain_band][bw_idx][path],
		       LNA_GAIN_NUM);
		memcpy(h2c_gain->op1db_tia[tab_idx],
		       gain->tia_lna_op1db[gain_band][bw_idx][path],
		       LNA_GAIN_NUM + 1);

		memcpy(h2c_gain->rpl_bias_comp_bw[tab_idx]._20M,
		       gain->rpl_ofst_20[gain_band][path],
		       RTW89_BW20_SC_20M);
		memcpy(h2c_gain->rpl_bias_comp_bw[tab_idx]._40M,
		       gain->rpl_ofst_40[gain_band][path],
		       RTW89_BW20_SC_40M);
		memcpy(h2c_gain->rpl_bias_comp_bw[tab_idx]._80M,
		       gain->rpl_ofst_80[gain_band][path],
		       RTW89_BW20_SC_80M);
		memcpy(h2c_gain->rpl_bias_comp_bw[tab_idx]._160M,
		       gain->rpl_ofst_160[gain_band][path],
		       RTW89_BW20_SC_160M);
	}
}
3361
/* v1 format of the MLO common-info H2C for BE-generation chips.
 *
 * Compared to the original format, this carries per-link RX gain via
 * rtw89_bb_lps_cmn_info_rx_gain_fill() and reports the minimum RSSI
 * over all links in rssi_main. No-op (returns 0) on non-BE chips.
 * Returns 0 on success or a negative error code.
 */
int rtw89_fw_h2c_lps_ml_cmn_info_v1(struct rtw89_dev *rtwdev,
				    struct rtw89_vif *rtwvif)
{
	/* Beacon duplicate offset per bandwidth index - TODO confirm
	 * units/mapping against FW interface spec.
	 */
	static const u8 bcn_bw_ofst[] = {0, 0, 0, 3, 6, 9, 0, 12};
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_efuse *efuse = &rtwdev->efuse;
	struct rtw89_h2c_lps_ml_cmn_info_v1 *h2c;
	struct rtw89_vif_link *rtwvif_link;
	const struct rtw89_chan *chan;
	struct rtw89_bb_ctx *bb;
	u32 len = sizeof(*h2c);
	unsigned int link_id;
	struct sk_buff *skb;
	u8 beacon_bw_ofst;
	u32 done;
	int ret;

	if (chip->chip_gen != RTW89_CHIP_BE)
		return 0;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c lps_ml_cmn_info_v1\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_lps_ml_cmn_info_v1 *)skb->data;

	h2c->fmt_id = 0x20;

	h2c->mlo_dbcc_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);
	h2c->rfe_type = efuse->rfe_type;
	/* Start at max so the per-link min() below takes effect. */
	h2c->rssi_main = U8_MAX;

	/* 0xfe marks unused link slots. */
	memset(h2c->link_id, 0xfe, RTW89_BB_PS_LINK_BUF_MAX);

	rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
		u8 phy_idx = rtwvif_link->phy_idx;

		bb = rtw89_get_bb_ctx(rtwdev, phy_idx);
		chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);

		h2c->link_id[phy_idx] = phy_idx;
		h2c->central_ch[phy_idx] = chan->channel;
		h2c->pri_ch[phy_idx] = chan->primary_channel;
		h2c->band[phy_idx] = chan->band_type;
		h2c->bw[phy_idx] = chan->band_width;

		if (rtwvif_link->bcn_bw_idx < ARRAY_SIZE(bcn_bw_ofst)) {
			beacon_bw_ofst = bcn_bw_ofst[rtwvif_link->bcn_bw_idx];
			h2c->dup_bcn_ofst[phy_idx] = beacon_bw_ofst;
		}

		/* Track the minimum RSSI across all active links. */
		if (h2c->rssi_main > bb->ch_info.rssi_min)
			h2c->rssi_main = bb->ch_info.rssi_min;

		rtw89_bb_lps_cmn_info_rx_gain_fill(rtwdev,
						   &h2c->rx_gain[phy_idx],
						   chan, phy_idx);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM,
			      H2C_FUNC_FW_LPS_ML_CMN_INFO, 0, 0, len);

	/* Clear the status bit, send, then wait for FW to set it. */
	rtw89_phy_write32_mask(rtwdev, R_CHK_LPS_STAT_BE4, B_CHK_LPS_STAT, 0);
	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	/* Poll every 50us, up to 5ms; timeout is non-fatal (warn only). */
	ret = read_poll_timeout(rtw89_phy_read32_mask, done, done, 50, 5000,
				true, rtwdev, R_CHK_LPS_STAT_BE4, B_CHK_LPS_STAT);
	if (ret)
		rtw89_warn(rtwdev, "h2c_lps_ml_cmn_info done polling timeout\n");

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
3445
3446 #define H2C_P2P_ACT_LEN 20
/* Send a P2P action H2C command (NoA setup/teardown, CT window/oppPS).
 *
 * @desc: optional NoA descriptor; when NULL only the action fields are
 *        sent and the NoA timing fields are left zero.
 * @act: P2P action opcode for the FW.
 * @noa_id: NoA descriptor slot the action applies to.
 * @ctwindow_oppps: combined CT-window / opportunistic-PS byte.
 * Returns 0 on success or a negative error code.
 */
int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev,
			 struct rtw89_vif_link *rtwvif_link,
			 struct ieee80211_p2p_noa_desc *desc,
			 u8 act, u8 noa_id, u8 ctwindow_oppps)
{
	bool p2p_type_gc = rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT;
	struct sk_buff *skb;
	u8 *cmd;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_P2P_ACT_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_P2P_ACT_LEN);
	cmd = skb->data;

	RTW89_SET_FWCMD_P2P_MACID(cmd, rtwvif_link->mac_id);
	RTW89_SET_FWCMD_P2P_P2PID(cmd, 0);
	RTW89_SET_FWCMD_P2P_NOAID(cmd, noa_id);
	RTW89_SET_FWCMD_P2P_ACT(cmd, act);
	/* type distinguishes P2P GC (client) from GO. */
	RTW89_SET_FWCMD_P2P_TYPE(cmd, p2p_type_gc);
	RTW89_SET_FWCMD_P2P_ALL_SLEP(cmd, 0);
	if (desc) {
		RTW89_SET_FWCMD_NOA_START_TIME(cmd, desc->start_time);
		RTW89_SET_FWCMD_NOA_INTERVAL(cmd, desc->interval);
		RTW89_SET_FWCMD_NOA_DURATION(cmd, desc->duration);
		RTW89_SET_FWCMD_NOA_COUNT(cmd, desc->count);
		RTW89_SET_FWCMD_NOA_CTWINDOW(cmd, ctwindow_oppps);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_PS,
			      H2C_FUNC_P2P_ACT, 0, 0,
			      H2C_P2P_ACT_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
3496
__rtw89_fw_h2c_set_tx_path(struct rtw89_dev * rtwdev,struct sk_buff * skb)3497 static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev,
3498 struct sk_buff *skb)
3499 {
3500 const struct rtw89_chip_info *chip = rtwdev->chip;
3501 struct rtw89_hal *hal = &rtwdev->hal;
3502 u8 ntx_path;
3503 u8 map_b;
3504
3505 if (chip->rf_path_num == 1) {
3506 ntx_path = RF_A;
3507 map_b = 0;
3508 } else {
3509 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_AB;
3510 map_b = ntx_path == RF_AB ? 1 : 0;
3511 }
3512
3513 SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path);
3514 SET_CMC_TBL_PATH_MAP_A(skb->data, 0);
3515 SET_CMC_TBL_PATH_MAP_B(skb->data, map_b);
3516 SET_CMC_TBL_PATH_MAP_C(skb->data, 0);
3517 SET_CMC_TBL_PATH_MAP_D(skb->data, 0);
3518 }
3519
3520 #define H2C_CMC_TBL_LEN 68
/* Send the default CMAC table for a station (or the vif itself when
 * @rtwsta_link is NULL) on AX-generation chips using the fixed-layout
 * CCTL H2C. Returns 0 on success or a negative error code.
 */
int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
				  struct rtw89_vif_link *rtwvif_link,
				  struct rtw89_sta_link *rtwsta_link)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	/* Station's MAC ID when present, else the vif's own MAC ID. */
	u8 macid = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_CMC_TBL_LEN);
	SET_CTRL_INFO_MACID(skb->data, macid);
	SET_CTRL_INFO_OPERATION(skb->data, 1);
	/* These fields only exist in the original (non-V1) CCTL layout. */
	if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) {
		SET_CMC_TBL_TXPWR_MODE(skb->data, 0);
		__rtw89_fw_h2c_set_tx_path(rtwdev, skb);
		SET_CMC_TBL_ANTSEL_A(skb->data, 0);
		SET_CMC_TBL_ANTSEL_B(skb->data, 0);
		SET_CMC_TBL_ANTSEL_C(skb->data, 0);
		SET_CMC_TBL_ANTSEL_D(skb->data, 0);
	}
	SET_CMC_TBL_MGQ_RPT_EN(skb->data, rtwdev->hci.tx_rpt_enabled);
	SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0);
	SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0);
	if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE)
		SET_CMC_TBL_DATA_DCM(skb->data, 0);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      chip->h2c_cctl_func_id, 0, 1,
			      H2C_CMC_TBL_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl);
3570
/* Send the default CMAC table (G7 layout) for a station, or the vif
 * itself when @rtwsta_link is NULL. Each dword wN is paired with a
 * mask mN telling FW which bits of that word to update.
 * Returns 0 on success or a negative error code.
 */
int rtw89_fw_h2c_default_cmac_tbl_g7(struct rtw89_dev *rtwdev,
				     struct rtw89_vif_link *rtwvif_link,
				     struct rtw89_sta_link *rtwsta_link)
{
	/* Station's MAC ID when present, else the vif's own MAC ID. */
	u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
	struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data;

	h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) |
		  le32_encode_bits(1, CCTLINFO_G7_C0_OP);

	h2c->w0 = le32_encode_bits(4, CCTLINFO_G7_W0_DATARATE) |
		  le32_encode_bits(rtwdev->hci.tx_rpt_enabled, CCTLINFO_G7_W0_MGQ_RPT_EN);
	h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_ALL);

	h2c->w1 = le32_encode_bits(4, CCTLINFO_G7_W1_DATA_RTY_LOWEST_RATE) |
		  le32_encode_bits(0xa, CCTLINFO_G7_W1_RTSRATE) |
		  le32_encode_bits(4, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE);
	h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_ALL);

	/* w2/w3 are reset to all-zero defaults (mask set, value 0). */
	h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_ALL);

	h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_ALL);

	/* Enable all sub-channels by default. */
	h2c->w4 = le32_encode_bits(0xFFFF, CCTLINFO_G7_W4_ACT_SUBCH_CBW);
	h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_ALL);

	h2c->w5 = le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) |
		  le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) |
		  le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) |
		  le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) |
		  le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4);
	h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_ALL);

	h2c->w6 = le32_encode_bits(0xb, CCTLINFO_G7_W6_RESP_REF_RATE);
	h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ALL);

	h2c->w7 = le32_encode_bits(1, CCTLINFO_G7_W7_NC) |
		  le32_encode_bits(1, CCTLINFO_G7_W7_NR) |
		  le32_encode_bits(1, CCTLINFO_G7_W7_CB) |
		  le32_encode_bits(0x1, CCTLINFO_G7_W7_CSI_PARA_EN) |
		  le32_encode_bits(0xb, CCTLINFO_G7_W7_CSI_FIX_RATE);
	h2c->m7 = cpu_to_le32(CCTLINFO_G7_W7_ALL);

	h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_ALL);

	/* Reset per-AC current rates to zero. */
	h2c->w14 = le32_encode_bits(0, CCTLINFO_G7_W14_VO_CURR_RATE) |
		   le32_encode_bits(0, CCTLINFO_G7_W14_VI_CURR_RATE) |
		   le32_encode_bits(0, CCTLINFO_G7_W14_BE_CURR_RATE_L);
	h2c->m14 = cpu_to_le32(CCTLINFO_G7_W14_ALL);

	h2c->w15 = le32_encode_bits(0, CCTLINFO_G7_W15_BE_CURR_RATE_H) |
		   le32_encode_bits(0, CCTLINFO_G7_W15_BK_CURR_RATE) |
		   le32_encode_bits(0, CCTLINFO_G7_W15_MGNT_CURR_RATE);
	h2c->m15 = cpu_to_le32(CCTLINFO_G7_W15_ALL);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl_g7);
3655
rtw89_fw_h2c_default_cmac_tbl_be(struct rtw89_dev * rtwdev,struct rtw89_vif_link * rtwvif_link,struct rtw89_sta_link * rtwsta_link)3656 int rtw89_fw_h2c_default_cmac_tbl_be(struct rtw89_dev *rtwdev,
3657 struct rtw89_vif_link *rtwvif_link,
3658 struct rtw89_sta_link *rtwsta_link)
3659 {
3660 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
3661 bool preld = rtw89_mac_chk_preload_allow(rtwdev);
3662 struct rtw89_h2c_cctlinfo_ud_be *h2c;
3663 u32 len = sizeof(*h2c);
3664 struct sk_buff *skb;
3665 int ret;
3666
3667 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
3668 if (!skb) {
3669 rtw89_err(rtwdev, "failed to alloc skb for default cmac be\n");
3670 return -ENOMEM;
3671 }
3672 skb_put(skb, len);
3673 h2c = (struct rtw89_h2c_cctlinfo_ud_be *)skb->data;
3674
3675 h2c->c0 = le32_encode_bits(mac_id, BE_CCTL_INFO_C0_V1_MACID) |
3676 le32_encode_bits(1, BE_CCTL_INFO_C0_V1_OP);
3677
3678 h2c->w0 = le32_encode_bits(4, BE_CCTL_INFO_W0_DATARATE);
3679 h2c->m0 = cpu_to_le32(BE_CCTL_INFO_W0_ALL);
3680
3681 h2c->w1 = le32_encode_bits(4, BE_CCTL_INFO_W1_DATA_RTY_LOWEST_RATE) |
3682 le32_encode_bits(0xa, BE_CCTL_INFO_W1_RTSRATE) |
3683 le32_encode_bits(4, BE_CCTL_INFO_W1_RTS_RTY_LOWEST_RATE);
3684 h2c->m1 = cpu_to_le32(BE_CCTL_INFO_W1_ALL);
3685
3686 h2c->w1 = le32_encode_bits(preld, BE_CCTL_INFO_W2_PRELOAD_ENABLE);
3687 h2c->m2 = cpu_to_le32(BE_CCTL_INFO_W2_ALL);
3688
3689 h2c->m3 = cpu_to_le32(BE_CCTL_INFO_W3_ALL);
3690
3691 h2c->w4 = le32_encode_bits(0xFFFF, BE_CCTL_INFO_W4_ACT_SUBCH_CBW);
3692 h2c->m4 = cpu_to_le32(BE_CCTL_INFO_W4_ALL);
3693
3694 h2c->w5 = le32_encode_bits(2, BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING0_V1) |
3695 le32_encode_bits(2, BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING1_V1) |
3696 le32_encode_bits(2, BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING2_V1) |
3697 le32_encode_bits(2, BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING3_V1) |
3698 le32_encode_bits(2, BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING4_V1);
3699 h2c->m5 = cpu_to_le32(BE_CCTL_INFO_W5_ALL);
3700
3701 h2c->w6 = le32_encode_bits(0xb, BE_CCTL_INFO_W6_RESP_REF_RATE);
3702 h2c->m6 = cpu_to_le32(BE_CCTL_INFO_W6_ALL);
3703
3704 h2c->w7 = le32_encode_bits(1, BE_CCTL_INFO_W7_NC) |
3705 le32_encode_bits(1, BE_CCTL_INFO_W7_NR) |
3706 le32_encode_bits(1, BE_CCTL_INFO_W7_CB) |
3707 le32_encode_bits(0x1, BE_CCTL_INFO_W7_CSI_PARA_EN) |
3708 le32_encode_bits(0xb, BE_CCTL_INFO_W7_CSI_FIX_RATE);
3709 h2c->m7 = cpu_to_le32(BE_CCTL_INFO_W7_ALL);
3710
3711 h2c->m8 = cpu_to_le32(BE_CCTL_INFO_W8_ALL);
3712
3713 h2c->w14 = le32_encode_bits(0, BE_CCTL_INFO_W14_VO_CURR_RATE) |
3714 le32_encode_bits(0, BE_CCTL_INFO_W14_VI_CURR_RATE) |
3715 le32_encode_bits(0, BE_CCTL_INFO_W14_BE_CURR_RATE_L);
3716 h2c->m14 = cpu_to_le32(BE_CCTL_INFO_W14_ALL);
3717
3718 h2c->w15 = le32_encode_bits(0, BE_CCTL_INFO_W15_BE_CURR_RATE_H) |
3719 le32_encode_bits(0, BE_CCTL_INFO_W15_BK_CURR_RATE) |
3720 le32_encode_bits(0, BE_CCTL_INFO_W15_MGNT_CURR_RATE);
3721 h2c->m15 = cpu_to_le32(BE_CCTL_INFO_W15_ALL);
3722
3723 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3724 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
3725 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1,
3726 len);
3727
3728 ret = rtw89_h2c_tx(rtwdev, skb, false);
3729 if (ret) {
3730 rtw89_err(rtwdev, "failed to send h2c\n");
3731 goto fail;
3732 }
3733
3734 return 0;
3735 fail:
3736 dev_kfree_skb_any(skb);
3737
3738 return ret;
3739 }
3740 EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl_be);
3741
/* Derive per-bandwidth nominal packet padding values for an HE peer.
 *
 * Fills @pads (one entry per RTW89_PPE_BW_NUM bandwidth) from the
 * peer's HE capabilities. If no PPE threshold is present, the nominal
 * padding from PHY cap 9 is used for every bandwidth. Otherwise the
 * PPE threshold field is parsed bit-by-bit: for each RU index present
 * in the bitmap, the PPET16/PPET8 values of the highest usable NSS
 * select the padding duration.
 */
static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev,
				     struct ieee80211_link_sta *link_sta,
				     u8 *pads)
{
	bool ppe_th;
	u8 ppe16, ppe8;
	/* Highest usable spatial-stream index (0-based). */
	u8 nss = min(link_sta->rx_nss, rtwdev->hal.tx_nss) - 1;
	u8 ppe_thres_hdr = link_sta->he_cap.ppe_thres[0];
	u8 ru_bitmap;
	u8 n, idx, sh;
	u16 ppe;
	int i;

	ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
			   link_sta->he_cap.he_cap_elem.phy_cap_info[6]);
	if (!ppe_th) {
		u8 pad;

		/* No PPE thresholds: one nominal padding for all BWs. */
		pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK,
				link_sta->he_cap.he_cap_elem.phy_cap_info[9]);

		for (i = 0; i < RTW89_PPE_BW_NUM; i++)
			pads[i] = pad;

		return;
	}

	ru_bitmap = FIELD_GET(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, ppe_thres_hdr);
	n = hweight8(ru_bitmap);
	/* Bit offset of our NSS's first PPET pair: 7 header bits, then
	 * (PPET16+PPET8) pairs for each present RU, per lower NSS.
	 */
	n = 7 + (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) * nss;

	for (i = 0; i < RTW89_PPE_BW_NUM; i++) {
		if (!(ru_bitmap & BIT(i))) {
			/* RU index absent from the bitmap. */
			pads[i] = 1;
			continue;
		}

		/* Byte index and bit shift of this RU's PPET pair. */
		idx = n >> 3;
		sh = n & 7;
		n += IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2;

		ppe = le16_to_cpu(*((__le16 *)&link_sta->he_cap.ppe_thres[idx]));
		ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
		sh += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
		ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;

		/* 7 == "none": pick the shortest PE duration allowed. */
		if (ppe16 != 7 && ppe8 == 7)
			pads[i] = RTW89_PE_DURATION_16;
		else if (ppe8 != 7)
			pads[i] = RTW89_PE_DURATION_8;
		else
			pads[i] = RTW89_PE_DURATION_0;
	}
}
3796
/* rtw89_fw_h2c_assoc_cmac_tbl() - push the association-time CMAC control
 * table to firmware for one station or vif link (AX-generation layout).
 *
 * Fills a fixed-size H2C_CMC_TBL_LEN buffer via the legacy SET_* bitfield
 * accessors: disables RTS/data rate fallback, selects the lowest RTS retry
 * rate for the operating band, applies per-bandwidth HE nominal packet
 * padding, and sets the UL/DL role from the interface type.
 *
 * Returns 0 on success or a negative error code.
 */
int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
				struct rtw89_vif_link *rtwvif_link,
				struct rtw89_sta_link *rtwsta_link)
{
	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
						       rtwvif_link->chanctx_idx);
	struct ieee80211_link_sta *link_sta;
	struct sk_buff *skb;
	u8 pads[RTW89_PPE_BW_NUM];
	/* rtwsta_link is NULL for vif-only entries; fall back to vif mac_id */
	u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
	u16 lowest_rate;
	int ret;

	/* no padding unless the peer advertises HE capabilities below */
	memset(pads, 0, sizeof(pads));

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}

	rcu_read_lock();

	/* link_sta is assigned (and dereferenced) only when rtwsta_link
	 * is non-NULL, and only under the RCU read lock
	 */
	if (rtwsta_link)
		link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);

	if (rtwsta_link && link_sta->he_cap.has_he)
		__get_sta_he_pkt_padding(rtwdev, link_sta, pads);

	/* P2P mandates OFDM; otherwise use the band's lowest basic rate */
	if (vif->p2p)
		lowest_rate = RTW89_HW_RATE_OFDM6;
	else if (chan->band_type == RTW89_BAND_2G)
		lowest_rate = RTW89_HW_RATE_CCK1;
	else
		lowest_rate = RTW89_HW_RATE_OFDM6;

	skb_put(skb, H2C_CMC_TBL_LEN);
	SET_CTRL_INFO_MACID(skb->data, mac_id);
	SET_CTRL_INFO_OPERATION(skb->data, 1);
	SET_CMC_TBL_DISRTSFB(skb->data, 1);
	SET_CMC_TBL_DISDATAFB(skb->data, 1);
	SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, lowest_rate);
	SET_CMC_TBL_RTS_TXCNT_LMT_SEL(skb->data, 0);
	SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 0);
	/* station interfaces are the UL side; AP/other act as DL */
	if (vif->type == NL80211_IFTYPE_STATION)
		SET_CMC_TBL_ULDL(skb->data, 1);
	else
		SET_CMC_TBL_ULDL(skb->data, 0);
	SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif_link->port);
	/* nominal packet padding field layout differs per cctl-info version */
	if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD_V1) {
		SET_CMC_TBL_NOMINAL_PKT_PADDING_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING40_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING80_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING160_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_160]);
	} else if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) {
		SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING160(skb->data, pads[RTW89_CHANNEL_WIDTH_160]);
	}
	if (rtwsta_link)
		SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data,
						  link_sta->he_cap.has_he);
	if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE)
		SET_CMC_TBL_DATA_DCM(skb->data, 0);

	rcu_read_unlock();

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      chip->h2c_cctl_func_id, 0, 1,
			      H2C_CMC_TBL_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl);
3885
/* __get_sta_eht_pkt_padding() - derive per-bandwidth nominal packet padding
 * (pads[], indexed by RTW89_CHANNEL_WIDTH_*) from the peer's EHT PHY caps.
 *
 * If the peer does not advertise PPE thresholds, the common nominal packet
 * padding from phy_cap_info[5] applies to every bandwidth. Otherwise the
 * variable-length PPE threshold bit stream is walked: for each RU index bit
 * set in ru_bitmap, the PPET16/PPET8 pair for the highest usable NSS is
 * extracted and mapped to a PE duration code.
 */
static void __get_sta_eht_pkt_padding(struct rtw89_dev *rtwdev,
				      struct ieee80211_link_sta *link_sta,
				      u8 *pads)
{
	/* nss is zero-based: limited by both peer RX and our TX capability */
	u8 nss = min(link_sta->rx_nss, rtwdev->hal.tx_nss) - 1;
	u16 ppe_thres_hdr;
	u8 ppe16, ppe8;
	u8 n, idx, sh;
	u8 ru_bitmap;
	bool ppe_th;
	u16 ppe;
	int i;

	ppe_th = !!u8_get_bits(link_sta->eht_cap.eht_cap_elem.phy_cap_info[5],
			       IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT);
	if (!ppe_th) {
		u8 pad;

		pad = u8_get_bits(link_sta->eht_cap.eht_cap_elem.phy_cap_info[5],
				  IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK);

		for (i = 0; i < RTW89_PPE_BW_NUM; i++)
			pads[i] = pad;

		return;
	}

	ppe_thres_hdr = get_unaligned_le16(link_sta->eht_cap.eht_ppe_thres);
	ru_bitmap = u16_get_bits(ppe_thres_hdr,
				 IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK);
	n = hweight8(ru_bitmap);
	/* n = bit offset of the first PPET pair for this nss:
	 * header plus the preceding NSS entries
	 */
	n = IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE +
	    (n * IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2) * nss;

	for (i = 0; i < RTW89_PPE_BW_NUM; i++) {
		if (!(ru_bitmap & BIT(i))) {
			/* RU index not advertised; use a safe default pad */
			pads[i] = 1;
			continue;
		}

		/* byte index and bit shift of this PPET16/PPET8 pair */
		idx = n >> 3;
		sh = n & 7;
		n += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2;

		/* unaligned-safe 16-bit window over the packed bit stream */
		ppe = get_unaligned_le16(link_sta->eht_cap.eht_ppe_thres + idx);
		ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
		sh += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE;
		ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;

		/* threshold value 7 ("none") means no constraint */
		if (ppe16 != 7 && ppe8 == 7)
			pads[i] = RTW89_PE_DURATION_16_20;
		else if (ppe8 != 7)
			pads[i] = RTW89_PE_DURATION_8;
		else
			pads[i] = RTW89_PE_DURATION_0;
	}
}
3943
/* rtw89_fw_h2c_assoc_cmac_tbl_g7() - push the association-time CMAC control
 * table for G7-generation chips, using the structured cctlinfo_ud_g7 layout
 * where every field write is paired with a mask word (mN) telling firmware
 * which bits of wN are valid.
 *
 * Returns 0 on success or a negative error code.
 */
int rtw89_fw_h2c_assoc_cmac_tbl_g7(struct rtw89_dev *rtwdev,
				   struct rtw89_vif_link *rtwvif_link,
				   struct rtw89_sta_link *rtwsta_link)
{
	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
	/* rtwsta_link is NULL for vif-only entries; fall back to vif mac_id */
	u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
	struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
	struct ieee80211_bss_conf *bss_conf;
	struct ieee80211_link_sta *link_sta;
	u8 pads[RTW89_PPE_BW_NUM];
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	u16 lowest_rate;
	int ret;

	/* no padding unless the peer advertises HE/EHT capabilities below */
	memset(pads, 0, sizeof(pads));

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n");
		return -ENOMEM;
	}

	rcu_read_lock();

	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);

	/* prefer EHT padding rules over HE when the peer supports both */
	if (rtwsta_link) {
		link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);

		if (link_sta->eht_cap.has_eht)
			__get_sta_eht_pkt_padding(rtwdev, link_sta, pads);
		else if (link_sta->he_cap.has_he)
			__get_sta_he_pkt_padding(rtwdev, link_sta, pads);
	}

	/* P2P mandates OFDM; otherwise use the band's lowest basic rate */
	if (vif->p2p)
		lowest_rate = RTW89_HW_RATE_OFDM6;
	else if (chan->band_type == RTW89_BAND_2G)
		lowest_rate = RTW89_HW_RATE_CCK1;
	else
		lowest_rate = RTW89_HW_RATE_OFDM6;

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data;

	h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) |
		  le32_encode_bits(1, CCTLINFO_G7_C0_OP);

	h2c->w0 = le32_encode_bits(1, CCTLINFO_G7_W0_DISRTSFB) |
		  le32_encode_bits(1, CCTLINFO_G7_W0_DISDATAFB);
	h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_DISRTSFB |
			      CCTLINFO_G7_W0_DISDATAFB);

	h2c->w1 = le32_encode_bits(lowest_rate, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE);
	h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE);

	h2c->w2 = le32_encode_bits(0, CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL);
	h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL);

	h2c->w3 = le32_encode_bits(0, CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL);
	h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL);

	h2c->w4 = le32_encode_bits(rtwvif_link->port, CCTLINFO_G7_W4_MULTI_PORT_ID);
	h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_MULTI_PORT_ID);

	if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) {
		h2c->w4 |= le32_encode_bits(0, CCTLINFO_G7_W4_DATA_DCM);
		h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_DATA_DCM);
	}

	if (bss_conf->eht_support) {
		u16 punct = bss_conf->chanreq.oper.punctured;

		/* firmware wants the active sub-channels; mac80211 reports
		 * the punctured (disabled) ones, hence the inversion
		 */
		h2c->w4 |= le32_encode_bits(~punct,
					    CCTLINFO_G7_W4_ACT_SUBCH_CBW);
		h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_ACT_SUBCH_CBW);
	}

	h2c->w5 = le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_20],
				   CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) |
		  le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_40],
				   CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) |
		  le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_80],
				   CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) |
		  le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_160],
				   CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) |
		  le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_320],
				   CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4);
	h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0 |
			      CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1 |
			      CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2 |
			      CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3 |
			      CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4);

	/* station interfaces are the UL side; AP/other act as DL */
	h2c->w6 = le32_encode_bits(vif->cfg.aid, CCTLINFO_G7_W6_AID12_PAID) |
		  le32_encode_bits(vif->type == NL80211_IFTYPE_STATION ? 1 : 0,
				   CCTLINFO_G7_W6_ULDL);
	h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_AID12_PAID | CCTLINFO_G7_W6_ULDL);

	if (rtwsta_link) {
		h2c->w8 = le32_encode_bits(link_sta->he_cap.has_he,
					   CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT);
		h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT);
	}

	rcu_read_unlock();

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl_g7);
4071
/* rtw89_fw_h2c_assoc_cmac_tbl_be() - push the association-time CMAC control
 * table for BE-generation chips, using the cctlinfo_ud_be layout (V1 field
 * positions). Each field word wN is paired with a mask word mN telling
 * firmware which bits are valid.
 *
 * NOTE(review): this BE variant reuses H2C_FUNC_MAC_CCTLINFO_UD_G7 as the
 * H2C function id — confirm that is the intended id for BE firmware.
 *
 * Returns 0 on success or a negative error code.
 */
int rtw89_fw_h2c_assoc_cmac_tbl_be(struct rtw89_dev *rtwdev,
				   struct rtw89_vif_link *rtwvif_link,
				   struct rtw89_sta_link *rtwsta_link)
{
	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
	/* rtwsta_link is NULL for vif-only entries; fall back to vif mac_id */
	u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
	struct rtw89_h2c_cctlinfo_ud_be *h2c;
	struct ieee80211_bss_conf *bss_conf;
	struct ieee80211_link_sta *link_sta;
	u8 pads[RTW89_PPE_BW_NUM];
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	u16 lowest_rate;
	int ret;

	/* no padding unless the peer advertises HE/EHT capabilities below */
	memset(pads, 0, sizeof(pads));

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for assoc cmac be\n");
		return -ENOMEM;
	}

	rcu_read_lock();

	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);

	/* prefer EHT padding rules over HE when the peer supports both */
	if (rtwsta_link) {
		link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);

		if (link_sta->eht_cap.has_eht)
			__get_sta_eht_pkt_padding(rtwdev, link_sta, pads);
		else if (link_sta->he_cap.has_he)
			__get_sta_he_pkt_padding(rtwdev, link_sta, pads);
	}

	/* P2P mandates OFDM; otherwise use the band's lowest basic rate */
	if (vif->p2p)
		lowest_rate = RTW89_HW_RATE_OFDM6;
	else if (chan->band_type == RTW89_BAND_2G)
		lowest_rate = RTW89_HW_RATE_CCK1;
	else
		lowest_rate = RTW89_HW_RATE_OFDM6;

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_cctlinfo_ud_be *)skb->data;

	h2c->c0 = le32_encode_bits(mac_id, BE_CCTL_INFO_C0_V1_MACID) |
		  le32_encode_bits(1, BE_CCTL_INFO_C0_V1_OP);

	h2c->w0 = le32_encode_bits(1, BE_CCTL_INFO_W0_DISRTSFB) |
		  le32_encode_bits(1, BE_CCTL_INFO_W0_DISDATAFB);
	h2c->m0 = cpu_to_le32(BE_CCTL_INFO_W0_DISRTSFB |
			      BE_CCTL_INFO_W0_DISDATAFB);

	h2c->w1 = le32_encode_bits(lowest_rate, BE_CCTL_INFO_W1_RTS_RTY_LOWEST_RATE);
	h2c->m1 = cpu_to_le32(BE_CCTL_INFO_W1_RTS_RTY_LOWEST_RATE);

	h2c->w2 = le32_encode_bits(0, BE_CCTL_INFO_W2_DATA_TXCNT_LMT_SEL);
	h2c->m2 = cpu_to_le32(BE_CCTL_INFO_W2_DATA_TXCNT_LMT_SEL);

	h2c->w3 = le32_encode_bits(0, BE_CCTL_INFO_W3_RTS_TXCNT_LMT_SEL);
	h2c->m3 = cpu_to_le32(BE_CCTL_INFO_W3_RTS_TXCNT_LMT_SEL);

	h2c->w4 = le32_encode_bits(rtwvif_link->port, BE_CCTL_INFO_W4_MULTI_PORT_ID);
	h2c->m4 = cpu_to_le32(BE_CCTL_INFO_W4_MULTI_PORT_ID);

	if (bss_conf->eht_support) {
		u16 punct = bss_conf->chanreq.oper.punctured;

		/* firmware wants the active sub-channels; mac80211 reports
		 * the punctured (disabled) ones, hence the inversion
		 */
		h2c->w4 |= le32_encode_bits(~punct,
					    BE_CCTL_INFO_W4_ACT_SUBCH_CBW);
		h2c->m4 |= cpu_to_le32(BE_CCTL_INFO_W4_ACT_SUBCH_CBW);
	}

	h2c->w5 = le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_20],
				   BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING0_V1) |
		  le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_40],
				   BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING1_V1) |
		  le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_80],
				   BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING2_V1) |
		  le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_160],
				   BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING3_V1) |
		  le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_320],
				   BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING4_V1);
	h2c->m5 = cpu_to_le32(BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING0_V1 |
			      BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING1_V1 |
			      BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING2_V1 |
			      BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING3_V1 |
			      BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING4_V1);

	if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) {
		h2c->w5 |= le32_encode_bits(0, BE_CCTL_INFO_W5_DATA_DCM_V1);
		h2c->m5 |= cpu_to_le32(BE_CCTL_INFO_W5_DATA_DCM_V1);
	}

	/* station interfaces are the UL side; AP/other act as DL */
	h2c->w6 = le32_encode_bits(vif->cfg.aid, BE_CCTL_INFO_W6_AID12_PAID) |
		  le32_encode_bits(vif->type == NL80211_IFTYPE_STATION ? 1 : 0,
				   BE_CCTL_INFO_W6_ULDL);
	h2c->m6 = cpu_to_le32(BE_CCTL_INFO_W6_AID12_PAID | BE_CCTL_INFO_W6_ULDL);

	if (rtwsta_link) {
		h2c->w8 = le32_encode_bits(link_sta->he_cap.has_he,
					   BE_CCTL_INFO_W8_BSR_QUEUE_SIZE_FORMAT_V1);
		h2c->m8 = cpu_to_le32(BE_CCTL_INFO_W8_BSR_QUEUE_SIZE_FORMAT_V1);
	}

	rcu_read_unlock();

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl_be);
4199
rtw89_fw_h2c_ampdu_cmac_tbl_g7(struct rtw89_dev * rtwdev,struct rtw89_vif_link * rtwvif_link,struct rtw89_sta_link * rtwsta_link)4200 int rtw89_fw_h2c_ampdu_cmac_tbl_g7(struct rtw89_dev *rtwdev,
4201 struct rtw89_vif_link *rtwvif_link,
4202 struct rtw89_sta_link *rtwsta_link)
4203 {
4204 struct rtw89_sta *rtwsta = rtwsta_link->rtwsta;
4205 struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
4206 u32 len = sizeof(*h2c);
4207 struct sk_buff *skb;
4208 u16 agg_num = 0;
4209 u8 ba_bmap = 0;
4210 int ret;
4211 u8 tid;
4212
4213 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4214 if (!skb) {
4215 rtw89_err(rtwdev, "failed to alloc skb for ampdu cmac g7\n");
4216 return -ENOMEM;
4217 }
4218 skb_put(skb, len);
4219 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data;
4220
4221 for_each_set_bit(tid, rtwsta->ampdu_map, IEEE80211_NUM_TIDS) {
4222 if (agg_num == 0)
4223 agg_num = rtwsta->ampdu_params[tid].agg_num;
4224 else
4225 agg_num = min(agg_num, rtwsta->ampdu_params[tid].agg_num);
4226 }
4227
4228 if (agg_num <= 0x20)
4229 ba_bmap = 3;
4230 else if (agg_num > 0x20 && agg_num <= 0x40)
4231 ba_bmap = 0;
4232 else if (agg_num > 0x40 && agg_num <= 0x80)
4233 ba_bmap = 1;
4234 else if (agg_num > 0x80 && agg_num <= 0x100)
4235 ba_bmap = 2;
4236 else if (agg_num > 0x100 && agg_num <= 0x200)
4237 ba_bmap = 4;
4238 else if (agg_num > 0x200 && agg_num <= 0x400)
4239 ba_bmap = 5;
4240
4241 h2c->c0 = le32_encode_bits(rtwsta_link->mac_id, CCTLINFO_G7_C0_MACID) |
4242 le32_encode_bits(1, CCTLINFO_G7_C0_OP);
4243
4244 h2c->w3 = le32_encode_bits(ba_bmap, CCTLINFO_G7_W3_BA_BMAP);
4245 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_BA_BMAP);
4246
4247 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4248 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
4249 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 0,
4250 len);
4251
4252 ret = rtw89_h2c_tx(rtwdev, skb, false);
4253 if (ret) {
4254 rtw89_err(rtwdev, "failed to send h2c\n");
4255 goto fail;
4256 }
4257
4258 return 0;
4259 fail:
4260 dev_kfree_skb_any(skb);
4261
4262 return ret;
4263 }
4264 EXPORT_SYMBOL(rtw89_fw_h2c_ampdu_cmac_tbl_g7);
4265
rtw89_fw_h2c_ampdu_cmac_tbl_be(struct rtw89_dev * rtwdev,struct rtw89_vif_link * rtwvif_link,struct rtw89_sta_link * rtwsta_link)4266 int rtw89_fw_h2c_ampdu_cmac_tbl_be(struct rtw89_dev *rtwdev,
4267 struct rtw89_vif_link *rtwvif_link,
4268 struct rtw89_sta_link *rtwsta_link)
4269 {
4270 struct rtw89_sta *rtwsta = rtwsta_link->rtwsta;
4271 struct rtw89_h2c_cctlinfo_ud_be *h2c;
4272 u32 len = sizeof(*h2c);
4273 struct sk_buff *skb;
4274 u16 agg_num = 0;
4275 u8 ba_bmap = 0;
4276 int ret;
4277 u8 tid;
4278
4279 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4280 if (!skb) {
4281 rtw89_err(rtwdev, "failed to alloc skb for ampdu cmac be\n");
4282 return -ENOMEM;
4283 }
4284 skb_put(skb, len);
4285 h2c = (struct rtw89_h2c_cctlinfo_ud_be *)skb->data;
4286
4287 for_each_set_bit(tid, rtwsta->ampdu_map, IEEE80211_NUM_TIDS) {
4288 if (agg_num == 0)
4289 agg_num = rtwsta->ampdu_params[tid].agg_num;
4290 else
4291 agg_num = min(agg_num, rtwsta->ampdu_params[tid].agg_num);
4292 }
4293
4294 if (agg_num <= 0x20)
4295 ba_bmap = 3;
4296 else if (agg_num > 0x20 && agg_num <= 0x40)
4297 ba_bmap = 0;
4298 else if (agg_num > 0x40 && agg_num <= 0x80)
4299 ba_bmap = 1;
4300 else if (agg_num > 0x80 && agg_num <= 0x100)
4301 ba_bmap = 2;
4302 else if (agg_num > 0x100 && agg_num <= 0x200)
4303 ba_bmap = 4;
4304 else if (agg_num > 0x200 && agg_num <= 0x400)
4305 ba_bmap = 5;
4306
4307 h2c->c0 = le32_encode_bits(rtwsta_link->mac_id, BE_CCTL_INFO_C0_V1_MACID) |
4308 le32_encode_bits(1, BE_CCTL_INFO_C0_V1_OP);
4309
4310 h2c->w3 = le32_encode_bits(ba_bmap, BE_CCTL_INFO_W3_BA_BMAP);
4311 h2c->m3 = cpu_to_le32(BE_CCTL_INFO_W3_BA_BMAP);
4312
4313 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4314 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
4315 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 0,
4316 len);
4317
4318 ret = rtw89_h2c_tx(rtwdev, skb, false);
4319 if (ret) {
4320 rtw89_err(rtwdev, "failed to send h2c\n");
4321 goto fail;
4322 }
4323
4324 return 0;
4325 fail:
4326 dev_kfree_skb_any(skb);
4327
4328 return ret;
4329 }
4330 EXPORT_SYMBOL(rtw89_fw_h2c_ampdu_cmac_tbl_be);
4331
/* rtw89_fw_h2c_txtime_cmac_tbl() - update the per-station AMPDU max TX time
 * and/or data retry limit in the CMAC control table (AX-generation layout).
 * Only the fields whose corresponding cctl_* override flag is set on the
 * station link are written.
 *
 * Returns 0 on success or a negative error code.
 */
int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
				 struct rtw89_sta_link *rtwsta_link)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_CMC_TBL_LEN);
	SET_CTRL_INFO_MACID(skb->data, rtwsta_link->mac_id);
	SET_CTRL_INFO_OPERATION(skb->data, 1);
	/* override AMPDU duration limit only when explicitly configured */
	if (rtwsta_link->cctl_tx_time) {
		SET_CMC_TBL_AMPDU_TIME_SEL(skb->data, 1);
		SET_CMC_TBL_AMPDU_MAX_TIME(skb->data, rtwsta_link->ampdu_max_time);
	}
	/* override the data-frame retry count only when configured */
	if (rtwsta_link->cctl_tx_retry_limit) {
		SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 1);
		SET_CMC_TBL_DATA_TX_CNT_LMT(skb->data, rtwsta_link->data_tx_cnt_lmt);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      chip->h2c_cctl_func_id, 0, 1,
			      H2C_CMC_TBL_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_txtime_cmac_tbl);
4374
/* rtw89_fw_h2c_txtime_cmac_tbl_g7() - update the per-station AMPDU max TX
 * time and/or data retry limit in the G7 CMAC control table. Only fields
 * whose cctl_* override flag is set are written (mask words mN select them).
 *
 * NOTE(review): fields accumulate with |= without a prior explicit clear —
 * presumes the H2C allocation helper zeroes the payload; verify.
 *
 * Returns 0 on success or a negative error code.
 */
int rtw89_fw_h2c_txtime_cmac_tbl_g7(struct rtw89_dev *rtwdev,
				    struct rtw89_sta_link *rtwsta_link)
{
	struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for txtime_cmac_g7\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data;

	h2c->c0 = le32_encode_bits(rtwsta_link->mac_id, CCTLINFO_G7_C0_MACID) |
		  le32_encode_bits(1, CCTLINFO_G7_C0_OP);

	/* override AMPDU duration limit only when explicitly configured */
	if (rtwsta_link->cctl_tx_time) {
		h2c->w3 |= le32_encode_bits(1, CCTLINFO_G7_W3_AMPDU_TIME_SEL);
		h2c->m3 |= cpu_to_le32(CCTLINFO_G7_W3_AMPDU_TIME_SEL);

		h2c->w2 |= le32_encode_bits(rtwsta_link->ampdu_max_time,
					    CCTLINFO_G7_W2_AMPDU_MAX_TIME);
		h2c->m2 |= cpu_to_le32(CCTLINFO_G7_W2_AMPDU_MAX_TIME);
	}
	/* override the data-frame retry count only when configured */
	if (rtwsta_link->cctl_tx_retry_limit) {
		h2c->w2 |= le32_encode_bits(1, CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL) |
			   le32_encode_bits(rtwsta_link->data_tx_cnt_lmt,
					    CCTLINFO_G7_W2_DATA_TX_CNT_LMT);
		h2c->m2 |= cpu_to_le32(CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL |
				       CCTLINFO_G7_W2_DATA_TX_CNT_LMT);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_txtime_cmac_tbl_g7);
4428
/* rtw89_fw_h2c_txtime_cmac_tbl_be() - update the per-station AMPDU max TX
 * time and/or data retry limit in the BE CMAC control table. Only fields
 * whose cctl_* override flag is set are written (mask words mN select them).
 *
 * NOTE(review): fields accumulate with |= without a prior explicit clear —
 * presumes the H2C allocation helper zeroes the payload; verify.
 *
 * Returns 0 on success or a negative error code.
 */
int rtw89_fw_h2c_txtime_cmac_tbl_be(struct rtw89_dev *rtwdev,
				    struct rtw89_sta_link *rtwsta_link)
{
	struct rtw89_h2c_cctlinfo_ud_be *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for txtime_cmac_be\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_cctlinfo_ud_be *)skb->data;

	h2c->c0 = le32_encode_bits(rtwsta_link->mac_id, BE_CCTL_INFO_C0_V1_MACID) |
		  le32_encode_bits(1, BE_CCTL_INFO_C0_V1_OP);

	/* override AMPDU duration limit only when explicitly configured */
	if (rtwsta_link->cctl_tx_time) {
		h2c->w3 |= le32_encode_bits(1, BE_CCTL_INFO_W3_AMPDU_TIME_SEL);
		h2c->m3 |= cpu_to_le32(BE_CCTL_INFO_W3_AMPDU_TIME_SEL);

		h2c->w2 |= le32_encode_bits(rtwsta_link->ampdu_max_time,
					    BE_CCTL_INFO_W2_AMPDU_MAX_TIME);
		h2c->m2 |= cpu_to_le32(BE_CCTL_INFO_W2_AMPDU_MAX_TIME);
	}
	/* override the data-frame retry count only when configured */
	if (rtwsta_link->cctl_tx_retry_limit) {
		h2c->w2 |= le32_encode_bits(1, BE_CCTL_INFO_W2_DATA_TXCNT_LMT_SEL) |
			   le32_encode_bits(rtwsta_link->data_tx_cnt_lmt,
					    BE_CCTL_INFO_W2_DATA_TX_CNT_LMT);
		h2c->m2 |= cpu_to_le32(BE_CCTL_INFO_W2_DATA_TXCNT_LMT_SEL |
				       BE_CCTL_INFO_W2_DATA_TX_CNT_LMT);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_txtime_cmac_tbl_be);
4482
/* rtw89_fw_h2c_punctured_cmac_tbl_g7() - tell G7 firmware which sub-channels
 * remain usable after EHT preamble puncturing changes.
 *
 * @punctured: bitmap of punctured (disabled) sub-channels as reported by
 * mac80211; the ACT_SUBCH_CBW field wants the active ones, hence the
 * bitwise inversion.
 *
 * Returns 0 on success or a negative error code.
 */
int rtw89_fw_h2c_punctured_cmac_tbl_g7(struct rtw89_dev *rtwdev,
				       struct rtw89_vif_link *rtwvif_link,
				       u16 punctured)
{
	struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for punctured cmac g7\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data;

	h2c->c0 = le32_encode_bits(rtwvif_link->mac_id, CCTLINFO_G7_C0_MACID) |
		  le32_encode_bits(1, CCTLINFO_G7_C0_OP);

	/* invert: firmware takes the active-subchannel bitmap */
	h2c->w4 = le32_encode_bits(~punctured, CCTLINFO_G7_W4_ACT_SUBCH_CBW);
	h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_ACT_SUBCH_CBW);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_punctured_cmac_tbl_g7);
4525
/* rtw89_fw_h2c_punctured_cmac_tbl_be() - tell BE firmware which sub-channels
 * remain usable after EHT preamble puncturing changes.
 *
 * @punctured: bitmap of punctured (disabled) sub-channels as reported by
 * mac80211; the ACT_SUBCH_CBW field wants the active ones, hence the
 * bitwise inversion.
 *
 * Returns 0 on success or a negative error code.
 */
int rtw89_fw_h2c_punctured_cmac_tbl_be(struct rtw89_dev *rtwdev,
				       struct rtw89_vif_link *rtwvif_link,
				       u16 punctured)
{
	struct rtw89_h2c_cctlinfo_ud_be *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for punctured cmac be\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_cctlinfo_ud_be *)skb->data;

	h2c->c0 = le32_encode_bits(rtwvif_link->mac_id, BE_CCTL_INFO_C0_V1_MACID) |
		  le32_encode_bits(1, BE_CCTL_INFO_C0_V1_OP);

	/* invert: firmware takes the active-subchannel bitmap */
	h2c->w4 = le32_encode_bits(~punctured, BE_CCTL_INFO_W4_ACT_SUBCH_CBW);
	h2c->m4 = cpu_to_le32(BE_CCTL_INFO_W4_ACT_SUBCH_CBW);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_punctured_cmac_tbl_be);
4567
/* rtw89_fw_h2c_txpath_cmac_tbl() - program the per-station TX path mapping
 * in the CMAC control table via __rtw89_fw_h2c_set_tx_path().
 *
 * Only applies to chips using the original (non-V1/G7) cctl-info layout;
 * a successful no-op on everything else.
 *
 * Returns 0 on success or a negative error code.
 */
int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev,
				 struct rtw89_sta_link *rtwsta_link)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct sk_buff *skb;
	int ret;

	/* other layouts encode TX path differently; nothing to do here */
	if (chip->h2c_cctl_func_id != H2C_FUNC_MAC_CCTLINFO_UD)
		return 0;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_CMC_TBL_LEN);
	SET_CTRL_INFO_MACID(skb->data, rtwsta_link->mac_id);
	SET_CTRL_INFO_OPERATION(skb->data, 1);

	__rtw89_fw_h2c_set_tx_path(rtwdev, skb);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_CCTLINFO_UD, 0, 1,
			      H2C_CMC_TBL_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
4606
rtw89_fw_h2c_update_beacon(struct rtw89_dev * rtwdev,struct rtw89_vif_link * rtwvif_link)4607 int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
4608 struct rtw89_vif_link *rtwvif_link)
4609 {
4610 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
4611 rtwvif_link->chanctx_idx);
4612 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
4613 struct rtw89_h2c_bcn_upd *h2c;
4614 struct sk_buff *skb_beacon;
4615 struct ieee80211_hdr *hdr;
4616 u32 len = sizeof(*h2c);
4617 struct sk_buff *skb;
4618 int bcn_total_len;
4619 u16 beacon_rate;
4620 u16 tim_offset;
4621 void *noa_data;
4622 u8 noa_len;
4623 int ret;
4624
4625 if (vif->p2p)
4626 beacon_rate = RTW89_HW_RATE_OFDM6;
4627 else if (chan->band_type == RTW89_BAND_2G)
4628 beacon_rate = RTW89_HW_RATE_CCK1;
4629 else
4630 beacon_rate = RTW89_HW_RATE_OFDM6;
4631
4632 skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset,
4633 NULL, 0);
4634 if (!skb_beacon) {
4635 rtw89_err(rtwdev, "failed to get beacon skb\n");
4636 return -ENOMEM;
4637 }
4638
4639 noa_len = rtw89_p2p_noa_fetch(rtwvif_link, &noa_data);
4640 if (noa_len &&
4641 (noa_len <= skb_tailroom(skb_beacon) ||
4642 pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) {
4643 skb_put_data(skb_beacon, noa_data, noa_len);
4644 }
4645
4646 hdr = (struct ieee80211_hdr *)skb_beacon;
4647 tim_offset -= ieee80211_hdrlen(hdr->frame_control);
4648
4649 bcn_total_len = len + skb_beacon->len;
4650 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len);
4651 if (!skb) {
4652 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
4653 dev_kfree_skb_any(skb_beacon);
4654 return -ENOMEM;
4655 }
4656 skb_put(skb, len);
4657 h2c = (struct rtw89_h2c_bcn_upd *)skb->data;
4658
4659 h2c->w0 = le32_encode_bits(rtwvif_link->port, RTW89_H2C_BCN_UPD_W0_PORT) |
4660 le32_encode_bits(0, RTW89_H2C_BCN_UPD_W0_MBSSID) |
4661 le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_BCN_UPD_W0_BAND) |
4662 le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_W0_GRP_IE_OFST);
4663 h2c->w1 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCN_UPD_W1_MACID) |
4664 le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_W1_SSN_SEL) |
4665 le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_W1_SSN_MODE) |
4666 le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_W1_RATE);
4667
4668 skb_put_data(skb, skb_beacon->data, skb_beacon->len);
4669 dev_kfree_skb_any(skb_beacon);
4670
4671 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4672 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
4673 H2C_FUNC_MAC_BCN_UPD, 0, 1,
4674 bcn_total_len);
4675
4676 ret = rtw89_h2c_tx(rtwdev, skb, false);
4677 if (ret) {
4678 rtw89_err(rtwdev, "failed to send h2c\n");
4679 dev_kfree_skb_any(skb);
4680 return ret;
4681 }
4682
4683 return 0;
4684 }
4685 EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon);
4686
rtw89_fw_h2c_update_beacon_be(struct rtw89_dev * rtwdev,struct rtw89_vif_link * rtwvif_link)4687 int rtw89_fw_h2c_update_beacon_be(struct rtw89_dev *rtwdev,
4688 struct rtw89_vif_link *rtwvif_link)
4689 {
4690 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
4691 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
4692 struct rtw89_h2c_bcn_upd_be *h2c;
4693 struct sk_buff *skb_beacon;
4694 struct ieee80211_hdr *hdr;
4695 u32 len = sizeof(*h2c);
4696 struct sk_buff *skb;
4697 int bcn_total_len;
4698 u16 beacon_rate;
4699 u16 tim_offset;
4700 void *noa_data;
4701 u8 noa_len;
4702 int ret;
4703
4704 if (vif->p2p)
4705 beacon_rate = RTW89_HW_RATE_OFDM6;
4706 else if (chan->band_type == RTW89_BAND_2G)
4707 beacon_rate = RTW89_HW_RATE_CCK1;
4708 else
4709 beacon_rate = RTW89_HW_RATE_OFDM6;
4710
4711 skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset,
4712 NULL, 0);
4713 if (!skb_beacon) {
4714 rtw89_err(rtwdev, "failed to get beacon skb\n");
4715 return -ENOMEM;
4716 }
4717
4718 noa_len = rtw89_p2p_noa_fetch(rtwvif_link, &noa_data);
4719 if (noa_len &&
4720 (noa_len <= skb_tailroom(skb_beacon) ||
4721 pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) {
4722 skb_put_data(skb_beacon, noa_data, noa_len);
4723 }
4724
4725 hdr = (struct ieee80211_hdr *)skb_beacon;
4726 tim_offset -= ieee80211_hdrlen(hdr->frame_control);
4727
4728 bcn_total_len = len + skb_beacon->len;
4729 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len);
4730 if (!skb) {
4731 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
4732 dev_kfree_skb_any(skb_beacon);
4733 return -ENOMEM;
4734 }
4735 skb_put(skb, len);
4736 h2c = (struct rtw89_h2c_bcn_upd_be *)skb->data;
4737
4738 h2c->w0 = le32_encode_bits(rtwvif_link->port, RTW89_H2C_BCN_UPD_BE_W0_PORT) |
4739 le32_encode_bits(0, RTW89_H2C_BCN_UPD_BE_W0_MBSSID) |
4740 le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_BCN_UPD_BE_W0_BAND) |
4741 le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_BE_W0_GRP_IE_OFST);
4742 h2c->w1 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCN_UPD_BE_W1_MACID) |
4743 le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_BE_W1_SSN_SEL) |
4744 le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_BE_W1_SSN_MODE) |
4745 le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_BE_W1_RATE);
4746
4747 skb_put_data(skb, skb_beacon->data, skb_beacon->len);
4748 dev_kfree_skb_any(skb_beacon);
4749
4750 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4751 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
4752 H2C_FUNC_MAC_BCN_UPD_BE, 0, 1,
4753 bcn_total_len);
4754
4755 ret = rtw89_h2c_tx(rtwdev, skb, false);
4756 if (ret) {
4757 rtw89_err(rtwdev, "failed to send h2c\n");
4758 goto fail;
4759 }
4760
4761 return 0;
4762
4763 fail:
4764 dev_kfree_skb_any(skb);
4765
4766 return ret;
4767 }
4768 EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon_be);
4769
/* Ask firmware to shift this port's TBTT by @offset (TBTT-tuning H2C). */
int rtw89_fw_h2c_tbtt_tuning(struct rtw89_dev *rtwdev,
			     struct rtw89_vif_link *rtwvif_link, u32 offset)
{
	struct rtw89_h2c_tbtt_tuning *cmd;
	u32 cmd_len = sizeof(*cmd);
	struct sk_buff *h2c_skb;
	int err;

	h2c_skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, cmd_len);
	if (!h2c_skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c tbtt tuning\n");
		return -ENOMEM;
	}

	cmd = (struct rtw89_h2c_tbtt_tuning *)skb_put(h2c_skb, cmd_len);
	cmd->w0 = le32_encode_bits(rtwvif_link->phy_idx, RTW89_H2C_TBTT_TUNING_W0_BAND) |
		  le32_encode_bits(rtwvif_link->port, RTW89_H2C_TBTT_TUNING_W0_PORT);
	cmd->w1 = le32_encode_bits(offset, RTW89_H2C_TBTT_TUNING_W1_SHIFT);

	rtw89_h2c_pkt_set_hdr(rtwdev, h2c_skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_PS,
			      H2C_FUNC_TBTT_TUNING, 0, 0,
			      cmd_len);

	err = rtw89_h2c_tx(rtwdev, h2c_skb, false);
	if (err) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		dev_kfree_skb_any(h2c_skb);
	}

	return err;
}
4807
/* Send the power-level H2C command for one link.
 *
 * Programs firmware power-save timing for @rtwvif_link: the beacon
 * timeout taken from the driver's beacon-tracking state (clamped into
 * the range firmware accepts) plus a fixed minimum DTIM timeout.
 * PS level and TRX level are currently always written as 0.
 *
 * Returns 0 on success or a negative error code.
 */
int rtw89_fw_h2c_pwr_lvl(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link)
{
/* Firmware-accepted bounds for the beacon/DTIM timeout values. */
#define RTW89_BCN_TO_VAL_MIN 4
#define RTW89_BCN_TO_VAL_MAX 64
#define RTW89_DTIM_TO_VAL_MIN 7
#define RTW89_DTIM_TO_VAL_MAX 15
	struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track;
	struct rtw89_h2c_pwr_lvl *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	u8 bcn_to_val;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c pwr lvl\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_pwr_lvl *)skb->data;

	/* Tracked timeout may drift outside what firmware tolerates. */
	bcn_to_val = clamp_t(u8, bcn_track->bcn_timeout,
			     RTW89_BCN_TO_VAL_MIN, RTW89_BCN_TO_VAL_MAX);

	h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_PWR_LVL_W0_MACID) |
		  le32_encode_bits(bcn_to_val, RTW89_H2C_PWR_LVL_W0_BCN_TO_VAL) |
		  le32_encode_bits(0, RTW89_H2C_PWR_LVL_W0_PS_LVL) |
		  le32_encode_bits(0, RTW89_H2C_PWR_LVL_W0_TRX_LVL) |
		  le32_encode_bits(RTW89_DTIM_TO_VAL_MIN,
				   RTW89_H2C_PWR_LVL_W0_DTIM_TO_VAL);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_PS,
			      H2C_FUNC_PS_POWER_LEVEL, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
4856
rtw89_fw_h2c_role_maintain(struct rtw89_dev * rtwdev,struct rtw89_vif_link * rtwvif_link,struct rtw89_sta_link * rtwsta_link,enum rtw89_upd_mode upd_mode)4857 int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
4858 struct rtw89_vif_link *rtwvif_link,
4859 struct rtw89_sta_link *rtwsta_link,
4860 enum rtw89_upd_mode upd_mode)
4861 {
4862 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
4863 struct rtw89_h2c_role_maintain *h2c;
4864 u32 len = sizeof(*h2c);
4865 struct sk_buff *skb;
4866 u8 self_role;
4867 int ret;
4868
4869 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) {
4870 if (rtwsta_link)
4871 self_role = RTW89_SELF_ROLE_AP_CLIENT;
4872 else
4873 self_role = rtwvif_link->self_role;
4874 } else {
4875 self_role = rtwvif_link->self_role;
4876 }
4877
4878 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
4879 if (!skb) {
4880 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
4881 return -ENOMEM;
4882 }
4883 skb_put(skb, len);
4884 h2c = (struct rtw89_h2c_role_maintain *)skb->data;
4885
4886 h2c->w0 = le32_encode_bits(mac_id, RTW89_H2C_ROLE_MAINTAIN_W0_MACID) |
4887 le32_encode_bits(self_role, RTW89_H2C_ROLE_MAINTAIN_W0_SELF_ROLE) |
4888 le32_encode_bits(upd_mode, RTW89_H2C_ROLE_MAINTAIN_W0_UPD_MODE) |
4889 le32_encode_bits(rtwvif_link->wifi_role,
4890 RTW89_H2C_ROLE_MAINTAIN_W0_WIFI_ROLE) |
4891 le32_encode_bits(rtwvif_link->mac_idx,
4892 RTW89_H2C_ROLE_MAINTAIN_W0_BAND) |
4893 le32_encode_bits(rtwvif_link->port, RTW89_H2C_ROLE_MAINTAIN_W0_PORT);
4894
4895 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
4896 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
4897 H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1,
4898 len);
4899
4900 ret = rtw89_h2c_tx(rtwdev, skb, false);
4901 if (ret) {
4902 rtw89_err(rtwdev, "failed to send h2c\n");
4903 goto fail;
4904 }
4905
4906 return 0;
4907 fail:
4908 dev_kfree_skb_any(skb);
4909
4910 return ret;
4911 }
4912
4913 static enum rtw89_fw_sta_type
rtw89_fw_get_sta_type(struct rtw89_dev * rtwdev,struct rtw89_vif_link * rtwvif_link,struct rtw89_sta_link * rtwsta_link)4914 rtw89_fw_get_sta_type(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
4915 struct rtw89_sta_link *rtwsta_link)
4916 {
4917 struct ieee80211_bss_conf *bss_conf;
4918 struct ieee80211_link_sta *link_sta;
4919 enum rtw89_fw_sta_type type;
4920
4921 rcu_read_lock();
4922
4923 if (!rtwsta_link)
4924 goto by_vif;
4925
4926 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);
4927
4928 if (link_sta->eht_cap.has_eht)
4929 type = RTW89_FW_BE_STA;
4930 else if (link_sta->he_cap.has_he)
4931 type = RTW89_FW_AX_STA;
4932 else
4933 type = RTW89_FW_N_AC_STA;
4934
4935 goto out;
4936
4937 by_vif:
4938 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
4939
4940 if (bss_conf->eht_support)
4941 type = RTW89_FW_BE_STA;
4942 else if (bss_conf->he_support)
4943 type = RTW89_FW_AX_STA;
4944 else
4945 type = RTW89_FW_N_AC_STA;
4946
4947 out:
4948 rcu_read_unlock();
4949
4950 return type;
4951 }
4952
/* Send the join-info H2C command for one link.
 *
 * Notifies firmware that @rtwvif_link (or, when @rtwsta_link is set in
 * AP mode, that remote client) has joined (@dis_conn == false) or left
 * (@dis_conn == true).  BE-generation chips use the larger v1 layout
 * which additionally carries MLD/EMLSR parameters.
 *
 * Returns 0 on success or a negative error code.
 */
int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
			   struct rtw89_sta_link *rtwsta_link, bool dis_conn)
{
	u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
	bool is_mld = ieee80211_vif_is_mld(vif);
	u8 self_role = rtwvif_link->self_role;
	enum rtw89_fw_sta_type sta_type;
	u8 net_type = rtwvif_link->net_type;
	struct rtw89_h2c_join_v1 *h2c_v1;
	struct rtw89_h2c_join *h2c;
	u32 len = sizeof(*h2c);
	bool format_v1 = false;
	struct sk_buff *skb;
	u8 main_mac_id;
	bool init_ps;
	int ret;

	if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) {
		len = sizeof(*h2c_v1);
		format_v1 = true;
	}

	/* An AP-mode client role; a disconnect demotes it to "no link". */
	if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta_link) {
		self_role = RTW89_SELF_ROLE_AP_CLIENT;
		net_type = dis_conn ? RTW89_NET_TYPE_NO_LINK : net_type;
	}

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_join *)skb->data;

	h2c->w0 = le32_encode_bits(mac_id, RTW89_H2C_JOININFO_W0_MACID) |
		  le32_encode_bits(dis_conn, RTW89_H2C_JOININFO_W0_OP) |
		  le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_JOININFO_W0_BAND) |
		  le32_encode_bits(rtwvif_link->wmm, RTW89_H2C_JOININFO_W0_WMM) |
		  le32_encode_bits(rtwvif_link->trigger, RTW89_H2C_JOININFO_W0_TGR) |
		  le32_encode_bits(0, RTW89_H2C_JOININFO_W0_ISHESTA) |
		  le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DLBW) |
		  le32_encode_bits(0, RTW89_H2C_JOININFO_W0_TF_MAC_PAD) |
		  le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DL_T_PE) |
		  le32_encode_bits(rtwvif_link->port, RTW89_H2C_JOININFO_W0_PORT_ID) |
		  le32_encode_bits(net_type, RTW89_H2C_JOININFO_W0_NET_TYPE) |
		  le32_encode_bits(rtwvif_link->wifi_role,
				   RTW89_H2C_JOININFO_W0_WIFI_ROLE) |
		  le32_encode_bits(self_role, RTW89_H2C_JOININFO_W0_SELF_ROLE);

	if (!format_v1)
		goto done;

	h2c_v1 = (struct rtw89_h2c_join_v1 *)skb->data;

	sta_type = rtw89_fw_get_sta_type(rtwdev, rtwvif_link, rtwsta_link);
	/* Only the designated link starts in the awake power state. */
	init_ps = rtwvif_link != rtw89_get_designated_link(rtwvif_link->rtwvif);

	if (rtwsta_link)
		main_mac_id = rtw89_sta_get_main_macid(rtwsta_link->rtwsta);
	else
		main_mac_id = rtw89_vif_get_main_macid(rtwvif_link->rtwvif);

	h2c_v1->w1 = le32_encode_bits(sta_type, RTW89_H2C_JOININFO_W1_STA_TYPE) |
		     le32_encode_bits(is_mld, RTW89_H2C_JOININFO_W1_IS_MLD) |
		     le32_encode_bits(main_mac_id, RTW89_H2C_JOININFO_W1_MAIN_MACID) |
		     le32_encode_bits(RTW89_H2C_JOININFO_MLO_MODE_MLSR,
				      RTW89_H2C_JOININFO_W1_MLO_MODE) |
		     le32_encode_bits(0, RTW89_H2C_JOININFO_W1_EMLSR_CAB) |
		     le32_encode_bits(0, RTW89_H2C_JOININFO_W1_NSTR_EN) |
		     le32_encode_bits(init_ps, RTW89_H2C_JOININFO_W1_INIT_PWR_STATE) |
		     le32_encode_bits(IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_256US,
				      RTW89_H2C_JOININFO_W1_EMLSR_PADDING) |
		     le32_encode_bits(IEEE80211_EML_CAP_EMLSR_TRANS_DELAY_256US,
				      RTW89_H2C_JOININFO_W1_EMLSR_TRANS_DELAY);

	/* The MACID extension bits are word-2 fields, so encode them into
	 * w2 (previously their zero encodings were ORed into w1, which was
	 * harmless but misplaced).  Both extensions are currently 0.
	 */
	h2c_v1->w2 = le32_encode_bits(0, RTW89_H2C_JOININFO_W2_MACID_EXT) |
		     le32_encode_bits(0, RTW89_H2C_JOININFO_W2_MAIN_MACID_EXT);

done:
	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
			      H2C_FUNC_MAC_JOININFO, 0, 1,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
5052
/* Tell firmware whether DBCC (dual-band concurrent) operation is enabled. */
int rtw89_fw_h2c_notify_dbcc(struct rtw89_dev *rtwdev, bool en)
{
	struct rtw89_h2c_notify_dbcc *cmd;
	u32 cmd_len = sizeof(*cmd);
	struct sk_buff *h2c_skb;
	int err;

	h2c_skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, cmd_len);
	if (!h2c_skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c notify dbcc\n");
		return -ENOMEM;
	}

	cmd = (struct rtw89_h2c_notify_dbcc *)skb_put(h2c_skb, cmd_len);
	cmd->w0 = le32_encode_bits(en, RTW89_H2C_NOTIFY_DBCC_EN);

	rtw89_h2c_pkt_set_hdr(rtwdev, h2c_skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
			      H2C_FUNC_NOTIFY_DBCC, 0, 1,
			      cmd_len);

	err = rtw89_h2c_tx(rtwdev, h2c_skb, false);
	if (err) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		dev_kfree_skb_any(h2c_skb);
	}

	return err;
}
5087
/* Pause or resume TX scheduling for a single mac_id.
 *
 * @sh:    bit position of the mac_id within its 32-bit group word
 * @grp:   index of the 32-mac_id group
 * @pause: true to pause (and, on capable firmware, also sleep) the mac_id
 *
 * Firmware with the MACID_PAUSE_SLEEP feature takes a combined
 * pause+sleep layout; older firmware only takes the pause layout.  In
 * both cases the mask word selects which bit is being updated, and the
 * value word carries the new state (set only when pausing).
 *
 * Returns 0 on success or a negative error code.
 */
int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
			     bool pause)
{
	struct rtw89_fw_macid_pause_sleep_grp *h2c_new;
	struct rtw89_fw_macid_pause_grp *h2c;
	__le32 set = cpu_to_le32(BIT(sh));
	u8 h2c_macid_pause_id;
	struct sk_buff *skb;
	u32 len;
	int ret;

	/* Pick command id and payload size based on firmware capability. */
	if (RTW89_CHK_FW_FEATURE(MACID_PAUSE_SLEEP, &rtwdev->fw)) {
		h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE_SLEEP;
		len = sizeof(*h2c_new);
	} else {
		h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE;
		len = sizeof(*h2c);
	}

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c macid pause\n");
		return -ENOMEM;
	}
	skb_put(skb, len);

	if (h2c_macid_pause_id == H2C_FUNC_MAC_MACID_PAUSE_SLEEP) {
		h2c_new = (struct rtw89_fw_macid_pause_sleep_grp *)skb->data;

		/* Mask selects the bit being changed; value bit is set
		 * only when pausing (cleared bits mean resume/awake).
		 */
		h2c_new->n[0].pause_mask_grp[grp] = set;
		h2c_new->n[0].sleep_mask_grp[grp] = set;
		if (pause) {
			h2c_new->n[0].pause_grp[grp] = set;
			h2c_new->n[0].sleep_grp[grp] = set;
		}
	} else {
		h2c = (struct rtw89_fw_macid_pause_grp *)skb->data;

		h2c->mask_grp[grp] = set;
		if (pause)
			h2c->pause_grp[grp] = set;
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      h2c_macid_pause_id, 1, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
5148
5149 #define H2C_EDCA_LEN 12
rtw89_fw_h2c_set_edca(struct rtw89_dev * rtwdev,struct rtw89_vif_link * rtwvif_link,u8 ac,u32 val)5150 int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
5151 u8 ac, u32 val)
5152 {
5153 struct sk_buff *skb;
5154 int ret;
5155
5156 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_EDCA_LEN);
5157 if (!skb) {
5158 rtw89_err(rtwdev, "failed to alloc skb for h2c edca\n");
5159 return -ENOMEM;
5160 }
5161 skb_put(skb, H2C_EDCA_LEN);
5162 RTW89_SET_EDCA_SEL(skb->data, 0);
5163 RTW89_SET_EDCA_BAND(skb->data, rtwvif_link->mac_idx);
5164 RTW89_SET_EDCA_WMM(skb->data, 0);
5165 RTW89_SET_EDCA_AC(skb->data, ac);
5166 RTW89_SET_EDCA_PARAM(skb->data, val);
5167
5168 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5169 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
5170 H2C_FUNC_USR_EDCA, 0, 1,
5171 H2C_EDCA_LEN);
5172
5173 ret = rtw89_h2c_tx(rtwdev, skb, false);
5174 if (ret) {
5175 rtw89_err(rtwdev, "failed to send h2c\n");
5176 goto fail;
5177 }
5178
5179 return 0;
5180 fail:
5181 dev_kfree_skb_any(skb);
5182
5183 return ret;
5184 }
5185
5186 #define H2C_TSF32_TOGL_LEN 4
rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev * rtwdev,struct rtw89_vif_link * rtwvif_link,bool en)5187 int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev,
5188 struct rtw89_vif_link *rtwvif_link,
5189 bool en)
5190 {
5191 struct sk_buff *skb;
5192 u16 early_us = en ? 2000 : 0;
5193 u8 *cmd;
5194 int ret;
5195
5196 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_TSF32_TOGL_LEN);
5197 if (!skb) {
5198 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n");
5199 return -ENOMEM;
5200 }
5201 skb_put(skb, H2C_TSF32_TOGL_LEN);
5202 cmd = skb->data;
5203
5204 RTW89_SET_FWCMD_TSF32_TOGL_BAND(cmd, rtwvif_link->mac_idx);
5205 RTW89_SET_FWCMD_TSF32_TOGL_EN(cmd, en);
5206 RTW89_SET_FWCMD_TSF32_TOGL_PORT(cmd, rtwvif_link->port);
5207 RTW89_SET_FWCMD_TSF32_TOGL_EARLY(cmd, early_us);
5208
5209 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5210 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
5211 H2C_FUNC_TSF32_TOGL, 0, 0,
5212 H2C_TSF32_TOGL_LEN);
5213
5214 ret = rtw89_h2c_tx(rtwdev, skb, false);
5215 if (ret) {
5216 rtw89_err(rtwdev, "failed to send h2c\n");
5217 goto fail;
5218 }
5219
5220 return 0;
5221 fail:
5222 dev_kfree_skb_any(skb);
5223
5224 return ret;
5225 }
5226
5227 #define H2C_OFLD_CFG_LEN 8
rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev * rtwdev)5228 int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev)
5229 {
5230 static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00};
5231 struct sk_buff *skb;
5232 int ret;
5233
5234 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_OFLD_CFG_LEN);
5235 if (!skb) {
5236 rtw89_err(rtwdev, "failed to alloc skb for h2c ofld\n");
5237 return -ENOMEM;
5238 }
5239 skb_put_data(skb, cfg, H2C_OFLD_CFG_LEN);
5240
5241 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5242 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
5243 H2C_FUNC_OFLD_CFG, 0, 1,
5244 H2C_OFLD_CFG_LEN);
5245
5246 ret = rtw89_h2c_tx(rtwdev, skb, false);
5247 if (ret) {
5248 rtw89_err(rtwdev, "failed to send h2c\n");
5249 goto fail;
5250 }
5251
5252 return 0;
5253 fail:
5254 dev_kfree_skb_any(skb);
5255
5256 return ret;
5257 }
5258
/* Configure the thermal-protection TX duty cycle.
 *
 * @lv: protection level; each level pauses TX for an additional
 *      RTW89_THERMAL_PROT_STEP percent of the time.  Level 0 (or any
 *      out-of-range level) stops duty cycling entirely.
 *
 * Returns 0 on success or a negative error code.
 */
int rtw89_fw_h2c_tx_duty(struct rtw89_dev *rtwdev, u8 lv)
{
	struct rtw89_h2c_tx_duty *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	u16 pause, active;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c tx duty\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_tx_duty *)skb->data;

	/* Guarantee the active interval below never reaches 0%. */
	static_assert(RTW89_THERMAL_PROT_LV_MAX * RTW89_THERMAL_PROT_STEP < 100);

	if (lv == 0 || lv > RTW89_THERMAL_PROT_LV_MAX) {
		h2c->w1 = le32_encode_bits(1, RTW89_H2C_TX_DUTY_W1_STOP);
	} else {
		/* Split 100% into active/pause shares per the level. */
		active = 100 - lv * RTW89_THERMAL_PROT_STEP;
		pause = 100 - active;

		h2c->w0 = le32_encode_bits(pause, RTW89_H2C_TX_DUTY_W0_PAUSE_INTVL_MASK) |
			  le32_encode_bits(active, RTW89_H2C_TX_DUTY_W0_TX_INTVL_MASK);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_TX_DUTY, 0, 0, len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
5304
/* Configure firmware beacon-filter/CQM offload for an infrastructure link.
 *
 * @connect: true enables RSSI/beacon monitoring, false disables it.
 * RSSI threshold and hysteresis come from the BSS's CQM configuration,
 * falling back to driver defaults when unset.
 *
 * Returns 0 on success, -EINVAL when the firmware lacks the
 * BEACON_FILTER feature or the link is not an infra STA, or another
 * negative error code.
 */
int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev,
				  struct rtw89_vif_link *rtwvif_link,
				  bool connect)
{
	struct ieee80211_bss_conf *bss_conf;
	s32 thold = RTW89_DEFAULT_CQM_THOLD;
	u32 hyst = RTW89_DEFAULT_CQM_HYST;
	struct rtw89_h2c_bcnfltr *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	u8 max_cnt, cnt;
	int ret;

	if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw))
		return -EINVAL;

	if (!rtwvif_link || rtwvif_link->net_type != RTW89_NET_TYPE_INFRA)
		return -EINVAL;

	rcu_read_lock();

	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, false);

	/* Prefer mac80211's CQM settings when the user configured them. */
	if (bss_conf->cqm_rssi_hyst)
		hyst = bss_conf->cqm_rssi_hyst;
	if (bss_conf->cqm_rssi_thold)
		thold = bss_conf->cqm_rssi_thold;

	rcu_read_unlock();

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c bcn filter\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_bcnfltr *)skb->data;

	/* Newer firmware widens the loss counter from 4 to 7 bits. */
	if (RTW89_CHK_FW_FEATURE(BEACON_LOSS_COUNT_V1, &rtwdev->fw))
		max_cnt = BIT(7) - 1;
	else
		max_cnt = BIT(4) - 1;

	cnt = min(RTW89_BCN_LOSS_CNT, max_cnt);

	/* The 7-bit loss count is split across two fields: high 3 bits
	 * and low 4 bits.  RSSI threshold is offset by MAX_RSSI so the
	 * wire value is non-negative.
	 */
	h2c->w0 = le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_RSSI) |
		  le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_BCN) |
		  le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_EN) |
		  le32_encode_bits(RTW89_BCN_FLTR_OFFLOAD_MODE_DEFAULT,
				   RTW89_H2C_BCNFLTR_W0_MODE) |
		  le32_encode_bits(cnt >> 4, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT_H3) |
		  le32_encode_bits(cnt & 0xf, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT_L4) |
		  le32_encode_bits(hyst, RTW89_H2C_BCNFLTR_W0_RSSI_HYST) |
		  le32_encode_bits(thold + MAX_RSSI,
				   RTW89_H2C_BCNFLTR_W0_RSSI_THRESHOLD) |
		  le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCNFLTR_W0_MAC_ID);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_CFG_BCNFLTR, 0, 1, len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
5379
rtw89_fw_h2c_rssi_offload(struct rtw89_dev * rtwdev,struct rtw89_rx_phy_ppdu * phy_ppdu)5380 int rtw89_fw_h2c_rssi_offload(struct rtw89_dev *rtwdev,
5381 struct rtw89_rx_phy_ppdu *phy_ppdu)
5382 {
5383 struct rtw89_h2c_ofld_rssi *h2c;
5384 u32 len = sizeof(*h2c);
5385 struct sk_buff *skb;
5386 s8 rssi;
5387 int ret;
5388
5389 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw))
5390 return -EINVAL;
5391
5392 if (!phy_ppdu)
5393 return -EINVAL;
5394
5395 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
5396 if (!skb) {
5397 rtw89_err(rtwdev, "failed to alloc skb for h2c rssi\n");
5398 return -ENOMEM;
5399 }
5400
5401 rssi = phy_ppdu->rssi_avg >> RSSI_FACTOR;
5402 skb_put(skb, len);
5403 h2c = (struct rtw89_h2c_ofld_rssi *)skb->data;
5404
5405 h2c->w0 = le32_encode_bits(phy_ppdu->mac_id, RTW89_H2C_OFLD_RSSI_W0_MACID) |
5406 le32_encode_bits(1, RTW89_H2C_OFLD_RSSI_W0_NUM);
5407 h2c->w1 = le32_encode_bits(rssi, RTW89_H2C_OFLD_RSSI_W1_VAL);
5408
5409 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5410 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
5411 H2C_FUNC_OFLD_RSSI, 0, 1, len);
5412
5413 ret = rtw89_h2c_tx(rtwdev, skb, false);
5414 if (ret) {
5415 rtw89_err(rtwdev, "failed to send h2c\n");
5416 goto fail;
5417 }
5418
5419 return 0;
5420 fail:
5421 dev_kfree_skb_any(skb);
5422
5423 return ret;
5424 }
5425
rtw89_fw_h2c_tp_offload(struct rtw89_dev * rtwdev,struct rtw89_vif_link * rtwvif_link)5426 int rtw89_fw_h2c_tp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link)
5427 {
5428 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
5429 struct rtw89_traffic_stats *stats = &rtwvif->stats;
5430 struct rtw89_h2c_ofld *h2c;
5431 u32 len = sizeof(*h2c);
5432 struct sk_buff *skb;
5433 int ret;
5434
5435 if (rtwvif_link->net_type != RTW89_NET_TYPE_INFRA)
5436 return -EINVAL;
5437
5438 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
5439 if (!skb) {
5440 rtw89_err(rtwdev, "failed to alloc skb for h2c tp\n");
5441 return -ENOMEM;
5442 }
5443
5444 skb_put(skb, len);
5445 h2c = (struct rtw89_h2c_ofld *)skb->data;
5446
5447 h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_OFLD_W0_MAC_ID) |
5448 le32_encode_bits(stats->tx_throughput, RTW89_H2C_OFLD_W0_TX_TP) |
5449 le32_encode_bits(stats->rx_throughput, RTW89_H2C_OFLD_W0_RX_TP);
5450
5451 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
5452 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
5453 H2C_FUNC_OFLD_TP, 0, 1, len);
5454
5455 ret = rtw89_h2c_tx(rtwdev, skb, false);
5456 if (ret) {
5457 rtw89_err(rtwdev, "failed to send h2c\n");
5458 goto fail;
5459 }
5460
5461 return 0;
5462 fail:
5463 dev_kfree_skb_any(skb);
5464
5465 return ret;
5466 }
5467
/* Send a rate-adaptation (RA) configuration H2C for one mac_id.
 *
 * @ra:  assembled RA parameters (mode, bandwidth, rate mask, GI/LTF, ...)
 * @csi: also program the beamformee CSI fixed-rate control fields
 *
 * AX-generation chips use the base layout (ver 0); later generations
 * use the larger v1 layout with EHT mode/bandwidth words.  The control
 * flow is layered: the common words are always filled, the CSI words
 * only for ver 0 with @csi set, and the v1 words only for ver >= 1.
 *
 * Returns 0 on success or a negative error code.
 */
int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_h2c_ra_v1 *h2c_v1;
	struct rtw89_h2c_ra *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	u8 ver = U8_MAX;
	int ret;

	/* Select payload layout/version by chip generation. */
	if (chip->chip_gen == RTW89_CHIP_AX) {
		len = sizeof(*h2c);
		ver = 0;
	} else {
		len = sizeof(*h2c_v1);
		ver = 1;
	}

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_ra *)skb->data;
	rtw89_debug(rtwdev, RTW89_DBG_RA,
		    "ra cmd msk: %llx ", ra->ra_mask);

	h2c->w0 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_W0_MODE) |
		  le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_W0_BW_CAP) |
		  le32_encode_bits(ra->macid, RTW89_H2C_RA_W0_MACID) |
		  le32_encode_bits(ra->dcm_cap, RTW89_H2C_RA_W0_DCM) |
		  le32_encode_bits(ra->er_cap, RTW89_H2C_RA_W0_ER) |
		  le32_encode_bits(ra->init_rate_lv, RTW89_H2C_RA_W0_INIT_RATE_LV) |
		  le32_encode_bits(ra->upd_all, RTW89_H2C_RA_W0_UPD_ALL) |
		  le32_encode_bits(ra->en_sgi, RTW89_H2C_RA_W0_SGI) |
		  le32_encode_bits(ra->ldpc_cap, RTW89_H2C_RA_W0_LDPC) |
		  le32_encode_bits(ra->stbc_cap, RTW89_H2C_RA_W0_STBC) |
		  le32_encode_bits(ra->ss_num, RTW89_H2C_RA_W0_SS_NUM) |
		  le32_encode_bits(ra->giltf, RTW89_H2C_RA_W0_GILTF) |
		  le32_encode_bits(ra->upd_bw_nss_mask, RTW89_H2C_RA_W0_UPD_BW_NSS_MASK) |
		  le32_encode_bits(ra->upd_mask, RTW89_H2C_RA_W0_UPD_MASK);
	/* 64-bit rate mask split across two 32-bit words. */
	h2c->w1 = le32_encode_bits(ra->ra_mask, RTW89_H2C_RA_W1_RAMASK_LO32);
	h2c->w2 = le32_encode_bits(ra->ra_mask >> 32, RTW89_H2C_RA_W2_RAMASK_HI32);
	h2c->w3 = le32_encode_bits(ra->fix_giltf_en, RTW89_H2C_RA_W3_FIX_GILTF_EN) |
		  le32_encode_bits(ra->fix_giltf, RTW89_H2C_RA_W3_FIX_GILTF);

	/* CSI fixed-rate fields are filled only for ver-0 with csi set. */
	if (!csi || ver >= 1)
		goto next_v1;

	h2c->w2 |= le32_encode_bits(1, RTW89_H2C_RA_W2_BFEE_CSI_CTL);
	h2c->w3 |= le32_encode_bits(ra->band_num, RTW89_H2C_RA_W3_BAND_NUM) |
		   le32_encode_bits(ra->cr_tbl_sel, RTW89_H2C_RA_W3_CR_TBL_SEL) |
		   le32_encode_bits(ra->fixed_csi_rate_en, RTW89_H2C_RA_W3_FIXED_CSI_RATE_EN) |
		   le32_encode_bits(ra->ra_csi_rate_en, RTW89_H2C_RA_W3_RA_CSI_RATE_EN) |
		   le32_encode_bits(ra->csi_mcs_ss_idx, RTW89_H2C_RA_W3_FIXED_CSI_MCS_SS_IDX) |
		   le32_encode_bits(ra->csi_mode, RTW89_H2C_RA_W3_FIXED_CSI_MODE) |
		   le32_encode_bits(ra->csi_gi_ltf, RTW89_H2C_RA_W3_FIXED_CSI_GI_LTF) |
		   le32_encode_bits(ra->csi_bw, RTW89_H2C_RA_W3_FIXED_CSI_BW);

next_v1:
	if (ver < 1)
		goto done;

	/* v1-only words: partial-bandwidth ER, band, and EHT mode/BW. */
	h2c->w3 |= le32_encode_bits(ra->partial_bw_er,
				    RTW89_H2C_RA_V1_W3_PARTIAL_BW_SU_ER) |
		   le32_encode_bits(ra->band, RTW89_H2C_RA_V1_W3_BAND);

	h2c_v1 = (struct rtw89_h2c_ra_v1 *)h2c;
	h2c_v1->w4 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_V1_W4_MODE_EHT) |
		     le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_V1_W4_BW_EHT);

done:
	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RA,
			      H2C_FUNC_OUTSRC_RA_MACIDCFG, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
5558
/* Send the BT-coex driver-init info (legacy fixed layout) to firmware.
 *
 * Serializes the coex module description (antenna arrangement, RFE/cut
 * versions, board wiring) and the init flags into the SET_DRV_INFO
 * outsrc command with sub-type @type.
 *
 * Returns 0 on success or a negative error code.
 */
int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev, u8 type)
{
	struct rtw89_btc *btc = &rtwdev->btc;
	struct rtw89_btc_dm *dm = &btc->dm;
	struct rtw89_btc_init_info *init_info = &dm->init_info.init;
	struct rtw89_btc_module *module = &init_info->module;
	struct rtw89_btc_ant_info *ant = &module->ant;
	struct rtw89_h2c_cxinit *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_cxinit *)skb->data;

	/* Common coex header: sub-type and payload length (minus header). */
	h2c->hdr.type = type;
	h2c->hdr.len = len - H2C_LEN_CXDRVHDR;

	/* Antenna description. */
	h2c->ant_type = ant->type;
	h2c->ant_num = ant->num;
	h2c->ant_iso = ant->isolation;
	h2c->ant_info =
		u8_encode_bits(ant->single_pos, RTW89_H2C_CXINIT_ANT_INFO_POS) |
		u8_encode_bits(ant->diversity, RTW89_H2C_CXINIT_ANT_INFO_DIVERSITY) |
		u8_encode_bits(ant->btg_pos, RTW89_H2C_CXINIT_ANT_INFO_BTG_POS) |
		u8_encode_bits(ant->stream_cnt, RTW89_H2C_CXINIT_ANT_INFO_STREAM_CNT);

	/* Module/board description. */
	h2c->mod_rfe = module->rfe_type;
	h2c->mod_cv = module->cv;
	h2c->mod_info =
		u8_encode_bits(module->bt_solo, RTW89_H2C_CXINIT_MOD_INFO_BT_SOLO) |
		u8_encode_bits(module->bt_pos, RTW89_H2C_CXINIT_MOD_INFO_BT_POS) |
		u8_encode_bits(module->switch_type, RTW89_H2C_CXINIT_MOD_INFO_SW_TYPE) |
		u8_encode_bits(module->wa_type, RTW89_H2C_CXINIT_MOD_INFO_WA_TYPE);
	h2c->mod_adie_kt = module->kt_ver_adie;
	h2c->wl_gch = init_info->wl_guard_ch;

	/* Init-state flags. */
	h2c->info =
		u8_encode_bits(init_info->wl_only, RTW89_H2C_CXINIT_INFO_WL_ONLY) |
		u8_encode_bits(init_info->wl_init_ok, RTW89_H2C_CXINIT_INFO_WL_INITOK) |
		u8_encode_bits(init_info->dbcc_en, RTW89_H2C_CXINIT_INFO_DBCC_EN) |
		u8_encode_bits(init_info->cx_other, RTW89_H2C_CXINIT_INFO_CX_OTHER) |
		u8_encode_bits(init_info->bt_only, RTW89_H2C_CXINIT_INFO_BT_ONLY);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, BTFC_SET,
			      SET_DRV_INFO, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
5625
/* Send the BT-coex driver-init info using the v7 layout: the versioned
 * header plus a verbatim copy of the v7 init-info structure.
 */
int rtw89_fw_h2c_cxdrv_init_v7(struct rtw89_dev *rtwdev, u8 type)
{
	struct rtw89_btc *btc = &rtwdev->btc;
	struct rtw89_btc_init_info_v7 *init = &btc->dm.init_info.init_v7;
	struct rtw89_h2c_cxinit_v7 *cmd;
	u32 cmd_len = sizeof(*cmd);
	struct sk_buff *h2c_skb;
	int err;

	h2c_skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, cmd_len);
	if (!h2c_skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init_v7\n");
		return -ENOMEM;
	}

	cmd = (struct rtw89_h2c_cxinit_v7 *)skb_put(h2c_skb, cmd_len);
	cmd->hdr.type = type;
	cmd->hdr.ver = btc->ver->fcxinit;
	cmd->hdr.len = cmd_len - H2C_LEN_CXDRVHDR_V7;
	cmd->init = *init;

	rtw89_h2c_pkt_set_hdr(rtwdev, h2c_skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, BTFC_SET,
			      SET_DRV_INFO, 0, 0,
			      cmd_len);

	err = rtw89_h2c_tx(rtwdev, h2c_skb, false);
	if (err) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		dev_kfree_skb_any(h2c_skb);
	}

	return err;
}
5666
#define PORT_DATA_OFFSET 4
#define H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN 12
/* 4 bytes of role map/counters + 12 bytes per active role + coex header. */
#define H2C_LEN_CXDRVINFO_ROLE_SIZE(max_role_num) \
	(4 + 12 * (max_role_num) + H2C_LEN_CXDRVHDR)

/* Send the WL role info (legacy v0 layout) to BT-coex firmware.
 *
 * Serializes the role bitmap plus one fixed-size record per port from
 * the driver's coex state into the SET_DRV_INFO outsrc command with
 * sub-type @type.  @offset stays 0 in this layout (the per-record
 * offset macros are shared with the larger v1+ layouts).
 *
 * Returns 0 on success or a negative error code.
 */
int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev, u8 type)
{
	struct rtw89_btc *btc = &rtwdev->btc;
	const struct rtw89_btc_ver *ver = btc->ver;
	struct rtw89_btc_wl_info *wl = &btc->cx.wl;
	struct rtw89_btc_wl_role_info *role_info = &wl->role_info;
	struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
	struct rtw89_btc_wl_active_role *active = role_info->active_role;
	struct sk_buff *skb;
	u32 len;
	u8 offset = 0;
	u8 *cmd;
	int ret;
	int i;

	/* Payload size scales with the firmware's supported role count. */
	len = H2C_LEN_CXDRVINFO_ROLE_SIZE(ver->max_role_num);

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	cmd = skb->data;

	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
	RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);

	RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
	RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);

	/* One bit per interface type currently present. */
	RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none);
	RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station);
	RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap);
	RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap);
	RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc);
	RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master);
	RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh);
	RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter);
	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device);
	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc);
	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
	RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);

	/* Per-port active role records, indexed by (i, offset). */
	for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
		RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, BTFC_SET,
			      SET_DRV_INFO, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
5749
5750 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(max_role_num) \
5751 (4 + 16 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR)
5752
/* Push the WLAN role info (v1 layout) to the BT-coex firmware via the
 * SET_DRV_INFO H2C command.
 *
 * v1 differs from v0 in three ways: each per-port record is shifted by
 * PORT_DATA_OFFSET, it adds a NOA-duration field per port, and a trailing
 * DBCC block (H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN bytes) carries multi-role
 * and dual-band concurrency state.
 *
 * Return: 0 on success, -ENOMEM on skb allocation failure, or the error
 * from rtw89_h2c_tx().
 */
int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev, u8 type)
{
	struct rtw89_btc *btc = &rtwdev->btc;
	const struct rtw89_btc_ver *ver = btc->ver;
	struct rtw89_btc_wl_info *wl = &btc->cx.wl;
	struct rtw89_btc_wl_role_info_v1 *role_info = &wl->role_info_v1;
	struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
	struct rtw89_btc_wl_active_role_v1 *active = role_info->active_role_v1;
	struct sk_buff *skb;
	u32 len;
	u8 *cmd, offset;
	int ret;
	int i;

	len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(ver->max_role_num);

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	cmd = skb->data;

	/* CX header: sub-type plus payload length (header excluded) */
	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
	RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);

	RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
	RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);

	/* one bit per interface type present in the role bitmap */
	RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none);
	RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station);
	RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap);
	RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap);
	RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc);
	RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master);
	RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh);
	RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter);
	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device);
	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc);
	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
	RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);

	/* v1 per-port records sit PORT_DATA_OFFSET bytes further than in v0 */
	offset = PORT_DATA_OFFSET;
	for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
		RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR(cmd, active->noa_duration, i, offset);
	}

	/* DBCC block lives at the tail of the payload */
	offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN;
	RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset);
	RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset);
	RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset);
	RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset);
	RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset);
	RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, BTFC_SET,
			      SET_DRV_INFO, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
5839
5840 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(max_role_num) \
5841 (4 + 8 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR)
5842
/* Push the WLAN role info (v2 layout) to the BT-coex firmware via the
 * SET_DRV_INFO H2C command.
 *
 * v2 keeps the v1 structure (PORT_DATA_OFFSET shift, trailing DBCC block)
 * but uses smaller 8-byte per-port records written with the *_V2 field
 * macros and drops the per-port TX/RX level and rate fields.
 *
 * Return: 0 on success, -ENOMEM on skb allocation failure, or the error
 * from rtw89_h2c_tx().
 */
int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev, u8 type)
{
	struct rtw89_btc *btc = &rtwdev->btc;
	const struct rtw89_btc_ver *ver = btc->ver;
	struct rtw89_btc_wl_info *wl = &btc->cx.wl;
	struct rtw89_btc_wl_role_info_v2 *role_info = &wl->role_info_v2;
	struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
	struct rtw89_btc_wl_active_role_v2 *active = role_info->active_role_v2;
	struct sk_buff *skb;
	u32 len;
	u8 *cmd, offset;
	int ret;
	int i;

	len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(ver->max_role_num);

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	cmd = skb->data;

	/* CX header: sub-type plus payload length (header excluded) */
	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
	RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);

	RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
	RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);

	/* one bit per interface type present in the role bitmap */
	RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none);
	RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station);
	RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap);
	RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap);
	RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc);
	RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master);
	RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh);
	RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter);
	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device);
	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc);
	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
	RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);

	/* per-port records follow the bitmap, shifted by PORT_DATA_OFFSET */
	offset = PORT_DATA_OFFSET;
	for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
		RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED_V2(cmd, active->connected, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_PID_V2(cmd, active->pid, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_PHY_V2(cmd, active->phy, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_NOA_V2(cmd, active->noa, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_BAND_V2(cmd, active->band, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS_V2(cmd, active->client_ps, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_BW_V2(cmd, active->bw, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_ROLE_V2(cmd, active->role, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_CH_V2(cmd, active->ch, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR_V2(cmd, active->noa_duration, i, offset);
	}

	/* DBCC block lives at the tail of the payload */
	offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN;
	RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset);
	RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset);
	RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset);
	RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset);
	RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset);
	RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, BTFC_SET,
			      SET_DRV_INFO, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
5925
/* Push the WLAN role info (v7 layout) to the BT-coex firmware via the
 * SET_DRV_INFO H2C command.
 *
 * Unlike v0-v2, v7 uses a fixed-size packed struct: the u8 fields are
 * copied verbatim and the u32 fields are converted to little endian.
 *
 * Return: 0 on success, -ENOMEM on skb allocation failure, or the error
 * from rtw89_h2c_tx().
 */
int rtw89_fw_h2c_cxdrv_role_v7(struct rtw89_dev *rtwdev, u8 type)
{
	struct rtw89_btc *btc = &rtwdev->btc;
	struct rtw89_btc_wl_role_info_v7 *role = &btc->cx.wl.role_info_v7;
	struct rtw89_h2c_cxrole_v7 *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		/* was "cxdrv_ctrl": copy-paste from the ctrl variant */
		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role_v7\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_cxrole_v7 *)skb->data;

	h2c->hdr.type = type;
	h2c->hdr.ver = btc->ver->fwlrole;
	h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7;
	/* leading u8 portion of the role info maps 1:1 onto the wire */
	memcpy(&h2c->_u8, role, sizeof(h2c->_u8));
	h2c->_u32.role_map = cpu_to_le32(role->role_map);
	h2c->_u32.mrole_type = cpu_to_le32(role->mrole_type);
	h2c->_u32.mrole_noa_duration = cpu_to_le32(role->mrole_noa_duration);
	h2c->_u32.dbcc_en = cpu_to_le32(role->dbcc_en);
	h2c->_u32.dbcc_chg = cpu_to_le32(role->dbcc_chg);
	h2c->_u32.dbcc_2g_phy = cpu_to_le32(role->dbcc_2g_phy);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, BTFC_SET,
			      SET_DRV_INFO, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
5971
/* Push the WLAN role info (v8 layout) to the BT-coex firmware via the
 * SET_DRV_INFO H2C command.
 *
 * Same packed-struct scheme as v7, but the v8 u32 tail carries only the
 * role map, mrole type and mrole NOA duration (no DBCC fields).
 *
 * Return: 0 on success, -ENOMEM on skb allocation failure, or the error
 * from rtw89_h2c_tx().
 */
int rtw89_fw_h2c_cxdrv_role_v8(struct rtw89_dev *rtwdev, u8 type)
{
	struct rtw89_btc *btc = &rtwdev->btc;
	struct rtw89_btc_wl_role_info_v8 *role = &btc->cx.wl.role_info_v8;
	struct rtw89_h2c_cxrole_v8 *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		/* was "cxdrv_ctrl": copy-paste from the ctrl variant */
		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role_v8\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_cxrole_v8 *)skb->data;

	h2c->hdr.type = type;
	h2c->hdr.ver = btc->ver->fwlrole;
	h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7;
	/* leading u8 portion of the role info maps 1:1 onto the wire */
	memcpy(&h2c->_u8, role, sizeof(h2c->_u8));
	h2c->_u32.role_map = cpu_to_le32(role->role_map);
	h2c->_u32.mrole_type = cpu_to_le32(role->mrole_type);
	h2c->_u32.mrole_noa_duration = cpu_to_le32(role->mrole_noa_duration);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, BTFC_SET,
			      SET_DRV_INFO, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
6014
/* Send the outsource-set (OSI) coex info to firmware via SET_DRV_INFO.
 *
 * The whole rtw89_btc_fbtc_outsrc_set_info is copied into the packed H2C
 * struct as-is after filling the v7-style CX header.
 *
 * Return: 0 on success, -ENOMEM on skb allocation failure, or the error
 * from rtw89_h2c_tx().
 */
int rtw89_fw_h2c_cxdrv_osi_info(struct rtw89_dev *rtwdev, u8 type)
{
	struct rtw89_btc *btc = &rtwdev->btc;
	struct rtw89_btc_fbtc_outsrc_set_info *ost_info = &btc->dm.ost_info;
	struct rtw89_h2c_cxosi *h2c;
	u32 h2c_len = sizeof(*h2c);
	struct sk_buff *skb;
	int err;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, h2c_len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_osi\n");
		return -ENOMEM;
	}

	h2c = (struct rtw89_h2c_cxosi *)skb_put(skb, h2c_len);
	h2c->hdr.type = type;
	h2c->hdr.ver = btc->ver->fcxosi;
	h2c->hdr.len = h2c_len - H2C_LEN_CXDRVHDR_V7;
	h2c->osi = *ost_info;

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, BTFC_SET,
			      SET_DRV_INFO, 0, 0,
			      h2c_len);

	err = rtw89_h2c_tx(rtwdev, skb, false);
	if (err) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		dev_kfree_skb_any(skb);
		return err;
	}

	return 0;
}
6054
6055 #define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR)
/* Send the coex control knobs (manual mode, ignore-BT, always-freerun,
 * and, for fcxctrl version 0, the trace step) to firmware via
 * SET_DRV_INFO.
 *
 * Return: 0 on success, -ENOMEM on skb allocation failure, or the error
 * from rtw89_h2c_tx().
 */
int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev, u8 type)
{
	struct rtw89_btc *btc = &rtwdev->btc;
	const struct rtw89_btc_ver *btc_ver = btc->ver;
	struct rtw89_btc_ctrl *ctrl_info = &btc->ctrl.ctrl;
	struct sk_buff *skb;
	u8 *payload;
	int err;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_CTRL);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n");
		return -ENOMEM;
	}

	payload = skb_put(skb, H2C_LEN_CXDRVINFO_CTRL);

	RTW89_SET_FWCMD_CXHDR_TYPE(payload, type);
	RTW89_SET_FWCMD_CXHDR_LEN(payload, H2C_LEN_CXDRVINFO_CTRL - H2C_LEN_CXDRVHDR);

	RTW89_SET_FWCMD_CXCTRL_MANUAL(payload, ctrl_info->manual);
	RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(payload, ctrl_info->igno_bt);
	RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(payload, ctrl_info->always_freerun);
	/* the trace-step field only exists in the version-0 ctrl layout */
	if (btc_ver->fcxctrl == 0)
		RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(payload, ctrl_info->trace_step);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, BTFC_SET,
			      SET_DRV_INFO, 0, 0,
			      H2C_LEN_CXDRVINFO_CTRL);

	err = rtw89_h2c_tx(rtwdev, skb, false);
	if (err) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		dev_kfree_skb_any(skb);
		return err;
	}

	return 0;
}
6099
/* Send the v7-layout coex control block to firmware via SET_DRV_INFO.
 * The whole rtw89_btc_ctrl_v7 struct is copied into the H2C payload
 * after the v7-style CX header.
 *
 * Return: 0 on success, -ENOMEM on skb allocation failure, or the error
 * from rtw89_h2c_tx().
 */
int rtw89_fw_h2c_cxdrv_ctrl_v7(struct rtw89_dev *rtwdev, u8 type)
{
	struct rtw89_btc *btc = &rtwdev->btc;
	struct rtw89_btc_ctrl_v7 *ctrl_info = &btc->ctrl.ctrl_v7;
	struct rtw89_h2c_cxctrl_v7 *h2c;
	u32 h2c_len = sizeof(*h2c);
	struct sk_buff *skb;
	int err;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, h2c_len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl_v7\n");
		return -ENOMEM;
	}

	h2c = (struct rtw89_h2c_cxctrl_v7 *)skb_put(skb, h2c_len);
	h2c->hdr.type = type;
	h2c->hdr.ver = btc->ver->fcxctrl;
	h2c->hdr.len = sizeof(*h2c) - H2C_LEN_CXDRVHDR_V7;
	h2c->ctrl = *ctrl_info;

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, BTFC_SET,
			      SET_DRV_INFO, 0, 0, h2c_len);

	err = rtw89_h2c_tx(rtwdev, skb, false);
	if (err) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		dev_kfree_skb_any(skb);
		return err;
	}

	return 0;
}
6138
6139 #define H2C_LEN_CXDRVINFO_TRX (28 + H2C_LEN_CXDRVHDR)
/* Send the TX/RX traffic snapshot (levels, RSSI, power/gain, rates,
 * throughput, error ratio) from btc->dm.trx_info to firmware via
 * SET_DRV_INFO.
 *
 * Return: 0 on success, -ENOMEM on skb allocation failure, or the error
 * from rtw89_h2c_tx().
 */
int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev, u8 type)
{
	struct rtw89_btc *btc = &rtwdev->btc;
	struct rtw89_btc_trx_info *info = &btc->dm.trx_info;
	struct sk_buff *skb;
	u8 *payload;
	int err;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_TRX);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_trx\n");
		return -ENOMEM;
	}

	payload = skb_put(skb, H2C_LEN_CXDRVINFO_TRX);

	RTW89_SET_FWCMD_CXHDR_TYPE(payload, type);
	RTW89_SET_FWCMD_CXHDR_LEN(payload, H2C_LEN_CXDRVINFO_TRX - H2C_LEN_CXDRVHDR);

	RTW89_SET_FWCMD_CXTRX_TXLV(payload, info->tx_lvl);
	RTW89_SET_FWCMD_CXTRX_RXLV(payload, info->rx_lvl);
	RTW89_SET_FWCMD_CXTRX_WLRSSI(payload, info->wl_rssi);
	RTW89_SET_FWCMD_CXTRX_BTRSSI(payload, info->bt_rssi);
	RTW89_SET_FWCMD_CXTRX_TXPWR(payload, info->tx_power);
	RTW89_SET_FWCMD_CXTRX_RXGAIN(payload, info->rx_gain);
	RTW89_SET_FWCMD_CXTRX_BTTXPWR(payload, info->bt_tx_power);
	RTW89_SET_FWCMD_CXTRX_BTRXGAIN(payload, info->bt_rx_gain);
	RTW89_SET_FWCMD_CXTRX_CN(payload, info->cn);
	RTW89_SET_FWCMD_CXTRX_NHM(payload, info->nhm);
	RTW89_SET_FWCMD_CXTRX_BTPROFILE(payload, info->bt_profile);
	RTW89_SET_FWCMD_CXTRX_RSVD2(payload, info->rsvd2);
	RTW89_SET_FWCMD_CXTRX_TXRATE(payload, info->tx_rate);
	RTW89_SET_FWCMD_CXTRX_RXRATE(payload, info->rx_rate);
	RTW89_SET_FWCMD_CXTRX_TXTP(payload, info->tx_tp);
	RTW89_SET_FWCMD_CXTRX_RXTP(payload, info->rx_tp);
	RTW89_SET_FWCMD_CXTRX_RXERRRA(payload, info->rx_err_ratio);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, BTFC_SET,
			      SET_DRV_INFO, 0, 0,
			      H2C_LEN_CXDRVINFO_TRX);

	err = rtw89_h2c_tx(rtwdev, skb, false);
	if (err) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		dev_kfree_skb_any(skb);
		return err;
	}

	return 0;
}
6194
6195 #define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR)
/* Notify firmware of an RF-calibration (RFK) state change so BT-coex can
 * yield the medium: state, path/phy maps, band and calibration type are
 * packed into a SET_DRV_INFO H2C.
 *
 * Return: 0 on success, -ENOMEM on skb allocation failure, or the error
 * from rtw89_h2c_tx().
 */
int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev, u8 type)
{
	struct rtw89_btc *btc = &rtwdev->btc;
	struct rtw89_btc_wl_info *wl = &btc->cx.wl;
	struct rtw89_btc_wl_rfk_info *rfk_info = &wl->rfk_info;
	struct sk_buff *skb;
	u8 *cmd;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_RFK);
	if (!skb) {
		/* was "cxdrv_ctrl": copy-paste from the ctrl variant */
		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_rfk\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_LEN_CXDRVINFO_RFK);
	cmd = skb->data;

	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
	RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_RFK - H2C_LEN_CXDRVHDR);

	RTW89_SET_FWCMD_CXRFK_STATE(cmd, rfk_info->state);
	RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, rfk_info->path_map);
	RTW89_SET_FWCMD_CXRFK_PHY_MAP(cmd, rfk_info->phy_map);
	RTW89_SET_FWCMD_CXRFK_BAND(cmd, rfk_info->band);
	RTW89_SET_FWCMD_CXRFK_TYPE(cmd, rfk_info->type);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, BTFC_SET,
			      SET_DRV_INFO, 0, 0,
			      H2C_LEN_CXDRVINFO_RFK);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
6239
6240 #define H2C_LEN_PKT_OFLD 4
/* Ask firmware to delete offloaded packet @id, wait for the matching
 * C2H completion, and on success release the id from the driver's
 * pkt_offload bitmap.
 *
 * Return: 0 on success, -ENOMEM on skb allocation failure, or the
 * negative error from rtw89_h2c_tx_and_wait().
 */
int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id)
{
	struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
	struct sk_buff *skb;
	unsigned int cond;
	u8 *payload;
	int err;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n");
		return -ENOMEM;
	}

	payload = skb_put(skb, H2C_LEN_PKT_OFLD);
	RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(payload, id);
	RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(payload, RTW89_PKT_OFLD_OP_DEL);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_PACKET_OFLD, 1, 1,
			      H2C_LEN_PKT_OFLD);

	cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(id, RTW89_PKT_OFLD_OP_DEL);

	err = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
	if (err >= 0) {
		/* firmware confirmed the delete; the id is free again */
		rtw89_core_release_bit_map(rtwdev->pkt_offload, id);
		return 0;
	}

	rtw89_debug(rtwdev, RTW89_DBG_FW,
		    "failed to del pkt ofld: id %d, ret %d\n",
		    id, err);
	return err;
}
6278
/* Offload @skb_ofld's contents to firmware as a template packet.
 * Reserves a free id from the pkt_offload bitmap, sends the ADD command
 * followed by the raw packet bytes, and waits for firmware confirmation.
 * On success the allocated id is stored in *@id; on any failure the id
 * is released again.
 *
 * Return: 0 on success, -ENOSPC if no id is free, -ENOMEM on skb
 * allocation failure, or the negative error from rtw89_h2c_tx_and_wait().
 */
int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
				 struct sk_buff *skb_ofld)
{
	struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
	struct sk_buff *skb;
	unsigned int cond;
	u8 *payload;
	u8 new_id;
	int err;

	new_id = rtw89_core_acquire_bit_map(rtwdev->pkt_offload,
					    RTW89_MAX_PKT_OFLD_NUM);
	if (new_id == RTW89_MAX_PKT_OFLD_NUM)
		return -ENOSPC;

	*id = new_id;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev,
					      H2C_LEN_PKT_OFLD + skb_ofld->len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n");
		rtw89_core_release_bit_map(rtwdev->pkt_offload, new_id);
		return -ENOMEM;
	}

	payload = skb_put(skb, H2C_LEN_PKT_OFLD);
	RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(payload, new_id);
	RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(payload, RTW89_PKT_OFLD_OP_ADD);
	RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(payload, skb_ofld->len);
	/* template bytes follow the fixed command header */
	skb_put_data(skb, skb_ofld->data, skb_ofld->len);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_PACKET_OFLD, 1, 1,
			      H2C_LEN_PKT_OFLD + skb_ofld->len);

	cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(new_id, RTW89_PKT_OFLD_OP_ADD);

	err = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
	if (err < 0) {
		rtw89_debug(rtwdev, RTW89_DBG_FW,
			    "failed to add pkt ofld: id %d, ret %d\n",
			    new_id, err);
		rtw89_core_release_bit_map(rtwdev->pkt_offload, new_id);
		return err;
	}

	return 0;
}
6328
/* Upload the hardware-scan channel list to AX-generation firmware.
 *
 * @ch_num: number of entries on @chan_list; must match the list length,
 *          since skb_len is computed from it and the header advertises it
 * @chan_list: list of struct rtw89_mac_chinfo_ax entries to encode
 *
 * Each channel becomes one fixed-size rtw89_h2c_chinfo_elem with the
 * dwell/channel parameters packed into little-endian words. Waits for the
 * firmware's ADD_CH completion before returning.
 *
 * Return: 0 on success, -ENOMEM on skb allocation failure, or the error
 * from rtw89_h2c_tx_and_wait().
 */
static
int rtw89_fw_h2c_scan_list_offload_ax(struct rtw89_dev *rtwdev, int ch_num,
				      struct list_head *chan_list)
{
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
	struct rtw89_h2c_chinfo_elem *elem;
	struct rtw89_mac_chinfo_ax *ch_info;
	struct rtw89_h2c_chinfo *h2c;
	struct sk_buff *skb;
	unsigned int cond;
	int skb_len;
	int ret;

	/* wire format requires the element struct to stay exactly this size */
	static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE);

	skb_len = struct_size(h2c, elem, ch_num);
	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n");
		return -ENOMEM;
	}
	/* reserve the header now; elements are appended one by one below */
	skb_put(skb, sizeof(*h2c));
	h2c = (struct rtw89_h2c_chinfo *)skb->data;

	h2c->ch_num = ch_num;
	h2c->elem_size = sizeof(*elem) / 4; /* in unit of 4 bytes */

	list_for_each_entry(ch_info, chan_list, list) {
		elem = (struct rtw89_h2c_chinfo_elem *)skb_put(skb, sizeof(*elem));

		elem->w0 = le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_W0_PERIOD) |
			   le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_W0_DWELL) |
			   le32_encode_bits(ch_info->central_ch, RTW89_H2C_CHINFO_W0_CENTER_CH) |
			   le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_W0_PRI_CH);

		elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_W1_BW) |
			   le32_encode_bits(ch_info->notify_action, RTW89_H2C_CHINFO_W1_ACTION) |
			   le32_encode_bits(ch_info->num_pkt, RTW89_H2C_CHINFO_W1_NUM_PKT) |
			   le32_encode_bits(ch_info->tx_pkt, RTW89_H2C_CHINFO_W1_TX) |
			   le32_encode_bits(ch_info->pause_data, RTW89_H2C_CHINFO_W1_PAUSE_DATA) |
			   le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_W1_BAND) |
			   le32_encode_bits(ch_info->probe_id, RTW89_H2C_CHINFO_W1_PKT_ID) |
			   le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_W1_DFS) |
			   le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_W1_TX_NULL) |
			   le32_encode_bits(ch_info->rand_seq_num, RTW89_H2C_CHINFO_W1_RANDOM);

		/* second-operating-channel scans also tag the TX macid */
		if (scan_info->extra_op.set)
			elem->w1 |= le32_encode_bits(ch_info->macid_tx,
						     RTW89_H2C_CHINFO_W1_MACID_TX);

		/* up to eight offloaded probe-packet ids per channel */
		elem->w2 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_W2_PKT0) |
			   le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_W2_PKT1) |
			   le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_W2_PKT2) |
			   le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_W2_PKT3);

		elem->w3 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_W3_PKT4) |
			   le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_W3_PKT5) |
			   le32_encode_bits(ch_info->pkt_id[6], RTW89_H2C_CHINFO_W3_PKT6) |
			   le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_W3_PKT7);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len);

	cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH;

	ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n");
		return ret;
	}

	return 0;
}
6405
/* Upload the hardware-scan channel list to BE-generation firmware.
 *
 * Like the AX variant, but uses the larger BE element layout (early-leave
 * criteria, checkpoint timer, FW-probe SSID/BSSID bitmaps) and carries the
 * target mac_idx of @rtwvif_link in the header. The location of the
 * per-channel period depends on the firmware interface version: with the
 * CH_INFO_BE_V0 feature it lives in word 0, otherwise in word 7.
 *
 * Return: 0 on success, -ENOMEM on skb allocation failure, or the error
 * from rtw89_h2c_tx_and_wait().
 */
static
int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num,
				      struct list_head *chan_list,
				      struct rtw89_vif_link *rtwvif_link)
{
	struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
	struct rtw89_h2c_chinfo_elem_be *elem;
	struct rtw89_mac_chinfo_be *ch_info;
	struct rtw89_h2c_chinfo_be *h2c;
	struct sk_buff *skb;
	unsigned int cond;
	u8 ver = U8_MAX;	/* U8_MAX = "not V0"; lowered to 0 below if the feature bit is set */
	int skb_len;
	int ret;

	/* wire format requires the element struct to stay exactly this size */
	static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE_BE);

	skb_len = struct_size(h2c, elem, ch_num);
	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n");
		return -ENOMEM;
	}

	if (RTW89_CHK_FW_FEATURE(CH_INFO_BE_V0, &rtwdev->fw))
		ver = 0;

	/* reserve the header now; elements are appended one by one below */
	skb_put(skb, sizeof(*h2c));
	h2c = (struct rtw89_h2c_chinfo_be *)skb->data;

	h2c->ch_num = ch_num;
	h2c->elem_size = sizeof(*elem) / 4; /* in unit of 4 bytes */
	h2c->arg = u8_encode_bits(rtwvif_link->mac_idx,
				  RTW89_H2C_CHINFO_ARG_MAC_IDX_MASK);

	list_for_each_entry(ch_info, chan_list, list) {
		elem = (struct rtw89_h2c_chinfo_elem_be *)skb_put(skb, sizeof(*elem));

		elem->w0 = le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_BE_W0_DWELL) |
			   le32_encode_bits(ch_info->central_ch,
					    RTW89_H2C_CHINFO_BE_W0_CENTER_CH) |
			   le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_BE_W0_PRI_CH);

		elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_BE_W1_BW) |
			   le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_BE_W1_CH_BAND) |
			   le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_BE_W1_DFS) |
			   le32_encode_bits(ch_info->pause_data,
					    RTW89_H2C_CHINFO_BE_W1_PAUSE_DATA) |
			   le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_BE_W1_TX_NULL) |
			   le32_encode_bits(ch_info->rand_seq_num,
					    RTW89_H2C_CHINFO_BE_W1_RANDOM) |
			   le32_encode_bits(ch_info->notify_action,
					    RTW89_H2C_CHINFO_BE_W1_NOTIFY) |
			   /* probe_id 0xff means "no probe template" */
			   le32_encode_bits(ch_info->probe_id != 0xff ? 1 : 0,
					    RTW89_H2C_CHINFO_BE_W1_PROBE) |
			   le32_encode_bits(ch_info->leave_crit,
					    RTW89_H2C_CHINFO_BE_W1_EARLY_LEAVE_CRIT) |
			   le32_encode_bits(ch_info->chkpt_timer,
					    RTW89_H2C_CHINFO_BE_W1_CHKPT_TIMER);

		elem->w2 = le32_encode_bits(ch_info->leave_time,
					    RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TIME) |
			   le32_encode_bits(ch_info->leave_th,
					    RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TH) |
			   le32_encode_bits(ch_info->tx_pkt_ctrl,
					    RTW89_H2C_CHINFO_BE_W2_TX_PKT_CTRL);

		/* up to eight offloaded probe-packet ids per channel */
		elem->w3 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_BE_W3_PKT0) |
			   le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_BE_W3_PKT1) |
			   le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_BE_W3_PKT2) |
			   le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_BE_W3_PKT3);

		elem->w4 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_BE_W4_PKT4) |
			   le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_BE_W4_PKT5) |
			   le32_encode_bits(ch_info->pkt_id[6], RTW89_H2C_CHINFO_BE_W4_PKT6) |
			   le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_BE_W4_PKT7);

		elem->w5 = le32_encode_bits(ch_info->sw_def, RTW89_H2C_CHINFO_BE_W5_SW_DEF) |
			   le32_encode_bits(ch_info->fw_probe0_ssids,
					    RTW89_H2C_CHINFO_BE_W5_FW_PROBE0_SSIDS);

		elem->w6 = le32_encode_bits(ch_info->fw_probe0_shortssids,
					    RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_SHORTSSIDS) |
			   le32_encode_bits(ch_info->fw_probe0_bssids,
					    RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_BSSIDS);
		/* period moved from W0 (v0 interface) to W7 (later interfaces) */
		if (ver == 0)
			elem->w0 |=
				le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_BE_W0_PERIOD);
		else
			elem->w7 = le32_encode_bits(ch_info->period,
						    RTW89_H2C_CHINFO_BE_W7_PERIOD_V1);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len);

	cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH;

	ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n");
		return ret;
	}

	return 0;
}
6513
/* Start or stop a firmware-offloaded scan on AX-generation chips.
 *
 * @option: scan parameters; option->enable selects start vs. stop,
 *          option->delay (ms) requests a TSF-scheduled start,
 *          option->target_ch_mode pins the operating channel info
 * @rtwvif_link: link the scan runs on (mac_id/port/mac_idx go on the wire)
 * @wowlan: present for interface symmetry; not used by the AX encoding
 *
 * Waits for the firmware's start/stop completion event before returning.
 *
 * Return: 0 on success, -ENOMEM on skb allocation failure, or the error
 * from rtw89_h2c_tx_and_wait().
 */
int rtw89_fw_h2c_scan_offload_ax(struct rtw89_dev *rtwdev,
				 struct rtw89_scan_option *option,
				 struct rtw89_vif_link *rtwvif_link,
				 bool wowlan)
{
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
	struct rtw89_chan *op = &rtwdev->scan_info.op_chan;
	enum rtw89_scan_mode scan_mode = RTW89_SCAN_IMMEDIATE;
	struct rtw89_h2c_scanofld *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	unsigned int cond;
	u64 tsf = 0;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_scanofld *)skb->data;

	/* delayed start: schedule at current port TSF + delay; fall back to
	 * an immediate scan if the TSF cannot be read
	 */
	if (option->delay) {
		ret = rtw89_mac_port_get_tsf(rtwdev, rtwvif_link, &tsf);
		if (ret) {
			rtw89_warn(rtwdev, "NLO failed to get port tsf: %d\n", ret);
			scan_mode = RTW89_SCAN_IMMEDIATE;
		} else {
			scan_mode = RTW89_SCAN_DELAY;
			tsf += (u64)option->delay * 1000;	/* ms -> us (TSF units) */
		}
	}

	h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_SCANOFLD_W0_MACID) |
		  le32_encode_bits(rtwvif_link->port, RTW89_H2C_SCANOFLD_W0_PORT_ID) |
		  le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_SCANOFLD_W0_BAND) |
		  le32_encode_bits(option->enable, RTW89_H2C_SCANOFLD_W0_OPERATION);

	h2c->w1 = le32_encode_bits(true, RTW89_H2C_SCANOFLD_W1_NOTIFY_END) |
		  le32_encode_bits(option->target_ch_mode,
				   RTW89_H2C_SCANOFLD_W1_TARGET_CH_MODE) |
		  le32_encode_bits(scan_mode, RTW89_H2C_SCANOFLD_W1_START_MODE) |
		  le32_encode_bits(option->repeat, RTW89_H2C_SCANOFLD_W1_SCAN_TYPE);

	h2c->w2 = le32_encode_bits(option->norm_pd, RTW89_H2C_SCANOFLD_W2_NORM_PD) |
		  le32_encode_bits(option->slow_pd, RTW89_H2C_SCANOFLD_W2_SLOW_PD);

	/* tell firmware which channel to return to between scan hops */
	if (option->target_ch_mode) {
		h2c->w1 |= le32_encode_bits(op->band_width,
					    RTW89_H2C_SCANOFLD_W1_TARGET_CH_BW) |
			   le32_encode_bits(op->primary_channel,
					    RTW89_H2C_SCANOFLD_W1_TARGET_PRI_CH) |
			   le32_encode_bits(op->channel,
					    RTW89_H2C_SCANOFLD_W1_TARGET_CENTRAL_CH);
		h2c->w0 |= le32_encode_bits(op->band_type,
					    RTW89_H2C_SCANOFLD_W0_TARGET_CH_BAND);
	}

	/* tsf stays 0 for immediate scans */
	h2c->tsf_high = le32_encode_bits(upper_32_bits(tsf),
					 RTW89_H2C_SCANOFLD_W3_TSF_HIGH);
	h2c->tsf_low = le32_encode_bits(lower_32_bits(tsf),
					RTW89_H2C_SCANOFLD_W4_TSF_LOW);

	if (scan_info->extra_op.set)
		h2c->w6 = le32_encode_bits(scan_info->extra_op.macid,
					   RTW89_H2C_SCANOFLD_W6_SECOND_MACID);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_SCANOFLD, 1, 1,
			      len);

	if (option->enable)
		cond = RTW89_SCANOFLD_WAIT_COND_START;
	else
		cond = RTW89_SCANOFLD_WAIT_COND_STOP;

	ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan ofld\n");
		return ret;
	}

	return 0;
}
6601
rtw89_scan_get_6g_disabled_chan(struct rtw89_dev * rtwdev,struct rtw89_scan_option * option)6602 static void rtw89_scan_get_6g_disabled_chan(struct rtw89_dev *rtwdev,
6603 struct rtw89_scan_option *option)
6604 {
6605 struct ieee80211_supported_band *sband;
6606 struct ieee80211_channel *chan;
6607 u8 i, idx;
6608
6609 sband = rtwdev->hw->wiphy->bands[NL80211_BAND_6GHZ];
6610 if (!sband) {
6611 option->prohib_chan = U64_MAX;
6612 return;
6613 }
6614
6615 for (i = 0; i < sband->n_channels; i++) {
6616 chan = &sband->channels[i];
6617 if (chan->flags & IEEE80211_CHAN_DISABLED) {
6618 idx = (chan->hw_value - 1) / 4;
6619 option->prohib_chan |= BIT(idx);
6620 }
6621 }
6622 }
6623
/* Build and send the H2C scan-offload command for BE-generation chips.
 *
 * The command is a fixed config header (whose size depends on the
 * SCAN_OFFLOAD_BE_V0 firmware feature) followed by two flexible arrays:
 * option->num_macc_role MACC-role entries and option->num_opch operating
 * channel entries.  Waits for the firmware start/stop completion event.
 *
 * Returns 0 on success or a negative error code.
 */
int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
				 struct rtw89_scan_option *option,
				 struct rtw89_vif_link *rtwvif_link,
				 bool wowlan)
{
	struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	const struct rtw89_hw_scan_extra_op *ext = &scan_info->extra_op;
	struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
	struct cfg80211_scan_request *req = rtwvif->scan_req;
	struct rtw89_h2c_scanofld_be_macc_role *macc_role;
	struct rtw89_hw_scan_extra_op scan_op[2] = {};
	struct rtw89_chan *op = &scan_info->op_chan;
	struct rtw89_h2c_scanofld_be_opch *opch;
	struct rtw89_pktofld_info *pkt_info;
	struct rtw89_h2c_scanofld_be *h2c;
	struct ieee80211_vif *vif;
	struct sk_buff *skb;
	u8 macc_role_size = sizeof(*macc_role) * option->num_macc_role;
	u8 opch_size = sizeof(*opch) * option->num_opch;
	enum rtw89_scan_be_opmode opmode;
	u8 probe_id[NUM_NL80211_BANDS];
	u8 scan_offload_ver = U8_MAX;
	u8 cfg_len = sizeof(*h2c);
	unsigned int cond;
	u8 ver = U8_MAX;
	u8 policy_val;
	void *ptr;
	u8 txnull;
	u8 txbcn;
	int ret;
	u32 len;
	u8 i;

	/* the opch flexible array and scan_op[] only hold
	 * RTW89_MAX_OP_NUM_BE entries
	 */
	if (option->num_opch > RTW89_MAX_OP_NUM_BE) {
		rtw89_err(rtwdev, "num of scan OP chan %d over limit\n", option->num_opch);
		return -ENOENT;
	}

	rtw89_scan_get_6g_disabled_chan(rtwdev, option);

	/* v0 firmware uses a shorter config header ending at w8 */
	if (RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD_BE_V0, &rtwdev->fw)) {
		cfg_len = offsetofend(typeof(*h2c), w8);
		scan_offload_ver = 0;
	}

	len = cfg_len + macc_role_size + opch_size;
	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_scanofld_be *)skb->data;
	/* ptr walks the payload: config header, then flexible members */
	ptr = skb->data;

	memset(probe_id, RTW89_SCANOFLD_PKT_NONE, sizeof(probe_id));

	if (RTW89_CHK_FW_FEATURE(CH_INFO_BE_V0, &rtwdev->fw))
		ver = 0;

	if (!wowlan) {
		list_for_each_entry(pkt_info, &scan_info->pkt_list[NL80211_BAND_6GHZ], list) {
			if (pkt_info->wildcard_6ghz) {
				/* Provide wildcard as template */
				probe_id[NL80211_BAND_6GHZ] = pkt_info->id;
				break;
			}
		}
	}

	h2c->w0 = le32_encode_bits(option->operation, RTW89_H2C_SCANOFLD_BE_W0_OP) |
		  le32_encode_bits(option->scan_mode,
				   RTW89_H2C_SCANOFLD_BE_W0_SCAN_MODE) |
		  le32_encode_bits(option->repeat, RTW89_H2C_SCANOFLD_BE_W0_REPEAT) |
		  le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_NOTIFY_END) |
		  le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_LEARN_CH) |
		  le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_SCANOFLD_BE_W0_MACID) |
		  le32_encode_bits(rtwvif_link->port, RTW89_H2C_SCANOFLD_BE_W0_PORT) |
		  le32_encode_bits(option->band, RTW89_H2C_SCANOFLD_BE_W0_BAND);

	h2c->w1 = le32_encode_bits(option->num_macc_role, RTW89_H2C_SCANOFLD_BE_W1_NUM_MACC_ROLE) |
		  le32_encode_bits(option->num_opch, RTW89_H2C_SCANOFLD_BE_W1_NUM_OP) |
		  le32_encode_bits(option->norm_pd, RTW89_H2C_SCANOFLD_BE_W1_NORM_PD);

	h2c->w2 = le32_encode_bits(option->slow_pd, RTW89_H2C_SCANOFLD_BE_W2_SLOW_PD) |
		  le32_encode_bits(option->norm_cy, RTW89_H2C_SCANOFLD_BE_W2_NORM_CY) |
		  le32_encode_bits(option->opch_end, RTW89_H2C_SCANOFLD_BE_W2_OPCH_END);

	h2c->w3 = le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_SSID) |
		  le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_SHORT_SSID) |
		  le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_BSSID) |
		  le32_encode_bits(probe_id[NL80211_BAND_2GHZ], RTW89_H2C_SCANOFLD_BE_W3_PROBEID);

	h2c->w4 = le32_encode_bits(probe_id[NL80211_BAND_5GHZ],
				   RTW89_H2C_SCANOFLD_BE_W4_PROBE_5G) |
		  le32_encode_bits(probe_id[NL80211_BAND_6GHZ],
				   RTW89_H2C_SCANOFLD_BE_W4_PROBE_6G) |
		  le32_encode_bits(option->delay / 1000, RTW89_H2C_SCANOFLD_BE_W4_DELAY_START);

	h2c->w5 = le32_encode_bits(option->mlo_mode, RTW89_H2C_SCANOFLD_BE_W5_MLO_MODE);

	/* 64-bit prohibited-channel bitmap is split over two words */
	h2c->w6 = le32_encode_bits(option->prohib_chan,
				   RTW89_H2C_SCANOFLD_BE_W6_CHAN_PROHIB_LOW);
	h2c->w7 = le32_encode_bits(option->prohib_chan >> 32,
				   RTW89_H2C_SCANOFLD_BE_W7_CHAN_PROHIB_HIGH);
	/* honor cfg80211's no-CCK request by forcing OFDM6 probe rates */
	if (!wowlan && req->no_cck) {
		h2c->w0 |= le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_PROBE_WITH_RATE);
		h2c->w8 = le32_encode_bits(RTW89_HW_RATE_OFDM6,
					   RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_2GHZ) |
			  le32_encode_bits(RTW89_HW_RATE_OFDM6,
					   RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_5GHZ) |
			  le32_encode_bits(RTW89_HW_RATE_OFDM6,
					   RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_6GHZ);
	}

	/* v0 header has no w9; skip straight to the flexible members */
	if (scan_offload_ver == 0)
		goto flex_member;

	/* tell firmware the per-entry sizes (in 32-bit words) */
	h2c->w9 = le32_encode_bits(sizeof(*h2c) / sizeof(h2c->w0),
				   RTW89_H2C_SCANOFLD_BE_W9_SIZE_CFG) |
		  le32_encode_bits(sizeof(*macc_role) / sizeof(macc_role->w0),
				   RTW89_H2C_SCANOFLD_BE_W9_SIZE_MACC) |
		  le32_encode_bits(sizeof(*opch) / sizeof(opch->w0),
				   RTW89_H2C_SCANOFLD_BE_W9_SIZE_OP);

flex_member:
	ptr += cfg_len;

	/* MACC-role entries are currently placeholders (all zero) */
	for (i = 0; i < option->num_macc_role; i++) {
		macc_role = ptr;
		macc_role->w0 =
			le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_BAND) |
			le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_PORT) |
			le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_MACID) |
			le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_OPCH_END);
		ptr += sizeof(*macc_role);
	}

	/* opch entry 0 describes this link's operating channel; entry 1
	 * (if present) describes the extra-op link
	 */
	for (i = 0; i < option->num_opch; i++) {
		struct rtw89_vif_link *rtwvif_link_op;
		bool is_ap;

		switch (i) {
		case 0:
			scan_op[0].macid = rtwvif_link->mac_id;
			scan_op[0].port = rtwvif_link->port;
			scan_op[0].chan = *op;
			rtwvif_link_op = rtwvif_link;
			break;
		case 1:
			scan_op[1] = *ext;
			rtwvif_link_op = ext->rtwvif_link;
			break;
		}

		vif = rtwvif_to_vif(rtwvif_link_op->rtwvif);
		is_ap = vif->type == NL80211_IFTYPE_AP;
		/* non-AP links with a known BSSID send NULL-data to the AP
		 * around channel switches
		 */
		txnull = !is_zero_ether_addr(rtwvif_link_op->bssid) &&
			 vif->type != NL80211_IFTYPE_AP;
		opmode = is_ap ? RTW89_SCAN_OPMODE_TBTT : RTW89_SCAN_OPMODE_INTV;
		policy_val = is_ap ? 2 : RTW89_OFF_CHAN_TIME / 10;
		txbcn = is_ap ? 1 : 0;

		opch = ptr;
		opch->w0 = le32_encode_bits(scan_op[i].macid,
					    RTW89_H2C_SCANOFLD_BE_OPCH_W0_MACID) |
			   le32_encode_bits(option->band,
					    RTW89_H2C_SCANOFLD_BE_OPCH_W0_BAND) |
			   le32_encode_bits(scan_op[i].port,
					    RTW89_H2C_SCANOFLD_BE_OPCH_W0_PORT) |
			   le32_encode_bits(opmode,
					    RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY) |
			   le32_encode_bits(txnull,
					    RTW89_H2C_SCANOFLD_BE_OPCH_W0_TXNULL) |
			   le32_encode_bits(policy_val,
					    RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY_VAL);

		opch->w1 = le32_encode_bits(scan_op[i].chan.band_type,
					    RTW89_H2C_SCANOFLD_BE_OPCH_W1_CH_BAND) |
			   le32_encode_bits(scan_op[i].chan.band_width,
					    RTW89_H2C_SCANOFLD_BE_OPCH_W1_BW) |
			   le32_encode_bits(0x3,
					    RTW89_H2C_SCANOFLD_BE_OPCH_W1_NOTIFY) |
			   le32_encode_bits(scan_op[i].chan.primary_channel,
					    RTW89_H2C_SCANOFLD_BE_OPCH_W1_PRI_CH) |
			   le32_encode_bits(scan_op[i].chan.channel,
					    RTW89_H2C_SCANOFLD_BE_OPCH_W1_CENTRAL_CH);

		opch->w2 = le32_encode_bits(0,
					    RTW89_H2C_SCANOFLD_BE_OPCH_W2_PKTS_CTRL) |
			   le32_encode_bits(0,
					    RTW89_H2C_SCANOFLD_BE_OPCH_W2_SW_DEF) |
			   le32_encode_bits(rtw89_is_mlo_1_1(rtwdev) ? 1 : 2,
					    RTW89_H2C_SCANOFLD_BE_OPCH_W2_SS) |
			   le32_encode_bits(txbcn,
					    RTW89_H2C_SCANOFLD_BE_OPCH_W2_TXBCN);

		opch->w3 = le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
					    RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT0) |
			   le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
					    RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT1) |
			   le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
					    RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT2) |
			   le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
					    RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT3);

		/* duration field moved from w1 (v0) to w4 (v1) */
		if (ver == 0)
			opch->w1 |= le32_encode_bits(RTW89_CHANNEL_TIME,
						     RTW89_H2C_SCANOFLD_BE_OPCH_W1_DURATION);
		else
			opch->w4 = le32_encode_bits(RTW89_CHANNEL_TIME,
						    RTW89_H2C_SCANOFLD_BE_OPCH_W4_DURATION_V1);
		ptr += sizeof(*opch);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_SCANOFLD_BE, 1, 1,
			      len);

	if (option->enable)
		cond = RTW89_SCANOFLD_BE_WAIT_COND_START;
	else
		cond = RTW89_SCANOFLD_BE_WAIT_COND_STOP;

	ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan be ofld\n");
		return ret;
	}

	return 0;
}
6859
/* Download one page of RF register configuration to firmware.
 *
 * The payload is copied verbatim from the pre-built table in @info; the
 * H2C class encodes which RF path the registers belong to and the page
 * index is carried in the command function field.
 *
 * Returns 0 on success or a negative error code.
 */
int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
			struct rtw89_fw_h2c_rf_reg_info *info,
			u16 len, u8 page)
{
	struct sk_buff *skb;
	u8 h2c_class;
	int ret;

	/* path A and path B use distinct outsource H2C classes */
	if (info->rf_path == RF_PATH_A)
		h2c_class = H2C_CL_OUTSRC_RF_REG_A;
	else
		h2c_class = H2C_CL_OUTSRC_RF_REG_B;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c rf reg\n");
		return -ENOMEM;
	}
	skb_put_data(skb, info->rtw89_phy_config_rf_h2c[page], len);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, h2c_class, page, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
6892
/* Notify RF firmware of the current MCC (multi-channel concurrency)
 * channel set taken from the first rfk_mcc data slot.  Two payload
 * layouts exist: the legacy v0 layout carries band + channel pairs
 * plus the currently selected pair, the newer layout carries channel
 * pairs only.
 *
 * Returns 0 on success or a negative error code.
 */
int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev)
{
	struct rtw89_rfk_mcc_info_data *rfk_mcc = rtwdev->rfk_mcc.data;
	struct rtw89_fw_h2c_rf_get_mccch_v0 *mccch_v0;
	struct rtw89_fw_h2c_rf_get_mccch *mccch;
	u32 len = sizeof(*mccch);
	struct sk_buff *skb;
	u8 ver = U8_MAX;
	int ret;
	u8 idx;

	/* older firmware expects the smaller v0 layout */
	if (RTW89_CHK_FW_FEATURE(RFK_NTFY_MCC_V0, &rtwdev->fw)) {
		len = sizeof(*mccch_v0);
		ver = 0;
	}

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n");
		return -ENOMEM;
	}
	skb_put(skb, len);

	/* table_idx selects which of the two stored channels is active */
	idx = rfk_mcc->table_idx;
	if (ver == 0) {
		mccch_v0 = (struct rtw89_fw_h2c_rf_get_mccch_v0 *)skb->data;
		mccch_v0->ch_0 = cpu_to_le32(rfk_mcc->ch[0]);
		mccch_v0->ch_1 = cpu_to_le32(rfk_mcc->ch[1]);
		mccch_v0->band_0 = cpu_to_le32(rfk_mcc->band[0]);
		mccch_v0->band_1 = cpu_to_le32(rfk_mcc->band[1]);
		mccch_v0->current_band_type = cpu_to_le32(rfk_mcc->band[idx]);
		mccch_v0->current_channel = cpu_to_le32(rfk_mcc->ch[idx]);
	} else {
		mccch = (struct rtw89_fw_h2c_rf_get_mccch *)skb->data;
		/* both halves of each slot carry the same channel here -
		 * presumably one per RF path; confirm against the firmware
		 * interface definition
		 */
		mccch->ch_0_0 = cpu_to_le32(rfk_mcc->ch[0]);
		mccch->ch_0_1 = cpu_to_le32(rfk_mcc->ch[0]);
		mccch->ch_1_0 = cpu_to_le32(rfk_mcc->ch[1]);
		mccch->ch_1_1 = cpu_to_le32(rfk_mcc->ch[1]);
		mccch->current_channel = cpu_to_le32(rfk_mcc->ch[idx]);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY,
			      H2C_FUNC_OUTSRC_RF_GET_MCCCH, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc);
6952
/* Program the DIG packet-detection lower bound for one MCC role.
 *
 * The command carries one register write (address + bitmask + value)
 * built from the chip's seg0 PD register so firmware can apply/restore
 * it per role while time-slicing between channels.
 *
 * @chanctx_idx: channel context the role operates on
 * @mcc_role_idx: firmware-side index of the MCC role to update
 * @pd_val: packet-detection lower bound value to write
 * @en: enable/disable the firmware DIG dynamic mechanism
 *
 * Returns 0 on success or a negative error code.
 */
int rtw89_fw_h2c_mcc_dig(struct rtw89_dev *rtwdev,
			 enum rtw89_chanctx_idx chanctx_idx,
			 u8 mcc_role_idx, u8 pd_val, bool en)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx);
	const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs;
	struct rtw89_h2c_mcc_dig *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c mcc_dig\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_mcc_dig *)skb->data;

	/* one register entry, targeting PHY0, tagged with the role's channel */
	h2c->w0 = le32_encode_bits(1, RTW89_H2C_MCC_DIG_W0_REG_CNT) |
		  le32_encode_bits(en, RTW89_H2C_MCC_DIG_W0_DM_EN) |
		  le32_encode_bits(mcc_role_idx, RTW89_H2C_MCC_DIG_W0_IDX) |
		  le32_encode_bits(1, RTW89_H2C_MCC_DIG_W0_SET) |
		  le32_encode_bits(1, RTW89_H2C_MCC_DIG_W0_PHY0_EN) |
		  le32_encode_bits(chan->channel, RTW89_H2C_MCC_DIG_W0_CENTER_CH) |
		  le32_encode_bits(chan->band_type, RTW89_H2C_MCC_DIG_W0_BAND_TYPE);
	/* register address and bitmask are split into 8-bit LSB/MSB fields */
	h2c->w1 = le32_encode_bits(dig_regs->seg0_pd_reg,
				   RTW89_H2C_MCC_DIG_W1_ADDR_LSB) |
		  le32_encode_bits(dig_regs->seg0_pd_reg >> 8,
				   RTW89_H2C_MCC_DIG_W1_ADDR_MSB) |
		  le32_encode_bits(dig_regs->pd_lower_bound_mask,
				   RTW89_H2C_MCC_DIG_W1_BMASK_LSB) |
		  le32_encode_bits(dig_regs->pd_lower_bound_mask >> 8,
				   RTW89_H2C_MCC_DIG_W1_BMASK_MSB);
	h2c->w2 = le32_encode_bits(pd_val, RTW89_H2C_MCC_DIG_W2_VAL_LSB);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM,
			      H2C_FUNC_FW_MCC_DIG, 0, 0, len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
7005
/* Send per-RF-path channel state (RF18 register value and primary
 * channel) for every link of @rtwvif to firmware, so it can restore RF
 * state around power-save transitions.  BE-generation chips only; a
 * no-op elsewhere.
 *
 * Returns 0 on success or a negative error code.
 */
int rtw89_fw_h2c_rf_ps_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_vif_link *rtwvif_link;
	struct rtw89_h2c_rf_ps_info *h2c;
	const struct rtw89_chan *chan;
	u32 len = sizeof(*h2c);
	unsigned int link_id;
	struct sk_buff *skb;
	int ret;
	u8 path;
	u32 val;

	if (chip->chip_gen != RTW89_CHIP_BE)
		return 0;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c rf ps info\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_rf_ps_info *)skb->data;
	h2c->mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);

	/* one RF18/primary-channel entry per link, indexed by synthesizer path */
	rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
		chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
		path = rtw89_phy_get_syn_sel(rtwdev, rtwvif_link->phy_idx);
		val = rtw89_chip_chan_to_rf18_val(rtwdev, chan);

		/* bail out before indexing past either array bound */
		if (path >= chip->rf_path_num || path >= NUM_OF_RTW89_FW_RFK_PATH) {
			rtw89_err(rtwdev, "unsupported rf path (%d)\n", path);
			ret = -ENOENT;
			goto fail;
		}

		h2c->rf18[path] = cpu_to_le32(val);
		h2c->pri_ch[path] = chan->primary_channel;
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY,
			      H2C_FUNC_OUTSRC_RF_PS_INFO, 0, 0,
			      sizeof(*h2c));

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_rf_ps_info);
7064
/* Send the RFK pre-notify command describing the current per-path
 * channel tables before a calibration run.
 *
 * Four payload generations exist, selected by firmware feature flags:
 * v3+ (default) is a slim struct with only MLO/PHY info, while v0..v2
 * share a common table layout (filled through the v2 view, of which
 * v0/v1 are prefixes) plus version-specific trailing fields.
 *
 * Returns 0 on success or a negative error code.
 */
int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
			     enum rtw89_phy_idx phy_idx)
{
	struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
	struct rtw89_fw_h2c_rfk_pre_info_common *common;
	struct rtw89_fw_h2c_rfk_pre_info_v0 *h2c_v0;
	struct rtw89_fw_h2c_rfk_pre_info_v1 *h2c_v1;
	struct rtw89_fw_h2c_rfk_pre_info_v2 *h2c_v2;
	struct rtw89_fw_h2c_rfk_pre_info *h2c;
	u8 tbl_sel[NUM_OF_RTW89_FW_RFK_PATH];
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	u8 ver = U8_MAX;
	u8 tbl, path;
	u32 val32;
	int ret;

	/* newest feature flag wins; ver stays U8_MAX for the v3+ layout */
	if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V3, &rtwdev->fw)) {
	} else if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V2, &rtwdev->fw)) {
		len = sizeof(*h2c_v2);
		ver = 2;
	} else if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V1, &rtwdev->fw)) {
		len = sizeof(*h2c_v1);
		ver = 1;
	} else if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V0, &rtwdev->fw)) {
		len = sizeof(*h2c_v0);
		ver = 0;
	}

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c rfk_pre_ntfy\n");
		return -ENOMEM;
	}
	skb_put(skb, len);

	if (ver <= 2)
		goto old_format;

	/* v3+: slim payload, no channel tables */
	h2c = (struct rtw89_fw_h2c_rfk_pre_info *)skb->data;

	h2c->mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);
	h2c->phy_idx = cpu_to_le32(phy_idx);
	h2c->mlo_1_1 = cpu_to_le32(rtw89_is_mlo_1_1(rtwdev));

	goto done;

old_format:
	/* v0/v1 payloads are prefixes of v2, so the shared fields can be
	 * filled through the v2 view regardless of the actual version
	 */
	h2c_v2 = (struct rtw89_fw_h2c_rfk_pre_info_v2 *)skb->data;
	common = &h2c_v2->base_v1.common;

	common->mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);

	BUILD_BUG_ON(NUM_OF_RTW89_FW_RFK_TBL > RTW89_RFK_CHS_NR);
	BUILD_BUG_ON(ARRAY_SIZE(rfk_mcc->data) < NUM_OF_RTW89_FW_RFK_PATH);

	/* full per-path, per-table channel/band matrix */
	for (tbl = 0; tbl < NUM_OF_RTW89_FW_RFK_TBL; tbl++) {
		for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) {
			common->dbcc.ch[path][tbl] =
				cpu_to_le32(rfk_mcc->data[path].ch[tbl]);
			common->dbcc.band[path][tbl] =
				cpu_to_le32(rfk_mcc->data[path].band[tbl]);
		}
	}

	/* currently-selected table entry per path; v2 adds bandwidth */
	for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) {
		tbl_sel[path] = rfk_mcc->data[path].table_idx;

		common->tbl.cur_ch[path] =
			cpu_to_le32(rfk_mcc->data[path].ch[tbl_sel[path]]);
		common->tbl.cur_band[path] =
			cpu_to_le32(rfk_mcc->data[path].band[tbl_sel[path]]);

		if (ver <= 1)
			continue;

		h2c_v2->cur_bandwidth[path] =
			cpu_to_le32(rfk_mcc->data[path].bw[tbl_sel[path]]);
	}

	common->phy_idx = cpu_to_le32(phy_idx);

	if (ver == 0) { /* RFK_PRE_NOTIFY_V0 */
		/* v0 additionally snapshots live IQC table selects and RF
		 * CFGCH registers for path 0/1
		 */
		h2c_v0 = (struct rtw89_fw_h2c_rfk_pre_info_v0 *)skb->data;

		h2c_v0->cur_band = cpu_to_le32(rfk_mcc->data[0].band[tbl_sel[0]]);
		h2c_v0->cur_bw = cpu_to_le32(rfk_mcc->data[0].bw[tbl_sel[0]]);
		h2c_v0->cur_center_ch = cpu_to_le32(rfk_mcc->data[0].ch[tbl_sel[0]]);

		val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL, B_COEF_SEL_IQC_V1);
		h2c_v0->ktbl_sel0 = cpu_to_le32(val32);
		val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL_C1, B_COEF_SEL_IQC_V1);
		h2c_v0->ktbl_sel1 = cpu_to_le32(val32);
		val32 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
		h2c_v0->rfmod0 = cpu_to_le32(val32);
		val32 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK);
		h2c_v0->rfmod1 = cpu_to_le32(val32);

		if (rtw89_is_mlo_1_1(rtwdev))
			h2c_v0->mlo_1_1 = cpu_to_le32(1);

		h2c_v0->rfe_type = cpu_to_le32(rtwdev->efuse.rfe_type);

		goto done;
	}

	if (rtw89_is_mlo_1_1(rtwdev)) {
		h2c_v1 = &h2c_v2->base_v1;
		h2c_v1->mlo_1_1 = cpu_to_le32(1);
	}
done:
	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
			      H2C_FUNC_RFK_PRE_NOTIFY, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
7193
/* Send the MCC flavor of the RFK pre-notify command, carrying the RF18
 * (CFGCH) table per path/table slot.
 *
 * Three payload generations exist: v0 carries a per-path table matrix,
 * while v1 and v2 share a single table array with a per-path table
 * select (v2 additionally reports the STA AID).
 *
 * Returns 0 on success or a negative error code.
 */
int rtw89_fw_h2c_rf_pre_ntfy_mcc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
	struct rtw89_rfk_mcc_info_data *rfk_mcc = rtwdev->rfk_mcc.data;
	struct rtw89_rfk_mcc_info *rfk_mcc_v0 = &rtwdev->rfk_mcc;
	struct rtw89_fw_h2c_rfk_pre_info_mcc_v0 *h2c_v0;
	struct rtw89_fw_h2c_rfk_pre_info_mcc_v1 *h2c_v1;
	struct rtw89_fw_h2c_rfk_pre_info_mcc *h2c;
	struct rtw89_hal *hal = &rtwdev->hal;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	u8 ver = U8_MAX;
	u8 tbl, path;
	u8 tbl_sel;
	int ret;

	/* newest feature flag wins; ver stays U8_MAX for the v2 layout */
	if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_MCC_V2, &rtwdev->fw)) {
	} else if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_MCC_V1, &rtwdev->fw)) {
		len = sizeof(*h2c_v1);
		ver = 1;
	} else if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_MCC_V0, &rtwdev->fw)) {
		len = sizeof(*h2c_v0);
		ver = 0;
	}

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c rfk_pre_ntfy_mcc\n");
		return -ENOMEM;
	}
	skb_put(skb, len);

	if (ver != 0)
		goto v1;

	h2c_v0 = (struct rtw89_fw_h2c_rfk_pre_info_mcc_v0 *)skb->data;
	for (tbl = 0; tbl < NUM_OF_RTW89_FW_RFK_TBL; tbl++) {
		for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) {
			h2c_v0->tbl_18[tbl][path] =
				cpu_to_le32(rfk_mcc_v0->data[path].rf18[tbl]);
			/* NOTE(review): cur_18[path] does not depend on tbl,
			 * so this inner assignment just rewrites the same
			 * value each outer iteration - harmless but redundant
			 */
			tbl_sel = rfk_mcc_v0->data[path].table_idx;
			h2c_v0->cur_18[path] =
				cpu_to_le32(rfk_mcc_v0->data[path].rf18[tbl_sel]);
		}
	}

	h2c_v0->mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);
	goto done;

v1:
	/* v1 is a prefix of v2, so common fields are filled via the v1 view */
	h2c_v1 = (struct rtw89_fw_h2c_rfk_pre_info_mcc_v1 *)skb->data;

	BUILD_BUG_ON(NUM_OF_RTW89_FW_RFK_TBL > RTW89_RFK_CHS_NR);

	for (tbl = 0; tbl < NUM_OF_RTW89_FW_RFK_TBL; tbl++)
		h2c_v1->tbl_18[tbl] = cpu_to_le32(rfk_mcc->rf18[tbl]);

	BUILD_BUG_ON(ARRAY_SIZE(rtwdev->rfk_mcc.data) < NUM_OF_RTW89_FW_RFK_PATH);

	/* shared table array, but tbl_sel can be independent by path */
	for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) {
		tbl = rfk_mcc[path].table_idx;
		h2c_v1->cur_18[path] = cpu_to_le32(rfk_mcc->rf18[tbl]);

		/* report the table select of the path matching this PHY */
		if (path == phy_idx)
			h2c_v1->tbl_idx = tbl;
	}

	h2c_v1->mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);
	h2c_v1->phy_idx = phy_idx;

	if (rtw89_is_mlo_1_1(rtwdev))
		h2c_v1->mlo_1_1 = cpu_to_le32(1);

	if (ver == 1)
		goto done;

	/* v2-only trailing field */
	h2c = (struct rtw89_fw_h2c_rfk_pre_info_mcc *)skb->data;

	h2c->aid = cpu_to_le32(hal->aid);

done:
	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY,
			      H2C_FUNC_OUTSRC_RF_MCC_INFO, 0, 0, len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
7291
rtw89_fw_h2c_rf_tssi(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy_idx,const struct rtw89_chan * chan,enum rtw89_tssi_mode tssi_mode)7292 int rtw89_fw_h2c_rf_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
7293 const struct rtw89_chan *chan, enum rtw89_tssi_mode tssi_mode)
7294 {
7295 const struct rtw89_chip_info *chip = rtwdev->chip;
7296 struct rtw89_efuse *efuse = &rtwdev->efuse;
7297 struct rtw89_hal *hal = &rtwdev->hal;
7298 struct rtw89_h2c_rf_tssi *h2c;
7299 u32 len = sizeof(*h2c);
7300 struct sk_buff *skb;
7301 int ret;
7302
7303 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
7304 if (!skb) {
7305 rtw89_err(rtwdev, "failed to alloc skb for h2c RF TSSI\n");
7306 return -ENOMEM;
7307 }
7308 skb_put(skb, len);
7309 h2c = (struct rtw89_h2c_rf_tssi *)skb->data;
7310
7311 h2c->len = cpu_to_le16(len);
7312 h2c->phy = phy_idx;
7313 h2c->ch = chan->channel;
7314 h2c->bw = chan->band_width;
7315 h2c->band = chan->band_type;
7316 h2c->cv = hal->cv;
7317 h2c->tssi_mode = tssi_mode;
7318 h2c->rfe_type = efuse->rfe_type;
7319
7320 if (chip->chip_id == RTL8922A)
7321 h2c->hwtx_en = true;
7322 else
7323 h2c->hwtx_en = false;
7324
7325 rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(rtwdev, phy_idx, chan, h2c);
7326 rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(rtwdev, phy_idx, chan, h2c);
7327
7328 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7329 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
7330 H2C_FUNC_RFK_TSSI_OFFLOAD, 0, 0, len);
7331
7332 ret = rtw89_h2c_tx(rtwdev, skb, false);
7333 if (ret) {
7334 rtw89_err(rtwdev, "failed to send h2c\n");
7335 goto fail;
7336 }
7337
7338 return 0;
7339 fail:
7340 dev_kfree_skb_any(skb);
7341
7342 return ret;
7343 }
7344
/* Trigger IQK (IQ-imbalance calibration) offload in RF firmware.
 *
 * The legacy v0 payload only identifies the PHY and DBCC state; the
 * current payload additionally describes the channel to calibrate on.
 *
 * Returns 0 on success or a negative error code.
 */
int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
			const struct rtw89_chan *chan)
{
	struct rtw89_hal *hal = &rtwdev->hal;
	struct rtw89_h2c_rf_iqk_v0 *h2c_v0;
	struct rtw89_h2c_rf_iqk *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	u8 ver = U8_MAX;
	int ret;

	/* older firmware expects the smaller v0 layout */
	if (RTW89_CHK_FW_FEATURE(RFK_IQK_V0, &rtwdev->fw)) {
		len = sizeof(*h2c_v0);
		ver = 0;
	}

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c RF IQK\n");
		return -ENOMEM;
	}
	skb_put(skb, len);

	if (ver == 0) {
		h2c_v0 = (struct rtw89_h2c_rf_iqk_v0 *)skb->data;

		h2c_v0->phy_idx = cpu_to_le32(phy_idx);
		h2c_v0->dbcc = cpu_to_le32(rtwdev->dbcc_en);

		goto done;
	}

	h2c = (struct rtw89_h2c_rf_iqk *)skb->data;

	h2c->len = sizeof(*h2c);
	h2c->ktype = 0;
	h2c->phy = phy_idx;
	h2c->kpath = rtw89_phy_get_kpath(rtwdev, phy_idx);
	h2c->band = chan->band_type;
	h2c->bw = chan->band_width;
	h2c->ch = chan->channel;
	h2c->cv = hal->cv;

done:
	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
			      H2C_FUNC_RFK_IQK_OFFLOAD, 0, 0, len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
7405
rtw89_fw_h2c_rf_dpk(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy_idx,const struct rtw89_chan * chan)7406 int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
7407 const struct rtw89_chan *chan)
7408 {
7409 struct rtw89_h2c_rf_dpk *h2c;
7410 u32 len = sizeof(*h2c);
7411 struct sk_buff *skb;
7412 int ret;
7413
7414 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
7415 if (!skb) {
7416 rtw89_err(rtwdev, "failed to alloc skb for h2c RF DPK\n");
7417 return -ENOMEM;
7418 }
7419 skb_put(skb, len);
7420 h2c = (struct rtw89_h2c_rf_dpk *)skb->data;
7421
7422 h2c->len = len;
7423 h2c->phy = phy_idx;
7424 h2c->dpk_enable = true;
7425 h2c->kpath = RF_AB;
7426 h2c->cur_band = chan->band_type;
7427 h2c->cur_bw = chan->band_width;
7428 h2c->cur_ch = chan->channel;
7429 h2c->dpk_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK);
7430
7431 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7432 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
7433 H2C_FUNC_RFK_DPK_OFFLOAD, 0, 0, len);
7434
7435 ret = rtw89_h2c_tx(rtwdev, skb, false);
7436 if (ret) {
7437 rtw89_err(rtwdev, "failed to send h2c\n");
7438 goto fail;
7439 }
7440
7441 return 0;
7442 fail:
7443 dev_kfree_skb_any(skb);
7444
7445 return ret;
7446 }
7447
rtw89_fw_h2c_rf_txgapk(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy_idx,const struct rtw89_chan * chan)7448 int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
7449 const struct rtw89_chan *chan)
7450 {
7451 struct rtw89_hal *hal = &rtwdev->hal;
7452 struct rtw89_h2c_rf_txgapk *h2c;
7453 u32 len = sizeof(*h2c);
7454 struct sk_buff *skb;
7455 int ret;
7456
7457 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
7458 if (!skb) {
7459 rtw89_err(rtwdev, "failed to alloc skb for h2c RF TXGAPK\n");
7460 return -ENOMEM;
7461 }
7462 skb_put(skb, len);
7463 h2c = (struct rtw89_h2c_rf_txgapk *)skb->data;
7464
7465 h2c->len = len;
7466 h2c->ktype = 2;
7467 h2c->phy = phy_idx;
7468 h2c->kpath = RF_AB;
7469 h2c->band = chan->band_type;
7470 h2c->bw = chan->band_width;
7471 h2c->ch = chan->channel;
7472 h2c->cv = hal->cv;
7473
7474 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7475 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
7476 H2C_FUNC_RFK_TXGAPK_OFFLOAD, 0, 0, len);
7477
7478 ret = rtw89_h2c_tx(rtwdev, skb, false);
7479 if (ret) {
7480 rtw89_err(rtwdev, "failed to send h2c\n");
7481 goto fail;
7482 }
7483
7484 return 0;
7485 fail:
7486 dev_kfree_skb_any(skb);
7487
7488 return ret;
7489 }
7490
rtw89_fw_h2c_rf_dack(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy_idx,const struct rtw89_chan * chan)7491 int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
7492 const struct rtw89_chan *chan)
7493 {
7494 struct rtw89_h2c_rf_dack *h2c;
7495 u32 len = sizeof(*h2c);
7496 struct sk_buff *skb;
7497 int ret;
7498
7499 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
7500 if (!skb) {
7501 rtw89_err(rtwdev, "failed to alloc skb for h2c RF DACK\n");
7502 return -ENOMEM;
7503 }
7504 skb_put(skb, len);
7505 h2c = (struct rtw89_h2c_rf_dack *)skb->data;
7506
7507 h2c->len = len;
7508 h2c->phy = phy_idx;
7509 h2c->type = 0;
7510
7511 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7512 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
7513 H2C_FUNC_RFK_DACK_OFFLOAD, 0, 0, len);
7514
7515 ret = rtw89_h2c_tx(rtwdev, skb, false);
7516 if (ret) {
7517 rtw89_err(rtwdev, "failed to send h2c\n");
7518 goto fail;
7519 }
7520
7521 return 0;
7522 fail:
7523 dev_kfree_skb_any(skb);
7524
7525 return ret;
7526 }
7527
/* Trigger RX DCK (receive DC offset calibration) offload for @chan.
 *
 * The common fields occupy the same offsets in both payload versions,
 * so they are always filled through the v0 view; the newer layout
 * additionally carries the is_chl_k flag.
 * NOTE(review): this relies on struct rtw89_h2c_rf_rxdck extending the
 * v0 struct as a prefix - confirm against the struct definitions.
 *
 * @is_chl_k: whether this is a channel-switch calibration (non-v0 only)
 *
 * Returns 0 on success or a negative error code.
 */
int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
			  const struct rtw89_chan *chan, bool is_chl_k)
{
	struct rtw89_h2c_rf_rxdck_v0 *v0;
	struct rtw89_h2c_rf_rxdck *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ver = -1;
	int ret;

	if (RTW89_CHK_FW_FEATURE(RFK_RXDCK_V0, &rtwdev->fw)) {
		len = sizeof(*v0);
		ver = 0;
	}

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c RF RXDCK\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	v0 = (struct rtw89_h2c_rf_rxdck_v0 *)skb->data;

	v0->len = len;
	v0->phy = phy_idx;
	v0->is_afe = false;
	v0->kpath = RF_AB;
	v0->cur_band = chan->band_type;
	v0->cur_bw = chan->band_width;
	v0->cur_ch = chan->channel;
	/* mirror the driver's RFK debug setting into firmware */
	v0->rxdck_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK);

	if (ver == 0)
		goto hdr;

	h2c = (struct rtw89_h2c_rf_rxdck *)skb->data;
	h2c->is_chl_k = is_chl_k;

hdr:
	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
			      H2C_FUNC_RFK_RXDCK_OFFLOAD, 0, 0, len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
7583
/* Enable/disable the TAS (Time-Averaged SAR) firmware offload. */
int rtw89_fw_h2c_rf_tas_trigger(struct rtw89_dev *rtwdev, bool enable)
{
	struct rtw89_h2c_rf_tas *cmd;
	u32 cmd_len = sizeof(*cmd);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, cmd_len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c RF TAS\n");
		return -ENOMEM;
	}

	cmd = (struct rtw89_h2c_rf_tas *)skb_put(skb, cmd_len);
	cmd->enable = cpu_to_le32(enable);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
			      H2C_FUNC_RFK_TAS_OFFLOAD, 0, 0, cmd_len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
7617
rtw89_fw_h2c_rf_txiqk(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy_idx,const struct rtw89_chan * chan)7618 int rtw89_fw_h2c_rf_txiqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
7619 const struct rtw89_chan *chan)
7620 {
7621 struct rtw89_h2c_rf_txiqk *h2c;
7622 u32 len = sizeof(*h2c);
7623 struct sk_buff *skb;
7624 int ret;
7625
7626 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
7627 if (!skb) {
7628 rtw89_err(rtwdev, "failed to alloc skb for h2c RF TXIQK\n");
7629 return -ENOMEM;
7630 }
7631 skb_put(skb, len);
7632 h2c = (struct rtw89_h2c_rf_txiqk *)skb->data;
7633
7634 h2c->len = len;
7635 h2c->phy = phy_idx;
7636 h2c->txiqk_enable = true;
7637 h2c->is_wb_txiqk = true;
7638 h2c->kpath = RF_AB;
7639 h2c->cur_band = chan->band_type;
7640 h2c->cur_bw = chan->band_width;
7641 h2c->cur_ch = chan->channel;
7642 h2c->txiqk_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK);
7643
7644 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7645 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
7646 H2C_FUNC_RFK_TXIQK_OFFOAD, 0, 0, len);
7647
7648 ret = rtw89_h2c_tx(rtwdev, skb, false);
7649 if (ret) {
7650 rtw89_err(rtwdev, "failed to send h2c\n");
7651 goto fail;
7652 }
7653
7654 return 0;
7655 fail:
7656 dev_kfree_skb_any(skb);
7657
7658 return ret;
7659 }
7660
rtw89_fw_h2c_rf_cim3k(struct rtw89_dev * rtwdev,enum rtw89_phy_idx phy_idx,const struct rtw89_chan * chan)7661 int rtw89_fw_h2c_rf_cim3k(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
7662 const struct rtw89_chan *chan)
7663 {
7664 struct rtw89_h2c_rf_cim3k *h2c;
7665 u32 len = sizeof(*h2c);
7666 struct sk_buff *skb;
7667 int ret;
7668
7669 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
7670 if (!skb) {
7671 rtw89_err(rtwdev, "failed to alloc skb for h2c RF CIM3K\n");
7672 return -ENOMEM;
7673 }
7674 skb_put(skb, len);
7675 h2c = (struct rtw89_h2c_rf_cim3k *)skb->data;
7676
7677 h2c->len = len;
7678 h2c->phy = phy_idx;
7679 h2c->kpath = RF_AB;
7680 h2c->cur_band = chan->band_type;
7681 h2c->cur_bw = chan->band_width;
7682 h2c->cur_ch = chan->channel;
7683 h2c->cim3k_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK);
7684
7685 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
7686 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
7687 H2C_FUNC_RFK_CIM3K_OFFOAD, 0, 0, len);
7688
7689 ret = rtw89_h2c_tx(rtwdev, skb, false);
7690 if (ret) {
7691 rtw89_err(rtwdev, "failed to send h2c\n");
7692 goto fail;
7693 }
7694
7695 return 0;
7696 fail:
7697 dev_kfree_skb_any(skb);
7698
7699 return ret;
7700 }
7701
/* Send an arbitrary H2C payload with a driver-built OUTSRC header.
 * @h2c_class/@h2c_func select the command; @rack/@dack request report/done
 * acknowledgment bits in the header.
 */
int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
			      u8 h2c_class, u8 h2c_func, u8 *buf, u16 len,
			      bool rack, bool dack)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for raw with hdr\n");
		return -ENOMEM;
	}

	skb_put_data(skb, buf, len);
	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, H2C_CAT_OUTSRC,
			      h2c_class, h2c_func, rack, dack, len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
7732
/* Send a fully pre-formed H2C buffer as-is (no driver-built header). */
int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c raw\n");
		return -ENOMEM;
	}

	skb_put_data(skb, buf, len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
7757
rtw89_fw_send_all_early_h2c(struct rtw89_dev * rtwdev)7758 void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev)
7759 {
7760 struct rtw89_early_h2c *early_h2c;
7761
7762 lockdep_assert_wiphy(rtwdev->hw->wiphy);
7763
7764 list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) {
7765 rtw89_fw_h2c_raw(rtwdev, early_h2c->h2c, early_h2c->h2c_len);
7766 }
7767 }
7768
__rtw89_fw_free_all_early_h2c(struct rtw89_dev * rtwdev)7769 void __rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev)
7770 {
7771 struct rtw89_early_h2c *early_h2c, *tmp;
7772
7773 list_for_each_entry_safe(early_h2c, tmp, &rtwdev->early_h2c_list, list) {
7774 list_del(&early_h2c->list);
7775 kfree(early_h2c->h2c);
7776 kfree(early_h2c);
7777 }
7778 }
7779
rtw89_fw_free_all_early_h2c(struct rtw89_dev * rtwdev)7780 void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev)
7781 {
7782 lockdep_assert_wiphy(rtwdev->hw->wiphy);
7783
7784 __rtw89_fw_free_all_early_h2c(rtwdev);
7785 }
7786
/* Placeholder C2H handler: logs the event identity and discards it. */
void rtw89_fw_c2h_dummy_handler(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	const struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h);

	rtw89_debug(rtwdev, RTW89_DBG_FW,
		    "C2H cate=%u cls=%u func=%u is dummy\n",
		    attr->category, attr->class, attr->func);
}
7797
rtw89_fw_c2h_parse_attr(struct sk_buff * c2h)7798 static void rtw89_fw_c2h_parse_attr(struct sk_buff *c2h)
7799 {
7800 const struct rtw89_c2h_hdr *hdr = (const struct rtw89_c2h_hdr *)c2h->data;
7801 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h);
7802
7803 attr->category = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CATEGORY);
7804 attr->class = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CLASS);
7805 attr->func = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_FUNC);
7806 attr->len = le32_get_bits(hdr->w1, RTW89_C2H_HDR_W1_LEN);
7807 }
7808
rtw89_fw_c2h_chk_atomic(struct rtw89_dev * rtwdev,struct sk_buff * c2h)7809 static bool rtw89_fw_c2h_chk_atomic(struct rtw89_dev *rtwdev,
7810 struct sk_buff *c2h)
7811 {
7812 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h);
7813 u8 category = attr->category;
7814 u8 class = attr->class;
7815 u8 func = attr->func;
7816
7817 switch (category) {
7818 default:
7819 return false;
7820 case RTW89_C2H_CAT_MAC:
7821 return rtw89_mac_c2h_chk_atomic(rtwdev, c2h, class, func);
7822 case RTW89_C2H_CAT_OUTSRC:
7823 return rtw89_phy_c2h_chk_atomic(rtwdev, class, func);
7824 }
7825 }
7826
rtw89_fw_c2h_irqsafe(struct rtw89_dev * rtwdev,struct sk_buff * c2h)7827 void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h)
7828 {
7829 rtw89_fw_c2h_parse_attr(c2h);
7830 if (!rtw89_fw_c2h_chk_atomic(rtwdev, c2h))
7831 goto enqueue;
7832
7833 rtw89_fw_c2h_cmd_handle(rtwdev, c2h);
7834 dev_kfree_skb_any(c2h);
7835 return;
7836
7837 enqueue:
7838 skb_queue_tail(&rtwdev->c2h_queue, c2h);
7839 wiphy_work_queue(rtwdev->hw->wiphy, &rtwdev->c2h_work);
7840 }
7841
/* Dispatch one C2H event to its per-category handler (MAC, BTC or PHY).
 *
 * Attributes must already be parsed into the skb control block by
 * rtw89_fw_c2h_parse_attr(). Events are dropped while the device is not
 * running. Unless suppressed, the raw event is hex-dumped for debugging.
 */
static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
				    struct sk_buff *skb)
{
	struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(skb);
	u8 category = attr->category;
	u8 class = attr->class;
	u8 func = attr->func;
	u16 len = attr->len;
	bool dump = true;

	if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags))
		return;

	switch (category) {
	case RTW89_C2H_CAT_TEST:
		break;
	case RTW89_C2H_CAT_MAC:
		rtw89_mac_c2h_handle(rtwdev, skb, len, class, func);
		/* skip the raw hex dump for FW log events
		 * (class INFO / func C2H_LOG)
		 */
		if (class == RTW89_MAC_C2H_CLASS_INFO &&
		    func == RTW89_MAC_C2H_FUNC_C2H_LOG)
			dump = false;
		break;
	case RTW89_C2H_CAT_OUTSRC:
		/* BTC owns a dedicated class range within the OUTSRC category */
		if (class >= RTW89_PHY_C2H_CLASS_BTC_MIN &&
		    class <= RTW89_PHY_C2H_CLASS_BTC_MAX)
			rtw89_btc_c2h_handle(rtwdev, skb, len, class, func);
		else
			rtw89_phy_c2h_handle(rtwdev, skb, len, class, func);
		break;
	}

	if (dump)
		rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "C2H: ", skb->data, skb->len);
}
7876
rtw89_fw_c2h_work(struct wiphy * wiphy,struct wiphy_work * work)7877 void rtw89_fw_c2h_work(struct wiphy *wiphy, struct wiphy_work *work)
7878 {
7879 struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
7880 c2h_work);
7881 struct sk_buff *skb, *tmp;
7882 struct sk_buff_head c2hq;
7883 unsigned long flags;
7884
7885 lockdep_assert_wiphy(rtwdev->hw->wiphy);
7886
7887 __skb_queue_head_init(&c2hq);
7888
7889 spin_lock_irqsave(&rtwdev->c2h_queue.lock, flags);
7890 skb_queue_splice_init(&rtwdev->c2h_queue, &c2hq);
7891 spin_unlock_irqrestore(&rtwdev->c2h_queue.lock, flags);
7892
7893 skb_queue_walk_safe(&c2hq, skb, tmp) {
7894 rtw89_fw_c2h_cmd_handle(rtwdev, skb);
7895 dev_kfree_skb_any(skb);
7896 }
7897 }
7898
/* Drop queued scan C2H events that belong to an old scan sequence.
 *
 * Scan events carry the sequence number of the scan they were generated
 * for; events whose sequence no longer matches scan_info->seq are stale
 * and are purged. Non-scan events and current-sequence events are kept.
 */
void rtw89_fw_c2h_purge_obsoleted_scan_events(struct rtw89_dev *rtwdev)
{
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct sk_buff *skb, *tmp;
	struct sk_buff_head c2hq;
	unsigned long flags;

	lockdep_assert_wiphy(rtwdev->hw->wiphy);

	__skb_queue_head_init(&c2hq);

	/* detach the whole queue so filtering runs without its lock held */
	spin_lock_irqsave(&rtwdev->c2h_queue.lock, flags);
	skb_queue_splice_init(&rtwdev->c2h_queue, &c2hq);
	spin_unlock_irqrestore(&rtwdev->c2h_queue.lock, flags);

	skb_queue_walk_safe(&c2hq, skb, tmp) {
		struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(skb);

		if (!attr->is_scan_event || attr->scan_seq == scan_info->seq)
			continue;

		rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
			    "purge obsoleted scan event with seq=%d (cur=%d)\n",
			    attr->scan_seq, scan_info->seq);

		__skb_unlink(skb, &c2hq);
		dev_kfree_skb_any(skb);
	}

	/* splice survivors back at the head so they stay ordered before any
	 * events queued while we were filtering
	 */
	spin_lock_irqsave(&rtwdev->c2h_queue.lock, flags);
	skb_queue_splice(&c2hq, &rtwdev->c2h_queue);
	spin_unlock_irqrestore(&rtwdev->c2h_queue.lock, flags);
}
7932
/* Deliver one H2C message to firmware through the H2C I/O registers.
 *
 * Waits for firmware to finish consuming the previous register-based H2C,
 * writes header + payload into the data registers, then kicks firmware
 * via the trigger bit. Returns 0 on success or a negative error code.
 */
static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev,
				  struct rtw89_mac_h2c_info *info)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	const u32 *h2c_reg = chip->h2c_regs;
	u8 i, val, len;
	int ret;

	/* firmware clears the control register when it has consumed the
	 * previous message; don't overwrite the data registers before then
	 */
	ret = read_poll_timeout(rtw89_read8, val, val == 0, 1000, 5000, false,
				rtwdev, chip->h2c_ctrl_reg);
	if (ret) {
		rtw89_warn(rtwdev, "FW does not process h2c registers\n");
		return ret;
	}

	/* header length field counts 32-bit h2creg words, header included */
	len = DIV_ROUND_UP(info->content_len + RTW89_H2CREG_HDR_LEN,
			   sizeof(info->u.h2creg[0]));

	u32p_replace_bits(&info->u.hdr.w0, info->id, RTW89_H2CREG_HDR_FUNC_MASK);
	u32p_replace_bits(&info->u.hdr.w0, len, RTW89_H2CREG_HDR_LEN_MASK);

	for (i = 0; i < RTW89_H2CREG_MAX; i++)
		rtw89_write32(rtwdev, h2c_reg[i], info->u.h2creg[i]);

	/* bump the sequence counter, then trigger firmware only after all
	 * data registers are populated
	 */
	fw_info->h2c_counter++;
	rtw89_write8_mask(rtwdev, chip->h2c_counter_reg.addr,
			  chip->h2c_counter_reg.mask, fw_info->h2c_counter);
	rtw89_write8(rtwdev, chip->h2c_ctrl_reg, B_AX_H2CREG_TRIGGER);

	return 0;
}
7965
/* Receive one C2H message through the C2H I/O registers.
 *
 * Polls the control register until firmware signals data is ready, copies
 * out all C2H data registers, then acks firmware by clearing the control
 * register. On success @info->id and @info->content_len describe the
 * received message. Returns 0 on success or a negative error code.
 */
static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev,
				 struct rtw89_mac_c2h_info *info)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	const u32 *c2h_reg = chip->c2h_regs;
	u32 timeout;
	u8 i, val;
	int ret;

	/* default id in case no message is received */
	info->id = RTW89_FWCMD_C2HREG_FUNC_NULL;

	/* USB needs a larger poll budget than other buses */
	if (rtwdev->hci.type == RTW89_HCI_TYPE_USB)
		timeout = RTW89_C2H_TIMEOUT_USB;
	else
		timeout = RTW89_C2H_TIMEOUT;

	/* an explicit per-call timeout overrides the bus default */
	if (info->timeout)
		timeout = info->timeout;

	ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1,
				       timeout, false, rtwdev,
				       chip->c2h_ctrl_reg);
	if (ret) {
		rtw89_warn(rtwdev, "c2h reg timeout\n");
		return ret;
	}

	for (i = 0; i < RTW89_C2HREG_MAX; i++)
		info->u.c2hreg[i] = rtw89_read32(rtwdev, c2h_reg[i]);

	/* ack firmware: registers have been consumed */
	rtw89_write8(rtwdev, chip->c2h_ctrl_reg, 0);

	info->id = u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_FUNC_MASK);
	/* header length field counts 32-bit words; convert to bytes and
	 * strip the header to get the payload length
	 */
	info->content_len =
		(u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_LEN_MASK) << 2) -
		RTW89_C2HREG_HDR_LEN;

	fw_info->c2h_counter++;
	rtw89_write8_mask(rtwdev, chip->c2h_counter_reg.addr,
			  chip->c2h_counter_reg.mask, fw_info->c2h_counter);

	return 0;
}
8010
rtw89_fw_msg_reg(struct rtw89_dev * rtwdev,struct rtw89_mac_h2c_info * h2c_info,struct rtw89_mac_c2h_info * c2h_info)8011 int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev,
8012 struct rtw89_mac_h2c_info *h2c_info,
8013 struct rtw89_mac_c2h_info *c2h_info)
8014 {
8015 int ret;
8016
8017 if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE)
8018 lockdep_assert_wiphy(rtwdev->hw->wiphy);
8019
8020 if (!h2c_info && !c2h_info)
8021 return -EINVAL;
8022
8023 if (!h2c_info)
8024 goto recv_c2h;
8025
8026 ret = rtw89_fw_write_h2c_reg(rtwdev, h2c_info);
8027 if (ret)
8028 return ret;
8029
8030 recv_c2h:
8031 if (!c2h_info)
8032 return 0;
8033
8034 ret = rtw89_fw_read_c2h_reg(rtwdev, c2h_info);
8035 if (ret)
8036 return ret;
8037
8038 return 0;
8039 }
8040
/* Dump firmware status/debug registers to the kernel log for diagnosis.
 * Bails out if the device is not powered on.
 */
void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev)
{
	if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) {
		rtw89_err(rtwdev, "[ERR]pwr is off\n");
		return;
	}

	rtw89_info(rtwdev, "FW status = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM0));
	rtw89_info(rtwdev, "FW BADADDR = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM1));
	rtw89_info(rtwdev, "FW EPC/RA = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM2));
	rtw89_info(rtwdev, "FW MISC = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM3));
	rtw89_info(rtwdev, "R_AX_HALT_C2H = 0x%x\n",
		   rtw89_read32(rtwdev, R_AX_HALT_C2H));
	rtw89_info(rtwdev, "R_AX_SER_DBG_INFO = 0x%x\n",
		   rtw89_read32(rtwdev, R_AX_SER_DBG_INFO));

	/* also dump the firmware program counter history */
	rtw89_fw_prog_cnt_dump(rtwdev);
}
8059
rtw89_hw_scan_release_pkt_list(struct rtw89_dev * rtwdev)8060 static void rtw89_hw_scan_release_pkt_list(struct rtw89_dev *rtwdev)
8061 {
8062 struct list_head *pkt_list = rtwdev->scan_info.pkt_list;
8063 struct rtw89_pktofld_info *info, *tmp;
8064 u8 idx;
8065
8066 for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) {
8067 if (!(rtwdev->chip->support_bands & BIT(idx)))
8068 continue;
8069
8070 list_for_each_entry_safe(info, tmp, &pkt_list[idx], list) {
8071 if (test_bit(info->id, rtwdev->pkt_offload))
8072 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
8073 list_del(&info->list);
8074 kfree(info);
8075 }
8076 }
8077 }
8078
rtw89_hw_scan_cleanup(struct rtw89_dev * rtwdev,struct rtw89_vif_link * rtwvif_link)8079 static void rtw89_hw_scan_cleanup(struct rtw89_dev *rtwdev,
8080 struct rtw89_vif_link *rtwvif_link)
8081 {
8082 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
8083 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
8084 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
8085
8086 mac->free_chan_list(rtwdev);
8087 rtw89_hw_scan_release_pkt_list(rtwdev);
8088
8089 rtwvif->scan_req = NULL;
8090 rtwvif->scan_ies = NULL;
8091 scan_info->scanning_vif = NULL;
8092 scan_info->abort = false;
8093 scan_info->connected = false;
8094 scan_info->delay = 0;
8095 }
8096
rtw89_is_6ghz_wildcard_probe_req(struct rtw89_dev * rtwdev,struct cfg80211_scan_request * req,struct rtw89_pktofld_info * info,enum nl80211_band band,u8 ssid_idx)8097 static bool rtw89_is_6ghz_wildcard_probe_req(struct rtw89_dev *rtwdev,
8098 struct cfg80211_scan_request *req,
8099 struct rtw89_pktofld_info *info,
8100 enum nl80211_band band, u8 ssid_idx)
8101 {
8102 if (band != NL80211_BAND_6GHZ)
8103 return false;
8104
8105 if (req->ssids[ssid_idx].ssid_len) {
8106 memcpy(info->ssid, req->ssids[ssid_idx].ssid,
8107 req->ssids[ssid_idx].ssid_len);
8108 info->ssid_len = req->ssids[ssid_idx].ssid_len;
8109 return false;
8110 } else {
8111 info->wildcard_6ghz = true;
8112 return true;
8113 }
8114 }
8115
/* Clone @skb once per supported band, append the band-specific plus common
 * scan IEs, offload the result to firmware as a probe-request template,
 * and track it on scan_info->pkt_list[band]. @ssid_idx selects which
 * requested SSID this template carries (for 6 GHz wildcard bookkeeping).
 * Returns 0 on success or a negative error code.
 */
static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev,
				     struct rtw89_vif_link *rtwvif_link,
				     struct sk_buff *skb, u8 ssid_idx)
{
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
	struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
	struct cfg80211_scan_request *req = rtwvif->scan_req;
	struct rtw89_pktofld_info *info;
	struct sk_buff *new;
	int ret = 0;
	u8 band;

	for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
		if (!(rtwdev->chip->support_bands & BIT(band)))
			continue;

		new = skb_copy(skb, GFP_KERNEL);
		if (!new) {
			ret = -ENOMEM;
			goto out;
		}
		skb_put_data(new, ies->ies[band], ies->len[band]);
		skb_put_data(new, ies->common_ies, ies->common_ie_len);

		info = kzalloc_obj(*info);
		if (!info) {
			ret = -ENOMEM;
			kfree_skb(new);
			goto out;
		}

		/* records the SSID, or marks @info as 6 GHz wildcard */
		rtw89_is_6ghz_wildcard_probe_req(rtwdev, req, info, band, ssid_idx);

		ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new);
		if (ret) {
			kfree_skb(new);
			kfree(info);
			goto out;
		}

		list_add_tail(&info->list, &scan_info->pkt_list[band]);
		/* offloaded and tracked via info->id; local clone is done */
		kfree_skb(new);
	}
out:
	return ret;
}
8163
rtw89_hw_scan_update_probe_req(struct rtw89_dev * rtwdev,struct rtw89_vif_link * rtwvif_link,const u8 * mac_addr)8164 static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev,
8165 struct rtw89_vif_link *rtwvif_link,
8166 const u8 *mac_addr)
8167 {
8168 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
8169 struct cfg80211_scan_request *req = rtwvif->scan_req;
8170 struct sk_buff *skb;
8171 u8 num = req->n_ssids, i;
8172 int ret;
8173
8174 for (i = 0; i < num; i++) {
8175 skb = ieee80211_probereq_get(rtwdev->hw, mac_addr,
8176 req->ssids[i].ssid,
8177 req->ssids[i].ssid_len,
8178 req->ie_len);
8179 if (!skb)
8180 return -ENOMEM;
8181
8182 ret = rtw89_append_probe_req_ie(rtwdev, rtwvif_link, skb, i);
8183 kfree_skb(skb);
8184
8185 if (ret)
8186 return ret;
8187 }
8188
8189 return 0;
8190 }
8191
/* Offload directed probe requests for collocated 6 GHz APs reported via
 * RNR (req->scan_6ghz_params) that sit on the channel in @ch_info.
 * Each new BSSID gets its own probe template (addr3 = BSSID); BSSIDs that
 * already have a template are skipped. When at least one template is
 * added, probing is enabled and the dwell period extended for @ch_info.
 * Returns 0 on success or a negative error code.
 */
static int rtw89_update_6ghz_rnr_chan_ax(struct rtw89_dev *rtwdev,
					 struct ieee80211_scan_ies *ies,
					 struct cfg80211_scan_request *req,
					 struct rtw89_mac_chinfo_ax *ch_info)
{
	struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif;
	struct list_head *pkt_list = rtwdev->scan_info.pkt_list;
	struct cfg80211_scan_6ghz_params *params;
	struct rtw89_pktofld_info *info, *tmp;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb;
	bool found;
	int ret = 0;
	u8 i;

	if (!req->n_6ghz_params)
		return 0;

	for (i = 0; i < req->n_6ghz_params; i++) {
		params = &req->scan_6ghz_params[i];

		/* only entries matching this channel's primary channel */
		if (req->channels[params->channel_idx]->hw_value !=
		    ch_info->pri_ch)
			continue;

		/* skip BSSIDs that already have an offloaded template */
		found = false;
		list_for_each_entry(tmp, &pkt_list[NL80211_BAND_6GHZ], list) {
			if (ether_addr_equal(tmp->bssid, params->bssid)) {
				found = true;
				break;
			}
		}
		if (found)
			continue;

		skb = ieee80211_probereq_get(rtwdev->hw, rtwvif_link->mac_addr,
					     NULL, 0, req->ie_len);
		if (!skb)
			return -ENOMEM;

		skb_put_data(skb, ies->ies[NL80211_BAND_6GHZ], ies->len[NL80211_BAND_6GHZ]);
		skb_put_data(skb, ies->common_ies, ies->common_ie_len);
		/* direct the probe at the reported BSSID */
		hdr = (struct ieee80211_hdr *)skb->data;
		ether_addr_copy(hdr->addr3, params->bssid);

		info = kzalloc_obj(*info);
		if (!info) {
			ret = -ENOMEM;
			kfree_skb(skb);
			goto out;
		}

		ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb);
		if (ret) {
			kfree_skb(skb);
			kfree(info);
			goto out;
		}

		ether_addr_copy(info->bssid, params->bssid);
		info->channel_6ghz = req->channels[params->channel_idx]->hw_value;
		list_add_tail(&info->list, &rtwdev->scan_info.pkt_list[NL80211_BAND_6GHZ]);

		/* probe on this channel and dwell long enough for replies */
		ch_info->tx_pkt = true;
		ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G;

		/* offloaded and tracked via info->id; local skb is done */
		kfree_skb(skb);
	}

out:
	return ret;
}
8264
/* Fill one channel entry for PNO (net-detect) scan offload on AX chips.
 *
 * Sets common defaults, attaches up to RTW89_SCANOFLD_MAX_SSID probe
 * packet ids from the WoWLAN PNO packet list, then applies per-@chan_type
 * dwell adjustments.
 */
static void rtw89_pno_scan_add_chan_ax(struct rtw89_dev *rtwdev,
				       int chan_type, int ssid_num,
				       struct rtw89_mac_chinfo_ax *ch_info)
{
	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
	struct rtw89_pktofld_info *info;
	u8 probe_count = 0;

	ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
	ch_info->bw = RTW89_SCAN_WIDTH;
	ch_info->tx_pkt = true;
	ch_info->cfg_tx_pwr = false;
	ch_info->tx_pwr_idx = 0;
	ch_info->tx_null = false;
	ch_info->pause_data = false;
	ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;

	if (ssid_num) {
		list_for_each_entry(info, &rtw_wow->pno_pkt_list, list) {
			/* 6 GHz templates are channel-bound: skip ones bound
			 * to other channels; each additional one on this
			 * channel extends the dwell period
			 */
			if (info->channel_6ghz &&
			    ch_info->pri_ch != info->channel_6ghz)
				continue;
			else if (info->channel_6ghz && probe_count != 0)
				ch_info->period += RTW89_CHANNEL_TIME_6G;

			/* wildcard 6 GHz templates are not attached here */
			if (info->wildcard_6ghz)
				continue;

			ch_info->pkt_id[probe_count++] = info->id;
			if (probe_count >= RTW89_SCANOFLD_MAX_SSID)
				break;
		}
		ch_info->num_pkt = probe_count;
	}

	switch (chan_type) {
	case RTW89_CHAN_DFS:
		/* enforce a minimum period (except 6 GHz) and set dwell time */
		if (ch_info->ch_band != RTW89_BAND_6G)
			ch_info->period = max_t(u8, ch_info->period,
						RTW89_DFS_CHAN_TIME);
		ch_info->dwell_time = RTW89_DWELL_TIME;
		break;
	case RTW89_CHAN_ACTIVE:
		break;
	default:
		rtw89_err(rtwdev, "Channel type out of bound\n");
	}
}
8313
/* Fill one channel entry for AX hw_scan channel-list offload.
 *
 * Sets common per-channel defaults, handles 6 GHz probe/dwell trimming
 * and RNR-directed probes, attaches offloaded probe packet ids for the
 * requested SSIDs, and finally applies per-@chan_type adjustments
 * (operating-channel return, DFS dwell, extra operating channel).
 */
static void rtw89_hw_scan_add_chan_ax(struct rtw89_dev *rtwdev, int chan_type,
				      int ssid_num,
				      struct rtw89_mac_chinfo_ax *ch_info)
{
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif;
	const struct rtw89_hw_scan_extra_op *ext = &scan_info->extra_op;
	struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
	struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
	struct cfg80211_scan_request *req = rtwvif->scan_req;
	struct rtw89_chan *op = &rtwdev->scan_info.op_chan;
	struct rtw89_pktofld_info *info;
	struct ieee80211_vif *vif;
	u8 band, probe_count = 0;
	int ret;

	/* common defaults; refined per channel type below */
	ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
	ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
	ch_info->bw = RTW89_SCAN_WIDTH;
	ch_info->tx_pkt = true;
	ch_info->cfg_tx_pwr = false;
	ch_info->tx_pwr_idx = 0;
	ch_info->tx_null = false;
	ch_info->pause_data = false;
	ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;

	/* 6 GHz: with only a wildcard SSID requested, or on a non-PSC
	 * channel, skip probing and (unless the requested duration is
	 * mandatory) trim the extra probe dwell time
	 */
	if (ch_info->ch_band == RTW89_BAND_6G) {
		if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) ||
		    !ch_info->is_psc) {
			ch_info->tx_pkt = false;
			if (!req->duration_mandatory)
				ch_info->period -= RTW89_DWELL_TIME_6G;
		}
	}

	/* add directed probes for RNR-reported collocated 6 GHz APs */
	ret = rtw89_update_6ghz_rnr_chan_ax(rtwdev, ies, req, ch_info);
	if (ret)
		rtw89_warn(rtwdev, "RNR fails: %d\n", ret);

	if (ssid_num) {
		band = rtw89_hw_to_nl80211_band(ch_info->ch_band);

		list_for_each_entry(info, &scan_info->pkt_list[band], list) {
			/* 6 GHz templates are channel-bound: skip ones bound
			 * to other channels; each additional one on this
			 * channel extends the dwell period
			 */
			if (info->channel_6ghz &&
			    ch_info->pri_ch != info->channel_6ghz)
				continue;
			else if (info->channel_6ghz && probe_count != 0)
				ch_info->period += RTW89_CHANNEL_TIME_6G;

			/* wildcard 6 GHz templates are not attached here */
			if (info->wildcard_6ghz)
				continue;

			ch_info->pkt_id[probe_count++] = info->id;
			if (probe_count >= RTW89_SCANOFLD_MAX_SSID)
				break;
		}
		ch_info->num_pkt = probe_count;
	}

	switch (chan_type) {
	case RTW89_CHAN_OPERATE:
		/* return to the operating channel; TX a NULL-data frame when
		 * we have a BSSID and are not acting as an AP
		 */
		ch_info->central_ch = op->channel;
		ch_info->pri_ch = op->primary_channel;
		ch_info->ch_band = op->band_type;
		ch_info->bw = op->band_width;
		vif = rtwvif_link_to_vif(rtwvif_link);
		ch_info->tx_null = !is_zero_ether_addr(rtwvif_link->bssid) &&
				   vif->type != NL80211_IFTYPE_AP;
		ch_info->num_pkt = 0;
		break;
	case RTW89_CHAN_DFS:
		/* enforce a minimum period (except 6 GHz) and pause data */
		if (ch_info->ch_band != RTW89_BAND_6G)
			ch_info->period = max_t(u8, ch_info->period,
						RTW89_DFS_CHAN_TIME);
		ch_info->dwell_time = RTW89_DWELL_TIME;
		ch_info->pause_data = true;
		break;
	case RTW89_CHAN_ACTIVE:
		ch_info->pause_data = true;
		break;
	case RTW89_CHAN_EXTRA_OP:
		/* extra operating channel tracked in scan_info->extra_op */
		ch_info->central_ch = ext->chan.channel;
		ch_info->pri_ch = ext->chan.primary_channel;
		ch_info->ch_band = ext->chan.band_type;
		ch_info->bw = ext->chan.band_width;
		vif = rtwvif_link_to_vif(ext->rtwvif_link);
		ch_info->tx_null = !is_zero_ether_addr(ext->rtwvif_link->bssid) &&
				   vif->type != NL80211_IFTYPE_AP;
		ch_info->num_pkt = 0;
		ch_info->macid_tx = true;
		break;
	default:
		rtw89_err(rtwdev, "Channel type out of bound\n");
	}
}
8409
rtw89_pno_scan_add_chan_be(struct rtw89_dev * rtwdev,int chan_type,int ssid_num,struct rtw89_mac_chinfo_be * ch_info)8410 static void rtw89_pno_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type,
8411 int ssid_num,
8412 struct rtw89_mac_chinfo_be *ch_info)
8413 {
8414 struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
8415 struct rtw89_pktofld_info *info;
8416 u8 probe_count = 0, i;
8417
8418 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
8419 ch_info->bw = RTW89_SCAN_WIDTH;
8420 ch_info->tx_null = false;
8421 ch_info->pause_data = false;
8422 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
8423
8424 if (ssid_num) {
8425 list_for_each_entry(info, &rtw_wow->pno_pkt_list, list) {
8426 ch_info->pkt_id[probe_count++] = info->id;
8427 if (probe_count >= RTW89_SCANOFLD_MAX_SSID)
8428 break;
8429 }
8430 }
8431
8432 for (i = probe_count; i < RTW89_SCANOFLD_MAX_SSID; i++)
8433 ch_info->pkt_id[i] = RTW89_SCANOFLD_PKT_NONE;
8434
8435 switch (chan_type) {
8436 case RTW89_CHAN_DFS:
8437 ch_info->period = max_t(u8, ch_info->period, RTW89_DFS_CHAN_TIME);
8438 ch_info->dwell_time = RTW89_DWELL_TIME;
8439 break;
8440 case RTW89_CHAN_ACTIVE:
8441 break;
8442 default:
8443 rtw89_warn(rtwdev, "Channel type out of bound\n");
8444 break;
8445 }
8446 }
8447
/* Fill one channel entry for BE hw_scan channel-list offload.
 *
 * Like the AX variant, but without RNR handling and with fixed-size
 * pkt_id slots that are padded with RTW89_SCANOFLD_PKT_NONE.
 */
static void rtw89_hw_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type,
				      int ssid_num,
				      struct rtw89_mac_chinfo_be *ch_info)
{
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif;
	struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
	struct cfg80211_scan_request *req = rtwvif->scan_req;
	struct rtw89_pktofld_info *info;
	u8 band, probe_count = 0, i;

	/* common defaults; refined per channel type below */
	ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
	ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
	ch_info->bw = RTW89_SCAN_WIDTH;
	ch_info->tx_null = false;
	ch_info->pause_data = false;
	ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;

	if (ssid_num) {
		band = rtw89_hw_to_nl80211_band(ch_info->ch_band);

		list_for_each_entry(info, &scan_info->pkt_list[band], list) {
			/* 6 GHz templates are channel-bound: skip ones bound
			 * to other channels
			 */
			if (info->channel_6ghz &&
			    ch_info->pri_ch != info->channel_6ghz)
				continue;

			/* wildcard 6 GHz templates are not attached here */
			if (info->wildcard_6ghz)
				continue;

			ch_info->pkt_id[probe_count++] = info->id;
			if (probe_count >= RTW89_SCANOFLD_MAX_SSID)
				break;
		}
	}

	/* 6 GHz: with only a wildcard SSID requested, or on a non-PSC
	 * channel, skip probing and (unless the requested duration is
	 * mandatory) trim the extra probe dwell time
	 */
	if (ch_info->ch_band == RTW89_BAND_6G) {
		if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) ||
		    !ch_info->is_psc) {
			ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
			if (!req->duration_mandatory)
				ch_info->period -= RTW89_DWELL_TIME_6G;
		}
	}

	/* pad remaining slots with "no packet" */
	for (i = probe_count; i < RTW89_SCANOFLD_MAX_SSID; i++)
		ch_info->pkt_id[i] = RTW89_SCANOFLD_PKT_NONE;

	switch (chan_type) {
	case RTW89_CHAN_DFS:
		/* enforce a minimum period (except 6 GHz) and pause data */
		if (ch_info->ch_band != RTW89_BAND_6G)
			ch_info->period =
				max_t(u8, ch_info->period, RTW89_DFS_CHAN_TIME);
		ch_info->dwell_time = RTW89_DWELL_TIME;
		ch_info->pause_data = true;
		break;
	case RTW89_CHAN_ACTIVE:
		ch_info->pause_data = true;
		break;
	default:
		rtw89_warn(rtwdev, "Channel type out of bound\n");
		break;
	}
}
8511
/* Build and offload the PNO (net-detect) scan channel list for AX chips.
 *
 * Converts each nd_config channel (up to RTW89_SCAN_LIST_LIMIT_AX) into a
 * chinfo entry — classifying radar/no-IR channels as DFS — then hands the
 * whole list to firmware. The temporary list is always freed before
 * returning. Returns 0 on success or a negative error code.
 */
int rtw89_pno_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
				    struct rtw89_vif_link *rtwvif_link)
{
	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
	struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config;
	struct rtw89_mac_chinfo_ax *ch_info, *tmp;
	struct ieee80211_channel *channel;
	struct list_head chan_list;
	int list_len;
	enum rtw89_chan_type type;
	int ret = 0;
	u32 idx;

	INIT_LIST_HEAD(&chan_list);
	for (idx = 0, list_len = 0;
	     idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_AX;
	     idx++, list_len++) {
		channel = nd_config->channels[idx];
		ch_info = kzalloc_obj(*ch_info);
		if (!ch_info) {
			ret = -ENOMEM;
			goto out;
		}

		ch_info->period = RTW89_CHANNEL_TIME;
		ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band);
		ch_info->central_ch = channel->hw_value;
		ch_info->pri_ch = channel->hw_value;
		ch_info->is_psc = cfg80211_channel_is_psc(channel);

		/* radar/no-IR channels must be scanned passively first */
		if (channel->flags &
		    (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
			type = RTW89_CHAN_DFS;
		else
			type = RTW89_CHAN_ACTIVE;

		rtw89_pno_scan_add_chan_ax(rtwdev, type, nd_config->n_match_sets, ch_info);
		list_add_tail(&ch_info->list, &chan_list);
	}
	ret = rtw89_fw_h2c_scan_list_offload_ax(rtwdev, list_len, &chan_list);

out:
	/* the offload call copies what it needs; free the temporary list */
	list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
		list_del(&ch_info->list);
		kfree(ch_info);
	}

	return ret;
}
8561
rtw89_hw_scan_add_op_types_ax(struct rtw89_dev * rtwdev,enum rtw89_chan_type type,struct list_head * chan_list,struct cfg80211_scan_request * req,int * off_chan_time)8562 static int rtw89_hw_scan_add_op_types_ax(struct rtw89_dev *rtwdev,
8563 enum rtw89_chan_type type,
8564 struct list_head *chan_list,
8565 struct cfg80211_scan_request *req,
8566 int *off_chan_time)
8567 {
8568 struct rtw89_mac_chinfo_ax *tmp;
8569
8570 tmp = kzalloc_obj(*tmp);
8571 if (!tmp)
8572 return -ENOMEM;
8573
8574 switch (type) {
8575 case RTW89_CHAN_OPERATE:
8576 tmp->period = req->duration_mandatory ?
8577 req->duration : RTW89_CHANNEL_TIME;
8578 *off_chan_time = 0;
8579 break;
8580 case RTW89_CHAN_EXTRA_OP:
8581 tmp->period = RTW89_CHANNEL_TIME_EXTRA_OP;
8582 /* still calc @off_chan_time for scan op */
8583 *off_chan_time += tmp->period;
8584 break;
8585 default:
8586 kfree(tmp);
8587 return -EINVAL;
8588 }
8589
8590 rtw89_hw_scan_add_chan_ax(rtwdev, type, 0, tmp);
8591 list_add_tail(&tmp->list, chan_list);
8592
8593 return 0;
8594 }
8595
/* Build the AX-gen scan channel list for a normal HW scan request.
 *
 * For every requested channel, allocate a chinfo entry, pick a dwell
 * period (user-mandated duration > 6 GHz default > P2P-client default >
 * generic default) and classify it as DFS/passive vs. active.  When the
 * device is connected, "return to operating channel" entries (and
 * optionally an extra-op entry for a concurrent vif) are inserted once
 * enough off-channel time has accumulated, so traffic is not starved.
 *
 * Entries are appended to scan_info->chan_list for later download by
 * rtw89_hw_scan_add_chan_list_ax().  Returns 0 or a negative errno; on
 * failure all entries built so far are freed.
 */
int rtw89_hw_scan_prep_chan_list_ax(struct rtw89_dev *rtwdev,
				    struct rtw89_vif_link *rtwvif_link)
{
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	const struct rtw89_hw_scan_extra_op *ext = &scan_info->extra_op;
	struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
	struct cfg80211_scan_request *req = rtwvif->scan_req;
	struct rtw89_mac_chinfo_ax *ch_info, *tmp;
	struct ieee80211_channel *channel;
	struct list_head chan_list;
	bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN;
	enum rtw89_chan_type type;
	int off_chan_time = 0;
	int ret;
	u32 idx;

	INIT_LIST_HEAD(&chan_list);

	for (idx = 0; idx < req->n_channels; idx++) {
		channel = req->channels[idx];
		ch_info = kzalloc_obj(*ch_info);
		if (!ch_info) {
			ret = -ENOMEM;
			goto out;
		}

		if (req->duration)
			ch_info->period = req->duration;
		else if (channel->band == NL80211_BAND_6GHZ)
			ch_info->period = RTW89_CHANNEL_TIME_6G +
					  RTW89_DWELL_TIME_6G;
		else if (rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT)
			ch_info->period = RTW89_P2P_CHAN_TIME;
		else
			ch_info->period = RTW89_CHANNEL_TIME;

		ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band);
		ch_info->central_ch = channel->hw_value;
		ch_info->pri_ch = channel->hw_value;
		ch_info->rand_seq_num = random_seq;
		ch_info->is_psc = cfg80211_channel_is_psc(channel);

		/* radar/no-IR channels must be scanned passively */
		if (channel->flags &
		    (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
			type = RTW89_CHAN_DFS;
		else
			type = RTW89_CHAN_ACTIVE;
		rtw89_hw_scan_add_chan_ax(rtwdev, type, req->n_ssids, ch_info);

		/* only insert op entries when connected and we have been
		 * off-channel long enough
		 */
		if (!(scan_info->connected &&
		      off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME))
			goto next;

		ret = rtw89_hw_scan_add_op_types_ax(rtwdev, RTW89_CHAN_OPERATE,
						    &chan_list, req, &off_chan_time);
		if (ret) {
			kfree(ch_info);
			goto out;
		}

		if (!ext->set)
			goto next;

		/* concurrent vif recorded: also revisit its channel */
		ret = rtw89_hw_scan_add_op_types_ax(rtwdev, RTW89_CHAN_EXTRA_OP,
						    &chan_list, req, &off_chan_time);
		if (ret) {
			kfree(ch_info);
			goto out;
		}

next:
		list_add_tail(&ch_info->list, &chan_list);
		off_chan_time += ch_info->period;
	}

	list_splice_tail(&chan_list, &scan_info->chan_list);
	return 0;

out:
	list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
		list_del(&ch_info->list);
		kfree(ch_info);
	}

	return ret;
}
8682
rtw89_hw_scan_free_chan_list_ax(struct rtw89_dev * rtwdev)8683 void rtw89_hw_scan_free_chan_list_ax(struct rtw89_dev *rtwdev)
8684 {
8685 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
8686 struct rtw89_mac_chinfo_ax *ch_info, *tmp;
8687
8688 list_for_each_entry_safe(ch_info, tmp, &scan_info->chan_list, list) {
8689 list_del(&ch_info->list);
8690 kfree(ch_info);
8691 }
8692 }
8693
/* Download (up to) one FW batch of prepared AX scan channel entries.
 *
 * Moves at most RTW89_SCAN_LIST_LIMIT_AX entries from
 * scan_info->chan_list to a local list, sends them via the scan-list
 * offload H2C, then frees the moved entries.  Remaining entries stay
 * on chan_list for a subsequent call.  Returns the H2C result.
 */
int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
				   struct rtw89_vif_link *rtwvif_link)
{
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct rtw89_mac_chinfo_ax *ch_info, *tmp;
	unsigned int list_len = 0;
	struct list_head list;
	int ret;

	INIT_LIST_HEAD(&list);

	list_for_each_entry_safe(ch_info, tmp, &scan_info->chan_list, list) {
		/* The operating channel (tx_null == true) should
		 * not be last in the list, to avoid breaking
		 * RTL8851BU and RTL8832BU.
		 */
		if (list_len + 1 == RTW89_SCAN_LIST_LIMIT_AX && ch_info->tx_null)
			break;

		list_move_tail(&ch_info->list, &list);

		list_len++;
		if (list_len == RTW89_SCAN_LIST_LIMIT_AX)
			break;
	}

	ret = rtw89_fw_h2c_scan_list_offload_ax(rtwdev, list_len, &list);

	list_for_each_entry_safe(ch_info, tmp, &list, list) {
		list_del(&ch_info->list);
		kfree(ch_info);
	}

	return ret;
}
8729
rtw89_pno_scan_add_chan_list_be(struct rtw89_dev * rtwdev,struct rtw89_vif_link * rtwvif_link)8730 int rtw89_pno_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
8731 struct rtw89_vif_link *rtwvif_link)
8732 {
8733 struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
8734 struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config;
8735 struct rtw89_mac_chinfo_be *ch_info, *tmp;
8736 struct ieee80211_channel *channel;
8737 struct list_head chan_list;
8738 enum rtw89_chan_type type;
8739 int list_len, ret;
8740 u32 idx;
8741
8742 INIT_LIST_HEAD(&chan_list);
8743
8744 for (idx = 0, list_len = 0;
8745 idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_BE;
8746 idx++, list_len++) {
8747 channel = nd_config->channels[idx];
8748 ch_info = kzalloc_obj(*ch_info);
8749 if (!ch_info) {
8750 ret = -ENOMEM;
8751 goto out;
8752 }
8753
8754 ch_info->period = RTW89_CHANNEL_TIME;
8755 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band);
8756 ch_info->central_ch = channel->hw_value;
8757 ch_info->pri_ch = channel->hw_value;
8758 ch_info->is_psc = cfg80211_channel_is_psc(channel);
8759
8760 if (channel->flags &
8761 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
8762 type = RTW89_CHAN_DFS;
8763 else
8764 type = RTW89_CHAN_ACTIVE;
8765
8766 rtw89_pno_scan_add_chan_be(rtwdev, type,
8767 nd_config->n_match_sets, ch_info);
8768 list_add_tail(&ch_info->list, &chan_list);
8769 }
8770
8771 ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &chan_list,
8772 rtwvif_link);
8773
8774 out:
8775 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
8776 list_del(&ch_info->list);
8777 kfree(ch_info);
8778 }
8779
8780 return ret;
8781 }
8782
/* Build the BE-gen scan channel list for a normal HW scan request.
 *
 * Like the AX variant but without inline op-channel entries.  6 GHz
 * non-PSC channels are skipped when the chip supports RNR and the
 * request asked for colocated-AP discovery.  Entries are appended to
 * scan_info->chan_list; on failure everything built so far is freed.
 * Returns 0 or a negative errno.
 */
int rtw89_hw_scan_prep_chan_list_be(struct rtw89_dev *rtwdev,
				    struct rtw89_vif_link *rtwvif_link)
{
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
	struct cfg80211_scan_request *req = rtwvif->scan_req;
	struct rtw89_mac_chinfo_be *ch_info, *tmp;
	struct ieee80211_channel *channel;
	struct list_head chan_list;
	enum rtw89_chan_type type;
	bool chan_by_rnr;
	bool random_seq;
	int ret;
	u32 idx;

	random_seq = !!(req->flags & NL80211_SCAN_FLAG_RANDOM_SN);
	chan_by_rnr = rtwdev->chip->support_rnr &&
		      (req->flags & NL80211_SCAN_FLAG_COLOCATED_6GHZ);
	INIT_LIST_HEAD(&chan_list);

	for (idx = 0; idx < req->n_channels; idx++) {
		channel = req->channels[idx];

		/* 6 GHz non-PSC channels are found via RNR instead */
		if (channel->band == NL80211_BAND_6GHZ &&
		    !cfg80211_channel_is_psc(channel) && chan_by_rnr)
			continue;

		ch_info = kzalloc_obj(*ch_info);
		if (!ch_info) {
			ret = -ENOMEM;
			goto out;
		}

		/* dwell: explicit duration > 6 GHz > P2P client > default */
		if (req->duration)
			ch_info->period = req->duration;
		else if (channel->band == NL80211_BAND_6GHZ)
			ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G;
		else if (rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT)
			ch_info->period = RTW89_P2P_CHAN_TIME;
		else
			ch_info->period = RTW89_CHANNEL_TIME;

		ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band);
		ch_info->central_ch = channel->hw_value;
		ch_info->pri_ch = channel->hw_value;
		ch_info->rand_seq_num = random_seq;
		ch_info->is_psc = cfg80211_channel_is_psc(channel);

		/* radar/no-IR channels must be scanned passively */
		if (channel->flags & (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
			type = RTW89_CHAN_DFS;
		else
			type = RTW89_CHAN_ACTIVE;
		rtw89_hw_scan_add_chan_be(rtwdev, type, req->n_ssids, ch_info);

		list_add_tail(&ch_info->list, &chan_list);
	}

	list_splice_tail(&chan_list, &scan_info->chan_list);
	return 0;

out:
	list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
		list_del(&ch_info->list);
		kfree(ch_info);
	}

	return ret;
}
8851
rtw89_hw_scan_free_chan_list_be(struct rtw89_dev * rtwdev)8852 void rtw89_hw_scan_free_chan_list_be(struct rtw89_dev *rtwdev)
8853 {
8854 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
8855 struct rtw89_mac_chinfo_be *ch_info, *tmp;
8856
8857 list_for_each_entry_safe(ch_info, tmp, &scan_info->chan_list, list) {
8858 list_del(&ch_info->list);
8859 kfree(ch_info);
8860 }
8861 }
8862
rtw89_hw_scan_add_chan_list_be(struct rtw89_dev * rtwdev,struct rtw89_vif_link * rtwvif_link)8863 int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
8864 struct rtw89_vif_link *rtwvif_link)
8865 {
8866 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
8867 struct rtw89_mac_chinfo_be *ch_info, *tmp;
8868 unsigned int list_len = 0;
8869 struct list_head list;
8870 int ret;
8871
8872 INIT_LIST_HEAD(&list);
8873
8874 list_for_each_entry_safe(ch_info, tmp, &scan_info->chan_list, list) {
8875 list_move_tail(&ch_info->list, &list);
8876
8877 list_len++;
8878 if (list_len == RTW89_SCAN_LIST_LIMIT_BE)
8879 break;
8880 }
8881
8882 ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &list,
8883 rtwvif_link);
8884
8885 list_for_each_entry_safe(ch_info, tmp, &list, list) {
8886 list_del(&ch_info->list);
8887 kfree(ch_info);
8888 }
8889
8890 return ret;
8891 }
8892
rtw89_hw_scan_prehandle(struct rtw89_dev * rtwdev,struct rtw89_vif_link * rtwvif_link,const u8 * mac_addr)8893 static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev,
8894 struct rtw89_vif_link *rtwvif_link,
8895 const u8 *mac_addr)
8896 {
8897 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
8898 int ret;
8899
8900 ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif_link, mac_addr);
8901 if (ret) {
8902 rtw89_err(rtwdev, "Update probe request failed\n");
8903 goto out;
8904 }
8905 ret = mac->prep_chan_list(rtwdev, rtwvif_link);
8906 out:
8907 return ret;
8908 }
8909
/* Update one P2P-GO link's beacon NoA (notice of absence) around a scan.
 *
 * @tu is the estimated total scan time in TU; three beacon intervals of
 * lead time are added so clients see the NoA before the scan starts.  On
 * AX chips the same lead time is also stored as scan_info.delay (in ms).
 * When @scan is false the NoA set is renewed without appending a
 * descriptor, and the updated beacon drops the absence window.
 */
static void rtw89_hw_scan_update_link_beacon_noa(struct rtw89_dev *rtwdev,
						 struct rtw89_vif_link *rtwvif_link,
						 u16 tu, bool scan)
{
	struct ieee80211_p2p_noa_desc noa_desc = {};
	struct ieee80211_bss_conf *bss_conf;
	u16 beacon_int;
	u64 tsf;
	int ret;

	rcu_read_lock();

	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);
	beacon_int = bss_conf->beacon_int;

	rcu_read_unlock();

	tu += beacon_int * 3;
	if (rtwdev->chip->chip_gen == RTW89_CHIP_AX)
		rtwdev->scan_info.delay = ieee80211_tu_to_usec(beacon_int * 3) / 1000;

	/* NoA start time is expressed in the port's TSF domain */
	ret = rtw89_mac_port_get_tsf(rtwdev, rtwvif_link, &tsf);
	if (ret) {
		rtw89_warn(rtwdev, "%s: failed to get tsf\n", __func__);
		return;
	}

	noa_desc.start_time = cpu_to_le32(tsf);
	if (rtwdev->chip->chip_gen == RTW89_CHIP_AX) {
		/* one absence window covering the whole estimated scan */
		noa_desc.interval = cpu_to_le32(ieee80211_tu_to_usec(tu));
		noa_desc.duration = cpu_to_le32(ieee80211_tu_to_usec(tu));
		noa_desc.count = 1;
	} else {
		/* BE: repeated fixed 20000 TU windows */
		noa_desc.duration = cpu_to_le32(ieee80211_tu_to_usec(20000));
		noa_desc.interval = cpu_to_le32(ieee80211_tu_to_usec(20000));
		noa_desc.count = 255;
	}

	rtw89_p2p_noa_renew(rtwvif_link);
	if (scan)
		rtw89_p2p_noa_append(rtwvif_link, &noa_desc);

	rtw89_chip_h2c_update_beacon(rtwdev, rtwvif_link);
}
8954
/* Estimate total scan time and update beacon NoA on all P2P-GO vifs.
 *
 * When @scan is true, sum the dwell periods of every prepared channel
 * entry (the chinfo layout differs between AX and BE) to get the TU
 * estimate and bail out if it is zero.  When @scan is false, tu stays 0
 * and each link's NoA is only renewed (scan window removed).
 */
static void rtw89_hw_scan_update_beacon_noa(struct rtw89_dev *rtwdev, bool scan)
{
	const struct rtw89_entity_mgnt *mgnt = &rtwdev->hal.entity_mgnt;
	const struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_mac_chinfo_ax *chinfo_ax;
	struct rtw89_mac_chinfo_be *chinfo_be;
	struct rtw89_vif_link *rtwvif_link;
	struct list_head *pos, *tmp;
	struct ieee80211_vif *vif;
	struct rtw89_vif *rtwvif;
	u16 tu = 0;

	lockdep_assert_wiphy(rtwdev->hw->wiphy);

	if (!scan)
		goto update;

	list_for_each_safe(pos, tmp, &scan_info->chan_list) {
		switch (chip->chip_gen) {
		case RTW89_CHIP_AX:
			chinfo_ax = list_entry(pos, typeof(*chinfo_ax), list);
			tu += chinfo_ax->period;
			break;
		case RTW89_CHIP_BE:
			chinfo_be = list_entry(pos, typeof(*chinfo_be), list);
			tu += chinfo_be->period;
			break;
		default:
			rtw89_warn(rtwdev, "%s: invalid chip gen %d\n",
				   __func__, chip->chip_gen);
			return;
		}
	}

	if (unlikely(tu == 0)) {
		rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
			    "%s: cannot estimate needed TU\n", __func__);
		return;
	}

update:
	list_for_each_entry(rtwvif, &mgnt->active_list, mgnt_entry) {
		unsigned int link_id;

		vif = rtwvif_to_vif(rtwvif);
		/* only P2P GO interfaces advertise NoA */
		if (vif->type != NL80211_IFTYPE_AP || !vif->p2p)
			continue;

		rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id)
			rtw89_hw_scan_update_link_beacon_noa(rtwdev, rtwvif_link,
							     tu, scan);
	}
}
9009
/* Record a concurrent vif's channel as the scan "extra op" target.
 *
 * When FW supports SCAN_OFFLOAD_EXTRA_OP, pick the first active vif
 * other than the scanning one (via its designated link) and remember
 * its macid/port/channel so the scan schedule can also return to that
 * vif's channel.  ext->set remains false when the feature is missing
 * or no such vif exists.
 */
static void rtw89_hw_scan_set_extra_op_info(struct rtw89_dev *rtwdev,
					    struct rtw89_vif *scan_rtwvif,
					    const struct rtw89_chan *scan_op)
{
	struct rtw89_entity_mgnt *mgnt = &rtwdev->hal.entity_mgnt;
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct rtw89_hw_scan_extra_op *ext = &scan_info->extra_op;
	struct rtw89_vif *tmp;

	ext->set = false;
	if (!RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD_EXTRA_OP, &rtwdev->fw))
		return;

	list_for_each_entry(tmp, &mgnt->active_list, mgnt_entry) {
		const struct rtw89_chan *tmp_chan;
		struct rtw89_vif_link *tmp_link;

		if (tmp == scan_rtwvif)
			continue;

		tmp_link = rtw89_get_designated_link(tmp);
		if (unlikely(!tmp_link))
			continue;

		tmp_chan = rtw89_chan_get(rtwdev, tmp_link->chanctx_idx);
		*ext = (struct rtw89_hw_scan_extra_op){
			.set = true,
			.macid = tmp_link->mac_id,
			.port = tmp_link->port,
			.chan = *tmp_chan,
			.rtwvif_link = tmp_link,
		};

		rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
			    "hw scan: extra op: center %d primary %d\n",
			    ext->chan.channel, ext->chan.primary_channel);
		break;
	}
}
9049
/* Start a HW (FW-offloaded) scan.
 *
 * Snapshots the current operating channel, records scan state, picks
 * the scan MAC address (honouring NL80211_SCAN_FLAG_RANDOM_ADDR),
 * prepares probe requests and the channel list, then quiesces the
 * stack: TX queues stopped, RX filters loosened so scan results are
 * received, chanctx paused, DIG suspended, and in MCC mode NoA is
 * advertised in beacons.  Returns 0 or a negative errno (scan state is
 * cleaned up on failure).
 */
int rtw89_hw_scan_start(struct rtw89_dev *rtwdev,
			struct rtw89_vif_link *rtwvif_link,
			struct ieee80211_scan_request *scan_req)
{
	enum rtw89_entity_mode mode = rtw89_get_entity_mode(rtwdev);
	struct cfg80211_scan_request *req = &scan_req->req;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
						       rtwvif_link->chanctx_idx);
	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
	struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
	struct rtw89_chanctx_pause_parm pause_parm = {
		.rsn = RTW89_CHANCTX_PAUSE_REASON_HW_SCAN,
		.trigger = rtwvif_link,
	};
	u32 rx_fltr = rtwdev->hal.rx_fltr;
	u8 mac_addr[ETH_ALEN];
	int ret;

	/* clone op and keep it during scan */
	rtwdev->scan_info.op_chan = *chan;

	rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
		    "hw scan: op: center %d primary %d\n",
		    chan->channel, chan->primary_channel);

	rtw89_hw_scan_set_extra_op_info(rtwdev, rtwvif, chan);

	rtwdev->scan_info.connected = rtw89_is_any_vif_connected_or_connecting(rtwdev);
	rtwdev->scan_info.scanning_vif = rtwvif_link;
	rtwdev->scan_info.abort = false;
	rtwdev->scan_info.delay = 0;
	rtwvif->scan_ies = &scan_req->ies;
	rtwvif->scan_req = req;

	if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
		get_random_mask_addr(mac_addr, req->mac_addr,
				     req->mac_addr_mask);
	else if (ieee80211_vif_is_mld(vif))
		ether_addr_copy(mac_addr, vif->addr);
	else
		ether_addr_copy(mac_addr, rtwvif_link->mac_addr);

	ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif_link, mac_addr);
	if (ret) {
		rtw89_hw_scan_cleanup(rtwdev, rtwvif_link);
		return ret;
	}

	ieee80211_stop_queues(rtwdev->hw);
	rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif_link, false);

	rtw89_core_scan_start(rtwdev, rtwvif_link, mac_addr, true);

	/* accept frames from any BSS so scan results get through */
	rx_fltr &= ~B_AX_A_BCN_CHK_EN;
	rx_fltr &= ~B_AX_A_BC;
	rx_fltr &= ~B_AX_A_A1_MATCH;

	rtw89_mac_set_rx_fltr(rtwdev, rtwvif_link->mac_idx, rx_fltr);

	rtw89_chanctx_pause(rtwdev, &pause_parm);
	rtw89_phy_dig_suspend(rtwdev);

	if (mode == RTW89_ENTITY_MODE_MCC)
		rtw89_hw_scan_update_beacon_noa(rtwdev, true);

	return 0;
}
9117
/* Context passed through rtw89_chanctx_proceed() to the scan-complete
 * callback.
 */
struct rtw89_hw_scan_complete_cb_data {
	struct rtw89_vif_link *rtwvif_link; /* link that ran the scan */
	bool aborted; /* report the scan as aborted to mac80211 */
};
9122
/* Chanctx-proceed callback finishing a HW scan: restore RX filters,
 * notify mac80211, wake TX queues, re-enable beaconing/rx-sync and DIG,
 * then clean up scan state.  In MCC mode the scan NoA is removed from
 * beacons.  Runs after the channel is restored (coex) and before entity
 * mode proceeds (MCC) — see rtw89_hw_scan_complete().
 */
static int rtw89_hw_scan_complete_cb(struct rtw89_dev *rtwdev, void *data)
{
	enum rtw89_entity_mode mode = rtw89_get_entity_mode(rtwdev);
	struct rtw89_hw_scan_complete_cb_data *cb_data = data;
	struct rtw89_vif_link *rtwvif_link = cb_data->rtwvif_link;
	struct cfg80211_scan_info info = {
		.aborted = cb_data->aborted,
	};

	if (!rtwvif_link)
		return -EINVAL;

	rtw89_mac_set_rx_fltr(rtwdev, rtwvif_link->mac_idx, rtwdev->hal.rx_fltr);

	rtw89_core_scan_complete(rtwdev, rtwvif_link, true);
	ieee80211_scan_completed(rtwdev->hw, &info);
	ieee80211_wake_queues(rtwdev->hw);
	rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif_link, true);
	rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, true);
	rtw89_phy_dig_resume(rtwdev, true);

	rtw89_hw_scan_cleanup(rtwdev, rtwvif_link);

	if (mode == RTW89_ENTITY_MODE_MCC)
		rtw89_hw_scan_update_beacon_noa(rtwdev, false);

	return 0;
}
9151
/* Finish a HW scan (normally or @aborted), deferring the actual
 * teardown to a chanctx callback so it runs at the correct point of
 * channel restoration.
 */
void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev,
			    struct rtw89_vif_link *rtwvif_link,
			    bool aborted)
{
	struct rtw89_hw_scan_complete_cb_data cb_data = {
		.rtwvif_link = rtwvif_link,
		.aborted = aborted,
	};
	const struct rtw89_chanctx_cb_parm cb_parm = {
		.cb = rtw89_hw_scan_complete_cb,
		.data = &cb_data,
		.caller = __func__,
	};

	/* The things here needs to be done after setting channel (for coex)
	 * and before proceeding entity mode (for MCC). So, pass a callback
	 * of them for the right sequence rather than doing them directly.
	 */
	rtw89_chanctx_proceed(rtwdev, &cb_parm);
}
9172
/* Abort an in-progress HW scan: mark the abort, ask FW to stop scan
 * offload, then report completion (aborted) to mac80211.
 */
void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev,
			 struct rtw89_vif_link *rtwvif_link)
{
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	int ret;

	scan_info->abort = true;

	ret = rtw89_hw_scan_offload(rtwdev, rtwvif_link, false);
	if (ret)
		rtw89_warn(rtwdev, "rtw89_hw_scan_offload failed ret %d\n", ret);

	/* Indicate ieee80211_scan_completed() before returning, which is safe
	 * because scan abort command always waits for completion of
	 * RTW89_SCAN_END_SCAN_NOTIFY, so that ieee80211_stop() can flush scan
	 * work properly.
	 */
	rtw89_hw_scan_complete(rtwdev, rtwvif_link, true);
}
9192
rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev * rtwdev)9193 static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev)
9194 {
9195 struct rtw89_vif_link *rtwvif_link;
9196 struct rtw89_vif *rtwvif;
9197 unsigned int link_id;
9198
9199 rtw89_for_each_rtwvif(rtwdev, rtwvif) {
9200 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
9201 /* This variable implies connected or during attempt to connect */
9202 if (!is_zero_ether_addr(rtwvif_link->bssid))
9203 return true;
9204 }
9205 }
9206
9207 return false;
9208 }
9209
/* Enable or disable FW scan offload for @rtwvif_link.
 *
 * On enable, the prepared channel list is downloaded first.  BE chips
 * take an extended option set: number of op channels to return to
 * (1 when connected, +1 when an extra-op vif is recorded), MLO mode,
 * scan mode and band.  Returns 0 or a negative errno.
 */
int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev,
			  struct rtw89_vif_link *rtwvif_link,
			  bool enable)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	const struct rtw89_hw_scan_extra_op *ext = &scan_info->extra_op;
	struct rtw89_scan_option opt = {0};
	bool connected;
	int ret = 0;

	if (!rtwvif_link)
		return -EINVAL;

	connected = rtwdev->scan_info.connected;
	opt.enable = enable;
	/* return to the operating channel between dwells when connected */
	opt.target_ch_mode = connected;
	opt.delay = rtwdev->scan_info.delay;
	if (enable) {
		ret = mac->add_chan_list(rtwdev, rtwvif_link);
		if (ret)
			goto out;
	}

	if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) {
		opt.operation = enable ? RTW89_SCAN_OP_START : RTW89_SCAN_OP_STOP;
		opt.scan_mode = RTW89_SCAN_MODE_SA;
		opt.band = rtwvif_link->mac_idx;
		opt.num_macc_role = 0;
		opt.mlo_mode = rtwdev->mlo_dbcc_mode;
		opt.num_opch = connected ? 1 : 0;
		if (connected && ext->set)
			opt.num_opch++;

		opt.opch_end = connected ? 0 : RTW89_CHAN_INVALID;
	}

	ret = rtw89_mac_scan_offload(rtwdev, &opt, rtwvif_link, false);

out:
	return ret;
}
9252
9253 #define H2C_FW_CPU_EXCEPTION_TYPE_0 0x5566
9254 #define H2C_FW_CPU_EXCEPTION_TYPE_1 0x0
rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev * rtwdev)9255 int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev)
9256 {
9257 struct rtw89_h2c_trig_cpu_except *h2c;
9258 u32 cpu_exception_type_def;
9259 u32 len = sizeof(*h2c);
9260 struct sk_buff *skb;
9261 int ret;
9262
9263 if (RTW89_CHK_FW_FEATURE(CRASH_TRIGGER_TYPE_1, &rtwdev->fw))
9264 cpu_exception_type_def = H2C_FW_CPU_EXCEPTION_TYPE_1;
9265 else if (RTW89_CHK_FW_FEATURE(CRASH_TRIGGER_TYPE_0, &rtwdev->fw))
9266 cpu_exception_type_def = H2C_FW_CPU_EXCEPTION_TYPE_0;
9267 else
9268 return -EOPNOTSUPP;
9269
9270 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
9271 if (!skb) {
9272 rtw89_err(rtwdev,
9273 "failed to alloc skb for fw cpu exception\n");
9274 return -ENOMEM;
9275 }
9276
9277 skb_put(skb, len);
9278 h2c = (struct rtw89_h2c_trig_cpu_except *)skb->data;
9279
9280 h2c->w0 = le32_encode_bits(cpu_exception_type_def,
9281 RTW89_H2C_CPU_EXCEPTION_TYPE);
9282
9283 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
9284 H2C_CAT_TEST,
9285 H2C_CL_FW_STATUS_TEST,
9286 H2C_FUNC_CPU_EXCEPTION, 0, 0,
9287 len);
9288
9289 ret = rtw89_h2c_tx(rtwdev, skb, false);
9290 if (ret) {
9291 rtw89_err(rtwdev, "failed to send h2c\n");
9292 dev_kfree_skb_any(skb);
9293 return ret;
9294 }
9295
9296 return 0;
9297 }
9298
#define H2C_PKT_DROP_LEN 24
/* Ask FW to drop queued TX packets selected by @params (per-macid AC
 * queue or a whole band).  Selectors outside the known "once" set are
 * still sent, but a debug message notes they may not be fully
 * supported.  Returns 0 or a negative errno.
 */
int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev,
			  const struct rtw89_pkt_drop_params *params)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_PKT_DROP_LEN);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for packet drop\n");
		return -ENOMEM;
	}

	switch (params->sel) {
	case RTW89_PKT_DROP_SEL_MACID_BE_ONCE:
	case RTW89_PKT_DROP_SEL_MACID_BK_ONCE:
	case RTW89_PKT_DROP_SEL_MACID_VI_ONCE:
	case RTW89_PKT_DROP_SEL_MACID_VO_ONCE:
	case RTW89_PKT_DROP_SEL_BAND_ONCE:
		break;
	default:
		rtw89_debug(rtwdev, RTW89_DBG_FW,
			    "H2C of pkt drop might not fully support sel: %d yet\n",
			    params->sel);
		break;
	}

	skb_put(skb, H2C_PKT_DROP_LEN);
	RTW89_SET_FWCMD_PKT_DROP_SEL(skb->data, params->sel);
	RTW89_SET_FWCMD_PKT_DROP_MACID(skb->data, params->macid);
	RTW89_SET_FWCMD_PKT_DROP_BAND(skb->data, params->mac_band);
	RTW89_SET_FWCMD_PKT_DROP_PORT(skb->data, params->port);
	RTW89_SET_FWCMD_PKT_DROP_MBSSID(skb->data, params->mbssid);
	RTW89_SET_FWCMD_PKT_DROP_ROLE_A_INFO_TF_TRS(skb->data, params->tf_trs);
	RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_0(skb->data,
						  params->macid_band_sel[0]);
	RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_1(skb->data,
						  params->macid_band_sel[1]);
	RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_2(skb->data,
						  params->macid_band_sel[2]);
	RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_3(skb->data,
						  params->macid_band_sel[3]);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_PKT_DROP, 0, 0,
			      H2C_PKT_DROP_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;

fail:
	dev_kfree_skb_any(skb);
	return ret;
}
9361
#define H2C_KEEP_ALIVE_LEN 4
/* Configure FW keep-alive for WoWLAN: while enabled, FW periodically
 * (period value 5; unit defined by FW) transmits the offloaded
 * NULL-data frame for this macid.
 *
 * NOTE(review): a failure to add the NULL-data offload packet is
 * reported as -EPERM, discarding the underlying error code — confirm
 * whether callers depend on that.
 */
int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
			    bool enable)
{
	struct sk_buff *skb;
	u8 pkt_id = 0;
	int ret;

	if (enable) {
		ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
						   RTW89_PKT_OFLD_TYPE_NULL_DATA,
						   &pkt_id);
		if (ret)
			return -EPERM;
	}

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_KEEP_ALIVE_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for keep alive\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_KEEP_ALIVE_LEN);

	RTW89_SET_KEEP_ALIVE_ENABLE(skb->data, enable);
	RTW89_SET_KEEP_ALIVE_PKT_NULL_ID(skb->data, pkt_id);
	RTW89_SET_KEEP_ALIVE_PERIOD(skb->data, 5);
	RTW89_SET_KEEP_ALIVE_MACID(skb->data, rtwvif_link->mac_id);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_WOW,
			      H2C_FUNC_KEEP_ALIVE, 0, 1,
			      H2C_KEEP_ALIVE_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;

fail:
	dev_kfree_skb_any(skb);

	return ret;
}
9410
/* Configure FW ARP offload: while enabled, FW answers ARP requests with
 * the pre-built ARP-response packet registered here.  Returns 0 or a
 * negative errno.
 */
int rtw89_fw_h2c_arp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
			     bool enable)
{
	struct rtw89_h2c_arp_offload *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	u8 rsp_id = 0;
	int ret;

	if (enable) {
		ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
						   RTW89_PKT_OFLD_TYPE_ARP_RSP,
						   &rsp_id);
		if (ret)
			return ret;
	}

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for arp offload\n");
		return -ENOMEM;
	}

	h2c = (struct rtw89_h2c_arp_offload *)skb_put(skb, len);

	h2c->w0 = le32_encode_bits(enable, RTW89_H2C_ARP_OFFLOAD_W0_ENABLE) |
		  le32_encode_bits(0, RTW89_H2C_ARP_OFFLOAD_W0_ACTION) |
		  le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_ARP_OFFLOAD_W0_MACID) |
		  le32_encode_bits(rsp_id, RTW89_H2C_ARP_OFFLOAD_W0_PKT_ID);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_WOW,
			      H2C_FUNC_ARP_OFLD, 0, 1,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (!ret)
		return 0;

	rtw89_err(rtwdev, "failed to send h2c\n");
	dev_kfree_skb_any(skb);

	return ret;
}
9461
9462 #define H2C_DISCONNECT_DETECT_LEN 8
rtw89_fw_h2c_disconnect_detect(struct rtw89_dev * rtwdev,struct rtw89_vif_link * rtwvif_link,bool enable)9463 int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev,
9464 struct rtw89_vif_link *rtwvif_link, bool enable)
9465 {
9466 struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
9467 struct sk_buff *skb;
9468 u8 macid = rtwvif_link->mac_id;
9469 int ret;
9470
9471 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DISCONNECT_DETECT_LEN);
9472 if (!skb) {
9473 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n");
9474 return -ENOMEM;
9475 }
9476
9477 skb_put(skb, H2C_DISCONNECT_DETECT_LEN);
9478
9479 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) {
9480 RTW89_SET_DISCONNECT_DETECT_ENABLE(skb->data, enable);
9481 RTW89_SET_DISCONNECT_DETECT_DISCONNECT(skb->data, !enable);
9482 RTW89_SET_DISCONNECT_DETECT_MAC_ID(skb->data, macid);
9483 RTW89_SET_DISCONNECT_DETECT_CHECK_PERIOD(skb->data, 100);
9484 RTW89_SET_DISCONNECT_DETECT_TRY_PKT_COUNT(skb->data, 5);
9485 }
9486
9487 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
9488 H2C_CAT_MAC,
9489 H2C_CL_MAC_WOW,
9490 H2C_FUNC_DISCONNECT_DETECT, 0, 1,
9491 H2C_DISCONNECT_DETECT_LEN);
9492
9493 ret = rtw89_h2c_tx(rtwdev, skb, false);
9494 if (ret) {
9495 rtw89_err(rtwdev, "failed to send h2c\n");
9496 goto fail;
9497 }
9498
9499 return 0;
9500
9501 fail:
9502 dev_kfree_skb_any(skb);
9503
9504 return ret;
9505 }
9506
/* Configure NLO (network list offload) for PNO wake: download the
 * match-set SSIDs FW should look for while the host sleeps.  On enable,
 * cipher checks are ignored per the existing policy.  Returns 0 or a
 * negative errno.
 */
int rtw89_fw_h2c_cfg_pno(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
			 bool enable)
{
	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
	struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config;
	struct rtw89_h2c_cfg_nlo *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret, i;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for nlo\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_cfg_nlo *)skb->data;

	h2c->w0 = le32_encode_bits(enable, RTW89_H2C_NLO_W0_ENABLE) |
		  le32_encode_bits(enable, RTW89_H2C_NLO_W0_IGNORE_CIPHER) |
		  le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_NLO_W0_MACID);

	if (enable) {
		/* copy each match-set SSID into the fixed H2C tables */
		h2c->nlo_cnt = nd_config->n_match_sets;
		for (i = 0 ; i < nd_config->n_match_sets; i++) {
			h2c->ssid_len[i] = nd_config->match_sets[i].ssid.ssid_len;
			memcpy(h2c->ssid[i], nd_config->match_sets[i].ssid.ssid,
			       nd_config->match_sets[i].ssid.ssid_len);
		}
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_WOW,
			      H2C_FUNC_NLO, 0, 1,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;

fail:
	dev_kfree_skb_any(skb);
	return ret;
}
9557
/* rtw89_fw_h2c_wow_global() - master WoWLAN arm/disarm in firmware
 * @rtwdev: driver context
 * @rtwvif_link: link WoWLAN is armed on (supplies the MAC ID)
 * @enable: arm (true) or disarm (false)
 *
 * Sends H2C_FUNC_WOW_GLOBAL carrying the pairwise/group cipher algorithm
 * selectors and the pre-built key info block from rtwdev->wow.
 *
 * Return: 0 on success or a negative errno.
 */
int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
			    bool enable)
{
	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
	struct rtw89_h2c_wow_global *h2c;
	u8 macid = rtwvif_link->mac_id;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for wow global\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_wow_global *)skb->data;

	h2c->w0 = le32_encode_bits(enable, RTW89_H2C_WOW_GLOBAL_W0_ENABLE) |
		  le32_encode_bits(macid, RTW89_H2C_WOW_GLOBAL_W0_MAC_ID) |
		  le32_encode_bits(rtw_wow->ptk_alg,
				   RTW89_H2C_WOW_GLOBAL_W0_PAIRWISE_SEC_ALGO) |
		  le32_encode_bits(rtw_wow->gtk_alg,
				   RTW89_H2C_WOW_GLOBAL_W0_GROUP_SEC_ALGO);
	/* struct copy of the key material block prepared by the WoW code */
	h2c->key_info = rtw_wow->key_info;

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_WOW,
			      H2C_FUNC_WOW_GLOBAL, 0, 1,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;

fail:
	dev_kfree_skb_any(skb);

	return ret;
}
9604
#define H2C_WAKEUP_CTRL_LEN 4
/* rtw89_fw_h2c_wow_wakeup_ctrl() - select which events may wake the host
 * @rtwdev: driver context
 * @rtwvif_link: link WoWLAN is armed on (supplies the MAC ID)
 * @enable: apply (true) or clear (false) the selected wake sources
 *
 * Each wake source (pattern match, magic packet, deauth/disconnect) is only
 * toggled when the corresponding trigger is configured in rtwdev->wow, so a
 * disable pass clears exactly the bits the enable pass set.
 *
 * Return: 0 on success or a negative errno.
 */
int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev,
				 struct rtw89_vif_link *rtwvif_link,
				 bool enable)
{
	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
	struct sk_buff *skb;
	u8 macid = rtwvif_link->mac_id;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WAKEUP_CTRL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for wakeup ctrl\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_WAKEUP_CTRL_LEN);

	if (rtw_wow->pattern_cnt)
		RTW89_SET_WOW_WAKEUP_CTRL_PATTERN_MATCH_ENABLE(skb->data, enable);
	if (test_bit(RTW89_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags))
		RTW89_SET_WOW_WAKEUP_CTRL_MAGIC_ENABLE(skb->data, enable);
	if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags))
		RTW89_SET_WOW_WAKEUP_CTRL_DEAUTH_ENABLE(skb->data, enable);

	RTW89_SET_WOW_WAKEUP_CTRL_MAC_ID(skb->data, macid);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_WOW,
			      H2C_FUNC_WAKEUP_CTRL, 0, 1,
			      H2C_WAKEUP_CTRL_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;

fail:
	dev_kfree_skb_any(skb);

	return ret;
}
9651
/* rtw89_fw_h2c_wow_cam_update() - program one wake-pattern CAM entry
 * @rtwdev: driver context
 * @cam_info: entry descriptor: index, read/write flag, pattern mask words,
 *	CRC and match attributes; ->valid selects program vs. invalidate
 *
 * When ->valid is clear only the read/write flag and index are sent and the
 * pattern words are skipped; the VALID bit itself is OR-ed into w5 on both
 * paths (hence the fill_valid label placement).
 *
 * Return: 0 on success or a negative errno.
 */
int rtw89_fw_h2c_wow_cam_update(struct rtw89_dev *rtwdev,
				struct rtw89_wow_cam_info *cam_info)
{
	struct rtw89_h2c_wow_cam_update *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for wow cam update\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_wow_cam_update *)skb->data;

	h2c->w0 = le32_encode_bits(cam_info->r_w, RTW89_H2C_WOW_CAM_UPD_W0_R_W) |
		  le32_encode_bits(cam_info->idx, RTW89_H2C_WOW_CAM_UPD_W0_IDX);

	if (!cam_info->valid)
		goto fill_valid;

	h2c->wkfm0 = cam_info->mask[0];
	h2c->wkfm1 = cam_info->mask[1];
	h2c->wkfm2 = cam_info->mask[2];
	h2c->wkfm3 = cam_info->mask[3];
	h2c->w5 = le32_encode_bits(cam_info->crc, RTW89_H2C_WOW_CAM_UPD_W5_CRC) |
		  le32_encode_bits(cam_info->negative_pattern_match,
				   RTW89_H2C_WOW_CAM_UPD_W5_NEGATIVE_PATTERN_MATCH) |
		  le32_encode_bits(cam_info->skip_mac_hdr,
				   RTW89_H2C_WOW_CAM_UPD_W5_SKIP_MAC_HDR) |
		  le32_encode_bits(cam_info->uc, RTW89_H2C_WOW_CAM_UPD_W5_UC) |
		  le32_encode_bits(cam_info->mc, RTW89_H2C_WOW_CAM_UPD_W5_MC) |
		  le32_encode_bits(cam_info->bc, RTW89_H2C_WOW_CAM_UPD_W5_BC);
fill_valid:
	h2c->w5 |= le32_encode_bits(cam_info->valid, RTW89_H2C_WOW_CAM_UPD_W5_VALID);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_WOW,
			      H2C_FUNC_WOW_CAM_UPD, 0, 1,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_wow_cam_update);
9708
/* rtw89_fw_h2c_wow_cam_update_v1() - program one wake-pattern payload CAM
 * entry (newer "payload CAM" command layout)
 * @rtwdev: driver context
 * @cam_info: same entry descriptor as the v0 variant
 *
 * Unlike the v0 command, the VALID flag lives in w8 (alongside a fixed
 * WOW_PTR of 1) and is always encoded; only the pattern words are skipped
 * for an invalidate.
 *
 * Return: 0 on success or a negative errno.
 */
int rtw89_fw_h2c_wow_cam_update_v1(struct rtw89_dev *rtwdev,
				   struct rtw89_wow_cam_info *cam_info)
{
	struct rtw89_h2c_wow_payload_cam_update *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for wow payload cam update\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_wow_payload_cam_update *)skb->data;

	h2c->w0 = le32_encode_bits(cam_info->r_w, RTW89_H2C_WOW_PLD_CAM_UPD_W0_R_W) |
		  le32_encode_bits(cam_info->idx, RTW89_H2C_WOW_PLD_CAM_UPD_W0_IDX);
	h2c->w8 = le32_encode_bits(cam_info->valid, RTW89_H2C_WOW_PLD_CAM_UPD_W8_VALID) |
		  le32_encode_bits(1, RTW89_H2C_WOW_PLD_CAM_UPD_W8_WOW_PTR);

	if (!cam_info->valid)
		goto done;

	h2c->wkfm0 = cam_info->mask[0];
	h2c->wkfm1 = cam_info->mask[1];
	h2c->wkfm2 = cam_info->mask[2];
	h2c->wkfm3 = cam_info->mask[3];
	h2c->w5 = le32_encode_bits(cam_info->uc, RTW89_H2C_WOW_PLD_CAM_UPD_W5_UC) |
		  le32_encode_bits(cam_info->mc, RTW89_H2C_WOW_PLD_CAM_UPD_W5_MC) |
		  le32_encode_bits(cam_info->bc, RTW89_H2C_WOW_PLD_CAM_UPD_W5_BC) |
		  le32_encode_bits(cam_info->skip_mac_hdr,
				   RTW89_H2C_WOW_PLD_CAM_UPD_W5_SKIP_MAC_HDR);
	h2c->w6 = le32_encode_bits(cam_info->crc, RTW89_H2C_WOW_PLD_CAM_UPD_W6_CRC);
	h2c->w7 = le32_encode_bits(cam_info->negative_pattern_match,
				   RTW89_H2C_WOW_PLD_CAM_UPD_W7_NEGATIVE_PATTERN_MATCH);

done:
	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_WOW,
			      H2C_FUNC_WOW_PLD_CAM_UPD, 0, 1,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_wow_cam_update_v1);
9766
/* rtw89_fw_h2c_wow_gtk_ofld() - enable/disable GTK rekey offload in firmware
 * @rtwdev: driver context
 * @rtwvif_link: link WoWLAN is armed on (supplies the MAC ID)
 * @enable: program the offload (true) or tear it down (false)
 *
 * For enable, first offloads an EAPOL 2/2 template (and, when an IGTK key id
 * is present, an SA Query template for PMF) so firmware can answer rekeys
 * while the host sleeps, then fills the command with cipher/AKM parameters
 * and the gtk_info block. A no-op (return 0) when no group cipher is in use.
 *
 * Return: 0 on success or a negative errno.
 */
int rtw89_fw_h2c_wow_gtk_ofld(struct rtw89_dev *rtwdev,
			      struct rtw89_vif_link *rtwvif_link,
			      bool enable)
{
	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
	struct rtw89_wow_gtk_info *gtk_info = &rtw_wow->gtk_info;
	struct rtw89_h2c_wow_gtk_ofld *h2c;
	u8 macid = rtwvif_link->mac_id;
	u32 len = sizeof(*h2c);
	u8 pkt_id_sa_query = 0;
	struct sk_buff *skb;
	u8 pkt_id_eapol = 0;
	int ret;

	if (!rtw_wow->gtk_alg)
		return 0;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for gtk ofld\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_wow_gtk_ofld *)skb->data;

	/* disable path sends a zeroed body; only the header matters */
	if (!enable)
		goto hdr;

	ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
					   RTW89_PKT_OFLD_TYPE_EAPOL_KEY,
					   &pkt_id_eapol);
	if (ret)
		goto fail;

	if (gtk_info->igtk_keyid) {
		ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
						   RTW89_PKT_OFLD_TYPE_SA_QUERY,
						   &pkt_id_sa_query);
		if (ret)
			goto fail;
	}

	/* TKIP_EN is inferred from a non-zero TX MIC key in gtk_info */
	h2c->w0 = le32_encode_bits(enable, RTW89_H2C_WOW_GTK_OFLD_W0_EN) |
		  le32_encode_bits(!!memchr_inv(gtk_info->txmickey, 0,
						sizeof(gtk_info->txmickey)),
				   RTW89_H2C_WOW_GTK_OFLD_W0_TKIP_EN) |
		  le32_encode_bits(gtk_info->igtk_keyid ? 1 : 0,
				   RTW89_H2C_WOW_GTK_OFLD_W0_IEEE80211W_EN) |
		  le32_encode_bits(macid, RTW89_H2C_WOW_GTK_OFLD_W0_MAC_ID) |
		  le32_encode_bits(pkt_id_eapol, RTW89_H2C_WOW_GTK_OFLD_W0_GTK_RSP_ID);
	h2c->w1 = le32_encode_bits(gtk_info->igtk_keyid ? pkt_id_sa_query : 0,
				   RTW89_H2C_WOW_GTK_OFLD_W1_PMF_SA_QUERY_ID) |
		  le32_encode_bits(rtw_wow->akm, RTW89_H2C_WOW_GTK_OFLD_W1_ALGO_AKM_SUIT);
	h2c->gtk_info = rtw_wow->gtk_info;

hdr:
	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_WOW,
			      H2C_FUNC_GTK_OFLD, 0, 1,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}
	return 0;
fail:
	/* NOTE(review): an already-offloaded EAPOL template is not rolled
	 * back here - presumably released elsewhere on WoW teardown; verify.
	 */
	dev_kfree_skb_any(skb);

	return ret;
}
9841
/* Ask firmware to enter or leave inactive power save (FW IPS) for the given
 * link, then wait for the matching IPS-config completion event.
 */
int rtw89_fw_h2c_fwips(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
		       bool enable)
{
	struct rtw89_wait_info *ps_wait = &rtwdev->mac.ps_wait;
	u32 h2c_len = sizeof(struct rtw89_h2c_fwips);
	struct rtw89_h2c_fwips *h2c;
	struct sk_buff *skb;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, h2c_len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw ips\n");
		return -ENOMEM;
	}

	h2c = (struct rtw89_h2c_fwips *)skb_put(skb, h2c_len);
	h2c->w0 = le32_encode_bits(enable, RTW89_H2C_FW_IPS_W0_ENABLE) |
		  le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_FW_IPS_W0_MACID);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, H2C_CAT_MAC,
			      H2C_CL_MAC_PS, H2C_FUNC_IPS_CFG, 0, 1, h2c_len);

	return rtw89_h2c_tx_and_wait(rtwdev, skb, ps_wait,
				     RTW89_PS_WAIT_COND_IPS_CFG);
}
9869
rtw89_fw_h2c_wow_request_aoac(struct rtw89_dev * rtwdev)9870 int rtw89_fw_h2c_wow_request_aoac(struct rtw89_dev *rtwdev)
9871 {
9872 struct rtw89_wait_info *wait = &rtwdev->wow.wait;
9873 struct rtw89_h2c_wow_aoac *h2c;
9874 u32 len = sizeof(*h2c);
9875 struct sk_buff *skb;
9876
9877 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
9878 if (!skb) {
9879 rtw89_err(rtwdev, "failed to alloc skb for aoac\n");
9880 return -ENOMEM;
9881 }
9882
9883 skb_put(skb, len);
9884
9885 /* This H2C only nofity firmware to generate AOAC report C2H,
9886 * no need any parameter.
9887 */
9888 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
9889 H2C_CAT_MAC,
9890 H2C_CL_MAC_WOW,
9891 H2C_FUNC_AOAC_REPORT_REQ, 1, 0,
9892 len);
9893
9894 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_WOW_WAIT_COND_AOAC);
9895 }
9896
/* Return < 0, if failures happen during waiting for the condition.
 * Return 0, when waiting for the condition succeeds.
 * Return > 0, if the wait is considered unreachable due to driver/FW design,
 * where 1 means during SER.
 */
static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
				 struct rtw89_wait_info *wait, unsigned int cond)
{
	struct rtw89_wait_response *prep;
	int ret = 0;

	lockdep_assert_wiphy(rtwdev->hw->wiphy);

	/* Register the waiter before transmitting so the completion C2H
	 * cannot race ahead of us.
	 */
	prep = rtw89_wait_for_cond_prep(wait, cond);
	if (IS_ERR(prep))
		goto out;	/* eval below also handles an error prep */

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		dev_kfree_skb_any(skb);
		ret = -EBUSY;
		goto out;
	}

	/* During SER recovery firmware will not answer; report 1 rather than
	 * block on a wait that can never complete.
	 */
	if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags)) {
		ret = 1;
		goto out;
	}

out:
	/* eval performs the actual wait (or cleanup) and folds in @ret */
	return rtw89_wait_for_cond_eval(wait, prep, ret);
}
9930
#define H2C_ADD_MCC_LEN 16
/* rtw89_fw_h2c_add_mcc() - register one role/channel with the firmware MCC
 * (multi-channel concurrency) scheduler and wait for its add-report
 * @rtwdev: driver context
 * @p: fully-populated add request (channel, duration, courtesy, BT coex...)
 *
 * Return: 0 on success, negative errno on failure, or the >0 codes of
 * rtw89_h2c_tx_and_wait().
 */
int rtw89_fw_h2c_add_mcc(struct rtw89_dev *rtwdev,
			 const struct rtw89_fw_mcc_add_req *p)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct sk_buff *skb;
	unsigned int cond;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ADD_MCC_LEN);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for add mcc\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_ADD_MCC_LEN);
	RTW89_SET_FWCMD_ADD_MCC_MACID(skb->data, p->macid);
	RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG0(skb->data, p->central_ch_seg0);
	RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG1(skb->data, p->central_ch_seg1);
	RTW89_SET_FWCMD_ADD_MCC_PRIMARY_CH(skb->data, p->primary_ch);
	RTW89_SET_FWCMD_ADD_MCC_BANDWIDTH(skb->data, p->bandwidth);
	RTW89_SET_FWCMD_ADD_MCC_GROUP(skb->data, p->group);
	RTW89_SET_FWCMD_ADD_MCC_C2H_RPT(skb->data, p->c2h_rpt);
	RTW89_SET_FWCMD_ADD_MCC_DIS_TX_NULL(skb->data, p->dis_tx_null);
	RTW89_SET_FWCMD_ADD_MCC_DIS_SW_RETRY(skb->data, p->dis_sw_retry);
	RTW89_SET_FWCMD_ADD_MCC_IN_CURR_CH(skb->data, p->in_curr_ch);
	RTW89_SET_FWCMD_ADD_MCC_SW_RETRY_COUNT(skb->data, p->sw_retry_count);
	RTW89_SET_FWCMD_ADD_MCC_TX_NULL_EARLY(skb->data, p->tx_null_early);
	RTW89_SET_FWCMD_ADD_MCC_BTC_IN_2G(skb->data, p->btc_in_2g);
	RTW89_SET_FWCMD_ADD_MCC_PTA_EN(skb->data, p->pta_en);
	RTW89_SET_FWCMD_ADD_MCC_RFK_BY_PASS(skb->data, p->rfk_by_pass);
	RTW89_SET_FWCMD_ADD_MCC_CH_BAND_TYPE(skb->data, p->ch_band_type);
	RTW89_SET_FWCMD_ADD_MCC_DURATION(skb->data, p->duration);
	RTW89_SET_FWCMD_ADD_MCC_COURTESY_EN(skb->data, p->courtesy_en);
	RTW89_SET_FWCMD_ADD_MCC_COURTESY_NUM(skb->data, p->courtesy_num);
	RTW89_SET_FWCMD_ADD_MCC_COURTESY_TARGET(skb->data, p->courtesy_target);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MCC,
			      H2C_FUNC_ADD_MCC, 0, 0,
			      H2C_ADD_MCC_LEN);

	/* completion is matched per (group, function) pair */
	cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_ADD_MCC);
	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}
9977
#define H2C_START_MCC_LEN 12
/* rtw89_fw_h2c_start_mcc() - kick off a prepared MCC group at a TSF instant
 * @rtwdev: driver context
 * @p: start request (group ids, old-group handling, notify config, TSF)
 *
 * Return: 0 on success, negative errno on failure, or the >0 codes of
 * rtw89_h2c_tx_and_wait().
 */
int rtw89_fw_h2c_start_mcc(struct rtw89_dev *rtwdev,
			   const struct rtw89_fw_mcc_start_req *p)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct sk_buff *skb;
	unsigned int cond;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_START_MCC_LEN);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for start mcc\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_START_MCC_LEN);
	RTW89_SET_FWCMD_START_MCC_GROUP(skb->data, p->group);
	RTW89_SET_FWCMD_START_MCC_BTC_IN_GROUP(skb->data, p->btc_in_group);
	RTW89_SET_FWCMD_START_MCC_OLD_GROUP_ACTION(skb->data, p->old_group_action);
	RTW89_SET_FWCMD_START_MCC_OLD_GROUP(skb->data, p->old_group);
	RTW89_SET_FWCMD_START_MCC_NOTIFY_CNT(skb->data, p->notify_cnt);
	RTW89_SET_FWCMD_START_MCC_NOTIFY_RXDBG_EN(skb->data, p->notify_rxdbg_en);
	RTW89_SET_FWCMD_START_MCC_MACID(skb->data, p->macid);
	/* 64-bit start TSF is split across two 32-bit command fields */
	RTW89_SET_FWCMD_START_MCC_TSF_LOW(skb->data, p->tsf_low);
	RTW89_SET_FWCMD_START_MCC_TSF_HIGH(skb->data, p->tsf_high);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MCC,
			      H2C_FUNC_START_MCC, 0, 0,
			      H2C_START_MCC_LEN);

	cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_START_MCC);
	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}
10013
10014 #define H2C_STOP_MCC_LEN 4
rtw89_fw_h2c_stop_mcc(struct rtw89_dev * rtwdev,u8 group,u8 macid,bool prev_groups)10015 int rtw89_fw_h2c_stop_mcc(struct rtw89_dev *rtwdev, u8 group, u8 macid,
10016 bool prev_groups)
10017 {
10018 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
10019 struct sk_buff *skb;
10020 unsigned int cond;
10021
10022 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_STOP_MCC_LEN);
10023 if (!skb) {
10024 rtw89_err(rtwdev,
10025 "failed to alloc skb for stop mcc\n");
10026 return -ENOMEM;
10027 }
10028
10029 skb_put(skb, H2C_STOP_MCC_LEN);
10030 RTW89_SET_FWCMD_STOP_MCC_MACID(skb->data, macid);
10031 RTW89_SET_FWCMD_STOP_MCC_GROUP(skb->data, group);
10032 RTW89_SET_FWCMD_STOP_MCC_PREV_GROUPS(skb->data, prev_groups);
10033
10034 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
10035 H2C_CAT_MAC,
10036 H2C_CL_MCC,
10037 H2C_FUNC_STOP_MCC, 0, 0,
10038 H2C_STOP_MCC_LEN);
10039
10040 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_STOP_MCC);
10041 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
10042 }
10043
10044 #define H2C_DEL_MCC_GROUP_LEN 4
rtw89_fw_h2c_del_mcc_group(struct rtw89_dev * rtwdev,u8 group,bool prev_groups)10045 int rtw89_fw_h2c_del_mcc_group(struct rtw89_dev *rtwdev, u8 group,
10046 bool prev_groups)
10047 {
10048 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
10049 struct sk_buff *skb;
10050 unsigned int cond;
10051
10052 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DEL_MCC_GROUP_LEN);
10053 if (!skb) {
10054 rtw89_err(rtwdev,
10055 "failed to alloc skb for del mcc group\n");
10056 return -ENOMEM;
10057 }
10058
10059 skb_put(skb, H2C_DEL_MCC_GROUP_LEN);
10060 RTW89_SET_FWCMD_DEL_MCC_GROUP_GROUP(skb->data, group);
10061 RTW89_SET_FWCMD_DEL_MCC_GROUP_PREV_GROUPS(skb->data, prev_groups);
10062
10063 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
10064 H2C_CAT_MAC,
10065 H2C_CL_MCC,
10066 H2C_FUNC_DEL_MCC_GROUP, 0, 0,
10067 H2C_DEL_MCC_GROUP_LEN);
10068
10069 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_DEL_MCC_GROUP);
10070 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
10071 }
10072
10073 #define H2C_RESET_MCC_GROUP_LEN 4
rtw89_fw_h2c_reset_mcc_group(struct rtw89_dev * rtwdev,u8 group)10074 int rtw89_fw_h2c_reset_mcc_group(struct rtw89_dev *rtwdev, u8 group)
10075 {
10076 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
10077 struct sk_buff *skb;
10078 unsigned int cond;
10079
10080 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RESET_MCC_GROUP_LEN);
10081 if (!skb) {
10082 rtw89_err(rtwdev,
10083 "failed to alloc skb for reset mcc group\n");
10084 return -ENOMEM;
10085 }
10086
10087 skb_put(skb, H2C_RESET_MCC_GROUP_LEN);
10088 RTW89_SET_FWCMD_RESET_MCC_GROUP_GROUP(skb->data, group);
10089
10090 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
10091 H2C_CAT_MAC,
10092 H2C_CL_MCC,
10093 H2C_FUNC_RESET_MCC_GROUP, 0, 0,
10094 H2C_RESET_MCC_GROUP_LEN);
10095
10096 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_RESET_MCC_GROUP);
10097 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
10098 }
10099
#define H2C_MCC_REQ_TSF_LEN 4
/* rtw89_fw_h2c_mcc_req_tsf() - query firmware for the TSF pair of two MACs
 * @rtwdev: driver context
 * @req: group and the two MAC IDs (x/y) whose TSF values are wanted
 * @rpt: output; filled from the C2H report on success
 *
 * Return: 0 on success, negative errno on failure, or the >0 codes of
 * rtw89_h2c_tx_and_wait().
 */
int rtw89_fw_h2c_mcc_req_tsf(struct rtw89_dev *rtwdev,
			     const struct rtw89_fw_mcc_tsf_req *req,
			     struct rtw89_mac_mcc_tsf_rpt *rpt)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct rtw89_mac_mcc_tsf_rpt *tmp;
	struct sk_buff *skb;
	unsigned int cond;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_REQ_TSF_LEN);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for mcc req tsf\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_MCC_REQ_TSF_LEN);
	RTW89_SET_FWCMD_MCC_REQ_TSF_GROUP(skb->data, req->group);
	RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_X(skb->data, req->macid_x);
	RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_Y(skb->data, req->macid_y);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MCC,
			      H2C_FUNC_MCC_REQ_TSF, 0, 0,
			      H2C_MCC_REQ_TSF_LEN);

	cond = RTW89_MCC_WAIT_COND(req->group, H2C_FUNC_MCC_REQ_TSF);
	ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
	if (ret)
		return ret;

	/* the C2H handler stashed the report in the shared wait buffer */
	tmp = (struct rtw89_mac_mcc_tsf_rpt *)wait->data.buf;
	*rpt = *tmp;

	return 0;
}
10139
#define H2C_MCC_MACID_BITMAP_DSC_LEN 4
/* rtw89_fw_h2c_mcc_macid_bitmap() - associate extra MAC IDs with an MCC role
 * @rtwdev: driver context
 * @group: MCC group the bitmap belongs to
 * @macid: primary MAC ID the bitmap is attached to
 * @bitmap: one bit per MAC ID, RTW89_MAX_MAC_ID_NUM / 8 bytes long
 *
 * The command is variable length: a fixed 4-byte descriptor followed by the
 * bitmap itself.
 *
 * Return: 0 on success, negative errno on failure, or the >0 codes of
 * rtw89_h2c_tx_and_wait().
 */
int rtw89_fw_h2c_mcc_macid_bitmap(struct rtw89_dev *rtwdev, u8 group, u8 macid,
				  u8 *bitmap)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct sk_buff *skb;
	unsigned int cond;
	u8 map_len;
	u8 h2c_len;

	/* map_len below relies on the MAC ID count being byte-aligned */
	BUILD_BUG_ON(RTW89_MAX_MAC_ID_NUM % 8);
	map_len = RTW89_MAX_MAC_ID_NUM / 8;
	h2c_len = H2C_MCC_MACID_BITMAP_DSC_LEN + map_len;
	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, h2c_len);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for mcc macid bitmap\n");
		return -ENOMEM;
	}

	skb_put(skb, h2c_len);
	RTW89_SET_FWCMD_MCC_MACID_BITMAP_GROUP(skb->data, group);
	RTW89_SET_FWCMD_MCC_MACID_BITMAP_MACID(skb->data, macid);
	RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP_LENGTH(skb->data, map_len);
	RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP(skb->data, bitmap, map_len);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MCC,
			      H2C_FUNC_MCC_MACID_BITMAP, 0, 0,
			      h2c_len);

	cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_MACID_BITMAP);
	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}
10175
10176 #define H2C_MCC_SYNC_LEN 4
rtw89_fw_h2c_mcc_sync(struct rtw89_dev * rtwdev,u8 group,u8 source,u8 target,u8 offset)10177 int rtw89_fw_h2c_mcc_sync(struct rtw89_dev *rtwdev, u8 group, u8 source,
10178 u8 target, u8 offset)
10179 {
10180 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
10181 struct sk_buff *skb;
10182 unsigned int cond;
10183
10184 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SYNC_LEN);
10185 if (!skb) {
10186 rtw89_err(rtwdev,
10187 "failed to alloc skb for mcc sync\n");
10188 return -ENOMEM;
10189 }
10190
10191 skb_put(skb, H2C_MCC_SYNC_LEN);
10192 RTW89_SET_FWCMD_MCC_SYNC_GROUP(skb->data, group);
10193 RTW89_SET_FWCMD_MCC_SYNC_MACID_SOURCE(skb->data, source);
10194 RTW89_SET_FWCMD_MCC_SYNC_MACID_TARGET(skb->data, target);
10195 RTW89_SET_FWCMD_MCC_SYNC_SYNC_OFFSET(skb->data, offset);
10196
10197 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
10198 H2C_CAT_MAC,
10199 H2C_CL_MCC,
10200 H2C_FUNC_MCC_SYNC, 0, 0,
10201 H2C_MCC_SYNC_LEN);
10202
10203 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_SYNC);
10204 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
10205 }
10206
#define H2C_MCC_SET_DURATION_LEN 20
/* rtw89_fw_h2c_mcc_set_duration() - reprogram per-role durations of an MCC
 * group from a given start TSF
 * @rtwdev: driver context
 * @p: duration request (group, start macid/TSF, durations for roles x/y)
 *
 * Return: 0 on success, negative errno on failure, or the >0 codes of
 * rtw89_h2c_tx_and_wait().
 */
int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev,
				  const struct rtw89_fw_mcc_duration *p)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct sk_buff *skb;
	unsigned int cond;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SET_DURATION_LEN);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for mcc set duration\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_MCC_SET_DURATION_LEN);
	RTW89_SET_FWCMD_MCC_SET_DURATION_GROUP(skb->data, p->group);
	RTW89_SET_FWCMD_MCC_SET_DURATION_BTC_IN_GROUP(skb->data, p->btc_in_group);
	RTW89_SET_FWCMD_MCC_SET_DURATION_START_MACID(skb->data, p->start_macid);
	RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_X(skb->data, p->macid_x);
	RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_Y(skb->data, p->macid_y);
	/* 64-bit start TSF split across two 32-bit command fields */
	RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_LOW(skb->data,
						       p->start_tsf_low);
	RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_HIGH(skb->data,
							p->start_tsf_high);
	RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_X(skb->data, p->duration_x);
	RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_Y(skb->data, p->duration_y);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MCC,
			      H2C_FUNC_MCC_SET_DURATION, 0, 0,
			      H2C_MCC_SET_DURATION_LEN);

	cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_MCC_SET_DURATION);
	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}
10244
/* rtw89_fw_h2c_mrc_add_slot() - size and/or serialize one MRC slot
 * @rtwdev: driver context (unused here beyond the signature convention)
 * @slot_arg: slot description: duration, courtesy settings and its roles
 * @slot_h2c: destination in the H2C buffer, or NULL to only compute the size
 *
 * Dual-purpose helper for rtw89_fw_h2c_mrc_add(): pass 1 calls it with
 * @slot_h2c == NULL to accumulate the total command length, pass 2 calls it
 * again to fill the variable-length slot (header plus role_num role entries).
 *
 * Return: the serialized slot size in bytes (always computed).
 */
static
u32 rtw89_fw_h2c_mrc_add_slot(struct rtw89_dev *rtwdev,
			      const struct rtw89_fw_mrc_add_slot_arg *slot_arg,
			      struct rtw89_h2c_mrc_add_slot *slot_h2c)
{
	bool fill_h2c = !!slot_h2c;
	unsigned int i;

	if (!fill_h2c)
		goto calc_len;

	slot_h2c->w0 = le32_encode_bits(slot_arg->duration,
					RTW89_H2C_MRC_ADD_SLOT_W0_DURATION) |
		       le32_encode_bits(slot_arg->courtesy_en,
					RTW89_H2C_MRC_ADD_SLOT_W0_COURTESY_EN) |
		       le32_encode_bits(slot_arg->role_num,
					RTW89_H2C_MRC_ADD_SLOT_W0_ROLE_NUM);
	slot_h2c->w1 = le32_encode_bits(slot_arg->courtesy_period,
					RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_PERIOD) |
		       le32_encode_bits(slot_arg->courtesy_target,
					RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_TARGET);

	for (i = 0; i < slot_arg->role_num; i++) {
		/* alt-role and RFK-bypass are hard-wired off; BTC allowed */
		slot_h2c->roles[i].w0 =
			le32_encode_bits(slot_arg->roles[i].macid,
					 RTW89_H2C_MRC_ADD_ROLE_W0_MACID) |
			le32_encode_bits(slot_arg->roles[i].role_type,
					 RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_TYPE) |
			le32_encode_bits(slot_arg->roles[i].is_master,
					 RTW89_H2C_MRC_ADD_ROLE_W0_IS_MASTER) |
			le32_encode_bits(slot_arg->roles[i].en_tx_null,
					 RTW89_H2C_MRC_ADD_ROLE_W0_TX_NULL_EN) |
			le32_encode_bits(false,
					 RTW89_H2C_MRC_ADD_ROLE_W0_IS_ALT_ROLE) |
			le32_encode_bits(false,
					 RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_ALT_EN);
		slot_h2c->roles[i].w1 =
			le32_encode_bits(slot_arg->roles[i].central_ch,
					 RTW89_H2C_MRC_ADD_ROLE_W1_CENTRAL_CH_SEG) |
			le32_encode_bits(slot_arg->roles[i].primary_ch,
					 RTW89_H2C_MRC_ADD_ROLE_W1_PRI_CH) |
			le32_encode_bits(slot_arg->roles[i].bw,
					 RTW89_H2C_MRC_ADD_ROLE_W1_BW) |
			le32_encode_bits(slot_arg->roles[i].band,
					 RTW89_H2C_MRC_ADD_ROLE_W1_CH_BAND_TYPE) |
			le32_encode_bits(slot_arg->roles[i].null_early,
					 RTW89_H2C_MRC_ADD_ROLE_W1_NULL_EARLY) |
			le32_encode_bits(false,
					 RTW89_H2C_MRC_ADD_ROLE_W1_RFK_BY_PASS) |
			le32_encode_bits(true,
					 RTW89_H2C_MRC_ADD_ROLE_W1_CAN_BTC);
		slot_h2c->roles[i].macid_main_bitmap =
			cpu_to_le32(slot_arg->roles[i].macid_main_bitmap);
		slot_h2c->roles[i].macid_paired_bitmap =
			cpu_to_le32(slot_arg->roles[i].macid_paired_bitmap);
	}

calc_len:
	return struct_size(slot_h2c, roles, slot_arg->role_num);
}
10305
/* rtw89_fw_h2c_mrc_add() - add an MRC (multi-role concurrency) schedule
 * @rtwdev: driver context
 * @arg: schedule description: index, type, BT-coex flag and its slots
 *
 * Builds a variable-length command: a fixed head followed by each slot
 * serialized by rtw89_fw_h2c_mrc_add_slot(). Two passes over the slots are
 * made - one with a NULL destination to compute the total length, one to
 * fill the buffer. Fire-and-forget: no completion wait.
 *
 * Return: 0 on success, -ENOMEM or -EBUSY on failure.
 */
int rtw89_fw_h2c_mrc_add(struct rtw89_dev *rtwdev,
			 const struct rtw89_fw_mrc_add_arg *arg)
{
	struct rtw89_h2c_mrc_add *h2c_head;
	struct sk_buff *skb;
	unsigned int i;
	void *tmp;
	u32 len;
	int ret;

	/* pass 1: length only (NULL destination) */
	len = sizeof(*h2c_head);
	for (i = 0; i < arg->slot_num; i++)
		len += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], NULL);

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for mrc add\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	tmp = skb->data;

	h2c_head = tmp;
	h2c_head->w0 = le32_encode_bits(arg->sch_idx,
					RTW89_H2C_MRC_ADD_W0_SCH_IDX) |
		       le32_encode_bits(arg->sch_type,
					RTW89_H2C_MRC_ADD_W0_SCH_TYPE) |
		       le32_encode_bits(arg->slot_num,
					RTW89_H2C_MRC_ADD_W0_SLOT_NUM) |
		       le32_encode_bits(arg->btc_in_sch,
					RTW89_H2C_MRC_ADD_W0_BTC_IN_SCH);

	/* pass 2: serialize each slot right after the head */
	tmp += sizeof(*h2c_head);
	for (i = 0; i < arg->slot_num; i++)
		tmp += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], tmp);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MRC,
			      H2C_FUNC_ADD_MRC, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		dev_kfree_skb_any(skb);
		return -EBUSY;
	}

	return 0;
}
10358
rtw89_fw_h2c_mrc_start(struct rtw89_dev * rtwdev,const struct rtw89_fw_mrc_start_arg * arg)10359 int rtw89_fw_h2c_mrc_start(struct rtw89_dev *rtwdev,
10360 const struct rtw89_fw_mrc_start_arg *arg)
10361 {
10362 struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
10363 struct rtw89_h2c_mrc_start *h2c;
10364 u32 len = sizeof(*h2c);
10365 struct sk_buff *skb;
10366 unsigned int cond;
10367
10368 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
10369 if (!skb) {
10370 rtw89_err(rtwdev, "failed to alloc skb for mrc start\n");
10371 return -ENOMEM;
10372 }
10373
10374 skb_put(skb, len);
10375 h2c = (struct rtw89_h2c_mrc_start *)skb->data;
10376
10377 h2c->w0 = le32_encode_bits(arg->sch_idx,
10378 RTW89_H2C_MRC_START_W0_SCH_IDX) |
10379 le32_encode_bits(arg->old_sch_idx,
10380 RTW89_H2C_MRC_START_W0_OLD_SCH_IDX) |
10381 le32_encode_bits(arg->action,
10382 RTW89_H2C_MRC_START_W0_ACTION);
10383
10384 h2c->start_tsf_high = cpu_to_le32(arg->start_tsf >> 32);
10385 h2c->start_tsf_low = cpu_to_le32(arg->start_tsf);
10386
10387 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
10388 H2C_CAT_MAC,
10389 H2C_CL_MRC,
10390 H2C_FUNC_START_MRC, 0, 0,
10391 len);
10392
10393 cond = RTW89_MRC_WAIT_COND(arg->sch_idx, H2C_FUNC_START_MRC);
10394 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
10395 }
10396
/* Delete an MRC schedule (stopping at @slot_idx) and wait for the ack. */
int rtw89_fw_h2c_mrc_del(struct rtw89_dev *rtwdev, u8 sch_idx, u8 slot_idx)
{
	unsigned int wait_cond = RTW89_MRC_WAIT_COND(sch_idx, H2C_FUNC_DEL_MRC);
	struct rtw89_wait_info *mcc_wait = &rtwdev->mcc.wait;
	u32 h2c_len = sizeof(struct rtw89_h2c_mrc_del);
	struct rtw89_h2c_mrc_del *h2c;
	struct sk_buff *skb;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, h2c_len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for mrc del\n");
		return -ENOMEM;
	}

	h2c = (struct rtw89_h2c_mrc_del *)skb_put(skb, h2c_len);
	h2c->w0 = le32_encode_bits(sch_idx, RTW89_H2C_MRC_DEL_W0_SCH_IDX) |
		  le32_encode_bits(slot_idx, RTW89_H2C_MRC_DEL_W0_STOP_SLOT_IDX);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, H2C_CAT_MAC,
			      H2C_CL_MRC, H2C_FUNC_DEL_MRC, 0, 0, h2c_len);

	return rtw89_h2c_tx_and_wait(rtwdev, skb, mcc_wait, wait_cond);
}
10426
/* rtw89_fw_h2c_mrc_req_tsf() - query firmware for TSF values of several
 * (band, port) pairs under MRC
 * @rtwdev: driver context
 * @arg: list of band/port pairs to query (arg->num entries)
 * @rpt: output; filled from the C2H report on success
 *
 * The command is variable length: a count byte followed by one packed
 * band/port byte per requested entry.
 *
 * Return: 0 on success, negative errno on failure, or the >0 codes of
 * rtw89_h2c_tx_and_wait().
 */
int rtw89_fw_h2c_mrc_req_tsf(struct rtw89_dev *rtwdev,
			     const struct rtw89_fw_mrc_req_tsf_arg *arg,
			     struct rtw89_mac_mrc_tsf_rpt *rpt)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct rtw89_h2c_mrc_req_tsf *h2c;
	struct rtw89_mac_mrc_tsf_rpt *tmp;
	struct sk_buff *skb;
	unsigned int i;
	u32 len;
	int ret;

	len = struct_size(h2c, infos, arg->num);
	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for mrc req tsf\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_mrc_req_tsf *)skb->data;

	h2c->req_tsf_num = arg->num;
	for (i = 0; i < arg->num; i++)
		h2c->infos[i] =
			u8_encode_bits(arg->infos[i].band,
				       RTW89_H2C_MRC_REQ_TSF_INFO_BAND) |
			u8_encode_bits(arg->infos[i].port,
				       RTW89_H2C_MRC_REQ_TSF_INFO_PORT);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MRC,
			      H2C_FUNC_MRC_REQ_TSF, 0, 0,
			      len);

	ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_MRC_WAIT_COND_REQ_TSF);
	if (ret)
		return ret;

	/* the C2H handler stashed the report in the shared wait buffer */
	tmp = (struct rtw89_mac_mrc_tsf_rpt *)wait->data.buf;
	*rpt = *tmp;

	return 0;
}
10472
rtw89_fw_h2c_mrc_upd_bitmap(struct rtw89_dev * rtwdev,const struct rtw89_fw_mrc_upd_bitmap_arg * arg)10473 int rtw89_fw_h2c_mrc_upd_bitmap(struct rtw89_dev *rtwdev,
10474 const struct rtw89_fw_mrc_upd_bitmap_arg *arg)
10475 {
10476 struct rtw89_h2c_mrc_upd_bitmap *h2c;
10477 u32 len = sizeof(*h2c);
10478 struct sk_buff *skb;
10479 int ret;
10480
10481 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
10482 if (!skb) {
10483 rtw89_err(rtwdev, "failed to alloc skb for mrc upd bitmap\n");
10484 return -ENOMEM;
10485 }
10486
10487 skb_put(skb, len);
10488 h2c = (struct rtw89_h2c_mrc_upd_bitmap *)skb->data;
10489
10490 h2c->w0 = le32_encode_bits(arg->sch_idx,
10491 RTW89_H2C_MRC_UPD_BITMAP_W0_SCH_IDX) |
10492 le32_encode_bits(arg->action,
10493 RTW89_H2C_MRC_UPD_BITMAP_W0_ACTION) |
10494 le32_encode_bits(arg->macid,
10495 RTW89_H2C_MRC_UPD_BITMAP_W0_MACID);
10496 h2c->w1 = le32_encode_bits(arg->client_macid,
10497 RTW89_H2C_MRC_UPD_BITMAP_W1_CLIENT_MACID);
10498
10499 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
10500 H2C_CAT_MAC,
10501 H2C_CL_MRC,
10502 H2C_FUNC_MRC_UPD_BITMAP, 0, 0,
10503 len);
10504
10505 ret = rtw89_h2c_tx(rtwdev, skb, false);
10506 if (ret) {
10507 rtw89_err(rtwdev, "failed to send h2c\n");
10508 dev_kfree_skb_any(skb);
10509 return -EBUSY;
10510 }
10511
10512 return 0;
10513 }
10514
rtw89_fw_h2c_mrc_sync(struct rtw89_dev * rtwdev,const struct rtw89_fw_mrc_sync_arg * arg)10515 int rtw89_fw_h2c_mrc_sync(struct rtw89_dev *rtwdev,
10516 const struct rtw89_fw_mrc_sync_arg *arg)
10517 {
10518 struct rtw89_h2c_mrc_sync *h2c;
10519 u32 len = sizeof(*h2c);
10520 struct sk_buff *skb;
10521 int ret;
10522
10523 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
10524 if (!skb) {
10525 rtw89_err(rtwdev, "failed to alloc skb for mrc sync\n");
10526 return -ENOMEM;
10527 }
10528
10529 skb_put(skb, len);
10530 h2c = (struct rtw89_h2c_mrc_sync *)skb->data;
10531
10532 h2c->w0 = le32_encode_bits(true, RTW89_H2C_MRC_SYNC_W0_SYNC_EN) |
10533 le32_encode_bits(arg->src.port,
10534 RTW89_H2C_MRC_SYNC_W0_SRC_PORT) |
10535 le32_encode_bits(arg->src.band,
10536 RTW89_H2C_MRC_SYNC_W0_SRC_BAND) |
10537 le32_encode_bits(arg->dest.port,
10538 RTW89_H2C_MRC_SYNC_W0_DEST_PORT) |
10539 le32_encode_bits(arg->dest.band,
10540 RTW89_H2C_MRC_SYNC_W0_DEST_BAND);
10541 h2c->w1 = le32_encode_bits(arg->offset, RTW89_H2C_MRC_SYNC_W1_OFFSET);
10542
10543 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
10544 H2C_CAT_MAC,
10545 H2C_CL_MRC,
10546 H2C_FUNC_MRC_SYNC, 0, 0,
10547 len);
10548
10549 ret = rtw89_h2c_tx(rtwdev, skb, false);
10550 if (ret) {
10551 rtw89_err(rtwdev, "failed to send h2c\n");
10552 dev_kfree_skb_any(skb);
10553 return -EBUSY;
10554 }
10555
10556 return 0;
10557 }
10558
/* Update the durations of one or more slots in an existing MRC schedule,
 * taking effect from @arg->start_tsf. Fire-and-forget.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or -EBUSY if the
 * H2C could not be transmitted.
 */
int rtw89_fw_h2c_mrc_upd_duration(struct rtw89_dev *rtwdev,
				  const struct rtw89_fw_mrc_upd_duration_arg *arg)
{
	struct rtw89_h2c_mrc_upd_duration *h2c;
	struct sk_buff *skb;
	unsigned int i;
	u32 len;
	int ret;

	/* Variable-length command: one le32 per slot to update. */
	len = struct_size(h2c, slots, arg->slot_num);
	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for mrc upd duration\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_mrc_upd_duration *)skb->data;

	h2c->w0 = le32_encode_bits(arg->sch_idx,
				   RTW89_H2C_MRC_UPD_DURATION_W0_SCH_IDX) |
		  le32_encode_bits(arg->slot_num,
				   RTW89_H2C_MRC_UPD_DURATION_W0_SLOT_NUM) |
		  le32_encode_bits(false,
				   RTW89_H2C_MRC_UPD_DURATION_W0_BTC_IN_SCH);

	/* 64-bit start TSF is split across two le32 words. */
	h2c->start_tsf_high = cpu_to_le32(arg->start_tsf >> 32);
	h2c->start_tsf_low = cpu_to_le32(arg->start_tsf);

	for (i = 0; i < arg->slot_num; i++) {
		h2c->slots[i] =
			le32_encode_bits(arg->slots[i].slot_idx,
					 RTW89_H2C_MRC_UPD_DURATION_SLOT_SLOT_IDX) |
			le32_encode_bits(arg->slots[i].duration,
					 RTW89_H2C_MRC_UPD_DURATION_SLOT_DURATION);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MRC,
			      H2C_FUNC_MRC_UPD_DURATION, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		dev_kfree_skb_any(skb);
		return -EBUSY;
	}

	return 0;
}
10611
/* Enable or disable the firmware's AP power interrupt reporting.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or -EBUSY if the
 * H2C could not be transmitted.
 */
static int rtw89_fw_h2c_ap_info(struct rtw89_dev *rtwdev, bool en)
{
	struct rtw89_h2c_ap_info *cmd;
	u32 cmd_len = sizeof(*cmd);
	struct sk_buff *skb;
	int err;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, cmd_len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for ap info\n");
		return -ENOMEM;
	}

	skb_put(skb, cmd_len);
	cmd = (struct rtw89_h2c_ap_info *)skb->data;
	cmd->w0 = le32_encode_bits(en, RTW89_H2C_AP_INFO_W0_PWR_INT_EN);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_AP,
			      H2C_FUNC_AP_INFO, 0, 0,
			      cmd_len);

	err = rtw89_h2c_tx(rtwdev, skb, false);
	if (!err)
		return 0;

	rtw89_err(rtwdev, "failed to send h2c\n");
	dev_kfree_skb_any(skb);
	return -EBUSY;
}
10645
/* Reference-counted wrapper around rtw89_fw_h2c_ap_info().
 *
 * The H2C is only sent on the 0 -> 1 transition (@en == true) and on the
 * 1 -> 0 transition (@en == false); all other callers merely adjust the
 * refcount. Returns 0 when no command needed to be sent or it succeeded,
 * otherwise the error from rtw89_fw_h2c_ap_info() (except during SER, see
 * below).
 */
int rtw89_fw_h2c_ap_info_refcount(struct rtw89_dev *rtwdev, bool en)
{
	int ret;

	if (en) {
		/* Someone already enabled it: just take another reference. */
		if (refcount_inc_not_zero(&rtwdev->refcount_ap_info))
			return 0;
	} else {
		/* Other users remain: drop our reference, keep it enabled. */
		if (!refcount_dec_and_test(&rtwdev->refcount_ap_info))
			return 0;
	}

	ret = rtw89_fw_h2c_ap_info(rtwdev, en);
	if (ret) {
		if (!test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags))
			return ret;

		/* During recovery, neither driver nor stack has full error
		 * handling, so show a warning, but return 0 with refcount
		 * increased normally. It can avoid underflow when calling
		 * with @en == false later.
		 */
		rtw89_warn(rtwdev, "h2c ap_info failed during SER\n");
	}

	/* First enabler: refcount was 0, initialize it to 1. */
	if (en)
		refcount_set(&rtwdev->refcount_ap_info, 1);

	return 0;
}
10676
/* Enable or disable one MLO link in firmware and wait for the matching
 * C2H completion, which is keyed by the link's MAC ID.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the error from
 * rtw89_h2c_tx_and_wait() on transmit/timeout failure.
 */
int rtw89_fw_h2c_mlo_link_cfg(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
			      bool enable)
{
	struct rtw89_wait_info *wait = &rtwdev->mlo.wait;
	struct rtw89_h2c_mlo_link_cfg *h2c;
	u8 mac_id = rtwvif_link->mac_id;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	unsigned int cond;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for mlo link cfg\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_mlo_link_cfg *)skb->data;

	h2c->w0 = le32_encode_bits(mac_id, RTW89_H2C_MLO_LINK_CFG_W0_MACID) |
		  le32_encode_bits(enable, RTW89_H2C_MLO_LINK_CFG_W0_OPTION);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MLO,
			      H2C_FUNC_MLO_LINK_CFG, 0, 0,
			      len);

	/* Per-MAC-ID wait condition so concurrent link configs don't race. */
	cond = RTW89_MLO_WAIT_COND(mac_id, H2C_FUNC_MLO_LINK_CFG);

	ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
	if (ret) {
		rtw89_err(rtwdev, "mlo link cfg (%s link id %u) failed: %d\n",
			  str_enable_disable(enable), rtwvif_link->link_id, ret);
		return ret;
	}

	return 0;
}
10717
/* Return true if the @ext_len trailing bytes at @ext_ptr are all zero.
 * Used to accept firmware txpwr entries larger than the struct known to
 * this driver, as long as the unknown tail carries no data.
 * Note: @ext_len is a u8, so the zeros[] scratch table (U8_MAX bytes)
 * always covers the comparison length.
 */
static bool __fw_txpwr_entry_zero_ext(const void *ext_ptr, u8 ext_len)
{
	static const u8 zeros[U8_MAX] = {};

	return memcmp(ext_ptr, zeros, ext_len) == 0;
}

/* An entry at @cursor of firmware-declared size @ent_sz is acceptable if
 * the driver's struct (*(e)) covers the whole entry, or if the extra
 * tail bytes beyond the known struct are all zero.
 */
#define __fw_txpwr_entry_acceptable(e, cursor, ent_sz)	\
({							\
	u8 __var_sz = sizeof(*(e));			\
	bool __accept;					\
	if (__var_sz >= (ent_sz))			\
		__accept = true;			\
	else						\
		__accept = __fw_txpwr_entry_zero_ext((cursor) + __var_sz,\
						     (ent_sz) - __var_sz);\
	__accept;					\
})
10736
10737 static bool
fw_txpwr_byrate_entry_valid(const struct rtw89_fw_txpwr_byrate_entry * e,const void * cursor,const struct rtw89_txpwr_conf * conf)10738 fw_txpwr_byrate_entry_valid(const struct rtw89_fw_txpwr_byrate_entry *e,
10739 const void *cursor,
10740 const struct rtw89_txpwr_conf *conf)
10741 {
10742 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
10743 return false;
10744
10745 if (e->band >= RTW89_BAND_NUM || e->bw >= RTW89_BYR_BW_NUM)
10746 return false;
10747
10748 switch (e->rs) {
10749 case RTW89_RS_CCK:
10750 if (e->shf + e->len > RTW89_RATE_CCK_NUM)
10751 return false;
10752 break;
10753 case RTW89_RS_OFDM:
10754 if (e->shf + e->len > RTW89_RATE_OFDM_NUM)
10755 return false;
10756 break;
10757 case RTW89_RS_MCS:
10758 if (e->shf + e->len > __RTW89_RATE_MCS_NUM ||
10759 e->nss >= RTW89_NSS_NUM ||
10760 e->ofdma >= RTW89_OFDMA_NUM)
10761 return false;
10762 break;
10763 case RTW89_RS_HEDCM:
10764 if (e->shf + e->len > RTW89_RATE_HEDCM_NUM ||
10765 e->nss >= RTW89_NSS_HEDCM_NUM ||
10766 e->ofdma >= RTW89_OFDMA_NUM)
10767 return false;
10768 break;
10769 case RTW89_RS_OFFSET:
10770 if (e->shf + e->len > __RTW89_RATE_OFFSET_NUM)
10771 return false;
10772 break;
10773 default:
10774 return false;
10775 }
10776
10777 return true;
10778 }
10779
/* Load a firmware-provided by-rate TX power conf into rtwdev->byr[][].
 * Invalid entries are skipped; each valid entry packs up to four s8
 * power values into its le32 @data field, one byte per rate index
 * starting at @shf.
 */
static
void rtw89_fw_load_txpwr_byrate(struct rtw89_dev *rtwdev,
				const struct rtw89_txpwr_table *tbl)
{
	const struct rtw89_txpwr_conf *conf = tbl->data;
	struct rtw89_fw_txpwr_byrate_entry entry = {};
	struct rtw89_txpwr_byrate *byr_head;
	struct rtw89_rate_desc desc = {};
	const void *cursor;
	u32 data;
	s8 *byr;
	int i;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_byrate_entry_valid(&entry, cursor, conf))
			continue;

		byr_head = &rtwdev->byr[entry.band][entry.bw];
		data = le32_to_cpu(entry.data);
		desc.ofdma = entry.ofdma;
		desc.nss = entry.nss;
		desc.rs = entry.rs;

		/* Unpack one byte per rate, lowest byte first. */
		for (i = 0; i < entry.len; i++, data >>= 8) {
			desc.idx = entry.shf + i;
			byr = rtw89_phy_raw_byr_seek(rtwdev, byr_head, &desc);
			*byr = data & 0xff;
		}
	}
}
10810
10811 static bool
fw_txpwr_lmt_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_2ghz_entry * e,const void * cursor,const struct rtw89_txpwr_conf * conf)10812 fw_txpwr_lmt_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_2ghz_entry *e,
10813 const void *cursor,
10814 const struct rtw89_txpwr_conf *conf)
10815 {
10816 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
10817 return false;
10818
10819 if (e->bw >= RTW89_2G_BW_NUM)
10820 return false;
10821 if (e->nt >= RTW89_NTX_NUM)
10822 return false;
10823 if (e->rs >= RTW89_RS_LMT_NUM)
10824 return false;
10825 if (e->bf >= RTW89_BF_NUM)
10826 return false;
10827 if (e->regd >= RTW89_REGD_NUM)
10828 return false;
10829 if (e->ch_idx >= RTW89_2G_CH_NUM)
10830 return false;
10831
10832 return true;
10833 }
10834
/* Populate the 2 GHz TX power limit table from a firmware-provided conf
 * blob, skipping entries whose indices fall outside the table.
 */
static
void rtw89_fw_load_txpwr_lmt_2ghz(struct rtw89_txpwr_lmt_2ghz_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_txpwr_lmt_2ghz_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_lmt_2ghz_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd]
		       [entry.ch_idx] = entry.v;
	}
}
10850
10851 static bool
fw_txpwr_lmt_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_5ghz_entry * e,const void * cursor,const struct rtw89_txpwr_conf * conf)10852 fw_txpwr_lmt_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_5ghz_entry *e,
10853 const void *cursor,
10854 const struct rtw89_txpwr_conf *conf)
10855 {
10856 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
10857 return false;
10858
10859 if (e->bw >= RTW89_5G_BW_NUM)
10860 return false;
10861 if (e->nt >= RTW89_NTX_NUM)
10862 return false;
10863 if (e->rs >= RTW89_RS_LMT_NUM)
10864 return false;
10865 if (e->bf >= RTW89_BF_NUM)
10866 return false;
10867 if (e->regd >= RTW89_REGD_NUM)
10868 return false;
10869 if (e->ch_idx >= RTW89_5G_CH_NUM)
10870 return false;
10871
10872 return true;
10873 }
10874
/* Populate the 5 GHz TX power limit table from a firmware-provided conf
 * blob, skipping entries whose indices fall outside the table.
 */
static
void rtw89_fw_load_txpwr_lmt_5ghz(struct rtw89_txpwr_lmt_5ghz_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_txpwr_lmt_5ghz_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_lmt_5ghz_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd]
		       [entry.ch_idx] = entry.v;
	}
}
10890
10891 static bool
fw_txpwr_lmt_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_6ghz_entry * e,const void * cursor,const struct rtw89_txpwr_conf * conf)10892 fw_txpwr_lmt_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_6ghz_entry *e,
10893 const void *cursor,
10894 const struct rtw89_txpwr_conf *conf)
10895 {
10896 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
10897 return false;
10898
10899 if (e->bw >= RTW89_6G_BW_NUM)
10900 return false;
10901 if (e->nt >= RTW89_NTX_NUM)
10902 return false;
10903 if (e->rs >= RTW89_RS_LMT_NUM)
10904 return false;
10905 if (e->bf >= RTW89_BF_NUM)
10906 return false;
10907 if (e->regd >= RTW89_REGD_NUM)
10908 return false;
10909 if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER)
10910 return false;
10911 if (e->ch_idx >= RTW89_6G_CH_NUM)
10912 return false;
10913
10914 return true;
10915 }
10916
/* Populate the 6 GHz TX power limit table from a firmware-provided conf
 * blob, skipping entries whose indices fall outside the table.
 */
static
void rtw89_fw_load_txpwr_lmt_6ghz(struct rtw89_txpwr_lmt_6ghz_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_txpwr_lmt_6ghz_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_lmt_6ghz_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd]
		       [entry.reg_6ghz_power][entry.ch_idx] = entry.v;
	}
}
10932
10933 static bool
fw_txpwr_lmt_ru_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_2ghz_entry * e,const void * cursor,const struct rtw89_txpwr_conf * conf)10934 fw_txpwr_lmt_ru_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_2ghz_entry *e,
10935 const void *cursor,
10936 const struct rtw89_txpwr_conf *conf)
10937 {
10938 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
10939 return false;
10940
10941 if (e->ru >= RTW89_RU_NUM)
10942 return false;
10943 if (e->nt >= RTW89_NTX_NUM)
10944 return false;
10945 if (e->regd >= RTW89_REGD_NUM)
10946 return false;
10947 if (e->ch_idx >= RTW89_2G_CH_NUM)
10948 return false;
10949
10950 return true;
10951 }
10952
/* Populate the 2 GHz RU TX power limit table from a firmware-provided
 * conf blob, skipping entries whose indices fall outside the table.
 */
static
void rtw89_fw_load_txpwr_lmt_ru_2ghz(struct rtw89_txpwr_lmt_ru_2ghz_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_txpwr_lmt_ru_2ghz_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_lmt_ru_2ghz_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v;
	}
}
10967
10968 static bool
fw_txpwr_lmt_ru_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_5ghz_entry * e,const void * cursor,const struct rtw89_txpwr_conf * conf)10969 fw_txpwr_lmt_ru_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_5ghz_entry *e,
10970 const void *cursor,
10971 const struct rtw89_txpwr_conf *conf)
10972 {
10973 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
10974 return false;
10975
10976 if (e->ru >= RTW89_RU_NUM)
10977 return false;
10978 if (e->nt >= RTW89_NTX_NUM)
10979 return false;
10980 if (e->regd >= RTW89_REGD_NUM)
10981 return false;
10982 if (e->ch_idx >= RTW89_5G_CH_NUM)
10983 return false;
10984
10985 return true;
10986 }
10987
/* Populate the 5 GHz RU TX power limit table from a firmware-provided
 * conf blob, skipping entries whose indices fall outside the table.
 */
static
void rtw89_fw_load_txpwr_lmt_ru_5ghz(struct rtw89_txpwr_lmt_ru_5ghz_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_txpwr_lmt_ru_5ghz_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_lmt_ru_5ghz_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v;
	}
}
11002
11003 static bool
fw_txpwr_lmt_ru_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_6ghz_entry * e,const void * cursor,const struct rtw89_txpwr_conf * conf)11004 fw_txpwr_lmt_ru_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_6ghz_entry *e,
11005 const void *cursor,
11006 const struct rtw89_txpwr_conf *conf)
11007 {
11008 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
11009 return false;
11010
11011 if (e->ru >= RTW89_RU_NUM)
11012 return false;
11013 if (e->nt >= RTW89_NTX_NUM)
11014 return false;
11015 if (e->regd >= RTW89_REGD_NUM)
11016 return false;
11017 if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER)
11018 return false;
11019 if (e->ch_idx >= RTW89_6G_CH_NUM)
11020 return false;
11021
11022 return true;
11023 }
11024
/* Populate the 6 GHz RU TX power limit table from a firmware-provided
 * conf blob, skipping entries whose indices fall outside the table.
 */
static
void rtw89_fw_load_txpwr_lmt_ru_6ghz(struct rtw89_txpwr_lmt_ru_6ghz_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_txpwr_lmt_ru_6ghz_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_lmt_ru_6ghz_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.ru][entry.nt][entry.regd][entry.reg_6ghz_power]
		       [entry.ch_idx] = entry.v;
	}
}
11040
11041 static bool
fw_tx_shape_lmt_entry_valid(const struct rtw89_fw_tx_shape_lmt_entry * e,const void * cursor,const struct rtw89_txpwr_conf * conf)11042 fw_tx_shape_lmt_entry_valid(const struct rtw89_fw_tx_shape_lmt_entry *e,
11043 const void *cursor,
11044 const struct rtw89_txpwr_conf *conf)
11045 {
11046 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
11047 return false;
11048
11049 if (e->band >= RTW89_BAND_NUM)
11050 return false;
11051 if (e->tx_shape_rs >= RTW89_RS_TX_SHAPE_NUM)
11052 return false;
11053 if (e->regd >= RTW89_REGD_NUM)
11054 return false;
11055
11056 return true;
11057 }
11058
/* Populate the TX shape limit table from a firmware-provided conf blob,
 * skipping entries whose indices fall outside the table.
 */
static
void rtw89_fw_load_tx_shape_lmt(struct rtw89_tx_shape_lmt_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_tx_shape_lmt_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_tx_shape_lmt_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.band][entry.tx_shape_rs][entry.regd] = entry.v;
	}
}
11073
11074 static bool
fw_tx_shape_lmt_ru_entry_valid(const struct rtw89_fw_tx_shape_lmt_ru_entry * e,const void * cursor,const struct rtw89_txpwr_conf * conf)11075 fw_tx_shape_lmt_ru_entry_valid(const struct rtw89_fw_tx_shape_lmt_ru_entry *e,
11076 const void *cursor,
11077 const struct rtw89_txpwr_conf *conf)
11078 {
11079 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
11080 return false;
11081
11082 if (e->band >= RTW89_BAND_NUM)
11083 return false;
11084 if (e->regd >= RTW89_REGD_NUM)
11085 return false;
11086
11087 return true;
11088 }
11089
/* Populate the RU TX shape limit table from a firmware-provided conf
 * blob, skipping entries whose indices fall outside the table.
 */
static
void rtw89_fw_load_tx_shape_lmt_ru(struct rtw89_tx_shape_lmt_ru_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_tx_shape_lmt_ru_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_tx_shape_lmt_ru_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.band][entry.regd] = entry.v;
	}
}
11104
rtw89_fw_has_da_txpwr_table(struct rtw89_dev * rtwdev,const struct rtw89_rfe_parms * parms)11105 static bool rtw89_fw_has_da_txpwr_table(struct rtw89_dev *rtwdev,
11106 const struct rtw89_rfe_parms *parms)
11107 {
11108 const struct rtw89_chip_info *chip = rtwdev->chip;
11109
11110 if (chip->support_bands & BIT(NL80211_BAND_2GHZ) &&
11111 !(parms->rule_da_2ghz.lmt && parms->rule_da_2ghz.lmt_ru))
11112 return false;
11113
11114 if (chip->support_bands & BIT(NL80211_BAND_5GHZ) &&
11115 !(parms->rule_da_5ghz.lmt && parms->rule_da_5ghz.lmt_ru))
11116 return false;
11117
11118 if (chip->support_bands & BIT(NL80211_BAND_6GHZ) &&
11119 !(parms->rule_da_6ghz.lmt && parms->rule_da_6ghz.lmt_ru))
11120 return false;
11121
11122 return true;
11123 }
11124
/* Build the effective RFE parameters: start from the chip's built-in
 * @init parms, then override each table for which the loaded firmware
 * file carried a valid txpwr conf blob.
 *
 * Returns @init unchanged when no firmware RFE data was loaded,
 * otherwise a pointer to rtwdev->rfe_data->rfe_parms.
 */
const struct rtw89_rfe_parms *
rtw89_load_rfe_data_from_fw(struct rtw89_dev *rtwdev,
			    const struct rtw89_rfe_parms *init)
{
	struct rtw89_rfe_data *rfe_data = rtwdev->rfe_data;
	struct rtw89_rfe_parms *parms;

	if (!rfe_data)
		return init;

	parms = &rfe_data->rfe_parms;
	if (init)
		*parms = *init;

	/* By-rate table: defer actual parsing to the load callback. */
	if (rtw89_txpwr_conf_valid(&rfe_data->byrate.conf)) {
		rfe_data->byrate.tbl.data = &rfe_data->byrate.conf;
		rfe_data->byrate.tbl.size = 0; /* don't care here */
		rfe_data->byrate.tbl.load = rtw89_fw_load_txpwr_byrate;
		parms->byr_tbl = &rfe_data->byrate.tbl;
	}

	/* Per-band TX power limit tables. */
	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_2ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_2ghz(&rfe_data->lmt_2ghz);
		parms->rule_2ghz.lmt = &rfe_data->lmt_2ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_5ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_5ghz(&rfe_data->lmt_5ghz);
		parms->rule_5ghz.lmt = &rfe_data->lmt_5ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_6ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_6ghz(&rfe_data->lmt_6ghz);
		parms->rule_6ghz.lmt = &rfe_data->lmt_6ghz.v;
	}

	/* Dynamic-antenna (DA) variants reuse the same loaders. */
	if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_2ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_2ghz(&rfe_data->da_lmt_2ghz);
		parms->rule_da_2ghz.lmt = &rfe_data->da_lmt_2ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_5ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_5ghz(&rfe_data->da_lmt_5ghz);
		parms->rule_da_5ghz.lmt = &rfe_data->da_lmt_5ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_6ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_6ghz(&rfe_data->da_lmt_6ghz);
		parms->rule_da_6ghz.lmt = &rfe_data->da_lmt_6ghz.v;
	}

	/* RU (resource unit) limit tables. */
	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_2ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_ru_2ghz(&rfe_data->lmt_ru_2ghz);
		parms->rule_2ghz.lmt_ru = &rfe_data->lmt_ru_2ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_5ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_ru_5ghz(&rfe_data->lmt_ru_5ghz);
		parms->rule_5ghz.lmt_ru = &rfe_data->lmt_ru_5ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_6ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_ru_6ghz(&rfe_data->lmt_ru_6ghz);
		parms->rule_6ghz.lmt_ru = &rfe_data->lmt_ru_6ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_ru_2ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_ru_2ghz(&rfe_data->da_lmt_ru_2ghz);
		parms->rule_da_2ghz.lmt_ru = &rfe_data->da_lmt_ru_2ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_ru_5ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_ru_5ghz(&rfe_data->da_lmt_ru_5ghz);
		parms->rule_da_5ghz.lmt_ru = &rfe_data->da_lmt_ru_5ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_ru_6ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_ru_6ghz(&rfe_data->da_lmt_ru_6ghz);
		parms->rule_da_6ghz.lmt_ru = &rfe_data->da_lmt_ru_6ghz.v;
	}

	/* TX shape limit tables. */
	if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt.conf)) {
		rtw89_fw_load_tx_shape_lmt(&rfe_data->tx_shape_lmt);
		parms->tx_shape.lmt = &rfe_data->tx_shape_lmt.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt_ru.conf)) {
		rtw89_fw_load_tx_shape_lmt_ru(&rfe_data->tx_shape_lmt_ru);
		parms->tx_shape.lmt_ru = &rfe_data->tx_shape_lmt_ru.v;
	}

	/* Record whether every supported band got a full DA table set. */
	parms->has_da = rtw89_fw_has_da_txpwr_table(rtwdev, parms);

	return parms;
}
11220