1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* Copyright(c) 2019-2020 Realtek Corporation 3 */ 4 5 #include <linux/if_arp.h> 6 #include "cam.h" 7 #include "chan.h" 8 #include "coex.h" 9 #include "debug.h" 10 #include "fw.h" 11 #include "mac.h" 12 #include "phy.h" 13 #include "ps.h" 14 #include "reg.h" 15 #include "util.h" 16 #include "wow.h" 17 18 static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev); 19 20 struct rtw89_eapol_2_of_2 { 21 u8 gtkbody[14]; 22 u8 key_des_ver; 23 u8 rsvd[92]; 24 } __packed; 25 26 struct rtw89_sa_query { 27 u8 category; 28 u8 action; 29 } __packed; 30 31 struct rtw89_arp_rsp { 32 u8 llc_hdr[sizeof(rfc1042_header)]; 33 __be16 llc_type; 34 struct arphdr arp_hdr; 35 u8 sender_hw[ETH_ALEN]; 36 __be32 sender_ip; 37 u8 target_hw[ETH_ALEN]; 38 __be32 target_ip; 39 } __packed; 40 41 static const u8 mss_signature[] = {0x4D, 0x53, 0x53, 0x4B, 0x50, 0x4F, 0x4F, 0x4C}; 42 43 const struct rtw89_fw_blacklist rtw89_fw_blacklist_default = { 44 .ver = 0x00, 45 .list = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 46 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 47 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 48 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 49 }, 50 }; 51 EXPORT_SYMBOL(rtw89_fw_blacklist_default); 52 53 union rtw89_fw_element_arg { 54 size_t offset; 55 enum rtw89_rf_path rf_path; 56 enum rtw89_fw_type fw_type; 57 }; 58 59 struct rtw89_fw_element_handler { 60 int (*fn)(struct rtw89_dev *rtwdev, 61 const struct rtw89_fw_element_hdr *elm, 62 const union rtw89_fw_element_arg arg); 63 const union rtw89_fw_element_arg arg; 64 const char *name; 65 }; 66 67 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev, 68 struct sk_buff *skb); 69 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb, 70 struct rtw89_wait_info *wait, unsigned int cond); 71 static int __parse_security_section(struct rtw89_dev *rtwdev, 72 struct rtw89_fw_bin_info *info, 73 struct rtw89_fw_hdr_section_info *section_info, 74 const void *content, 75 u32 *mssc_len); 76 77 static struct sk_buff *rtw89_fw_h2c_alloc_skb(struct rtw89_dev *rtwdev, u32 len, 78 bool header) 79 { 80 struct sk_buff *skb; 81 u32 header_len = 0; 82 u32 h2c_desc_size = rtwdev->chip->h2c_desc_size; 83 84 if (header) 85 header_len = H2C_HEADER_LEN; 86 87 skb = dev_alloc_skb(len + header_len + h2c_desc_size); 88 if (!skb) 89 return NULL; 90 skb_reserve(skb, header_len + h2c_desc_size); 91 memset(skb->data, 0, len); 92 93 return skb; 94 } 95 96 struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len) 97 { 98 return rtw89_fw_h2c_alloc_skb(rtwdev, len, true); 99 } 100 101 struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len) 102 { 103 return rtw89_fw_h2c_alloc_skb(rtwdev, len, false); 104 } 105 106 int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev, enum rtw89_fwdl_check_type type) 107 { 108 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 109 u8 val; 110 int ret; 111 112 ret = read_poll_timeout_atomic(mac->fwdl_get_status, val, 113 val == RTW89_FWDL_WCPU_FW_INIT_RDY, 114 1, FWDL_WAIT_CNT, false, rtwdev, type); 115 if (ret) { 116 switch (val) { 117 case RTW89_FWDL_CHECKSUM_FAIL: 118 rtw89_err(rtwdev, "fw checksum fail\n"); 119 return -EINVAL; 120 121 case RTW89_FWDL_SECURITY_FAIL: 122 rtw89_err(rtwdev, "fw security fail\n"); 123 return -EINVAL; 124 125 case RTW89_FWDL_CV_NOT_MATCH: 126 rtw89_err(rtwdev, "fw cv not match\n"); 127 return -EINVAL; 128 129 default: 130 rtw89_err(rtwdev, "fw unexpected status %d\n", val); 131 return 
-EBUSY; 132 } 133 } 134 135 set_bit(RTW89_FLAG_FW_RDY, rtwdev->flags); 136 137 return 0; 138 } 139 140 static int rtw89_fw_hdr_parser_v0(struct rtw89_dev *rtwdev, const u8 *fw, u32 len, 141 struct rtw89_fw_bin_info *info) 142 { 143 const struct rtw89_fw_hdr *fw_hdr = (const struct rtw89_fw_hdr *)fw; 144 const struct rtw89_chip_info *chip = rtwdev->chip; 145 struct rtw89_fw_hdr_section_info *section_info; 146 struct rtw89_fw_secure *sec = &rtwdev->fw.sec; 147 const struct rtw89_fw_dynhdr_hdr *fwdynhdr; 148 const struct rtw89_fw_hdr_section *section; 149 const u8 *fw_end = fw + len; 150 const u8 *bin; 151 u32 base_hdr_len; 152 u32 mssc_len; 153 int ret; 154 u32 i; 155 156 if (!info) 157 return -EINVAL; 158 159 info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_W6_SEC_NUM); 160 base_hdr_len = struct_size(fw_hdr, sections, info->section_num); 161 info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_W7_DYN_HDR); 162 info->idmem_share_mode = le32_get_bits(fw_hdr->w7, FW_HDR_W7_IDMEM_SHARE_MODE); 163 164 if (info->dynamic_hdr_en) { 165 info->hdr_len = le32_get_bits(fw_hdr->w3, FW_HDR_W3_LEN); 166 info->dynamic_hdr_len = info->hdr_len - base_hdr_len; 167 fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len); 168 if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) { 169 rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n"); 170 return -EINVAL; 171 } 172 } else { 173 info->hdr_len = base_hdr_len; 174 info->dynamic_hdr_len = 0; 175 } 176 177 bin = fw + info->hdr_len; 178 179 /* jump to section header */ 180 section_info = info->section_info; 181 for (i = 0; i < info->section_num; i++) { 182 section = &fw_hdr->sections[i]; 183 section_info->type = 184 le32_get_bits(section->w1, FWSECTION_HDR_W1_SECTIONTYPE); 185 section_info->len = le32_get_bits(section->w1, FWSECTION_HDR_W1_SEC_SIZE); 186 187 if (le32_get_bits(section->w1, FWSECTION_HDR_W1_CHECKSUM)) 188 section_info->len += FWDL_SECTION_CHKSUM_LEN; 189 section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_W1_REDL); 190 section_info->dladdr = 191 le32_get_bits(section->w0, FWSECTION_HDR_W0_DL_ADDR) & 0x1fffffff; 192 section_info->addr = bin; 193 194 if (section_info->type == FWDL_SECURITY_SECTION_TYPE) { 195 section_info->mssc = 196 le32_get_bits(section->w2, FWSECTION_HDR_W2_MSSC); 197 198 ret = __parse_security_section(rtwdev, info, section_info, 199 bin, &mssc_len); 200 if (ret) 201 return ret; 202 203 if (sec->secure_boot && chip->chip_id == RTL8852B) 204 section_info->len_override = 960; 205 } else { 206 section_info->mssc = 0; 207 mssc_len = 0; 208 } 209 210 rtw89_debug(rtwdev, RTW89_DBG_FW, 211 "section[%d] type=%d len=0x%-6x mssc=%d mssc_len=%d addr=%tx\n", 212 i, section_info->type, section_info->len, 213 section_info->mssc, mssc_len, bin - fw); 214 rtw89_debug(rtwdev, RTW89_DBG_FW, 215 " ignore=%d key_addr=%p (0x%tx) key_len=%d key_idx=%d\n", 216 section_info->ignore, section_info->key_addr, 217 section_info->key_addr ? 
218 section_info->key_addr - section_info->addr : 0, 219 section_info->key_len, section_info->key_idx); 220 221 bin += section_info->len + mssc_len; 222 section_info++; 223 } 224 225 if (fw_end != bin) { 226 rtw89_err(rtwdev, "[ERR]fw bin size\n"); 227 return -EINVAL; 228 } 229 230 return 0; 231 } 232 233 static int __get_mssc_key_idx(struct rtw89_dev *rtwdev, 234 const struct rtw89_fw_mss_pool_hdr *mss_hdr, 235 u32 rmp_tbl_size, u32 *key_idx) 236 { 237 struct rtw89_fw_secure *sec = &rtwdev->fw.sec; 238 u32 sel_byte_idx; 239 u32 mss_sel_idx; 240 u8 sel_bit_idx; 241 int i; 242 243 if (sec->mss_dev_type == RTW89_FW_MSS_DEV_TYPE_FWSEC_DEF) { 244 if (!mss_hdr->defen) 245 return -ENOENT; 246 247 mss_sel_idx = sec->mss_cust_idx * le16_to_cpu(mss_hdr->msskey_num_max) + 248 sec->mss_key_num; 249 } else { 250 if (mss_hdr->defen) 251 mss_sel_idx = FWDL_MSS_POOL_DEFKEYSETS_SIZE << 3; 252 else 253 mss_sel_idx = 0; 254 mss_sel_idx += sec->mss_dev_type * le16_to_cpu(mss_hdr->msskey_num_max) * 255 le16_to_cpu(mss_hdr->msscust_max) + 256 sec->mss_cust_idx * le16_to_cpu(mss_hdr->msskey_num_max) + 257 sec->mss_key_num; 258 } 259 260 sel_byte_idx = mss_sel_idx >> 3; 261 sel_bit_idx = mss_sel_idx & 0x7; 262 263 if (sel_byte_idx >= rmp_tbl_size) 264 return -EFAULT; 265 266 if (!(mss_hdr->rmp_tbl[sel_byte_idx] & BIT(sel_bit_idx))) 267 return -ENOENT; 268 269 *key_idx = hweight8(mss_hdr->rmp_tbl[sel_byte_idx] & (BIT(sel_bit_idx) - 1)); 270 271 for (i = 0; i < sel_byte_idx; i++) 272 *key_idx += hweight8(mss_hdr->rmp_tbl[i]); 273 274 return 0; 275 } 276 277 static int __parse_formatted_mssc(struct rtw89_dev *rtwdev, 278 struct rtw89_fw_bin_info *info, 279 struct rtw89_fw_hdr_section_info *section_info, 280 const void *content, 281 u32 *mssc_len) 282 { 283 const struct rtw89_fw_mss_pool_hdr *mss_hdr = content + section_info->len; 284 const union rtw89_fw_section_mssc_content *section_content = content; 285 struct rtw89_fw_secure *sec = &rtwdev->fw.sec; 286 u32 rmp_tbl_size; 287 u32 key_sign_len; 288 u32 real_key_idx; 289 u32 sb_sel_ver; 290 int ret; 291 292 if (memcmp(mss_signature, mss_hdr->signature, sizeof(mss_signature)) != 0) { 293 rtw89_err(rtwdev, "[ERR] wrong MSS signature\n"); 294 return -ENOENT; 295 } 296 297 if (mss_hdr->rmpfmt == MSS_POOL_RMP_TBL_BITMASK) { 298 rmp_tbl_size = (le16_to_cpu(mss_hdr->msskey_num_max) * 299 le16_to_cpu(mss_hdr->msscust_max) * 300 mss_hdr->mssdev_max) >> 3; 301 if (mss_hdr->defen) 302 rmp_tbl_size += FWDL_MSS_POOL_DEFKEYSETS_SIZE; 303 } else { 304 rtw89_err(rtwdev, "[ERR] MSS Key Pool Remap Table Format Unsupport:%X\n", 305 mss_hdr->rmpfmt); 306 return -EINVAL; 307 } 308 309 if (rmp_tbl_size + sizeof(*mss_hdr) != le32_to_cpu(mss_hdr->key_raw_offset)) { 310 rtw89_err(rtwdev, "[ERR] MSS Key Pool Format Error:0x%X + 0x%X != 0x%X\n", 311 rmp_tbl_size, (int)sizeof(*mss_hdr), 312 le32_to_cpu(mss_hdr->key_raw_offset)); 313 return -EINVAL; 314 } 315 316 key_sign_len = le16_to_cpu(section_content->key_sign_len.v) >> 2; 317 if (!key_sign_len) 318 key_sign_len = 512; 319 320 if (info->dsp_checksum) 321 key_sign_len += FWDL_SECURITY_CHKSUM_LEN; 322 323 *mssc_len = sizeof(*mss_hdr) + rmp_tbl_size + 324 le16_to_cpu(mss_hdr->keypair_num) * key_sign_len; 325 326 if (!sec->secure_boot) 327 goto out; 328 329 sb_sel_ver = get_unaligned_le32(§ion_content->sb_sel_ver.v); 330 if (sb_sel_ver && sb_sel_ver != sec->sb_sel_mgn) 331 goto ignore; 332 333 ret = __get_mssc_key_idx(rtwdev, mss_hdr, rmp_tbl_size, &real_key_idx); 334 if (ret) 335 goto ignore; 336 337 section_info->key_addr = content + 
section_info->len + 338 le32_to_cpu(mss_hdr->key_raw_offset) + 339 key_sign_len * real_key_idx; 340 section_info->key_len = key_sign_len; 341 section_info->key_idx = real_key_idx; 342 343 out: 344 if (info->secure_section_exist) { 345 section_info->ignore = true; 346 return 0; 347 } 348 349 info->secure_section_exist = true; 350 351 return 0; 352 353 ignore: 354 section_info->ignore = true; 355 356 return 0; 357 } 358 359 static int __check_secure_blacklist(struct rtw89_dev *rtwdev, 360 struct rtw89_fw_bin_info *info, 361 struct rtw89_fw_hdr_section_info *section_info, 362 const void *content) 363 { 364 const struct rtw89_fw_blacklist *chip_blacklist = rtwdev->chip->fw_blacklist; 365 const union rtw89_fw_section_mssc_content *section_content = content; 366 struct rtw89_fw_secure *sec = &rtwdev->fw.sec; 367 u8 byte_idx; 368 u8 bit_mask; 369 370 if (!sec->secure_boot) 371 return 0; 372 373 if (!info->secure_section_exist || section_info->ignore) 374 return 0; 375 376 if (!chip_blacklist) { 377 rtw89_warn(rtwdev, "chip no blacklist for secure firmware\n"); 378 return -ENOENT; 379 } 380 381 byte_idx = section_content->blacklist.bit_in_chip_list >> 3; 382 bit_mask = BIT(section_content->blacklist.bit_in_chip_list & 0x7); 383 384 if (section_content->blacklist.ver > chip_blacklist->ver) { 385 rtw89_warn(rtwdev, "chip blacklist out of date (%u, %u)\n", 386 section_content->blacklist.ver, chip_blacklist->ver); 387 return -EINVAL; 388 } 389 390 if (chip_blacklist->list[byte_idx] & bit_mask) { 391 rtw89_warn(rtwdev, "firmware %u in chip blacklist\n", 392 section_content->blacklist.ver); 393 return -EPERM; 394 } 395 396 return 0; 397 } 398 399 static int __parse_security_section(struct rtw89_dev *rtwdev, 400 struct rtw89_fw_bin_info *info, 401 struct rtw89_fw_hdr_section_info *section_info, 402 const void *content, 403 u32 *mssc_len) 404 { 405 struct rtw89_fw_secure *sec = &rtwdev->fw.sec; 406 int ret; 407 408 if ((section_info->mssc & FORMATTED_MSSC_MASK) == FORMATTED_MSSC) { 409 ret = __parse_formatted_mssc(rtwdev, info, section_info, 410 content, mssc_len); 411 if (ret) 412 return -EINVAL; 413 } else { 414 *mssc_len = section_info->mssc * FWDL_SECURITY_SIGLEN; 415 if (info->dsp_checksum) 416 *mssc_len += section_info->mssc * FWDL_SECURITY_CHKSUM_LEN; 417 418 if (sec->secure_boot) { 419 if (sec->mss_idx >= section_info->mssc) { 420 rtw89_err(rtwdev, "unexpected MSS %d >= %d\n", 421 sec->mss_idx, section_info->mssc); 422 return -EFAULT; 423 } 424 section_info->key_addr = content + section_info->len + 425 sec->mss_idx * FWDL_SECURITY_SIGLEN; 426 section_info->key_len = FWDL_SECURITY_SIGLEN; 427 } 428 429 info->secure_section_exist = true; 430 } 431 432 ret = __check_secure_blacklist(rtwdev, info, section_info, content); 433 WARN_ONCE(ret, "Current firmware in blacklist. 
Please update firmware.\n"); 434 435 return 0; 436 } 437 438 static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 len, 439 struct rtw89_fw_bin_info *info) 440 { 441 const struct rtw89_fw_hdr_v1 *fw_hdr = (const struct rtw89_fw_hdr_v1 *)fw; 442 struct rtw89_fw_hdr_section_info *section_info; 443 const struct rtw89_fw_dynhdr_hdr *fwdynhdr; 444 const struct rtw89_fw_hdr_section_v1 *section; 445 const u8 *fw_end = fw + len; 446 const u8 *bin; 447 u32 base_hdr_len; 448 u32 mssc_len; 449 int ret; 450 u32 i; 451 452 info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_SEC_NUM); 453 info->dsp_checksum = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_DSP_CHKSUM); 454 base_hdr_len = struct_size(fw_hdr, sections, info->section_num); 455 info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_DYN_HDR); 456 info->idmem_share_mode = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_IDMEM_SHARE_MODE); 457 458 if (info->dynamic_hdr_en) { 459 info->hdr_len = le32_get_bits(fw_hdr->w5, FW_HDR_V1_W5_HDR_SIZE); 460 info->dynamic_hdr_len = info->hdr_len - base_hdr_len; 461 fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len); 462 if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) { 463 rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n"); 464 return -EINVAL; 465 } 466 } else { 467 info->hdr_len = base_hdr_len; 468 info->dynamic_hdr_len = 0; 469 } 470 471 bin = fw + info->hdr_len; 472 473 /* jump to section header */ 474 section_info = info->section_info; 475 for (i = 0; i < info->section_num; i++) { 476 section = &fw_hdr->sections[i]; 477 478 section_info->type = 479 le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SECTIONTYPE); 480 section_info->len = 481 le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SEC_SIZE); 482 if (le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_CHECKSUM)) 483 section_info->len += FWDL_SECTION_CHKSUM_LEN; 484 section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_REDL); 485 section_info->dladdr = 486 le32_get_bits(section->w0, FWSECTION_HDR_V1_W0_DL_ADDR); 487 section_info->addr = bin; 488 489 if (section_info->type == FWDL_SECURITY_SECTION_TYPE) { 490 section_info->mssc = 491 le32_get_bits(section->w2, FWSECTION_HDR_V1_W2_MSSC); 492 493 ret = __parse_security_section(rtwdev, info, section_info, 494 bin, &mssc_len); 495 if (ret) 496 return ret; 497 } else { 498 section_info->mssc = 0; 499 mssc_len = 0; 500 } 501 502 rtw89_debug(rtwdev, RTW89_DBG_FW, 503 "section[%d] type=%d len=0x%-6x mssc=%d mssc_len=%d addr=%tx\n", 504 i, section_info->type, section_info->len, 505 section_info->mssc, mssc_len, bin - fw); 506 rtw89_debug(rtwdev, RTW89_DBG_FW, 507 " ignore=%d key_addr=%p (0x%tx) key_len=%d key_idx=%d\n", 508 section_info->ignore, section_info->key_addr, 509 section_info->key_addr ? 
510 section_info->key_addr - section_info->addr : 0, 511 section_info->key_len, section_info->key_idx); 512 513 bin += section_info->len + mssc_len; 514 section_info++; 515 } 516 517 if (fw_end != bin) { 518 rtw89_err(rtwdev, "[ERR]fw bin size\n"); 519 return -EINVAL; 520 } 521 522 if (!info->secure_section_exist) 523 rtw89_warn(rtwdev, "no firmware secure section\n"); 524 525 return 0; 526 } 527 528 static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, 529 const struct rtw89_fw_suit *fw_suit, 530 struct rtw89_fw_bin_info *info) 531 { 532 const u8 *fw = fw_suit->data; 533 u32 len = fw_suit->size; 534 535 if (!fw || !len) { 536 rtw89_err(rtwdev, "fw type %d isn't recognized\n", fw_suit->type); 537 return -ENOENT; 538 } 539 540 switch (fw_suit->hdr_ver) { 541 case 0: 542 return rtw89_fw_hdr_parser_v0(rtwdev, fw, len, info); 543 case 1: 544 return rtw89_fw_hdr_parser_v1(rtwdev, fw, len, info); 545 default: 546 return -ENOENT; 547 } 548 } 549 550 static 551 const struct rtw89_mfw_hdr *rtw89_mfw_get_hdr_ptr(struct rtw89_dev *rtwdev, 552 const struct firmware *firmware) 553 { 554 const struct rtw89_mfw_hdr *mfw_hdr; 555 556 if (sizeof(*mfw_hdr) > firmware->size) 557 return NULL; 558 559 mfw_hdr = (const struct rtw89_mfw_hdr *)&firmware->data[0]; 560 561 if (mfw_hdr->sig != RTW89_MFW_SIG) 562 return NULL; 563 564 return mfw_hdr; 565 } 566 567 static int rtw89_mfw_validate_hdr(struct rtw89_dev *rtwdev, 568 const struct firmware *firmware, 569 const struct rtw89_mfw_hdr *mfw_hdr) 570 { 571 const void *mfw = firmware->data; 572 u32 mfw_len = firmware->size; 573 u8 fw_nr = mfw_hdr->fw_nr; 574 const void *ptr; 575 576 if (fw_nr == 0) { 577 rtw89_err(rtwdev, "mfw header has no fw entry\n"); 578 return -ENOENT; 579 } 580 581 ptr = &mfw_hdr->info[fw_nr]; 582 583 if (ptr > mfw + mfw_len) { 584 rtw89_err(rtwdev, "mfw header out of address\n"); 585 return -EFAULT; 586 } 587 588 return 0; 589 } 590 591 static 592 int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 593 struct rtw89_fw_suit *fw_suit, bool nowarn) 594 { 595 struct rtw89_fw_info *fw_info = &rtwdev->fw; 596 const struct firmware *firmware = fw_info->req.firmware; 597 const struct rtw89_mfw_info *mfw_info = NULL, *tmp; 598 const struct rtw89_mfw_hdr *mfw_hdr; 599 const u8 *mfw = firmware->data; 600 u32 mfw_len = firmware->size; 601 int ret; 602 int i; 603 604 mfw_hdr = rtw89_mfw_get_hdr_ptr(rtwdev, firmware); 605 if (!mfw_hdr) { 606 rtw89_debug(rtwdev, RTW89_DBG_FW, "use legacy firmware\n"); 607 /* legacy firmware support normal type only */ 608 if (type != RTW89_FW_NORMAL) 609 return -EINVAL; 610 fw_suit->data = mfw; 611 fw_suit->size = mfw_len; 612 return 0; 613 } 614 615 ret = rtw89_mfw_validate_hdr(rtwdev, firmware, mfw_hdr); 616 if (ret) 617 return ret; 618 619 for (i = 0; i < mfw_hdr->fw_nr; i++) { 620 tmp = &mfw_hdr->info[i]; 621 if (tmp->type != type) 622 continue; 623 624 if (type == RTW89_FW_LOGFMT) { 625 mfw_info = tmp; 626 goto found; 627 } 628 629 /* Version order of WiFi firmware in firmware file are not in order, 630 * pass all firmware to find the equal or less but closest version. 
631 */ 632 if (tmp->cv <= rtwdev->hal.cv && !tmp->mp) { 633 if (!mfw_info || mfw_info->cv < tmp->cv) 634 mfw_info = tmp; 635 } 636 } 637 638 if (mfw_info) 639 goto found; 640 641 if (!nowarn) 642 rtw89_err(rtwdev, "no suitable firmware found\n"); 643 return -ENOENT; 644 645 found: 646 fw_suit->data = mfw + le32_to_cpu(mfw_info->shift); 647 fw_suit->size = le32_to_cpu(mfw_info->size); 648 649 if (fw_suit->data + fw_suit->size > mfw + mfw_len) { 650 rtw89_err(rtwdev, "fw_suit %d out of address\n", type); 651 return -EFAULT; 652 } 653 654 return 0; 655 } 656 657 static u32 rtw89_mfw_get_size(struct rtw89_dev *rtwdev) 658 { 659 struct rtw89_fw_info *fw_info = &rtwdev->fw; 660 const struct firmware *firmware = fw_info->req.firmware; 661 const struct rtw89_mfw_info *mfw_info; 662 const struct rtw89_mfw_hdr *mfw_hdr; 663 u32 size; 664 int ret; 665 666 mfw_hdr = rtw89_mfw_get_hdr_ptr(rtwdev, firmware); 667 if (!mfw_hdr) { 668 rtw89_warn(rtwdev, "not mfw format\n"); 669 return 0; 670 } 671 672 ret = rtw89_mfw_validate_hdr(rtwdev, firmware, mfw_hdr); 673 if (ret) 674 return ret; 675 676 mfw_info = &mfw_hdr->info[mfw_hdr->fw_nr - 1]; 677 size = le32_to_cpu(mfw_info->shift) + le32_to_cpu(mfw_info->size); 678 679 return size; 680 } 681 682 static void rtw89_fw_update_ver_v0(struct rtw89_dev *rtwdev, 683 struct rtw89_fw_suit *fw_suit, 684 const struct rtw89_fw_hdr *hdr) 685 { 686 fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MAJOR_VERSION); 687 fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MINOR_VERSION); 688 fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_W1_SUBVERSION); 689 fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_W1_SUBINDEX); 690 fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_W2_COMMITID); 691 fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_W5_YEAR); 692 fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_W4_MONTH); 693 fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_W4_DATE); 694 fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_W4_HOUR); 695 fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_W4_MIN); 696 fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_W7_CMD_VERSERION); 697 } 698 699 static void rtw89_fw_update_ver_v1(struct rtw89_dev *rtwdev, 700 struct rtw89_fw_suit *fw_suit, 701 const struct rtw89_fw_hdr_v1 *hdr) 702 { 703 fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MAJOR_VERSION); 704 fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MINOR_VERSION); 705 fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBVERSION); 706 fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBINDEX); 707 fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_V1_W2_COMMITID); 708 fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_V1_W5_YEAR); 709 fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MONTH); 710 fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_V1_W4_DATE); 711 fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_V1_W4_HOUR); 712 fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MIN); 713 fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_V1_W3_CMD_VERSERION); 714 } 715 716 static int rtw89_fw_update_ver(struct rtw89_dev *rtwdev, 717 enum rtw89_fw_type type, 718 struct rtw89_fw_suit *fw_suit) 719 { 720 const struct rtw89_fw_hdr *v0 = (const struct rtw89_fw_hdr *)fw_suit->data; 721 const struct rtw89_fw_hdr_v1 *v1 = (const struct rtw89_fw_hdr_v1 *)fw_suit->data; 722 723 if (type == RTW89_FW_LOGFMT) 724 return 0; 725 726 fw_suit->type = type; 727 fw_suit->hdr_ver = le32_get_bits(v0->w3, FW_HDR_W3_HDR_VER); 728 729 switch 
(fw_suit->hdr_ver) { 730 case 0: 731 rtw89_fw_update_ver_v0(rtwdev, fw_suit, v0); 732 break; 733 case 1: 734 rtw89_fw_update_ver_v1(rtwdev, fw_suit, v1); 735 break; 736 default: 737 rtw89_err(rtwdev, "Unknown firmware header version %u\n", 738 fw_suit->hdr_ver); 739 return -ENOENT; 740 } 741 742 rtw89_info(rtwdev, 743 "Firmware version %u.%u.%u.%u (%08x), cmd version %u, type %u\n", 744 fw_suit->major_ver, fw_suit->minor_ver, fw_suit->sub_ver, 745 fw_suit->sub_idex, fw_suit->commitid, fw_suit->cmd_ver, type); 746 747 return 0; 748 } 749 750 static 751 int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 752 bool nowarn) 753 { 754 struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type); 755 int ret; 756 757 ret = rtw89_mfw_recognize(rtwdev, type, fw_suit, nowarn); 758 if (ret) 759 return ret; 760 761 return rtw89_fw_update_ver(rtwdev, type, fw_suit); 762 } 763 764 static 765 int __rtw89_fw_recognize_from_elm(struct rtw89_dev *rtwdev, 766 const struct rtw89_fw_element_hdr *elm, 767 const union rtw89_fw_element_arg arg) 768 { 769 enum rtw89_fw_type type = arg.fw_type; 770 struct rtw89_hal *hal = &rtwdev->hal; 771 struct rtw89_fw_suit *fw_suit; 772 773 /* Version of BB MCU is in decreasing order in firmware file, so take 774 * first equal or less version, which is equal or less but closest version. 775 */ 776 if (hal->cv < elm->u.bbmcu.cv) 777 return 1; /* ignore this element */ 778 779 fw_suit = rtw89_fw_suit_get(rtwdev, type); 780 if (fw_suit->data) 781 return 1; /* ignore this element (a firmware is taken already) */ 782 783 fw_suit->data = elm->u.bbmcu.contents; 784 fw_suit->size = le32_to_cpu(elm->size); 785 786 return rtw89_fw_update_ver(rtwdev, type, fw_suit); 787 } 788 789 #define __DEF_FW_FEAT_COND(__cond, __op) \ 790 static bool __fw_feat_cond_ ## __cond(u32 suit_ver_code, u32 comp_ver_code) \ 791 { \ 792 return suit_ver_code __op comp_ver_code; \ 793 } 794 795 __DEF_FW_FEAT_COND(ge, >=); /* greater or equal */ 796 __DEF_FW_FEAT_COND(le, <=); /* less or equal */ 797 __DEF_FW_FEAT_COND(lt, <); /* less than */ 798 799 struct __fw_feat_cfg { 800 enum rtw89_core_chip_id chip_id; 801 enum rtw89_fw_feature feature; 802 u32 ver_code; 803 bool (*cond)(u32 suit_ver_code, u32 comp_ver_code); 804 }; 805 806 #define __CFG_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat) \ 807 { \ 808 .chip_id = _chip, \ 809 .feature = RTW89_FW_FEATURE_ ## _feat, \ 810 .ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \ 811 .cond = __fw_feat_cond_ ## _cond, \ 812 } 813 814 static const struct __fw_feat_cfg fw_feat_tbl[] = { 815 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, TX_WAKE), 816 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, SCAN_OFFLOAD), 817 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 41, 0, CRASH_TRIGGER), 818 __CFG_FW_FEAT(RTL8852A, le, 0, 13, 29, 0, OLD_HT_RA_FORMAT), 819 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD), 820 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE), 821 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER), 822 __CFG_FW_FEAT(RTL8852A, lt, 0, 13, 37, 0, NO_WOW_CPU_IO_RX), 823 __CFG_FW_FEAT(RTL8852A, lt, 0, 13, 38, 0, NO_PACKET_DROP), 824 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, NO_LPS_PG), 825 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, TX_WAKE), 826 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, CRASH_TRIGGER), 827 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, SCAN_OFFLOAD), 828 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 7, BEACON_FILTER), 829 __CFG_FW_FEAT(RTL8852B, lt, 0, 29, 30, 0, NO_WOW_CPU_IO_RX), 830 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, 
NO_LPS_PG), 831 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, TX_WAKE), 832 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 90, 0, CRASH_TRIGGER), 833 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 91, 0, SCAN_OFFLOAD), 834 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 110, 0, BEACON_FILTER), 835 __CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS), 836 __CFG_FW_FEAT(RTL8852C, ge, 0, 0, 0, 0, RFK_NTFY_MCC_V0), 837 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE), 838 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD), 839 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER), 840 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 56, 10, BEACON_FILTER), 841 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 80, 0, WOW_REASON_V1), 842 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 128, 0, BEACON_LOSS_COUNT_V1), 843 __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 30, 0, CRASH_TRIGGER), 844 __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 11, 0, MACID_PAUSE_SLEEP), 845 __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 35, 0, SCAN_OFFLOAD), 846 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 21, 0, SCAN_OFFLOAD_BE_V0), 847 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 12, 0, BEACON_FILTER), 848 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 22, 0, WOW_REASON_V1), 849 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 28, 0, RFK_IQK_V0), 850 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 31, 0, RFK_PRE_NOTIFY_V0), 851 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 31, 0, LPS_CH_INFO), 852 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 42, 0, RFK_RXDCK_V0), 853 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 46, 0, NOTIFY_AP_INFO), 854 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 47, 0, CH_INFO_BE_V0), 855 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 49, 0, RFK_PRE_NOTIFY_V1), 856 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 51, 0, NO_PHYCAP_P1), 857 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 64, 0, NO_POWER_DIFFERENCE), 858 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 71, 0, BEACON_LOSS_COUNT_V1), 859 }; 860 861 static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw, 862 const struct rtw89_chip_info *chip, 863 u32 ver_code) 864 { 865 int i; 866 867 for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) { 868 const struct __fw_feat_cfg *ent = &fw_feat_tbl[i]; 869 870 if (chip->chip_id != ent->chip_id) 871 continue; 872 873 if (ent->cond(ver_code, ent->ver_code)) 874 RTW89_SET_FW_FEATURE(ent->feature, fw); 875 } 876 } 877 878 static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev) 879 { 880 const struct rtw89_chip_info *chip = rtwdev->chip; 881 const struct rtw89_fw_suit *fw_suit; 882 u32 suit_ver_code; 883 884 fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL); 885 suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit); 886 887 rtw89_fw_iterate_feature_cfg(&rtwdev->fw, chip, suit_ver_code); 888 } 889 890 const struct firmware * 891 rtw89_early_fw_feature_recognize(struct device *device, 892 const struct rtw89_chip_info *chip, 893 struct rtw89_fw_info *early_fw, 894 int *used_fw_format) 895 { 896 const struct firmware *firmware; 897 char fw_name[64]; 898 int fw_format; 899 u32 ver_code; 900 int ret; 901 902 for (fw_format = chip->fw_format_max; fw_format >= 0; fw_format--) { 903 rtw89_fw_get_filename(fw_name, sizeof(fw_name), 904 chip->fw_basename, fw_format); 905 906 ret = request_firmware(&firmware, fw_name, device); 907 if (!ret) { 908 dev_info(device, "loaded firmware %s\n", fw_name); 909 *used_fw_format = fw_format; 910 break; 911 } 912 } 913 914 if (ret) { 915 dev_err(device, "failed to early request firmware: %d\n", ret); 916 return NULL; 917 } 918 919 ver_code = rtw89_compat_fw_hdr_ver_code(firmware->data); 920 921 if (!ver_code) 922 goto out; 923 924 rtw89_fw_iterate_feature_cfg(early_fw, chip, ver_code); 925 926 out: 927 return 
firmware; 928 } 929 930 static int rtw89_fw_validate_ver_required(struct rtw89_dev *rtwdev) 931 { 932 const struct rtw89_chip_variant *variant = rtwdev->variant; 933 const struct rtw89_fw_suit *fw_suit; 934 u32 suit_ver_code; 935 936 if (!variant) 937 return 0; 938 939 fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL); 940 suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit); 941 942 if (variant->fw_min_ver_code > suit_ver_code) { 943 rtw89_err(rtwdev, "minimum required firmware version is 0x%x\n", 944 variant->fw_min_ver_code); 945 return -ENOENT; 946 } 947 948 return 0; 949 } 950 951 int rtw89_fw_recognize(struct rtw89_dev *rtwdev) 952 { 953 const struct rtw89_chip_info *chip = rtwdev->chip; 954 int ret; 955 956 if (chip->try_ce_fw) { 957 ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL_CE, true); 958 if (!ret) 959 goto normal_done; 960 } 961 962 ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL, false); 963 if (ret) 964 return ret; 965 966 normal_done: 967 ret = rtw89_fw_validate_ver_required(rtwdev); 968 if (ret) 969 return ret; 970 971 /* It still works if wowlan firmware isn't existing. */ 972 __rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN, false); 973 974 /* It still works if log format file isn't existing. */ 975 __rtw89_fw_recognize(rtwdev, RTW89_FW_LOGFMT, true); 976 977 rtw89_fw_recognize_features(rtwdev); 978 979 rtw89_coex_recognize_ver(rtwdev); 980 981 return 0; 982 } 983 984 static 985 int rtw89_build_phy_tbl_from_elm(struct rtw89_dev *rtwdev, 986 const struct rtw89_fw_element_hdr *elm, 987 const union rtw89_fw_element_arg arg) 988 { 989 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; 990 struct rtw89_phy_table *tbl; 991 struct rtw89_reg2_def *regs; 992 enum rtw89_rf_path rf_path; 993 u32 n_regs, i; 994 u8 idx; 995 996 tbl = kzalloc(sizeof(*tbl), GFP_KERNEL); 997 if (!tbl) 998 return -ENOMEM; 999 1000 switch (le32_to_cpu(elm->id)) { 1001 case RTW89_FW_ELEMENT_ID_BB_REG: 1002 elm_info->bb_tbl = tbl; 1003 break; 1004 case RTW89_FW_ELEMENT_ID_BB_GAIN: 1005 elm_info->bb_gain = tbl; 1006 break; 1007 case RTW89_FW_ELEMENT_ID_RADIO_A: 1008 case RTW89_FW_ELEMENT_ID_RADIO_B: 1009 case RTW89_FW_ELEMENT_ID_RADIO_C: 1010 case RTW89_FW_ELEMENT_ID_RADIO_D: 1011 rf_path = arg.rf_path; 1012 idx = elm->u.reg2.idx; 1013 1014 elm_info->rf_radio[idx] = tbl; 1015 tbl->rf_path = rf_path; 1016 tbl->config = rtw89_phy_config_rf_reg_v1; 1017 break; 1018 case RTW89_FW_ELEMENT_ID_RF_NCTL: 1019 elm_info->rf_nctl = tbl; 1020 break; 1021 default: 1022 kfree(tbl); 1023 return -ENOENT; 1024 } 1025 1026 n_regs = le32_to_cpu(elm->size) / sizeof(tbl->regs[0]); 1027 regs = kcalloc(n_regs, sizeof(*regs), GFP_KERNEL); 1028 if (!regs) 1029 goto out; 1030 1031 for (i = 0; i < n_regs; i++) { 1032 regs[i].addr = le32_to_cpu(elm->u.reg2.regs[i].addr); 1033 regs[i].data = le32_to_cpu(elm->u.reg2.regs[i].data); 1034 } 1035 1036 tbl->n_regs = n_regs; 1037 tbl->regs = regs; 1038 1039 return 0; 1040 1041 out: 1042 kfree(tbl); 1043 return -ENOMEM; 1044 } 1045 1046 static 1047 int rtw89_fw_recognize_txpwr_from_elm(struct rtw89_dev *rtwdev, 1048 const struct rtw89_fw_element_hdr *elm, 1049 const union rtw89_fw_element_arg arg) 1050 { 1051 const struct __rtw89_fw_txpwr_element *txpwr_elm = &elm->u.txpwr; 1052 const unsigned long offset = arg.offset; 1053 struct rtw89_efuse *efuse = &rtwdev->efuse; 1054 struct rtw89_txpwr_conf *conf; 1055 1056 if (!rtwdev->rfe_data) { 1057 rtwdev->rfe_data = kzalloc(sizeof(*rtwdev->rfe_data), GFP_KERNEL); 1058 if (!rtwdev->rfe_data) 1059 return -ENOMEM; 1060 } 1061 1062 conf = (void 
*)rtwdev->rfe_data + offset; 1063 1064 /* if multiple matched, take the last eventually */ 1065 if (txpwr_elm->rfe_type == efuse->rfe_type) 1066 goto setup; 1067 1068 /* without one is matched, accept default */ 1069 if (txpwr_elm->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE && 1070 (!rtw89_txpwr_conf_valid(conf) || 1071 conf->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE)) 1072 goto setup; 1073 1074 rtw89_debug(rtwdev, RTW89_DBG_FW, "skip txpwr element ID %u RFE %u\n", 1075 elm->id, txpwr_elm->rfe_type); 1076 return 0; 1077 1078 setup: 1079 rtw89_debug(rtwdev, RTW89_DBG_FW, "take txpwr element ID %u RFE %u\n", 1080 elm->id, txpwr_elm->rfe_type); 1081 1082 conf->rfe_type = txpwr_elm->rfe_type; 1083 conf->ent_sz = txpwr_elm->ent_sz; 1084 conf->num_ents = le32_to_cpu(txpwr_elm->num_ents); 1085 conf->data = txpwr_elm->content; 1086 return 0; 1087 } 1088 1089 static 1090 int rtw89_build_txpwr_trk_tbl_from_elm(struct rtw89_dev *rtwdev, 1091 const struct rtw89_fw_element_hdr *elm, 1092 const union rtw89_fw_element_arg arg) 1093 { 1094 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; 1095 const struct rtw89_chip_info *chip = rtwdev->chip; 1096 u32 needed_bitmap = 0; 1097 u32 offset = 0; 1098 int subband; 1099 u32 bitmap; 1100 int type; 1101 1102 if (chip->support_bands & BIT(NL80211_BAND_6GHZ)) 1103 needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_6GHZ; 1104 if (chip->support_bands & BIT(NL80211_BAND_5GHZ)) 1105 needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_5GHZ; 1106 if (chip->support_bands & BIT(NL80211_BAND_2GHZ)) 1107 needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_2GHZ; 1108 1109 bitmap = le32_to_cpu(elm->u.txpwr_trk.bitmap); 1110 1111 if ((bitmap & needed_bitmap) != needed_bitmap) { 1112 rtw89_warn(rtwdev, "needed txpwr trk bitmap %08x but %08x\n", 1113 needed_bitmap, bitmap); 1114 return -ENOENT; 1115 } 1116 1117 elm_info->txpwr_trk = kzalloc(sizeof(*elm_info->txpwr_trk), GFP_KERNEL); 1118 if (!elm_info->txpwr_trk) 1119 return -ENOMEM; 1120 1121 for (type = 0; bitmap; type++, bitmap >>= 1) { 1122 if (!(bitmap & BIT(0))) 1123 continue; 1124 1125 if (type >= __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_START && 1126 type <= __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_MAX) 1127 subband = 4; 1128 else if (type >= __RTW89_FW_TXPWR_TRK_TYPE_5GHZ_START && 1129 type <= __RTW89_FW_TXPWR_TRK_TYPE_5GHZ_MAX) 1130 subband = 3; 1131 else if (type >= __RTW89_FW_TXPWR_TRK_TYPE_2GHZ_START && 1132 type <= __RTW89_FW_TXPWR_TRK_TYPE_2GHZ_MAX) 1133 subband = 1; 1134 else 1135 break; 1136 1137 elm_info->txpwr_trk->delta[type] = &elm->u.txpwr_trk.contents[offset]; 1138 1139 offset += subband; 1140 if (offset * DELTA_SWINGIDX_SIZE > le32_to_cpu(elm->size)) 1141 goto err; 1142 } 1143 1144 return 0; 1145 1146 err: 1147 rtw89_warn(rtwdev, "unexpected txpwr trk offset %d over size %d\n", 1148 offset, le32_to_cpu(elm->size)); 1149 kfree(elm_info->txpwr_trk); 1150 elm_info->txpwr_trk = NULL; 1151 1152 return -EFAULT; 1153 } 1154 1155 static 1156 int rtw89_build_rfk_log_fmt_from_elm(struct rtw89_dev *rtwdev, 1157 const struct rtw89_fw_element_hdr *elm, 1158 const union rtw89_fw_element_arg arg) 1159 { 1160 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; 1161 u8 rfk_id; 1162 1163 if (elm_info->rfk_log_fmt) 1164 goto allocated; 1165 1166 elm_info->rfk_log_fmt = kzalloc(sizeof(*elm_info->rfk_log_fmt), GFP_KERNEL); 1167 if (!elm_info->rfk_log_fmt) 1168 return 1; /* this is an optional element, so just ignore this */ 1169 1170 allocated: 1171 rfk_id = elm->u.rfk_log_fmt.rfk_id; 1172 if (rfk_id >= 
RTW89_PHY_C2H_RFK_LOG_FUNC_NUM) 1173 return 1; 1174 1175 elm_info->rfk_log_fmt->elm[rfk_id] = elm; 1176 1177 return 0; 1178 } 1179 1180 static bool rtw89_regd_entcpy(struct rtw89_regd *regd, const void *cursor, 1181 u8 cursor_size) 1182 { 1183 /* fill default values if needed for backward compatibility */ 1184 struct rtw89_fw_regd_entry entry = { 1185 .rule_2ghz = RTW89_NA, 1186 .rule_5ghz = RTW89_NA, 1187 .rule_6ghz = RTW89_NA, 1188 .fmap = cpu_to_le32(0x0), 1189 }; 1190 u8 valid_size = min_t(u8, sizeof(entry), cursor_size); 1191 unsigned int i; 1192 u32 fmap; 1193 1194 memcpy(&entry, cursor, valid_size); 1195 memset(regd, 0, sizeof(*regd)); 1196 1197 regd->alpha2[0] = entry.alpha2_0; 1198 regd->alpha2[1] = entry.alpha2_1; 1199 regd->alpha2[2] = '\0'; 1200 1201 /* also need to consider forward compatibility */ 1202 regd->txpwr_regd[RTW89_BAND_2G] = entry.rule_2ghz < RTW89_REGD_NUM ? 1203 entry.rule_2ghz : RTW89_NA; 1204 regd->txpwr_regd[RTW89_BAND_5G] = entry.rule_5ghz < RTW89_REGD_NUM ? 1205 entry.rule_5ghz : RTW89_NA; 1206 regd->txpwr_regd[RTW89_BAND_6G] = entry.rule_6ghz < RTW89_REGD_NUM ? 1207 entry.rule_6ghz : RTW89_NA; 1208 1209 BUILD_BUG_ON(sizeof(fmap) != sizeof(entry.fmap)); 1210 BUILD_BUG_ON(sizeof(fmap) * 8 < NUM_OF_RTW89_REGD_FUNC); 1211 1212 fmap = le32_to_cpu(entry.fmap); 1213 for (i = 0; i < NUM_OF_RTW89_REGD_FUNC; i++) { 1214 if (fmap & BIT(i)) 1215 set_bit(i, regd->func_bitmap); 1216 } 1217 1218 return true; 1219 } 1220 1221 #define rtw89_for_each_in_regd_element(regd, element) \ 1222 for (const void *cursor = (element)->content, \ 1223 *end = (element)->content + \ 1224 le32_to_cpu((element)->num_ents) * (element)->ent_sz; \ 1225 cursor < end; cursor += (element)->ent_sz) \ 1226 if (rtw89_regd_entcpy(regd, cursor, (element)->ent_sz)) 1227 1228 static 1229 int rtw89_recognize_regd_from_elm(struct rtw89_dev *rtwdev, 1230 const struct rtw89_fw_element_hdr *elm, 1231 const union rtw89_fw_element_arg arg) 1232 { 1233 const struct __rtw89_fw_regd_element *regd_elm = &elm->u.regd; 1234 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; 1235 u32 num_ents = le32_to_cpu(regd_elm->num_ents); 1236 struct rtw89_regd_data *p; 1237 struct rtw89_regd regd; 1238 u32 i = 0; 1239 1240 if (num_ents > RTW89_REGD_MAX_COUNTRY_NUM) { 1241 rtw89_warn(rtwdev, 1242 "regd element ents (%d) are over max num (%d)\n", 1243 num_ents, RTW89_REGD_MAX_COUNTRY_NUM); 1244 rtw89_warn(rtwdev, 1245 "regd element ignore and take another/common\n"); 1246 return 1; 1247 } 1248 1249 if (elm_info->regd) { 1250 rtw89_debug(rtwdev, RTW89_DBG_REGD, 1251 "regd element take the latter\n"); 1252 devm_kfree(rtwdev->dev, elm_info->regd); 1253 elm_info->regd = NULL; 1254 } 1255 1256 p = devm_kzalloc(rtwdev->dev, struct_size(p, map, num_ents), GFP_KERNEL); 1257 if (!p) 1258 return -ENOMEM; 1259 1260 p->nr = num_ents; 1261 rtw89_for_each_in_regd_element(®d, regd_elm) 1262 p->map[i++] = regd; 1263 1264 if (i != num_ents) { 1265 rtw89_err(rtwdev, "regd element has %d invalid ents\n", 1266 num_ents - i); 1267 devm_kfree(rtwdev->dev, p); 1268 return -EINVAL; 1269 } 1270 1271 elm_info->regd = p; 1272 return 0; 1273 } 1274 1275 static const struct rtw89_fw_element_handler __fw_element_handlers[] = { 1276 [RTW89_FW_ELEMENT_ID_BBMCU0] = {__rtw89_fw_recognize_from_elm, 1277 { .fw_type = RTW89_FW_BBMCU0 }, NULL}, 1278 [RTW89_FW_ELEMENT_ID_BBMCU1] = {__rtw89_fw_recognize_from_elm, 1279 { .fw_type = RTW89_FW_BBMCU1 }, NULL}, 1280 [RTW89_FW_ELEMENT_ID_BB_REG] = {rtw89_build_phy_tbl_from_elm, {}, "BB"}, 1281 
[RTW89_FW_ELEMENT_ID_BB_GAIN] = {rtw89_build_phy_tbl_from_elm, {}, NULL}, 1282 [RTW89_FW_ELEMENT_ID_RADIO_A] = {rtw89_build_phy_tbl_from_elm, 1283 { .rf_path = RF_PATH_A }, "radio A"}, 1284 [RTW89_FW_ELEMENT_ID_RADIO_B] = {rtw89_build_phy_tbl_from_elm, 1285 { .rf_path = RF_PATH_B }, NULL}, 1286 [RTW89_FW_ELEMENT_ID_RADIO_C] = {rtw89_build_phy_tbl_from_elm, 1287 { .rf_path = RF_PATH_C }, NULL}, 1288 [RTW89_FW_ELEMENT_ID_RADIO_D] = {rtw89_build_phy_tbl_from_elm, 1289 { .rf_path = RF_PATH_D }, NULL}, 1290 [RTW89_FW_ELEMENT_ID_RF_NCTL] = {rtw89_build_phy_tbl_from_elm, {}, "NCTL"}, 1291 [RTW89_FW_ELEMENT_ID_TXPWR_BYRATE] = { 1292 rtw89_fw_recognize_txpwr_from_elm, 1293 { .offset = offsetof(struct rtw89_rfe_data, byrate.conf) }, "TXPWR", 1294 }, 1295 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_2GHZ] = { 1296 rtw89_fw_recognize_txpwr_from_elm, 1297 { .offset = offsetof(struct rtw89_rfe_data, lmt_2ghz.conf) }, NULL, 1298 }, 1299 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_5GHZ] = { 1300 rtw89_fw_recognize_txpwr_from_elm, 1301 { .offset = offsetof(struct rtw89_rfe_data, lmt_5ghz.conf) }, NULL, 1302 }, 1303 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_6GHZ] = { 1304 rtw89_fw_recognize_txpwr_from_elm, 1305 { .offset = offsetof(struct rtw89_rfe_data, lmt_6ghz.conf) }, NULL, 1306 }, 1307 [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_2GHZ] = { 1308 rtw89_fw_recognize_txpwr_from_elm, 1309 { .offset = offsetof(struct rtw89_rfe_data, da_lmt_2ghz.conf) }, NULL, 1310 }, 1311 [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_5GHZ] = { 1312 rtw89_fw_recognize_txpwr_from_elm, 1313 { .offset = offsetof(struct rtw89_rfe_data, da_lmt_5ghz.conf) }, NULL, 1314 }, 1315 [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_6GHZ] = { 1316 rtw89_fw_recognize_txpwr_from_elm, 1317 { .offset = offsetof(struct rtw89_rfe_data, da_lmt_6ghz.conf) }, NULL, 1318 }, 1319 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_2GHZ] = { 1320 rtw89_fw_recognize_txpwr_from_elm, 1321 { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_2ghz.conf) }, NULL, 1322 }, 1323 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_5GHZ] = { 1324 rtw89_fw_recognize_txpwr_from_elm, 1325 { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_5ghz.conf) }, NULL, 1326 }, 1327 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_6GHZ] = { 1328 rtw89_fw_recognize_txpwr_from_elm, 1329 { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_6ghz.conf) }, NULL, 1330 }, 1331 [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_2GHZ] = { 1332 rtw89_fw_recognize_txpwr_from_elm, 1333 { .offset = offsetof(struct rtw89_rfe_data, da_lmt_ru_2ghz.conf) }, NULL, 1334 }, 1335 [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_5GHZ] = { 1336 rtw89_fw_recognize_txpwr_from_elm, 1337 { .offset = offsetof(struct rtw89_rfe_data, da_lmt_ru_5ghz.conf) }, NULL, 1338 }, 1339 [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_6GHZ] = { 1340 rtw89_fw_recognize_txpwr_from_elm, 1341 { .offset = offsetof(struct rtw89_rfe_data, da_lmt_ru_6ghz.conf) }, NULL, 1342 }, 1343 [RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT] = { 1344 rtw89_fw_recognize_txpwr_from_elm, 1345 { .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt.conf) }, NULL, 1346 }, 1347 [RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT_RU] = { 1348 rtw89_fw_recognize_txpwr_from_elm, 1349 { .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt_ru.conf) }, NULL, 1350 }, 1351 [RTW89_FW_ELEMENT_ID_TXPWR_TRK] = { 1352 rtw89_build_txpwr_trk_tbl_from_elm, {}, "PWR_TRK", 1353 }, 1354 [RTW89_FW_ELEMENT_ID_RFKLOG_FMT] = { 1355 rtw89_build_rfk_log_fmt_from_elm, {}, NULL, 1356 }, 1357 [RTW89_FW_ELEMENT_ID_REGD] = { 1358 rtw89_recognize_regd_from_elm, {}, "REGD", 1359 }, 1360 }; 1361 1362 int rtw89_fw_recognize_elements(struct rtw89_dev *rtwdev) 1363 { 1364 
struct rtw89_fw_info *fw_info = &rtwdev->fw; 1365 const struct firmware *firmware = fw_info->req.firmware; 1366 const struct rtw89_chip_info *chip = rtwdev->chip; 1367 u32 unrecognized_elements = chip->needed_fw_elms; 1368 const struct rtw89_fw_element_handler *handler; 1369 const struct rtw89_fw_element_hdr *hdr; 1370 u32 elm_size; 1371 u32 elem_id; 1372 u32 offset; 1373 int ret; 1374 1375 BUILD_BUG_ON(sizeof(chip->needed_fw_elms) * 8 < RTW89_FW_ELEMENT_ID_NUM); 1376 1377 offset = rtw89_mfw_get_size(rtwdev); 1378 offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN); 1379 if (offset == 0) 1380 return -EINVAL; 1381 1382 while (offset + sizeof(*hdr) < firmware->size) { 1383 hdr = (const struct rtw89_fw_element_hdr *)(firmware->data + offset); 1384 1385 elm_size = le32_to_cpu(hdr->size); 1386 if (offset + elm_size >= firmware->size) { 1387 rtw89_warn(rtwdev, "firmware element size exceeds\n"); 1388 break; 1389 } 1390 1391 elem_id = le32_to_cpu(hdr->id); 1392 if (elem_id >= ARRAY_SIZE(__fw_element_handlers)) 1393 goto next; 1394 1395 handler = &__fw_element_handlers[elem_id]; 1396 if (!handler->fn) 1397 goto next; 1398 1399 ret = handler->fn(rtwdev, hdr, handler->arg); 1400 if (ret == 1) /* ignore this element */ 1401 goto next; 1402 if (ret) 1403 return ret; 1404 1405 if (handler->name) 1406 rtw89_info(rtwdev, "Firmware element %s version: %4ph\n", 1407 handler->name, hdr->ver); 1408 1409 unrecognized_elements &= ~BIT(elem_id); 1410 next: 1411 offset += sizeof(*hdr) + elm_size; 1412 offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN); 1413 } 1414 1415 if (unrecognized_elements) { 1416 rtw89_err(rtwdev, "Firmware elements 0x%08x are unrecognized\n", 1417 unrecognized_elements); 1418 return -ENOENT; 1419 } 1420 1421 return 0; 1422 } 1423 1424 void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb, 1425 u8 type, u8 cat, u8 class, u8 func, 1426 bool rack, bool dack, u32 len) 1427 { 1428 struct fwcmd_hdr *hdr; 1429 1430 hdr = (struct fwcmd_hdr *)skb_push(skb, 8); 1431 1432 if (!(rtwdev->fw.h2c_seq % 4)) 1433 rack = true; 1434 hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) | 1435 FIELD_PREP(H2C_HDR_CAT, cat) | 1436 FIELD_PREP(H2C_HDR_CLASS, class) | 1437 FIELD_PREP(H2C_HDR_FUNC, func) | 1438 FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq)); 1439 1440 hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN, 1441 len + H2C_HEADER_LEN) | 1442 (rack ? H2C_HDR_REC_ACK : 0) | 1443 (dack ? 
H2C_HDR_DONE_ACK : 0)); 1444 1445 rtwdev->fw.h2c_seq++; 1446 } 1447 1448 static void rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev *rtwdev, 1449 struct sk_buff *skb, 1450 u8 type, u8 cat, u8 class, u8 func, 1451 u32 len) 1452 { 1453 struct fwcmd_hdr *hdr; 1454 1455 hdr = (struct fwcmd_hdr *)skb_push(skb, 8); 1456 1457 hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) | 1458 FIELD_PREP(H2C_HDR_CAT, cat) | 1459 FIELD_PREP(H2C_HDR_CLASS, class) | 1460 FIELD_PREP(H2C_HDR_FUNC, func) | 1461 FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq)); 1462 1463 hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN, 1464 len + H2C_HEADER_LEN)); 1465 } 1466 1467 static u32 __rtw89_fw_download_tweak_hdr_v0(struct rtw89_dev *rtwdev, 1468 struct rtw89_fw_bin_info *info, 1469 struct rtw89_fw_hdr *fw_hdr) 1470 { 1471 struct rtw89_fw_hdr_section_info *section_info; 1472 struct rtw89_fw_hdr_section *section; 1473 int i; 1474 1475 le32p_replace_bits(&fw_hdr->w7, FWDL_SECTION_PER_PKT_LEN, 1476 FW_HDR_W7_PART_SIZE); 1477 1478 for (i = 0; i < info->section_num; i++) { 1479 section_info = &info->section_info[i]; 1480 1481 if (!section_info->len_override) 1482 continue; 1483 1484 section = &fw_hdr->sections[i]; 1485 le32p_replace_bits(§ion->w1, section_info->len_override, 1486 FWSECTION_HDR_W1_SEC_SIZE); 1487 } 1488 1489 return 0; 1490 } 1491 1492 static u32 __rtw89_fw_download_tweak_hdr_v1(struct rtw89_dev *rtwdev, 1493 struct rtw89_fw_bin_info *info, 1494 struct rtw89_fw_hdr_v1 *fw_hdr) 1495 { 1496 struct rtw89_fw_hdr_section_info *section_info; 1497 struct rtw89_fw_hdr_section_v1 *section; 1498 u8 dst_sec_idx = 0; 1499 u8 sec_idx; 1500 1501 le32p_replace_bits(&fw_hdr->w7, FWDL_SECTION_PER_PKT_LEN, 1502 FW_HDR_V1_W7_PART_SIZE); 1503 1504 for (sec_idx = 0; sec_idx < info->section_num; sec_idx++) { 1505 section_info = &info->section_info[sec_idx]; 1506 section = &fw_hdr->sections[sec_idx]; 1507 1508 if (section_info->ignore) 1509 continue; 1510 1511 if (dst_sec_idx != sec_idx) 1512 fw_hdr->sections[dst_sec_idx] = *section; 1513 1514 dst_sec_idx++; 1515 } 1516 1517 le32p_replace_bits(&fw_hdr->w6, dst_sec_idx, FW_HDR_V1_W6_SEC_NUM); 1518 1519 return (info->section_num - dst_sec_idx) * sizeof(*section); 1520 } 1521 1522 static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, 1523 const struct rtw89_fw_suit *fw_suit, 1524 struct rtw89_fw_bin_info *info) 1525 { 1526 u32 len = info->hdr_len - info->dynamic_hdr_len; 1527 struct rtw89_fw_hdr_v1 *fw_hdr_v1; 1528 const u8 *fw = fw_suit->data; 1529 struct rtw89_fw_hdr *fw_hdr; 1530 struct sk_buff *skb; 1531 u32 truncated; 1532 u32 ret = 0; 1533 1534 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 1535 if (!skb) { 1536 rtw89_err(rtwdev, "failed to alloc skb for fw hdr dl\n"); 1537 return -ENOMEM; 1538 } 1539 1540 skb_put_data(skb, fw, len); 1541 1542 switch (fw_suit->hdr_ver) { 1543 case 0: 1544 fw_hdr = (struct rtw89_fw_hdr *)skb->data; 1545 truncated = __rtw89_fw_download_tweak_hdr_v0(rtwdev, info, fw_hdr); 1546 break; 1547 case 1: 1548 fw_hdr_v1 = (struct rtw89_fw_hdr_v1 *)skb->data; 1549 truncated = __rtw89_fw_download_tweak_hdr_v1(rtwdev, info, fw_hdr_v1); 1550 break; 1551 default: 1552 ret = -EOPNOTSUPP; 1553 goto fail; 1554 } 1555 1556 if (truncated) { 1557 len -= truncated; 1558 skb_trim(skb, len); 1559 } 1560 1561 rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C, 1562 H2C_CAT_MAC, H2C_CL_MAC_FWDL, 1563 H2C_FUNC_MAC_FWHDR_DL, len); 1564 1565 ret = rtw89_h2c_tx(rtwdev, skb, false); 1566 if (ret) { 1567 rtw89_err(rtwdev, "failed to send h2c\n"); 1568 goto 
fail; 1569 } 1570 1571 return 0; 1572 fail: 1573 dev_kfree_skb_any(skb); 1574 1575 return ret; 1576 } 1577 1578 static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, 1579 const struct rtw89_fw_suit *fw_suit, 1580 struct rtw89_fw_bin_info *info) 1581 { 1582 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 1583 int ret; 1584 1585 ret = __rtw89_fw_download_hdr(rtwdev, fw_suit, info); 1586 if (ret) { 1587 rtw89_err(rtwdev, "[ERR]FW header download\n"); 1588 return ret; 1589 } 1590 1591 ret = mac->fwdl_check_path_ready(rtwdev, false); 1592 if (ret) { 1593 rtw89_err(rtwdev, "[ERR]FWDL path ready\n"); 1594 return ret; 1595 } 1596 1597 rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0); 1598 rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0); 1599 1600 return 0; 1601 } 1602 1603 static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev, 1604 struct rtw89_fw_hdr_section_info *info) 1605 { 1606 struct sk_buff *skb; 1607 const u8 *section = info->addr; 1608 u32 residue_len = info->len; 1609 bool copy_key = false; 1610 u32 pkt_len; 1611 int ret; 1612 1613 if (info->ignore) 1614 return 0; 1615 1616 if (info->len_override) { 1617 if (info->len_override > info->len) 1618 rtw89_warn(rtwdev, "override length %u larger than original %u\n", 1619 info->len_override, info->len); 1620 else 1621 residue_len = info->len_override; 1622 } 1623 1624 if (info->key_addr && info->key_len) { 1625 if (residue_len > FWDL_SECTION_PER_PKT_LEN || info->len < info->key_len) 1626 rtw89_warn(rtwdev, 1627 "ignore to copy key data because of len %d, %d, %d, %d\n", 1628 info->len, FWDL_SECTION_PER_PKT_LEN, 1629 info->key_len, residue_len); 1630 else 1631 copy_key = true; 1632 } 1633 1634 while (residue_len) { 1635 if (residue_len >= FWDL_SECTION_PER_PKT_LEN) 1636 pkt_len = FWDL_SECTION_PER_PKT_LEN; 1637 else 1638 pkt_len = residue_len; 1639 1640 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, pkt_len); 1641 if (!skb) { 1642 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1643 return -ENOMEM; 1644 } 1645 skb_put_data(skb, section, pkt_len); 1646 1647 if (copy_key) 1648 memcpy(skb->data + pkt_len - info->key_len, 1649 info->key_addr, info->key_len); 1650 1651 ret = rtw89_h2c_tx(rtwdev, skb, true); 1652 if (ret) { 1653 rtw89_err(rtwdev, "failed to send h2c\n"); 1654 goto fail; 1655 } 1656 1657 section += pkt_len; 1658 residue_len -= pkt_len; 1659 } 1660 1661 return 0; 1662 fail: 1663 dev_kfree_skb_any(skb); 1664 1665 return ret; 1666 } 1667 1668 static enum rtw89_fwdl_check_type 1669 rtw89_fw_get_fwdl_chk_type_from_suit(struct rtw89_dev *rtwdev, 1670 const struct rtw89_fw_suit *fw_suit) 1671 { 1672 switch (fw_suit->type) { 1673 case RTW89_FW_BBMCU0: 1674 return RTW89_FWDL_CHECK_BB0_FWDL_DONE; 1675 case RTW89_FW_BBMCU1: 1676 return RTW89_FWDL_CHECK_BB1_FWDL_DONE; 1677 default: 1678 return RTW89_FWDL_CHECK_WCPU_FWDL_DONE; 1679 } 1680 } 1681 1682 static int rtw89_fw_download_main(struct rtw89_dev *rtwdev, 1683 const struct rtw89_fw_suit *fw_suit, 1684 struct rtw89_fw_bin_info *info) 1685 { 1686 struct rtw89_fw_hdr_section_info *section_info = info->section_info; 1687 const struct rtw89_chip_info *chip = rtwdev->chip; 1688 enum rtw89_fwdl_check_type chk_type; 1689 u8 section_num = info->section_num; 1690 int ret; 1691 1692 while (section_num--) { 1693 ret = __rtw89_fw_download_main(rtwdev, section_info); 1694 if (ret) 1695 return ret; 1696 section_info++; 1697 } 1698 1699 if (chip->chip_gen == RTW89_CHIP_AX) 1700 return 0; 1701 1702 chk_type = rtw89_fw_get_fwdl_chk_type_from_suit(rtwdev, fw_suit); 1703 ret = 
rtw89_fw_check_rdy(rtwdev, chk_type); 1704 if (ret) { 1705 rtw89_warn(rtwdev, "failed to download firmware type %u\n", 1706 fw_suit->type); 1707 return ret; 1708 } 1709 1710 return 0; 1711 } 1712 1713 static void rtw89_fw_prog_cnt_dump(struct rtw89_dev *rtwdev) 1714 { 1715 enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen; 1716 u32 addr = R_AX_DBG_PORT_SEL; 1717 u32 val32; 1718 u16 index; 1719 1720 if (chip_gen == RTW89_CHIP_BE) { 1721 addr = R_BE_WLCPU_PORT_PC; 1722 goto dump; 1723 } 1724 1725 rtw89_write32(rtwdev, R_AX_DBG_CTRL, 1726 FIELD_PREP(B_AX_DBG_SEL0, FW_PROG_CNTR_DBG_SEL) | 1727 FIELD_PREP(B_AX_DBG_SEL1, FW_PROG_CNTR_DBG_SEL)); 1728 rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_SEL_0XC0_MASK, MAC_DBG_SEL); 1729 1730 dump: 1731 for (index = 0; index < 15; index++) { 1732 val32 = rtw89_read32(rtwdev, addr); 1733 rtw89_err(rtwdev, "[ERR]fw PC = 0x%x\n", val32); 1734 fsleep(10); 1735 } 1736 } 1737 1738 static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev) 1739 { 1740 u32 val32; 1741 1742 val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL); 1743 rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32); 1744 1745 val32 = rtw89_read32(rtwdev, R_AX_BOOT_DBG); 1746 rtw89_err(rtwdev, "[ERR]fwdl 0x83F0 = 0x%x\n", val32); 1747 1748 rtw89_fw_prog_cnt_dump(rtwdev); 1749 } 1750 1751 static int rtw89_fw_download_suit(struct rtw89_dev *rtwdev, 1752 struct rtw89_fw_suit *fw_suit) 1753 { 1754 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 1755 struct rtw89_fw_bin_info info = {}; 1756 int ret; 1757 1758 ret = rtw89_fw_hdr_parser(rtwdev, fw_suit, &info); 1759 if (ret) { 1760 rtw89_err(rtwdev, "parse fw header fail\n"); 1761 return ret; 1762 } 1763 1764 rtw89_fwdl_secure_idmem_share_mode(rtwdev, info.idmem_share_mode); 1765 1766 if (rtwdev->chip->chip_id == RTL8922A && 1767 (fw_suit->type == RTW89_FW_NORMAL || fw_suit->type == RTW89_FW_WOWLAN)) 1768 rtw89_write32(rtwdev, R_BE_SECURE_BOOT_MALLOC_INFO, 0x20248000); 1769 1770 ret = mac->fwdl_check_path_ready(rtwdev, true); 1771 if (ret) { 1772 rtw89_err(rtwdev, "[ERR]H2C path ready\n"); 1773 return ret; 1774 } 1775 1776 ret = rtw89_fw_download_hdr(rtwdev, fw_suit, &info); 1777 if (ret) 1778 return ret; 1779 1780 ret = rtw89_fw_download_main(rtwdev, fw_suit, &info); 1781 if (ret) 1782 return ret; 1783 1784 return 0; 1785 } 1786 1787 static 1788 int __rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 1789 bool include_bb) 1790 { 1791 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 1792 struct rtw89_fw_info *fw_info = &rtwdev->fw; 1793 struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type); 1794 u8 bbmcu_nr = rtwdev->chip->bbmcu_nr; 1795 int ret; 1796 int i; 1797 1798 mac->disable_cpu(rtwdev); 1799 ret = mac->fwdl_enable_wcpu(rtwdev, 0, true, include_bb); 1800 if (ret) 1801 return ret; 1802 1803 ret = rtw89_fw_download_suit(rtwdev, fw_suit); 1804 if (ret) 1805 goto fwdl_err; 1806 1807 for (i = 0; i < bbmcu_nr && include_bb; i++) { 1808 fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_BBMCU0 + i); 1809 1810 ret = rtw89_fw_download_suit(rtwdev, fw_suit); 1811 if (ret) 1812 goto fwdl_err; 1813 } 1814 1815 fw_info->h2c_seq = 0; 1816 fw_info->rec_seq = 0; 1817 fw_info->h2c_counter = 0; 1818 fw_info->c2h_counter = 0; 1819 rtwdev->mac.rpwm_seq_num = RPWM_SEQ_NUM_MAX; 1820 rtwdev->mac.cpwm_seq_num = CPWM_SEQ_NUM_MAX; 1821 1822 mdelay(5); 1823 1824 ret = rtw89_fw_check_rdy(rtwdev, RTW89_FWDL_CHECK_FREERTOS_DONE); 1825 if (ret) { 1826 rtw89_warn(rtwdev, "download firmware fail\n"); 1827 goto fwdl_err; 1828 } 
1829 1830 return ret; 1831 1832 fwdl_err: 1833 rtw89_fw_dl_fail_dump(rtwdev); 1834 return ret; 1835 } 1836 1837 int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 1838 bool include_bb) 1839 { 1840 int retry; 1841 int ret; 1842 1843 for (retry = 0; retry < 5; retry++) { 1844 ret = __rtw89_fw_download(rtwdev, type, include_bb); 1845 if (!ret) 1846 return 0; 1847 } 1848 1849 return ret; 1850 } 1851 1852 int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev) 1853 { 1854 struct rtw89_fw_info *fw = &rtwdev->fw; 1855 1856 wait_for_completion(&fw->req.completion); 1857 if (!fw->req.firmware) 1858 return -EINVAL; 1859 1860 return 0; 1861 } 1862 1863 static int rtw89_load_firmware_req(struct rtw89_dev *rtwdev, 1864 struct rtw89_fw_req_info *req, 1865 const char *fw_name, bool nowarn) 1866 { 1867 int ret; 1868 1869 if (req->firmware) { 1870 rtw89_debug(rtwdev, RTW89_DBG_FW, 1871 "full firmware has been early requested\n"); 1872 complete_all(&req->completion); 1873 return 0; 1874 } 1875 1876 if (nowarn) 1877 ret = firmware_request_nowarn(&req->firmware, fw_name, rtwdev->dev); 1878 else 1879 ret = request_firmware(&req->firmware, fw_name, rtwdev->dev); 1880 1881 complete_all(&req->completion); 1882 1883 return ret; 1884 } 1885 1886 void rtw89_load_firmware_work(struct work_struct *work) 1887 { 1888 struct rtw89_dev *rtwdev = 1889 container_of(work, struct rtw89_dev, load_firmware_work); 1890 const struct rtw89_chip_info *chip = rtwdev->chip; 1891 char fw_name[64]; 1892 1893 rtw89_fw_get_filename(fw_name, sizeof(fw_name), 1894 chip->fw_basename, rtwdev->fw.fw_format); 1895 1896 rtw89_load_firmware_req(rtwdev, &rtwdev->fw.req, fw_name, false); 1897 } 1898 1899 static void rtw89_free_phy_tbl_from_elm(struct rtw89_phy_table *tbl) 1900 { 1901 if (!tbl) 1902 return; 1903 1904 kfree(tbl->regs); 1905 kfree(tbl); 1906 } 1907 1908 static void rtw89_unload_firmware_elements(struct rtw89_dev *rtwdev) 1909 { 1910 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; 1911 int i; 1912 1913 rtw89_free_phy_tbl_from_elm(elm_info->bb_tbl); 1914 rtw89_free_phy_tbl_from_elm(elm_info->bb_gain); 1915 for (i = 0; i < ARRAY_SIZE(elm_info->rf_radio); i++) 1916 rtw89_free_phy_tbl_from_elm(elm_info->rf_radio[i]); 1917 rtw89_free_phy_tbl_from_elm(elm_info->rf_nctl); 1918 1919 kfree(elm_info->txpwr_trk); 1920 kfree(elm_info->rfk_log_fmt); 1921 } 1922 1923 void rtw89_unload_firmware(struct rtw89_dev *rtwdev) 1924 { 1925 struct rtw89_fw_info *fw = &rtwdev->fw; 1926 1927 cancel_work_sync(&rtwdev->load_firmware_work); 1928 1929 if (fw->req.firmware) { 1930 release_firmware(fw->req.firmware); 1931 1932 /* assign NULL back in case rtw89_free_ieee80211_hw() 1933 * try to release the same one again. 
1934 */ 1935 fw->req.firmware = NULL; 1936 } 1937 1938 kfree(fw->log.fmts); 1939 rtw89_unload_firmware_elements(rtwdev); 1940 } 1941 1942 static u32 rtw89_fw_log_get_fmt_idx(struct rtw89_dev *rtwdev, u32 fmt_id) 1943 { 1944 struct rtw89_fw_log *fw_log = &rtwdev->fw.log; 1945 u32 i; 1946 1947 if (fmt_id > fw_log->last_fmt_id) 1948 return 0; 1949 1950 for (i = 0; i < fw_log->fmt_count; i++) { 1951 if (le32_to_cpu(fw_log->fmt_ids[i]) == fmt_id) 1952 return i; 1953 } 1954 return 0; 1955 } 1956 1957 static int rtw89_fw_log_create_fmts_dict(struct rtw89_dev *rtwdev) 1958 { 1959 struct rtw89_fw_log *log = &rtwdev->fw.log; 1960 const struct rtw89_fw_logsuit_hdr *suit_hdr; 1961 struct rtw89_fw_suit *suit = &log->suit; 1962 const void *fmts_ptr, *fmts_end_ptr; 1963 u32 fmt_count; 1964 int i; 1965 1966 suit_hdr = (const struct rtw89_fw_logsuit_hdr *)suit->data; 1967 fmt_count = le32_to_cpu(suit_hdr->count); 1968 log->fmt_ids = suit_hdr->ids; 1969 fmts_ptr = &suit_hdr->ids[fmt_count]; 1970 fmts_end_ptr = suit->data + suit->size; 1971 log->fmts = kcalloc(fmt_count, sizeof(char *), GFP_KERNEL); 1972 if (!log->fmts) 1973 return -ENOMEM; 1974 1975 for (i = 0; i < fmt_count; i++) { 1976 fmts_ptr = memchr_inv(fmts_ptr, 0, fmts_end_ptr - fmts_ptr); 1977 if (!fmts_ptr) 1978 break; 1979 1980 (*log->fmts)[i] = fmts_ptr; 1981 log->last_fmt_id = le32_to_cpu(log->fmt_ids[i]); 1982 log->fmt_count++; 1983 fmts_ptr += strlen(fmts_ptr); 1984 } 1985 1986 return 0; 1987 } 1988 1989 int rtw89_fw_log_prepare(struct rtw89_dev *rtwdev) 1990 { 1991 struct rtw89_fw_log *log = &rtwdev->fw.log; 1992 struct rtw89_fw_suit *suit = &log->suit; 1993 1994 if (!suit || !suit->data) { 1995 rtw89_debug(rtwdev, RTW89_DBG_FW, "no log format file\n"); 1996 return -EINVAL; 1997 } 1998 if (log->fmts) 1999 return 0; 2000 2001 return rtw89_fw_log_create_fmts_dict(rtwdev); 2002 } 2003 2004 static void rtw89_fw_log_dump_data(struct rtw89_dev *rtwdev, 2005 const struct rtw89_fw_c2h_log_fmt *log_fmt, 2006 u32 fmt_idx, u8 para_int, bool raw_data) 2007 { 2008 const char *(*fmts)[] = rtwdev->fw.log.fmts; 2009 char str_buf[RTW89_C2H_FW_LOG_STR_BUF_SIZE]; 2010 u32 args[RTW89_C2H_FW_LOG_MAX_PARA_NUM] = {0}; 2011 int i; 2012 2013 if (log_fmt->argc > RTW89_C2H_FW_LOG_MAX_PARA_NUM) { 2014 rtw89_warn(rtwdev, "C2H log: Arg count is unexpected %d\n", 2015 log_fmt->argc); 2016 return; 2017 } 2018 2019 if (para_int) 2020 for (i = 0 ; i < log_fmt->argc; i++) 2021 args[i] = le32_to_cpu(log_fmt->u.argv[i]); 2022 2023 if (raw_data) { 2024 if (para_int) 2025 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, 2026 "fw_enc(%d, %d, %d) %*ph", le32_to_cpu(log_fmt->fmt_id), 2027 para_int, log_fmt->argc, (int)sizeof(args), args); 2028 else 2029 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, 2030 "fw_enc(%d, %d, %d, %s)", le32_to_cpu(log_fmt->fmt_id), 2031 para_int, log_fmt->argc, log_fmt->u.raw); 2032 } else { 2033 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, (*fmts)[fmt_idx], 2034 args[0x0], args[0x1], args[0x2], args[0x3], args[0x4], 2035 args[0x5], args[0x6], args[0x7], args[0x8], args[0x9], 2036 args[0xa], args[0xb], args[0xc], args[0xd], args[0xe], 2037 args[0xf]); 2038 } 2039 2040 rtw89_info(rtwdev, "C2H log: %s", str_buf); 2041 } 2042 2043 void rtw89_fw_log_dump(struct rtw89_dev *rtwdev, u8 *buf, u32 len) 2044 { 2045 const struct rtw89_fw_c2h_log_fmt *log_fmt; 2046 u8 para_int; 2047 u32 fmt_idx; 2048 2049 if (len < RTW89_C2H_HEADER_LEN) { 2050 rtw89_err(rtwdev, "c2h log length is wrong!\n"); 2051 return; 2052 } 2053 2054 buf += RTW89_C2H_HEADER_LEN; 2055 len -= 
RTW89_C2H_HEADER_LEN; 2056 log_fmt = (const struct rtw89_fw_c2h_log_fmt *)buf; 2057 2058 if (len < RTW89_C2H_FW_FORMATTED_LOG_MIN_LEN) 2059 goto plain_log; 2060 2061 if (log_fmt->signature != cpu_to_le16(RTW89_C2H_FW_LOG_SIGNATURE)) 2062 goto plain_log; 2063 2064 if (!rtwdev->fw.log.fmts) 2065 return; 2066 2067 para_int = u8_get_bits(log_fmt->feature, RTW89_C2H_FW_LOG_FEATURE_PARA_INT); 2068 fmt_idx = rtw89_fw_log_get_fmt_idx(rtwdev, le32_to_cpu(log_fmt->fmt_id)); 2069 2070 if (!para_int && log_fmt->argc != 0 && fmt_idx != 0) 2071 rtw89_info(rtwdev, "C2H log: %s%s", 2072 (*rtwdev->fw.log.fmts)[fmt_idx], log_fmt->u.raw); 2073 else if (fmt_idx != 0 && para_int) 2074 rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, false); 2075 else 2076 rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, true); 2077 return; 2078 2079 plain_log: 2080 rtw89_info(rtwdev, "C2H log: %.*s", len, buf); 2081 2082 } 2083 2084 #define H2C_CAM_LEN 60 2085 int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 2086 struct rtw89_sta_link *rtwsta_link, const u8 *scan_mac_addr) 2087 { 2088 struct sk_buff *skb; 2089 int ret; 2090 2091 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CAM_LEN); 2092 if (!skb) { 2093 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 2094 return -ENOMEM; 2095 } 2096 skb_put(skb, H2C_CAM_LEN); 2097 rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif_link, rtwsta_link, scan_mac_addr, 2098 skb->data); 2099 rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif_link, rtwsta_link, skb->data); 2100 2101 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2102 H2C_CAT_MAC, 2103 H2C_CL_MAC_ADDR_CAM_UPDATE, 2104 H2C_FUNC_MAC_ADDR_CAM_UPD, 0, 1, 2105 H2C_CAM_LEN); 2106 2107 ret = rtw89_h2c_tx(rtwdev, skb, false); 2108 if (ret) { 2109 rtw89_err(rtwdev, "failed to send h2c\n"); 2110 goto fail; 2111 } 2112 2113 return 0; 2114 fail: 2115 dev_kfree_skb_any(skb); 2116 2117 return ret; 2118 } 2119 2120 int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev, 2121 struct rtw89_vif_link *rtwvif_link, 2122 struct rtw89_sta_link *rtwsta_link) 2123 { 2124 struct rtw89_h2c_dctlinfo_ud_v1 *h2c; 2125 u32 len = sizeof(*h2c); 2126 struct sk_buff *skb; 2127 int ret; 2128 2129 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2130 if (!skb) { 2131 rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n"); 2132 return -ENOMEM; 2133 } 2134 skb_put(skb, len); 2135 h2c = (struct rtw89_h2c_dctlinfo_ud_v1 *)skb->data; 2136 2137 rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif_link, rtwsta_link, h2c); 2138 2139 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2140 H2C_CAT_MAC, 2141 H2C_CL_MAC_FR_EXCHG, 2142 H2C_FUNC_MAC_DCTLINFO_UD_V1, 0, 0, 2143 len); 2144 2145 ret = rtw89_h2c_tx(rtwdev, skb, false); 2146 if (ret) { 2147 rtw89_err(rtwdev, "failed to send h2c\n"); 2148 goto fail; 2149 } 2150 2151 return 0; 2152 fail: 2153 dev_kfree_skb_any(skb); 2154 2155 return ret; 2156 } 2157 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1); 2158 2159 int rtw89_fw_h2c_dctl_sec_cam_v2(struct rtw89_dev *rtwdev, 2160 struct rtw89_vif_link *rtwvif_link, 2161 struct rtw89_sta_link *rtwsta_link) 2162 { 2163 struct rtw89_h2c_dctlinfo_ud_v2 *h2c; 2164 u32 len = sizeof(*h2c); 2165 struct sk_buff *skb; 2166 int ret; 2167 2168 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2169 if (!skb) { 2170 rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n"); 2171 return -ENOMEM; 2172 } 2173 skb_put(skb, len); 2174 h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data; 2175 2176 rtw89_cam_fill_dctl_sec_cam_info_v2(rtwdev, 
rtwvif_link, rtwsta_link, h2c); 2177 2178 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2179 H2C_CAT_MAC, 2180 H2C_CL_MAC_FR_EXCHG, 2181 H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0, 2182 len); 2183 2184 ret = rtw89_h2c_tx(rtwdev, skb, false); 2185 if (ret) { 2186 rtw89_err(rtwdev, "failed to send h2c\n"); 2187 goto fail; 2188 } 2189 2190 return 0; 2191 fail: 2192 dev_kfree_skb_any(skb); 2193 2194 return ret; 2195 } 2196 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v2); 2197 2198 int rtw89_fw_h2c_default_dmac_tbl_v2(struct rtw89_dev *rtwdev, 2199 struct rtw89_vif_link *rtwvif_link, 2200 struct rtw89_sta_link *rtwsta_link) 2201 { 2202 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id; 2203 struct rtw89_h2c_dctlinfo_ud_v2 *h2c; 2204 u32 len = sizeof(*h2c); 2205 struct sk_buff *skb; 2206 int ret; 2207 2208 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2209 if (!skb) { 2210 rtw89_err(rtwdev, "failed to alloc skb for dctl v2\n"); 2211 return -ENOMEM; 2212 } 2213 skb_put(skb, len); 2214 h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data; 2215 2216 h2c->c0 = le32_encode_bits(mac_id, DCTLINFO_V2_C0_MACID) | 2217 le32_encode_bits(1, DCTLINFO_V2_C0_OP); 2218 2219 h2c->m0 = cpu_to_le32(DCTLINFO_V2_W0_ALL); 2220 h2c->m1 = cpu_to_le32(DCTLINFO_V2_W1_ALL); 2221 h2c->m2 = cpu_to_le32(DCTLINFO_V2_W2_ALL); 2222 h2c->m3 = cpu_to_le32(DCTLINFO_V2_W3_ALL); 2223 h2c->m4 = cpu_to_le32(DCTLINFO_V2_W4_ALL); 2224 h2c->m5 = cpu_to_le32(DCTLINFO_V2_W5_ALL); 2225 h2c->m6 = cpu_to_le32(DCTLINFO_V2_W6_ALL); 2226 h2c->m7 = cpu_to_le32(DCTLINFO_V2_W7_ALL); 2227 h2c->m8 = cpu_to_le32(DCTLINFO_V2_W8_ALL); 2228 h2c->m9 = cpu_to_le32(DCTLINFO_V2_W9_ALL); 2229 h2c->m10 = cpu_to_le32(DCTLINFO_V2_W10_ALL); 2230 h2c->m11 = cpu_to_le32(DCTLINFO_V2_W11_ALL); 2231 h2c->m12 = cpu_to_le32(DCTLINFO_V2_W12_ALL); 2232 2233 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2234 H2C_CAT_MAC, 2235 H2C_CL_MAC_FR_EXCHG, 2236 H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0, 2237 len); 2238 2239 ret = rtw89_h2c_tx(rtwdev, skb, false); 2240 if (ret) { 2241 rtw89_err(rtwdev, "failed to send h2c\n"); 2242 goto fail; 2243 } 2244 2245 return 0; 2246 fail: 2247 dev_kfree_skb_any(skb); 2248 2249 return ret; 2250 } 2251 EXPORT_SYMBOL(rtw89_fw_h2c_default_dmac_tbl_v2); 2252 2253 int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, 2254 struct rtw89_vif_link *rtwvif_link, 2255 struct rtw89_sta_link *rtwsta_link, 2256 bool valid, struct ieee80211_ampdu_params *params) 2257 { 2258 const struct rtw89_chip_info *chip = rtwdev->chip; 2259 struct rtw89_h2c_ba_cam *h2c; 2260 u8 macid = rtwsta_link->mac_id; 2261 u32 len = sizeof(*h2c); 2262 struct sk_buff *skb; 2263 u8 entry_idx; 2264 int ret; 2265 2266 ret = valid ? 2267 rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta_link, params->tid, 2268 &entry_idx) : 2269 rtw89_core_release_sta_ba_entry(rtwdev, rtwsta_link, params->tid, 2270 &entry_idx); 2271 if (ret) { 2272 /* it still works even if we don't have static BA CAM, because 2273 * hardware can create dynamic BA CAM automatically. 2274 */ 2275 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 2276 "failed to %s entry tid=%d for h2c ba cam\n", 2277 valid ? 
"alloc" : "free", params->tid); 2278 return 0; 2279 } 2280 2281 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2282 if (!skb) { 2283 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n"); 2284 return -ENOMEM; 2285 } 2286 skb_put(skb, len); 2287 h2c = (struct rtw89_h2c_ba_cam *)skb->data; 2288 2289 h2c->w0 = le32_encode_bits(macid, RTW89_H2C_BA_CAM_W0_MACID); 2290 if (chip->bacam_ver == RTW89_BACAM_V0_EXT) 2291 h2c->w1 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1); 2292 else 2293 h2c->w0 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W0_ENTRY_IDX); 2294 if (!valid) 2295 goto end; 2296 h2c->w0 |= le32_encode_bits(valid, RTW89_H2C_BA_CAM_W0_VALID) | 2297 le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_W0_TID); 2298 if (params->buf_size > 64) 2299 h2c->w0 |= le32_encode_bits(4, RTW89_H2C_BA_CAM_W0_BMAP_SIZE); 2300 else 2301 h2c->w0 |= le32_encode_bits(0, RTW89_H2C_BA_CAM_W0_BMAP_SIZE); 2302 /* If init req is set, hw will set the ssn */ 2303 h2c->w0 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_INIT_REQ) | 2304 le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_W0_SSN); 2305 2306 if (chip->bacam_ver == RTW89_BACAM_V0_EXT) { 2307 h2c->w1 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W1_STD_EN) | 2308 le32_encode_bits(rtwvif_link->mac_idx, 2309 RTW89_H2C_BA_CAM_W1_BAND); 2310 } 2311 2312 end: 2313 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2314 H2C_CAT_MAC, 2315 H2C_CL_BA_CAM, 2316 H2C_FUNC_MAC_BA_CAM, 0, 1, 2317 len); 2318 2319 ret = rtw89_h2c_tx(rtwdev, skb, false); 2320 if (ret) { 2321 rtw89_err(rtwdev, "failed to send h2c\n"); 2322 goto fail; 2323 } 2324 2325 return 0; 2326 fail: 2327 dev_kfree_skb_any(skb); 2328 2329 return ret; 2330 } 2331 EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam); 2332 2333 static int rtw89_fw_h2c_init_ba_cam_v0_ext(struct rtw89_dev *rtwdev, 2334 u8 entry_idx, u8 uid) 2335 { 2336 struct rtw89_h2c_ba_cam *h2c; 2337 u32 len = sizeof(*h2c); 2338 struct sk_buff *skb; 2339 int ret; 2340 2341 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2342 if (!skb) { 2343 rtw89_err(rtwdev, "failed to alloc skb for dynamic h2c ba cam\n"); 2344 return -ENOMEM; 2345 } 2346 skb_put(skb, len); 2347 h2c = (struct rtw89_h2c_ba_cam *)skb->data; 2348 2349 h2c->w0 = le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_VALID); 2350 h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1) | 2351 le32_encode_bits(uid, RTW89_H2C_BA_CAM_W1_UID) | 2352 le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_BAND) | 2353 le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_STD_EN); 2354 2355 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2356 H2C_CAT_MAC, 2357 H2C_CL_BA_CAM, 2358 H2C_FUNC_MAC_BA_CAM, 0, 1, 2359 len); 2360 2361 ret = rtw89_h2c_tx(rtwdev, skb, false); 2362 if (ret) { 2363 rtw89_err(rtwdev, "failed to send h2c\n"); 2364 goto fail; 2365 } 2366 2367 return 0; 2368 fail: 2369 dev_kfree_skb_any(skb); 2370 2371 return ret; 2372 } 2373 2374 void rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(struct rtw89_dev *rtwdev) 2375 { 2376 const struct rtw89_chip_info *chip = rtwdev->chip; 2377 u8 entry_idx = chip->bacam_num; 2378 u8 uid = 0; 2379 int i; 2380 2381 for (i = 0; i < chip->bacam_dynamic_num; i++) { 2382 rtw89_fw_h2c_init_ba_cam_v0_ext(rtwdev, entry_idx, uid); 2383 entry_idx++; 2384 uid++; 2385 } 2386 } 2387 2388 int rtw89_fw_h2c_ba_cam_v1(struct rtw89_dev *rtwdev, 2389 struct rtw89_vif_link *rtwvif_link, 2390 struct rtw89_sta_link *rtwsta_link, 2391 bool valid, struct ieee80211_ampdu_params *params) 2392 { 2393 const struct rtw89_chip_info *chip = rtwdev->chip; 2394 struct rtw89_h2c_ba_cam_v1 *h2c; 2395 u8 
macid = rtwsta_link->mac_id; 2396 u32 len = sizeof(*h2c); 2397 struct sk_buff *skb; 2398 u8 entry_idx; 2399 u8 bmap_size; 2400 int ret; 2401 2402 ret = valid ? 2403 rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta_link, params->tid, 2404 &entry_idx) : 2405 rtw89_core_release_sta_ba_entry(rtwdev, rtwsta_link, params->tid, 2406 &entry_idx); 2407 if (ret) { 2408 /* it still works even if we don't have static BA CAM, because 2409 * hardware can create dynamic BA CAM automatically. 2410 */ 2411 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 2412 "failed to %s entry tid=%d for h2c ba cam\n", 2413 valid ? "alloc" : "free", params->tid); 2414 return 0; 2415 } 2416 2417 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2418 if (!skb) { 2419 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n"); 2420 return -ENOMEM; 2421 } 2422 skb_put(skb, len); 2423 h2c = (struct rtw89_h2c_ba_cam_v1 *)skb->data; 2424 2425 if (params->buf_size > 512) 2426 bmap_size = 10; 2427 else if (params->buf_size > 256) 2428 bmap_size = 8; 2429 else if (params->buf_size > 64) 2430 bmap_size = 4; 2431 else 2432 bmap_size = 0; 2433 2434 h2c->w0 = le32_encode_bits(valid, RTW89_H2C_BA_CAM_V1_W0_VALID) | 2435 le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W0_INIT_REQ) | 2436 le32_encode_bits(macid, RTW89_H2C_BA_CAM_V1_W0_MACID_MASK) | 2437 le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_V1_W0_TID_MASK) | 2438 le32_encode_bits(bmap_size, RTW89_H2C_BA_CAM_V1_W0_BMAP_SIZE_MASK) | 2439 le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_V1_W0_SSN_MASK); 2440 2441 entry_idx += chip->bacam_dynamic_num; /* std entry right after dynamic ones */ 2442 h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_V1_W1_ENTRY_IDX_MASK) | 2443 le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W1_STD_ENTRY_EN) | 2444 le32_encode_bits(!!rtwvif_link->mac_idx, 2445 RTW89_H2C_BA_CAM_V1_W1_BAND_SEL); 2446 2447 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2448 H2C_CAT_MAC, 2449 H2C_CL_BA_CAM, 2450 H2C_FUNC_MAC_BA_CAM_V1, 0, 1, 2451 len); 2452 2453 ret = rtw89_h2c_tx(rtwdev, skb, false); 2454 if (ret) { 2455 rtw89_err(rtwdev, "failed to send h2c\n"); 2456 goto fail; 2457 } 2458 2459 return 0; 2460 fail: 2461 dev_kfree_skb_any(skb); 2462 2463 return ret; 2464 } 2465 EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam_v1); 2466 2467 int rtw89_fw_h2c_init_ba_cam_users(struct rtw89_dev *rtwdev, u8 users, 2468 u8 offset, u8 mac_idx) 2469 { 2470 struct rtw89_h2c_ba_cam_init *h2c; 2471 u32 len = sizeof(*h2c); 2472 struct sk_buff *skb; 2473 int ret; 2474 2475 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2476 if (!skb) { 2477 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam init\n"); 2478 return -ENOMEM; 2479 } 2480 skb_put(skb, len); 2481 h2c = (struct rtw89_h2c_ba_cam_init *)skb->data; 2482 2483 h2c->w0 = le32_encode_bits(users, RTW89_H2C_BA_CAM_INIT_USERS_MASK) | 2484 le32_encode_bits(offset, RTW89_H2C_BA_CAM_INIT_OFFSET_MASK) | 2485 le32_encode_bits(mac_idx, RTW89_H2C_BA_CAM_INIT_BAND_SEL); 2486 2487 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2488 H2C_CAT_MAC, 2489 H2C_CL_BA_CAM, 2490 H2C_FUNC_MAC_BA_CAM_INIT, 0, 1, 2491 len); 2492 2493 ret = rtw89_h2c_tx(rtwdev, skb, false); 2494 if (ret) { 2495 rtw89_err(rtwdev, "failed to send h2c\n"); 2496 goto fail; 2497 } 2498 2499 return 0; 2500 fail: 2501 dev_kfree_skb_any(skb); 2502 2503 return ret; 2504 } 2505 2506 #define H2C_LOG_CFG_LEN 12 2507 int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable) 2508 { 2509 struct sk_buff *skb; 2510 u32 comp = 0; 2511 int ret; 2512 2513 if (enable) 2514 comp = BIT(RTW89_FW_LOG_COMP_INIT) | 
BIT(RTW89_FW_LOG_COMP_TASK) | 2515 BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) | 2516 BIT(RTW89_FW_LOG_COMP_MLO) | BIT(RTW89_FW_LOG_COMP_SCAN); 2517 2518 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN); 2519 if (!skb) { 2520 rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n"); 2521 return -ENOMEM; 2522 } 2523 2524 skb_put(skb, H2C_LOG_CFG_LEN); 2525 SET_LOG_CFG_LEVEL(skb->data, RTW89_FW_LOG_LEVEL_LOUD); 2526 SET_LOG_CFG_PATH(skb->data, BIT(RTW89_FW_LOG_LEVEL_C2H)); 2527 SET_LOG_CFG_COMP(skb->data, comp); 2528 SET_LOG_CFG_COMP_EXT(skb->data, 0); 2529 2530 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2531 H2C_CAT_MAC, 2532 H2C_CL_FW_INFO, 2533 H2C_FUNC_LOG_CFG, 0, 0, 2534 H2C_LOG_CFG_LEN); 2535 2536 ret = rtw89_h2c_tx(rtwdev, skb, false); 2537 if (ret) { 2538 rtw89_err(rtwdev, "failed to send h2c\n"); 2539 goto fail; 2540 } 2541 2542 return 0; 2543 fail: 2544 dev_kfree_skb_any(skb); 2545 2546 return ret; 2547 } 2548 2549 static struct sk_buff *rtw89_eapol_get(struct rtw89_dev *rtwdev, 2550 struct rtw89_vif_link *rtwvif_link) 2551 { 2552 static const u8 gtkbody[] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00, 0x88, 2553 0x8E, 0x01, 0x03, 0x00, 0x5F, 0x02, 0x03}; 2554 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev); 2555 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 2556 struct rtw89_eapol_2_of_2 *eapol_pkt; 2557 struct ieee80211_bss_conf *bss_conf; 2558 struct ieee80211_hdr_3addr *hdr; 2559 struct sk_buff *skb; 2560 u8 key_des_ver; 2561 2562 if (rtw_wow->ptk_alg == 3) 2563 key_des_ver = 1; 2564 else if (rtw_wow->akm == 1 || rtw_wow->akm == 2) 2565 key_des_ver = 2; 2566 else if (rtw_wow->akm > 2 && rtw_wow->akm < 7) 2567 key_des_ver = 3; 2568 else 2569 key_des_ver = 0; 2570 2571 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*eapol_pkt)); 2572 if (!skb) 2573 return NULL; 2574 2575 hdr = skb_put_zero(skb, sizeof(*hdr)); 2576 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | 2577 IEEE80211_FCTL_TODS | 2578 IEEE80211_FCTL_PROTECTED); 2579 2580 rcu_read_lock(); 2581 2582 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 2583 2584 ether_addr_copy(hdr->addr1, bss_conf->bssid); 2585 ether_addr_copy(hdr->addr2, bss_conf->addr); 2586 ether_addr_copy(hdr->addr3, bss_conf->bssid); 2587 2588 rcu_read_unlock(); 2589 2590 skb_put_zero(skb, sec_hdr_len); 2591 2592 eapol_pkt = skb_put_zero(skb, sizeof(*eapol_pkt)); 2593 memcpy(eapol_pkt->gtkbody, gtkbody, sizeof(gtkbody)); 2594 eapol_pkt->key_des_ver = key_des_ver; 2595 2596 return skb; 2597 } 2598 2599 static struct sk_buff *rtw89_sa_query_get(struct rtw89_dev *rtwdev, 2600 struct rtw89_vif_link *rtwvif_link) 2601 { 2602 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev); 2603 struct ieee80211_bss_conf *bss_conf; 2604 struct ieee80211_hdr_3addr *hdr; 2605 struct rtw89_sa_query *sa_query; 2606 struct sk_buff *skb; 2607 2608 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*sa_query)); 2609 if (!skb) 2610 return NULL; 2611 2612 hdr = skb_put_zero(skb, sizeof(*hdr)); 2613 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 2614 IEEE80211_STYPE_ACTION | 2615 IEEE80211_FCTL_PROTECTED); 2616 2617 rcu_read_lock(); 2618 2619 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 2620 2621 ether_addr_copy(hdr->addr1, bss_conf->bssid); 2622 ether_addr_copy(hdr->addr2, bss_conf->addr); 2623 ether_addr_copy(hdr->addr3, bss_conf->bssid); 2624 2625 rcu_read_unlock(); 2626 2627 skb_put_zero(skb, sec_hdr_len); 2628 2629 sa_query = skb_put_zero(skb, sizeof(*sa_query)); 2630 sa_query->category = 
WLAN_CATEGORY_SA_QUERY; 2631 sa_query->action = WLAN_ACTION_SA_QUERY_RESPONSE; 2632 2633 return skb; 2634 } 2635 2636 static struct sk_buff *rtw89_arp_response_get(struct rtw89_dev *rtwdev, 2637 struct rtw89_vif_link *rtwvif_link) 2638 { 2639 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 2640 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev); 2641 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 2642 struct ieee80211_hdr_3addr *hdr; 2643 struct rtw89_arp_rsp *arp_skb; 2644 struct arphdr *arp_hdr; 2645 struct sk_buff *skb; 2646 __le16 fc; 2647 2648 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*arp_skb)); 2649 if (!skb) 2650 return NULL; 2651 2652 hdr = skb_put_zero(skb, sizeof(*hdr)); 2653 2654 if (rtw_wow->ptk_alg) 2655 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS | 2656 IEEE80211_FCTL_PROTECTED); 2657 else 2658 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS); 2659 2660 hdr->frame_control = fc; 2661 ether_addr_copy(hdr->addr1, rtwvif_link->bssid); 2662 ether_addr_copy(hdr->addr2, rtwvif_link->mac_addr); 2663 ether_addr_copy(hdr->addr3, rtwvif_link->bssid); 2664 2665 skb_put_zero(skb, sec_hdr_len); 2666 2667 arp_skb = skb_put_zero(skb, sizeof(*arp_skb)); 2668 memcpy(arp_skb->llc_hdr, rfc1042_header, sizeof(rfc1042_header)); 2669 arp_skb->llc_type = htons(ETH_P_ARP); 2670 2671 arp_hdr = &arp_skb->arp_hdr; 2672 arp_hdr->ar_hrd = htons(ARPHRD_ETHER); 2673 arp_hdr->ar_pro = htons(ETH_P_IP); 2674 arp_hdr->ar_hln = ETH_ALEN; 2675 arp_hdr->ar_pln = 4; 2676 arp_hdr->ar_op = htons(ARPOP_REPLY); 2677 2678 ether_addr_copy(arp_skb->sender_hw, rtwvif_link->mac_addr); 2679 arp_skb->sender_ip = rtwvif->ip_addr; 2680 2681 return skb; 2682 } 2683 2684 static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev, 2685 struct rtw89_vif_link *rtwvif_link, 2686 enum rtw89_fw_pkt_ofld_type type, 2687 u8 *id) 2688 { 2689 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 2690 int link_id = ieee80211_vif_is_mld(vif) ? 
rtwvif_link->link_id : -1; 2691 struct rtw89_pktofld_info *info; 2692 struct sk_buff *skb; 2693 int ret; 2694 2695 info = kzalloc(sizeof(*info), GFP_KERNEL); 2696 if (!info) 2697 return -ENOMEM; 2698 2699 switch (type) { 2700 case RTW89_PKT_OFLD_TYPE_PS_POLL: 2701 skb = ieee80211_pspoll_get(rtwdev->hw, vif); 2702 break; 2703 case RTW89_PKT_OFLD_TYPE_PROBE_RSP: 2704 skb = ieee80211_proberesp_get(rtwdev->hw, vif); 2705 break; 2706 case RTW89_PKT_OFLD_TYPE_NULL_DATA: 2707 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, false); 2708 break; 2709 case RTW89_PKT_OFLD_TYPE_QOS_NULL: 2710 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, true); 2711 break; 2712 case RTW89_PKT_OFLD_TYPE_EAPOL_KEY: 2713 skb = rtw89_eapol_get(rtwdev, rtwvif_link); 2714 break; 2715 case RTW89_PKT_OFLD_TYPE_SA_QUERY: 2716 skb = rtw89_sa_query_get(rtwdev, rtwvif_link); 2717 break; 2718 case RTW89_PKT_OFLD_TYPE_ARP_RSP: 2719 skb = rtw89_arp_response_get(rtwdev, rtwvif_link); 2720 break; 2721 default: 2722 goto err; 2723 } 2724 2725 if (!skb) 2726 goto err; 2727 2728 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb); 2729 kfree_skb(skb); 2730 2731 if (ret) 2732 goto err; 2733 2734 list_add_tail(&info->list, &rtwvif_link->general_pkt_list); 2735 *id = info->id; 2736 return 0; 2737 2738 err: 2739 kfree(info); 2740 return -ENOMEM; 2741 } 2742 2743 void rtw89_fw_release_general_pkt_list_vif(struct rtw89_dev *rtwdev, 2744 struct rtw89_vif_link *rtwvif_link, 2745 bool notify_fw) 2746 { 2747 struct list_head *pkt_list = &rtwvif_link->general_pkt_list; 2748 struct rtw89_pktofld_info *info, *tmp; 2749 2750 list_for_each_entry_safe(info, tmp, pkt_list, list) { 2751 if (notify_fw) 2752 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id); 2753 else 2754 rtw89_core_release_bit_map(rtwdev->pkt_offload, info->id); 2755 list_del(&info->list); 2756 kfree(info); 2757 } 2758 } 2759 2760 void rtw89_fw_release_general_pkt_list(struct rtw89_dev *rtwdev, bool notify_fw) 2761 { 2762 struct rtw89_vif_link *rtwvif_link; 2763 struct rtw89_vif *rtwvif; 2764 unsigned int link_id; 2765 2766 rtw89_for_each_rtwvif(rtwdev, rtwvif) 2767 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) 2768 rtw89_fw_release_general_pkt_list_vif(rtwdev, rtwvif_link, 2769 notify_fw); 2770 } 2771 2772 #define H2C_GENERAL_PKT_LEN 6 2773 #define H2C_GENERAL_PKT_ID_UND 0xff 2774 int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, 2775 struct rtw89_vif_link *rtwvif_link, u8 macid) 2776 { 2777 u8 pkt_id_ps_poll = H2C_GENERAL_PKT_ID_UND; 2778 u8 pkt_id_null = H2C_GENERAL_PKT_ID_UND; 2779 u8 pkt_id_qos_null = H2C_GENERAL_PKT_ID_UND; 2780 struct sk_buff *skb; 2781 int ret; 2782 2783 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 2784 RTW89_PKT_OFLD_TYPE_PS_POLL, &pkt_id_ps_poll); 2785 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 2786 RTW89_PKT_OFLD_TYPE_NULL_DATA, &pkt_id_null); 2787 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 2788 RTW89_PKT_OFLD_TYPE_QOS_NULL, &pkt_id_qos_null); 2789 2790 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_GENERAL_PKT_LEN); 2791 if (!skb) { 2792 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 2793 return -ENOMEM; 2794 } 2795 skb_put(skb, H2C_GENERAL_PKT_LEN); 2796 SET_GENERAL_PKT_MACID(skb->data, macid); 2797 SET_GENERAL_PKT_PROBRSP_ID(skb->data, H2C_GENERAL_PKT_ID_UND); 2798 SET_GENERAL_PKT_PSPOLL_ID(skb->data, pkt_id_ps_poll); 2799 SET_GENERAL_PKT_NULL_ID(skb->data, pkt_id_null); 2800 SET_GENERAL_PKT_QOS_NULL_ID(skb->data, pkt_id_qos_null); 2801 SET_GENERAL_PKT_CTS2SELF_ID(skb->data, 
H2C_GENERAL_PKT_ID_UND); 2802 2803 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2804 H2C_CAT_MAC, 2805 H2C_CL_FW_INFO, 2806 H2C_FUNC_MAC_GENERAL_PKT, 0, 1, 2807 H2C_GENERAL_PKT_LEN); 2808 2809 ret = rtw89_h2c_tx(rtwdev, skb, false); 2810 if (ret) { 2811 rtw89_err(rtwdev, "failed to send h2c\n"); 2812 goto fail; 2813 } 2814 2815 return 0; 2816 fail: 2817 dev_kfree_skb_any(skb); 2818 2819 return ret; 2820 } 2821 2822 #define H2C_LPS_PARM_LEN 8 2823 int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev, 2824 struct rtw89_lps_parm *lps_param) 2825 { 2826 struct sk_buff *skb; 2827 int ret; 2828 2829 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LPS_PARM_LEN); 2830 if (!skb) { 2831 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 2832 return -ENOMEM; 2833 } 2834 skb_put(skb, H2C_LPS_PARM_LEN); 2835 2836 SET_LPS_PARM_MACID(skb->data, lps_param->macid); 2837 SET_LPS_PARM_PSMODE(skb->data, lps_param->psmode); 2838 SET_LPS_PARM_LASTRPWM(skb->data, lps_param->lastrpwm); 2839 SET_LPS_PARM_RLBM(skb->data, 1); 2840 SET_LPS_PARM_SMARTPS(skb->data, 1); 2841 SET_LPS_PARM_AWAKEINTERVAL(skb->data, 1); 2842 SET_LPS_PARM_VOUAPSD(skb->data, 0); 2843 SET_LPS_PARM_VIUAPSD(skb->data, 0); 2844 SET_LPS_PARM_BEUAPSD(skb->data, 0); 2845 SET_LPS_PARM_BKUAPSD(skb->data, 0); 2846 2847 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2848 H2C_CAT_MAC, 2849 H2C_CL_MAC_PS, 2850 H2C_FUNC_MAC_LPS_PARM, 0, !lps_param->psmode, 2851 H2C_LPS_PARM_LEN); 2852 2853 ret = rtw89_h2c_tx(rtwdev, skb, false); 2854 if (ret) { 2855 rtw89_err(rtwdev, "failed to send h2c\n"); 2856 goto fail; 2857 } 2858 2859 return 0; 2860 fail: 2861 dev_kfree_skb_any(skb); 2862 2863 return ret; 2864 } 2865 2866 int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 2867 { 2868 const struct rtw89_chip_info *chip = rtwdev->chip; 2869 const struct rtw89_chan *chan; 2870 struct rtw89_vif_link *rtwvif_link; 2871 struct rtw89_h2c_lps_ch_info *h2c; 2872 u32 len = sizeof(*h2c); 2873 unsigned int link_id; 2874 struct sk_buff *skb; 2875 bool no_chan = true; 2876 u8 phy_idx; 2877 u32 done; 2878 int ret; 2879 2880 if (chip->chip_gen != RTW89_CHIP_BE) 2881 return 0; 2882 2883 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2884 if (!skb) { 2885 rtw89_err(rtwdev, "failed to alloc skb for h2c lps_ch_info\n"); 2886 return -ENOMEM; 2887 } 2888 skb_put(skb, len); 2889 h2c = (struct rtw89_h2c_lps_ch_info *)skb->data; 2890 2891 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) { 2892 phy_idx = rtwvif_link->phy_idx; 2893 if (phy_idx >= ARRAY_SIZE(h2c->info)) 2894 continue; 2895 2896 chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx); 2897 no_chan = false; 2898 2899 h2c->info[phy_idx].central_ch = chan->channel; 2900 h2c->info[phy_idx].pri_ch = chan->primary_channel; 2901 h2c->info[phy_idx].band = chan->band_type; 2902 h2c->info[phy_idx].bw = chan->band_width; 2903 } 2904 2905 if (no_chan) { 2906 rtw89_err(rtwdev, "no chan for h2c lps_ch_info\n"); 2907 ret = -ENOENT; 2908 goto fail; 2909 } 2910 2911 h2c->mlo_dbcc_mode_lps = cpu_to_le32(rtwdev->mlo_dbcc_mode); 2912 2913 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2914 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM, 2915 H2C_FUNC_FW_LPS_CH_INFO, 0, 0, len); 2916 2917 rtw89_phy_write32_mask(rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT, 0); 2918 ret = rtw89_h2c_tx(rtwdev, skb, false); 2919 if (ret) { 2920 rtw89_err(rtwdev, "failed to send h2c\n"); 2921 goto fail; 2922 } 2923 2924 ret = read_poll_timeout(rtw89_phy_read32_mask, done, done, 50, 5000, 2925 true, rtwdev, R_CHK_LPS_STAT, 
B_CHK_LPS_STAT); 2926 if (ret) 2927 rtw89_warn(rtwdev, "h2c_lps_ch_info done polling timeout\n"); 2928 2929 return 0; 2930 fail: 2931 dev_kfree_skb_any(skb); 2932 2933 return ret; 2934 } 2935 2936 int rtw89_fw_h2c_lps_ml_cmn_info(struct rtw89_dev *rtwdev, 2937 struct rtw89_vif *rtwvif) 2938 { 2939 const struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be; 2940 struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.cur_pkt_stat; 2941 static const u8 bcn_bw_ofst[] = {0, 0, 0, 3, 6, 9, 0, 12}; 2942 const struct rtw89_chip_info *chip = rtwdev->chip; 2943 struct rtw89_efuse *efuse = &rtwdev->efuse; 2944 struct rtw89_h2c_lps_ml_cmn_info *h2c; 2945 struct rtw89_vif_link *rtwvif_link; 2946 const struct rtw89_chan *chan; 2947 u8 bw_idx = RTW89_BB_BW_20_40; 2948 u32 len = sizeof(*h2c); 2949 unsigned int link_id; 2950 struct sk_buff *skb; 2951 u8 beacon_bw_ofst; 2952 u8 gain_band; 2953 u32 done; 2954 u8 path; 2955 int ret; 2956 int i; 2957 2958 if (chip->chip_gen != RTW89_CHIP_BE) 2959 return 0; 2960 2961 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2962 if (!skb) { 2963 rtw89_err(rtwdev, "failed to alloc skb for h2c lps_ml_cmn_info\n"); 2964 return -ENOMEM; 2965 } 2966 skb_put(skb, len); 2967 h2c = (struct rtw89_h2c_lps_ml_cmn_info *)skb->data; 2968 2969 h2c->fmt_id = 0x3; 2970 2971 h2c->mlo_dbcc_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode); 2972 h2c->rfe_type = efuse->rfe_type; 2973 2974 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) { 2975 path = rtwvif_link->phy_idx == RTW89_PHY_1 ? RF_PATH_B : RF_PATH_A; 2976 chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx); 2977 gain_band = rtw89_subband_to_gain_band_be(chan->subband_type); 2978 2979 h2c->central_ch[rtwvif_link->phy_idx] = chan->channel; 2980 h2c->pri_ch[rtwvif_link->phy_idx] = chan->primary_channel; 2981 h2c->band[rtwvif_link->phy_idx] = chan->band_type; 2982 h2c->bw[rtwvif_link->phy_idx] = chan->band_width; 2983 if (pkt_stat->beacon_rate < RTW89_HW_RATE_OFDM6) 2984 h2c->bcn_rate_type[rtwvif_link->phy_idx] = 0x1; 2985 else 2986 h2c->bcn_rate_type[rtwvif_link->phy_idx] = 0x2; 2987 2988 /* Fill BW20 RX gain table for beacon mode */ 2989 for (i = 0; i < TIA_GAIN_NUM; i++) { 2990 h2c->tia_gain[rtwvif_link->phy_idx][i] = 2991 cpu_to_le16(gain->tia_gain[gain_band][bw_idx][path][i]); 2992 } 2993 2994 if (rtwvif_link->bcn_bw_idx < ARRAY_SIZE(bcn_bw_ofst)) { 2995 beacon_bw_ofst = bcn_bw_ofst[rtwvif_link->bcn_bw_idx]; 2996 h2c->dup_bcn_ofst[rtwvif_link->phy_idx] = beacon_bw_ofst; 2997 } 2998 2999 memcpy(h2c->lna_gain[rtwvif_link->phy_idx], 3000 gain->lna_gain[gain_band][bw_idx][path], 3001 LNA_GAIN_NUM); 3002 memcpy(h2c->tia_lna_op1db[rtwvif_link->phy_idx], 3003 gain->tia_lna_op1db[gain_band][bw_idx][path], 3004 LNA_GAIN_NUM + 1); 3005 memcpy(h2c->lna_op1db[rtwvif_link->phy_idx], 3006 gain->lna_op1db[gain_band][bw_idx][path], 3007 LNA_GAIN_NUM); 3008 } 3009 3010 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3011 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM, 3012 H2C_FUNC_FW_LPS_ML_CMN_INFO, 0, 0, len); 3013 3014 rtw89_phy_write32_mask(rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT, 0); 3015 ret = rtw89_h2c_tx(rtwdev, skb, false); 3016 if (ret) { 3017 rtw89_err(rtwdev, "failed to send h2c\n"); 3018 goto fail; 3019 } 3020 3021 ret = read_poll_timeout(rtw89_phy_read32_mask, done, done, 50, 5000, 3022 true, rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT); 3023 if (ret) 3024 rtw89_warn(rtwdev, "h2c_lps_ml_cmn_info done polling timeout\n"); 3025 3026 return 0; 3027 fail: 3028 dev_kfree_skb_any(skb); 3029 3030 return ret; 3031 } 3032 3033 #define 
H2C_P2P_ACT_LEN 20 3034 int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev, 3035 struct rtw89_vif_link *rtwvif_link, 3036 struct ieee80211_p2p_noa_desc *desc, 3037 u8 act, u8 noa_id, u8 ctwindow_oppps) 3038 { 3039 bool p2p_type_gc = rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT; 3040 struct sk_buff *skb; 3041 u8 *cmd; 3042 int ret; 3043 3044 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_P2P_ACT_LEN); 3045 if (!skb) { 3046 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n"); 3047 return -ENOMEM; 3048 } 3049 skb_put(skb, H2C_P2P_ACT_LEN); 3050 cmd = skb->data; 3051 3052 RTW89_SET_FWCMD_P2P_MACID(cmd, rtwvif_link->mac_id); 3053 RTW89_SET_FWCMD_P2P_P2PID(cmd, 0); 3054 RTW89_SET_FWCMD_P2P_NOAID(cmd, noa_id); 3055 RTW89_SET_FWCMD_P2P_ACT(cmd, act); 3056 RTW89_SET_FWCMD_P2P_TYPE(cmd, p2p_type_gc); 3057 RTW89_SET_FWCMD_P2P_ALL_SLEP(cmd, 0); 3058 if (desc) { 3059 RTW89_SET_FWCMD_NOA_START_TIME(cmd, desc->start_time); 3060 RTW89_SET_FWCMD_NOA_INTERVAL(cmd, desc->interval); 3061 RTW89_SET_FWCMD_NOA_DURATION(cmd, desc->duration); 3062 RTW89_SET_FWCMD_NOA_COUNT(cmd, desc->count); 3063 RTW89_SET_FWCMD_NOA_CTWINDOW(cmd, ctwindow_oppps); 3064 } 3065 3066 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3067 H2C_CAT_MAC, H2C_CL_MAC_PS, 3068 H2C_FUNC_P2P_ACT, 0, 0, 3069 H2C_P2P_ACT_LEN); 3070 3071 ret = rtw89_h2c_tx(rtwdev, skb, false); 3072 if (ret) { 3073 rtw89_err(rtwdev, "failed to send h2c\n"); 3074 goto fail; 3075 } 3076 3077 return 0; 3078 fail: 3079 dev_kfree_skb_any(skb); 3080 3081 return ret; 3082 } 3083 3084 static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev, 3085 struct sk_buff *skb) 3086 { 3087 const struct rtw89_chip_info *chip = rtwdev->chip; 3088 struct rtw89_hal *hal = &rtwdev->hal; 3089 u8 ntx_path; 3090 u8 map_b; 3091 3092 if (chip->rf_path_num == 1) { 3093 ntx_path = RF_A; 3094 map_b = 0; 3095 } else { 3096 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_AB; 3097 map_b = ntx_path == RF_AB ? 1 : 0; 3098 } 3099 3100 SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path); 3101 SET_CMC_TBL_PATH_MAP_A(skb->data, 0); 3102 SET_CMC_TBL_PATH_MAP_B(skb->data, map_b); 3103 SET_CMC_TBL_PATH_MAP_C(skb->data, 0); 3104 SET_CMC_TBL_PATH_MAP_D(skb->data, 0); 3105 } 3106 3107 #define H2C_CMC_TBL_LEN 68 3108 int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev, 3109 struct rtw89_vif_link *rtwvif_link, 3110 struct rtw89_sta_link *rtwsta_link) 3111 { 3112 const struct rtw89_chip_info *chip = rtwdev->chip; 3113 u8 macid = rtwsta_link ? 
rtwsta_link->mac_id : rtwvif_link->mac_id; 3114 struct sk_buff *skb; 3115 int ret; 3116 3117 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 3118 if (!skb) { 3119 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3120 return -ENOMEM; 3121 } 3122 skb_put(skb, H2C_CMC_TBL_LEN); 3123 SET_CTRL_INFO_MACID(skb->data, macid); 3124 SET_CTRL_INFO_OPERATION(skb->data, 1); 3125 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) { 3126 SET_CMC_TBL_TXPWR_MODE(skb->data, 0); 3127 __rtw89_fw_h2c_set_tx_path(rtwdev, skb); 3128 SET_CMC_TBL_ANTSEL_A(skb->data, 0); 3129 SET_CMC_TBL_ANTSEL_B(skb->data, 0); 3130 SET_CMC_TBL_ANTSEL_C(skb->data, 0); 3131 SET_CMC_TBL_ANTSEL_D(skb->data, 0); 3132 } 3133 SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0); 3134 SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0); 3135 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) 3136 SET_CMC_TBL_DATA_DCM(skb->data, 0); 3137 3138 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3139 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3140 chip->h2c_cctl_func_id, 0, 1, 3141 H2C_CMC_TBL_LEN); 3142 3143 ret = rtw89_h2c_tx(rtwdev, skb, false); 3144 if (ret) { 3145 rtw89_err(rtwdev, "failed to send h2c\n"); 3146 goto fail; 3147 } 3148 3149 return 0; 3150 fail: 3151 dev_kfree_skb_any(skb); 3152 3153 return ret; 3154 } 3155 EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl); 3156 3157 int rtw89_fw_h2c_default_cmac_tbl_g7(struct rtw89_dev *rtwdev, 3158 struct rtw89_vif_link *rtwvif_link, 3159 struct rtw89_sta_link *rtwsta_link) 3160 { 3161 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id; 3162 struct rtw89_h2c_cctlinfo_ud_g7 *h2c; 3163 u32 len = sizeof(*h2c); 3164 struct sk_buff *skb; 3165 int ret; 3166 3167 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3168 if (!skb) { 3169 rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n"); 3170 return -ENOMEM; 3171 } 3172 skb_put(skb, len); 3173 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data; 3174 3175 h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) | 3176 le32_encode_bits(1, CCTLINFO_G7_C0_OP); 3177 3178 h2c->w0 = le32_encode_bits(4, CCTLINFO_G7_W0_DATARATE); 3179 h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_ALL); 3180 3181 h2c->w1 = le32_encode_bits(4, CCTLINFO_G7_W1_DATA_RTY_LOWEST_RATE) | 3182 le32_encode_bits(0xa, CCTLINFO_G7_W1_RTSRATE) | 3183 le32_encode_bits(4, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE); 3184 h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_ALL); 3185 3186 h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_ALL); 3187 3188 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_ALL); 3189 3190 h2c->w4 = le32_encode_bits(0xFFFF, CCTLINFO_G7_W4_ACT_SUBCH_CBW); 3191 h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_ALL); 3192 3193 h2c->w5 = le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) | 3194 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) | 3195 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) | 3196 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) | 3197 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4); 3198 h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_ALL); 3199 3200 h2c->w6 = le32_encode_bits(0xb, CCTLINFO_G7_W6_RESP_REF_RATE); 3201 h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ALL); 3202 3203 h2c->w7 = le32_encode_bits(1, CCTLINFO_G7_W7_NC) | 3204 le32_encode_bits(1, CCTLINFO_G7_W7_NR) | 3205 le32_encode_bits(1, CCTLINFO_G7_W7_CB) | 3206 le32_encode_bits(0x1, CCTLINFO_G7_W7_CSI_PARA_EN) | 3207 le32_encode_bits(0xb, CCTLINFO_G7_W7_CSI_FIX_RATE); 3208 h2c->m7 = cpu_to_le32(CCTLINFO_G7_W7_ALL); 3209 3210 h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_ALL); 3211 3212 h2c->w14 = le32_encode_bits(0, 
CCTLINFO_G7_W14_VO_CURR_RATE) | 3213 le32_encode_bits(0, CCTLINFO_G7_W14_VI_CURR_RATE) | 3214 le32_encode_bits(0, CCTLINFO_G7_W14_BE_CURR_RATE_L); 3215 h2c->m14 = cpu_to_le32(CCTLINFO_G7_W14_ALL); 3216 3217 h2c->w15 = le32_encode_bits(0, CCTLINFO_G7_W15_BE_CURR_RATE_H) | 3218 le32_encode_bits(0, CCTLINFO_G7_W15_BK_CURR_RATE) | 3219 le32_encode_bits(0, CCTLINFO_G7_W15_MGNT_CURR_RATE); 3220 h2c->m15 = cpu_to_le32(CCTLINFO_G7_W15_ALL); 3221 3222 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3223 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3224 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1, 3225 len); 3226 3227 ret = rtw89_h2c_tx(rtwdev, skb, false); 3228 if (ret) { 3229 rtw89_err(rtwdev, "failed to send h2c\n"); 3230 goto fail; 3231 } 3232 3233 return 0; 3234 fail: 3235 dev_kfree_skb_any(skb); 3236 3237 return ret; 3238 } 3239 EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl_g7); 3240 3241 static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev, 3242 struct ieee80211_link_sta *link_sta, 3243 u8 *pads) 3244 { 3245 bool ppe_th; 3246 u8 ppe16, ppe8; 3247 u8 nss = min(link_sta->rx_nss, rtwdev->hal.tx_nss) - 1; 3248 u8 ppe_thres_hdr = link_sta->he_cap.ppe_thres[0]; 3249 u8 ru_bitmap; 3250 u8 n, idx, sh; 3251 u16 ppe; 3252 int i; 3253 3254 ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT, 3255 link_sta->he_cap.he_cap_elem.phy_cap_info[6]); 3256 if (!ppe_th) { 3257 u8 pad; 3258 3259 pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK, 3260 link_sta->he_cap.he_cap_elem.phy_cap_info[9]); 3261 3262 for (i = 0; i < RTW89_PPE_BW_NUM; i++) 3263 pads[i] = pad; 3264 3265 return; 3266 } 3267 3268 ru_bitmap = FIELD_GET(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, ppe_thres_hdr); 3269 n = hweight8(ru_bitmap); 3270 n = 7 + (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) * nss; 3271 3272 for (i = 0; i < RTW89_PPE_BW_NUM; i++) { 3273 if (!(ru_bitmap & BIT(i))) { 3274 pads[i] = 1; 3275 continue; 3276 } 3277 3278 idx = n >> 3; 3279 sh = n & 7; 3280 n += IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2; 3281 3282 ppe = le16_to_cpu(*((__le16 *)&link_sta->he_cap.ppe_thres[idx])); 3283 ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 3284 sh += IEEE80211_PPE_THRES_INFO_PPET_SIZE; 3285 ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 3286 3287 if (ppe16 != 7 && ppe8 == 7) 3288 pads[i] = RTW89_PE_DURATION_16; 3289 else if (ppe8 != 7) 3290 pads[i] = RTW89_PE_DURATION_8; 3291 else 3292 pads[i] = RTW89_PE_DURATION_0; 3293 } 3294 } 3295 3296 int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev, 3297 struct rtw89_vif_link *rtwvif_link, 3298 struct rtw89_sta_link *rtwsta_link) 3299 { 3300 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 3301 const struct rtw89_chip_info *chip = rtwdev->chip; 3302 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 3303 rtwvif_link->chanctx_idx); 3304 struct ieee80211_link_sta *link_sta; 3305 struct sk_buff *skb; 3306 u8 pads[RTW89_PPE_BW_NUM]; 3307 u8 mac_id = rtwsta_link ? 
rtwsta_link->mac_id : rtwvif_link->mac_id; 3308 u16 lowest_rate; 3309 int ret; 3310 3311 memset(pads, 0, sizeof(pads)); 3312 3313 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 3314 if (!skb) { 3315 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3316 return -ENOMEM; 3317 } 3318 3319 rcu_read_lock(); 3320 3321 if (rtwsta_link) 3322 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true); 3323 3324 if (rtwsta_link && link_sta->he_cap.has_he) 3325 __get_sta_he_pkt_padding(rtwdev, link_sta, pads); 3326 3327 if (vif->p2p) 3328 lowest_rate = RTW89_HW_RATE_OFDM6; 3329 else if (chan->band_type == RTW89_BAND_2G) 3330 lowest_rate = RTW89_HW_RATE_CCK1; 3331 else 3332 lowest_rate = RTW89_HW_RATE_OFDM6; 3333 3334 skb_put(skb, H2C_CMC_TBL_LEN); 3335 SET_CTRL_INFO_MACID(skb->data, mac_id); 3336 SET_CTRL_INFO_OPERATION(skb->data, 1); 3337 SET_CMC_TBL_DISRTSFB(skb->data, 1); 3338 SET_CMC_TBL_DISDATAFB(skb->data, 1); 3339 SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, lowest_rate); 3340 SET_CMC_TBL_RTS_TXCNT_LMT_SEL(skb->data, 0); 3341 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 0); 3342 if (vif->type == NL80211_IFTYPE_STATION) 3343 SET_CMC_TBL_ULDL(skb->data, 1); 3344 else 3345 SET_CMC_TBL_ULDL(skb->data, 0); 3346 SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif_link->port); 3347 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD_V1) { 3348 SET_CMC_TBL_NOMINAL_PKT_PADDING_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_20]); 3349 SET_CMC_TBL_NOMINAL_PKT_PADDING40_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_40]); 3350 SET_CMC_TBL_NOMINAL_PKT_PADDING80_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_80]); 3351 SET_CMC_TBL_NOMINAL_PKT_PADDING160_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_160]); 3352 } else if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) { 3353 SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]); 3354 SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]); 3355 SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]); 3356 SET_CMC_TBL_NOMINAL_PKT_PADDING160(skb->data, pads[RTW89_CHANNEL_WIDTH_160]); 3357 } 3358 if (rtwsta_link) 3359 SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data, 3360 link_sta->he_cap.has_he); 3361 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) 3362 SET_CMC_TBL_DATA_DCM(skb->data, 0); 3363 3364 rcu_read_unlock(); 3365 3366 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3367 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3368 chip->h2c_cctl_func_id, 0, 1, 3369 H2C_CMC_TBL_LEN); 3370 3371 ret = rtw89_h2c_tx(rtwdev, skb, false); 3372 if (ret) { 3373 rtw89_err(rtwdev, "failed to send h2c\n"); 3374 goto fail; 3375 } 3376 3377 return 0; 3378 fail: 3379 dev_kfree_skb_any(skb); 3380 3381 return ret; 3382 } 3383 EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl); 3384 3385 static void __get_sta_eht_pkt_padding(struct rtw89_dev *rtwdev, 3386 struct ieee80211_link_sta *link_sta, 3387 u8 *pads) 3388 { 3389 u8 nss = min(link_sta->rx_nss, rtwdev->hal.tx_nss) - 1; 3390 u16 ppe_thres_hdr; 3391 u8 ppe16, ppe8; 3392 u8 n, idx, sh; 3393 u8 ru_bitmap; 3394 bool ppe_th; 3395 u16 ppe; 3396 int i; 3397 3398 ppe_th = !!u8_get_bits(link_sta->eht_cap.eht_cap_elem.phy_cap_info[5], 3399 IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT); 3400 if (!ppe_th) { 3401 u8 pad; 3402 3403 pad = u8_get_bits(link_sta->eht_cap.eht_cap_elem.phy_cap_info[5], 3404 IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK); 3405 3406 for (i = 0; i < RTW89_PPE_BW_NUM; i++) 3407 pads[i] = pad; 3408 3409 return; 3410 } 3411 3412 ppe_thres_hdr = 
get_unaligned_le16(link_sta->eht_cap.eht_ppe_thres); 3413 ru_bitmap = u16_get_bits(ppe_thres_hdr, 3414 IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK); 3415 n = hweight8(ru_bitmap); 3416 n = IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE + 3417 (n * IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2) * nss; 3418 3419 for (i = 0; i < RTW89_PPE_BW_NUM; i++) { 3420 if (!(ru_bitmap & BIT(i))) { 3421 pads[i] = 1; 3422 continue; 3423 } 3424 3425 idx = n >> 3; 3426 sh = n & 7; 3427 n += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2; 3428 3429 ppe = get_unaligned_le16(link_sta->eht_cap.eht_ppe_thres + idx); 3430 ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 3431 sh += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE; 3432 ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 3433 3434 if (ppe16 != 7 && ppe8 == 7) 3435 pads[i] = RTW89_PE_DURATION_16_20; 3436 else if (ppe8 != 7) 3437 pads[i] = RTW89_PE_DURATION_8; 3438 else 3439 pads[i] = RTW89_PE_DURATION_0; 3440 } 3441 } 3442 3443 int rtw89_fw_h2c_assoc_cmac_tbl_g7(struct rtw89_dev *rtwdev, 3444 struct rtw89_vif_link *rtwvif_link, 3445 struct rtw89_sta_link *rtwsta_link) 3446 { 3447 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 3448 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx); 3449 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id; 3450 struct rtw89_h2c_cctlinfo_ud_g7 *h2c; 3451 struct ieee80211_bss_conf *bss_conf; 3452 struct ieee80211_link_sta *link_sta; 3453 u8 pads[RTW89_PPE_BW_NUM]; 3454 u32 len = sizeof(*h2c); 3455 struct sk_buff *skb; 3456 u16 lowest_rate; 3457 int ret; 3458 3459 memset(pads, 0, sizeof(pads)); 3460 3461 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3462 if (!skb) { 3463 rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n"); 3464 return -ENOMEM; 3465 } 3466 3467 rcu_read_lock(); 3468 3469 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 3470 3471 if (rtwsta_link) { 3472 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true); 3473 3474 if (link_sta->eht_cap.has_eht) 3475 __get_sta_eht_pkt_padding(rtwdev, link_sta, pads); 3476 else if (link_sta->he_cap.has_he) 3477 __get_sta_he_pkt_padding(rtwdev, link_sta, pads); 3478 } 3479 3480 if (vif->p2p) 3481 lowest_rate = RTW89_HW_RATE_OFDM6; 3482 else if (chan->band_type == RTW89_BAND_2G) 3483 lowest_rate = RTW89_HW_RATE_CCK1; 3484 else 3485 lowest_rate = RTW89_HW_RATE_OFDM6; 3486 3487 skb_put(skb, len); 3488 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data; 3489 3490 h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) | 3491 le32_encode_bits(1, CCTLINFO_G7_C0_OP); 3492 3493 h2c->w0 = le32_encode_bits(1, CCTLINFO_G7_W0_DISRTSFB) | 3494 le32_encode_bits(1, CCTLINFO_G7_W0_DISDATAFB); 3495 h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_DISRTSFB | 3496 CCTLINFO_G7_W0_DISDATAFB); 3497 3498 h2c->w1 = le32_encode_bits(lowest_rate, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE); 3499 h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE); 3500 3501 h2c->w2 = le32_encode_bits(0, CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL); 3502 h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL); 3503 3504 h2c->w3 = le32_encode_bits(0, CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL); 3505 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL); 3506 3507 h2c->w4 = le32_encode_bits(rtwvif_link->port, CCTLINFO_G7_W4_MULTI_PORT_ID); 3508 h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_MULTI_PORT_ID); 3509 3510 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) { 3511 h2c->w4 |= le32_encode_bits(0, CCTLINFO_G7_W4_DATA_DCM); 3512 h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_DATA_DCM); 3513 
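		/* AP mode: DATA_DCM is cleared and its bit is set in the update
		 * mask (m4) so the firmware applies the zeroed field instead of
		 * leaving it untouched.
		 */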
} 3514 3515 if (bss_conf->eht_support) { 3516 u16 punct = bss_conf->chanreq.oper.punctured; 3517 3518 h2c->w4 |= le32_encode_bits(~punct, 3519 CCTLINFO_G7_W4_ACT_SUBCH_CBW); 3520 h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_ACT_SUBCH_CBW); 3521 } 3522 3523 h2c->w5 = le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_20], 3524 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) | 3525 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_40], 3526 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) | 3527 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_80], 3528 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) | 3529 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_160], 3530 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) | 3531 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_320], 3532 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4); 3533 h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0 | 3534 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1 | 3535 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2 | 3536 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3 | 3537 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4); 3538 3539 h2c->w6 = le32_encode_bits(vif->cfg.aid, CCTLINFO_G7_W6_AID12_PAID) | 3540 le32_encode_bits(vif->type == NL80211_IFTYPE_STATION ? 1 : 0, 3541 CCTLINFO_G7_W6_ULDL); 3542 h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_AID12_PAID | CCTLINFO_G7_W6_ULDL); 3543 3544 if (rtwsta_link) { 3545 h2c->w8 = le32_encode_bits(link_sta->he_cap.has_he, 3546 CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT); 3547 h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT); 3548 } 3549 3550 rcu_read_unlock(); 3551 3552 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3553 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3554 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1, 3555 len); 3556 3557 ret = rtw89_h2c_tx(rtwdev, skb, false); 3558 if (ret) { 3559 rtw89_err(rtwdev, "failed to send h2c\n"); 3560 goto fail; 3561 } 3562 3563 return 0; 3564 fail: 3565 dev_kfree_skb_any(skb); 3566 3567 return ret; 3568 } 3569 EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl_g7); 3570 3571 int rtw89_fw_h2c_ampdu_cmac_tbl_g7(struct rtw89_dev *rtwdev, 3572 struct rtw89_vif_link *rtwvif_link, 3573 struct rtw89_sta_link *rtwsta_link) 3574 { 3575 struct rtw89_sta *rtwsta = rtwsta_link->rtwsta; 3576 struct rtw89_h2c_cctlinfo_ud_g7 *h2c; 3577 u32 len = sizeof(*h2c); 3578 struct sk_buff *skb; 3579 u16 agg_num = 0; 3580 u8 ba_bmap = 0; 3581 int ret; 3582 u8 tid; 3583 3584 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3585 if (!skb) { 3586 rtw89_err(rtwdev, "failed to alloc skb for ampdu cmac g7\n"); 3587 return -ENOMEM; 3588 } 3589 skb_put(skb, len); 3590 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data; 3591 3592 for_each_set_bit(tid, rtwsta->ampdu_map, IEEE80211_NUM_TIDS) { 3593 if (agg_num == 0) 3594 agg_num = rtwsta->ampdu_params[tid].agg_num; 3595 else 3596 agg_num = min(agg_num, rtwsta->ampdu_params[tid].agg_num); 3597 } 3598 3599 if (agg_num <= 0x20) 3600 ba_bmap = 3; 3601 else if (agg_num > 0x20 && agg_num <= 0x40) 3602 ba_bmap = 0; 3603 else if (agg_num > 0x40 && agg_num <= 0x80) 3604 ba_bmap = 1; 3605 else if (agg_num > 0x80 && agg_num <= 0x100) 3606 ba_bmap = 2; 3607 else if (agg_num > 0x100 && agg_num <= 0x200) 3608 ba_bmap = 4; 3609 else if (agg_num > 0x200 && agg_num <= 0x400) 3610 ba_bmap = 5; 3611 3612 h2c->c0 = le32_encode_bits(rtwsta_link->mac_id, CCTLINFO_G7_C0_MACID) | 3613 le32_encode_bits(1, CCTLINFO_G7_C0_OP); 3614 3615 h2c->w3 = le32_encode_bits(ba_bmap, CCTLINFO_G7_W3_BA_BMAP); 3616 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_BA_BMAP); 3617 3618 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3619 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3620 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 0, 3621 len); 3622 3623 ret = 
rtw89_h2c_tx(rtwdev, skb, false); 3624 if (ret) { 3625 rtw89_err(rtwdev, "failed to send h2c\n"); 3626 goto fail; 3627 } 3628 3629 return 0; 3630 fail: 3631 dev_kfree_skb_any(skb); 3632 3633 return ret; 3634 } 3635 EXPORT_SYMBOL(rtw89_fw_h2c_ampdu_cmac_tbl_g7); 3636 3637 int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev, 3638 struct rtw89_sta_link *rtwsta_link) 3639 { 3640 const struct rtw89_chip_info *chip = rtwdev->chip; 3641 struct sk_buff *skb; 3642 int ret; 3643 3644 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 3645 if (!skb) { 3646 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3647 return -ENOMEM; 3648 } 3649 skb_put(skb, H2C_CMC_TBL_LEN); 3650 SET_CTRL_INFO_MACID(skb->data, rtwsta_link->mac_id); 3651 SET_CTRL_INFO_OPERATION(skb->data, 1); 3652 if (rtwsta_link->cctl_tx_time) { 3653 SET_CMC_TBL_AMPDU_TIME_SEL(skb->data, 1); 3654 SET_CMC_TBL_AMPDU_MAX_TIME(skb->data, rtwsta_link->ampdu_max_time); 3655 } 3656 if (rtwsta_link->cctl_tx_retry_limit) { 3657 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 1); 3658 SET_CMC_TBL_DATA_TX_CNT_LMT(skb->data, rtwsta_link->data_tx_cnt_lmt); 3659 } 3660 3661 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3662 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3663 chip->h2c_cctl_func_id, 0, 1, 3664 H2C_CMC_TBL_LEN); 3665 3666 ret = rtw89_h2c_tx(rtwdev, skb, false); 3667 if (ret) { 3668 rtw89_err(rtwdev, "failed to send h2c\n"); 3669 goto fail; 3670 } 3671 3672 return 0; 3673 fail: 3674 dev_kfree_skb_any(skb); 3675 3676 return ret; 3677 } 3678 EXPORT_SYMBOL(rtw89_fw_h2c_txtime_cmac_tbl); 3679 3680 int rtw89_fw_h2c_txtime_cmac_tbl_g7(struct rtw89_dev *rtwdev, 3681 struct rtw89_sta_link *rtwsta_link) 3682 { 3683 struct rtw89_h2c_cctlinfo_ud_g7 *h2c; 3684 u32 len = sizeof(*h2c); 3685 struct sk_buff *skb; 3686 int ret; 3687 3688 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3689 if (!skb) { 3690 rtw89_err(rtwdev, "failed to alloc skb for txtime_cmac_g7\n"); 3691 return -ENOMEM; 3692 } 3693 skb_put(skb, len); 3694 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data; 3695 3696 h2c->c0 = le32_encode_bits(rtwsta_link->mac_id, CCTLINFO_G7_C0_MACID) | 3697 le32_encode_bits(1, CCTLINFO_G7_C0_OP); 3698 3699 if (rtwsta_link->cctl_tx_time) { 3700 h2c->w3 |= le32_encode_bits(1, CCTLINFO_G7_W3_AMPDU_TIME_SEL); 3701 h2c->m3 |= cpu_to_le32(CCTLINFO_G7_W3_AMPDU_TIME_SEL); 3702 3703 h2c->w2 |= le32_encode_bits(rtwsta_link->ampdu_max_time, 3704 CCTLINFO_G7_W2_AMPDU_MAX_TIME); 3705 h2c->m2 |= cpu_to_le32(CCTLINFO_G7_W2_AMPDU_MAX_TIME); 3706 } 3707 if (rtwsta_link->cctl_tx_retry_limit) { 3708 h2c->w2 |= le32_encode_bits(1, CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL) | 3709 le32_encode_bits(rtwsta_link->data_tx_cnt_lmt, 3710 CCTLINFO_G7_W2_DATA_TX_CNT_LMT); 3711 h2c->m2 |= cpu_to_le32(CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL | 3712 CCTLINFO_G7_W2_DATA_TX_CNT_LMT); 3713 } 3714 3715 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3716 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3717 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1, 3718 len); 3719 3720 ret = rtw89_h2c_tx(rtwdev, skb, false); 3721 if (ret) { 3722 rtw89_err(rtwdev, "failed to send h2c\n"); 3723 goto fail; 3724 } 3725 3726 return 0; 3727 fail: 3728 dev_kfree_skb_any(skb); 3729 3730 return ret; 3731 } 3732 EXPORT_SYMBOL(rtw89_fw_h2c_txtime_cmac_tbl_g7); 3733 3734 int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev, 3735 struct rtw89_sta_link *rtwsta_link) 3736 { 3737 const struct rtw89_chip_info *chip = rtwdev->chip; 3738 struct sk_buff *skb; 3739 int ret; 3740 3741 if (chip->h2c_cctl_func_id != H2C_FUNC_MAC_CCTLINFO_UD) 
		return 0;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_CMC_TBL_LEN);
	SET_CTRL_INFO_MACID(skb->data, rtwsta_link->mac_id);
	SET_CTRL_INFO_OPERATION(skb->data, 1);

	__rtw89_fw_h2c_set_tx_path(rtwdev, skb);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_CCTLINFO_UD, 0, 1,
			      H2C_CMC_TBL_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
			       struct rtw89_vif_link *rtwvif_link)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
						       rtwvif_link->chanctx_idx);
	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
	struct rtw89_h2c_bcn_upd *h2c;
	struct sk_buff *skb_beacon;
	struct ieee80211_hdr *hdr;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int bcn_total_len;
	u16 beacon_rate;
	u16 tim_offset;
	void *noa_data;
	u8 noa_len;
	int ret;

	if (vif->p2p)
		beacon_rate = RTW89_HW_RATE_OFDM6;
	else if (chan->band_type == RTW89_BAND_2G)
		beacon_rate = RTW89_HW_RATE_CCK1;
	else
		beacon_rate = RTW89_HW_RATE_OFDM6;

	skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset,
					      NULL, 0);
	if (!skb_beacon) {
		rtw89_err(rtwdev, "failed to get beacon skb\n");
		return -ENOMEM;
	}

	noa_len = rtw89_p2p_noa_fetch(rtwvif_link, &noa_data);
	if (noa_len &&
	    (noa_len <= skb_tailroom(skb_beacon) ||
	     pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) {
		skb_put_data(skb_beacon, noa_data, noa_len);
	}

	hdr = (struct ieee80211_hdr *)skb_beacon->data;
	tim_offset -= ieee80211_hdrlen(hdr->frame_control);

	bcn_total_len = len + skb_beacon->len;
	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		dev_kfree_skb_any(skb_beacon);
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_bcn_upd *)skb->data;

	h2c->w0 = le32_encode_bits(rtwvif_link->port, RTW89_H2C_BCN_UPD_W0_PORT) |
		  le32_encode_bits(0, RTW89_H2C_BCN_UPD_W0_MBSSID) |
		  le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_BCN_UPD_W0_BAND) |
		  le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_W0_GRP_IE_OFST);
	h2c->w1 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCN_UPD_W1_MACID) |
		  le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_W1_SSN_SEL) |
		  le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_W1_SSN_MODE) |
		  le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_W1_RATE);

	skb_put_data(skb, skb_beacon->data, skb_beacon->len);
	dev_kfree_skb_any(skb_beacon);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_BCN_UPD, 0, 1,
			      bcn_total_len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon);

int rtw89_fw_h2c_update_beacon_be(struct rtw89_dev *rtwdev,
				  struct rtw89_vif_link *rtwvif_link)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
	struct rtw89_h2c_bcn_upd_be *h2c;
	struct sk_buff *skb_beacon;
	struct ieee80211_hdr *hdr;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int bcn_total_len;
	u16 beacon_rate;
	u16 tim_offset;
	void *noa_data;
	u8 noa_len;
	int ret;

	if (vif->p2p)
		beacon_rate = RTW89_HW_RATE_OFDM6;
	else if (chan->band_type == RTW89_BAND_2G)
		beacon_rate = RTW89_HW_RATE_CCK1;
	else
		beacon_rate = RTW89_HW_RATE_OFDM6;

	skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset,
					      NULL, 0);
	if (!skb_beacon) {
		rtw89_err(rtwdev, "failed to get beacon skb\n");
		return -ENOMEM;
	}

	noa_len = rtw89_p2p_noa_fetch(rtwvif_link, &noa_data);
	if (noa_len &&
	    (noa_len <= skb_tailroom(skb_beacon) ||
	     pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) {
		skb_put_data(skb_beacon, noa_data, noa_len);
	}

	hdr = (struct ieee80211_hdr *)skb_beacon->data;
	tim_offset -= ieee80211_hdrlen(hdr->frame_control);

	bcn_total_len = len + skb_beacon->len;
	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		dev_kfree_skb_any(skb_beacon);
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_bcn_upd_be *)skb->data;

	h2c->w0 = le32_encode_bits(rtwvif_link->port, RTW89_H2C_BCN_UPD_BE_W0_PORT) |
		  le32_encode_bits(0, RTW89_H2C_BCN_UPD_BE_W0_MBSSID) |
		  le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_BCN_UPD_BE_W0_BAND) |
		  le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_BE_W0_GRP_IE_OFST);
	h2c->w1 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCN_UPD_BE_W1_MACID) |
		  le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_BE_W1_SSN_SEL) |
		  le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_BE_W1_SSN_MODE) |
		  le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_BE_W1_RATE);

	skb_put_data(skb, skb_beacon->data, skb_beacon->len);
	dev_kfree_skb_any(skb_beacon);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_BCN_UPD_BE, 0, 1,
			      bcn_total_len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;

fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon_be);

int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
			       struct rtw89_vif_link *rtwvif_link,
			       struct rtw89_sta_link *rtwsta_link,
			       enum rtw89_upd_mode upd_mode)
{
	u8 mac_id = rtwsta_link ?
rtwsta_link->mac_id : rtwvif_link->mac_id; 3942 struct rtw89_h2c_role_maintain *h2c; 3943 u32 len = sizeof(*h2c); 3944 struct sk_buff *skb; 3945 u8 self_role; 3946 int ret; 3947 3948 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) { 3949 if (rtwsta_link) 3950 self_role = RTW89_SELF_ROLE_AP_CLIENT; 3951 else 3952 self_role = rtwvif_link->self_role; 3953 } else { 3954 self_role = rtwvif_link->self_role; 3955 } 3956 3957 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3958 if (!skb) { 3959 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 3960 return -ENOMEM; 3961 } 3962 skb_put(skb, len); 3963 h2c = (struct rtw89_h2c_role_maintain *)skb->data; 3964 3965 h2c->w0 = le32_encode_bits(mac_id, RTW89_H2C_ROLE_MAINTAIN_W0_MACID) | 3966 le32_encode_bits(self_role, RTW89_H2C_ROLE_MAINTAIN_W0_SELF_ROLE) | 3967 le32_encode_bits(upd_mode, RTW89_H2C_ROLE_MAINTAIN_W0_UPD_MODE) | 3968 le32_encode_bits(rtwvif_link->wifi_role, 3969 RTW89_H2C_ROLE_MAINTAIN_W0_WIFI_ROLE) | 3970 le32_encode_bits(rtwvif_link->mac_idx, 3971 RTW89_H2C_ROLE_MAINTAIN_W0_BAND) | 3972 le32_encode_bits(rtwvif_link->port, RTW89_H2C_ROLE_MAINTAIN_W0_PORT); 3973 3974 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3975 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 3976 H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1, 3977 len); 3978 3979 ret = rtw89_h2c_tx(rtwdev, skb, false); 3980 if (ret) { 3981 rtw89_err(rtwdev, "failed to send h2c\n"); 3982 goto fail; 3983 } 3984 3985 return 0; 3986 fail: 3987 dev_kfree_skb_any(skb); 3988 3989 return ret; 3990 } 3991 3992 static enum rtw89_fw_sta_type 3993 rtw89_fw_get_sta_type(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 3994 struct rtw89_sta_link *rtwsta_link) 3995 { 3996 struct ieee80211_bss_conf *bss_conf; 3997 struct ieee80211_link_sta *link_sta; 3998 enum rtw89_fw_sta_type type; 3999 4000 rcu_read_lock(); 4001 4002 if (!rtwsta_link) 4003 goto by_vif; 4004 4005 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true); 4006 4007 if (link_sta->eht_cap.has_eht) 4008 type = RTW89_FW_BE_STA; 4009 else if (link_sta->he_cap.has_he) 4010 type = RTW89_FW_AX_STA; 4011 else 4012 type = RTW89_FW_N_AC_STA; 4013 4014 goto out; 4015 4016 by_vif: 4017 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 4018 4019 if (bss_conf->eht_support) 4020 type = RTW89_FW_BE_STA; 4021 else if (bss_conf->he_support) 4022 type = RTW89_FW_AX_STA; 4023 else 4024 type = RTW89_FW_N_AC_STA; 4025 4026 out: 4027 rcu_read_unlock(); 4028 4029 return type; 4030 } 4031 4032 int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 4033 struct rtw89_sta_link *rtwsta_link, bool dis_conn) 4034 { 4035 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id; 4036 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 4037 bool is_mld = ieee80211_vif_is_mld(vif); 4038 u8 self_role = rtwvif_link->self_role; 4039 enum rtw89_fw_sta_type sta_type; 4040 u8 net_type = rtwvif_link->net_type; 4041 struct rtw89_h2c_join_v1 *h2c_v1; 4042 struct rtw89_h2c_join *h2c; 4043 u32 len = sizeof(*h2c); 4044 bool format_v1 = false; 4045 struct sk_buff *skb; 4046 u8 main_mac_id; 4047 bool init_ps; 4048 int ret; 4049 4050 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) { 4051 len = sizeof(*h2c_v1); 4052 format_v1 = true; 4053 } 4054 4055 if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta_link) { 4056 self_role = RTW89_SELF_ROLE_AP_CLIENT; 4057 net_type = dis_conn ? 
RTW89_NET_TYPE_NO_LINK : net_type; 4058 } 4059 4060 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4061 if (!skb) { 4062 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 4063 return -ENOMEM; 4064 } 4065 skb_put(skb, len); 4066 h2c = (struct rtw89_h2c_join *)skb->data; 4067 4068 h2c->w0 = le32_encode_bits(mac_id, RTW89_H2C_JOININFO_W0_MACID) | 4069 le32_encode_bits(dis_conn, RTW89_H2C_JOININFO_W0_OP) | 4070 le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_JOININFO_W0_BAND) | 4071 le32_encode_bits(rtwvif_link->wmm, RTW89_H2C_JOININFO_W0_WMM) | 4072 le32_encode_bits(rtwvif_link->trigger, RTW89_H2C_JOININFO_W0_TGR) | 4073 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_ISHESTA) | 4074 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DLBW) | 4075 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_TF_MAC_PAD) | 4076 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DL_T_PE) | 4077 le32_encode_bits(rtwvif_link->port, RTW89_H2C_JOININFO_W0_PORT_ID) | 4078 le32_encode_bits(net_type, RTW89_H2C_JOININFO_W0_NET_TYPE) | 4079 le32_encode_bits(rtwvif_link->wifi_role, 4080 RTW89_H2C_JOININFO_W0_WIFI_ROLE) | 4081 le32_encode_bits(self_role, RTW89_H2C_JOININFO_W0_SELF_ROLE); 4082 4083 if (!format_v1) 4084 goto done; 4085 4086 h2c_v1 = (struct rtw89_h2c_join_v1 *)skb->data; 4087 4088 sta_type = rtw89_fw_get_sta_type(rtwdev, rtwvif_link, rtwsta_link); 4089 init_ps = rtwvif_link != rtw89_get_designated_link(rtwvif_link->rtwvif); 4090 4091 if (rtwsta_link) 4092 main_mac_id = rtw89_sta_get_main_macid(rtwsta_link->rtwsta); 4093 else 4094 main_mac_id = rtw89_vif_get_main_macid(rtwvif_link->rtwvif); 4095 4096 h2c_v1->w1 = le32_encode_bits(sta_type, RTW89_H2C_JOININFO_W1_STA_TYPE) | 4097 le32_encode_bits(is_mld, RTW89_H2C_JOININFO_W1_IS_MLD) | 4098 le32_encode_bits(main_mac_id, RTW89_H2C_JOININFO_W1_MAIN_MACID) | 4099 le32_encode_bits(RTW89_H2C_JOININFO_MLO_MODE_MLSR, 4100 RTW89_H2C_JOININFO_W1_MLO_MODE) | 4101 le32_encode_bits(0, RTW89_H2C_JOININFO_W1_EMLSR_CAB) | 4102 le32_encode_bits(0, RTW89_H2C_JOININFO_W1_NSTR_EN) | 4103 le32_encode_bits(init_ps, RTW89_H2C_JOININFO_W1_INIT_PWR_STATE) | 4104 le32_encode_bits(IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_256US, 4105 RTW89_H2C_JOININFO_W1_EMLSR_PADDING) | 4106 le32_encode_bits(IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_256US, 4107 RTW89_H2C_JOININFO_W1_EMLSR_TRANS_DELAY) | 4108 le32_encode_bits(0, RTW89_H2C_JOININFO_W2_MACID_EXT) | 4109 le32_encode_bits(0, RTW89_H2C_JOININFO_W2_MAIN_MACID_EXT); 4110 4111 h2c_v1->w2 = 0; 4112 4113 done: 4114 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4115 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 4116 H2C_FUNC_MAC_JOININFO, 0, 1, 4117 len); 4118 4119 ret = rtw89_h2c_tx(rtwdev, skb, false); 4120 if (ret) { 4121 rtw89_err(rtwdev, "failed to send h2c\n"); 4122 goto fail; 4123 } 4124 4125 return 0; 4126 fail: 4127 dev_kfree_skb_any(skb); 4128 4129 return ret; 4130 } 4131 4132 int rtw89_fw_h2c_notify_dbcc(struct rtw89_dev *rtwdev, bool en) 4133 { 4134 struct rtw89_h2c_notify_dbcc *h2c; 4135 u32 len = sizeof(*h2c); 4136 struct sk_buff *skb; 4137 int ret; 4138 4139 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4140 if (!skb) { 4141 rtw89_err(rtwdev, "failed to alloc skb for h2c notify dbcc\n"); 4142 return -ENOMEM; 4143 } 4144 skb_put(skb, len); 4145 h2c = (struct rtw89_h2c_notify_dbcc *)skb->data; 4146 4147 h2c->w0 = le32_encode_bits(en, RTW89_H2C_NOTIFY_DBCC_EN); 4148 4149 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4150 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 4151 H2C_FUNC_NOTIFY_DBCC, 0, 1, 4152 len); 4153 4154 ret = rtw89_h2c_tx(rtwdev, skb, false); 
4155 if (ret) { 4156 rtw89_err(rtwdev, "failed to send h2c\n"); 4157 goto fail; 4158 } 4159 4160 return 0; 4161 fail: 4162 dev_kfree_skb_any(skb); 4163 4164 return ret; 4165 } 4166 4167 int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp, 4168 bool pause) 4169 { 4170 struct rtw89_fw_macid_pause_sleep_grp *h2c_new; 4171 struct rtw89_fw_macid_pause_grp *h2c; 4172 __le32 set = cpu_to_le32(BIT(sh)); 4173 u8 h2c_macid_pause_id; 4174 struct sk_buff *skb; 4175 u32 len; 4176 int ret; 4177 4178 if (RTW89_CHK_FW_FEATURE(MACID_PAUSE_SLEEP, &rtwdev->fw)) { 4179 h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE_SLEEP; 4180 len = sizeof(*h2c_new); 4181 } else { 4182 h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE; 4183 len = sizeof(*h2c); 4184 } 4185 4186 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4187 if (!skb) { 4188 rtw89_err(rtwdev, "failed to alloc skb for h2c macid pause\n"); 4189 return -ENOMEM; 4190 } 4191 skb_put(skb, len); 4192 4193 if (h2c_macid_pause_id == H2C_FUNC_MAC_MACID_PAUSE_SLEEP) { 4194 h2c_new = (struct rtw89_fw_macid_pause_sleep_grp *)skb->data; 4195 4196 h2c_new->n[0].pause_mask_grp[grp] = set; 4197 h2c_new->n[0].sleep_mask_grp[grp] = set; 4198 if (pause) { 4199 h2c_new->n[0].pause_grp[grp] = set; 4200 h2c_new->n[0].sleep_grp[grp] = set; 4201 } 4202 } else { 4203 h2c = (struct rtw89_fw_macid_pause_grp *)skb->data; 4204 4205 h2c->mask_grp[grp] = set; 4206 if (pause) 4207 h2c->pause_grp[grp] = set; 4208 } 4209 4210 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4211 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4212 h2c_macid_pause_id, 1, 0, 4213 len); 4214 4215 ret = rtw89_h2c_tx(rtwdev, skb, false); 4216 if (ret) { 4217 rtw89_err(rtwdev, "failed to send h2c\n"); 4218 goto fail; 4219 } 4220 4221 return 0; 4222 fail: 4223 dev_kfree_skb_any(skb); 4224 4225 return ret; 4226 } 4227 4228 #define H2C_EDCA_LEN 12 4229 int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 4230 u8 ac, u32 val) 4231 { 4232 struct sk_buff *skb; 4233 int ret; 4234 4235 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_EDCA_LEN); 4236 if (!skb) { 4237 rtw89_err(rtwdev, "failed to alloc skb for h2c edca\n"); 4238 return -ENOMEM; 4239 } 4240 skb_put(skb, H2C_EDCA_LEN); 4241 RTW89_SET_EDCA_SEL(skb->data, 0); 4242 RTW89_SET_EDCA_BAND(skb->data, rtwvif_link->mac_idx); 4243 RTW89_SET_EDCA_WMM(skb->data, 0); 4244 RTW89_SET_EDCA_AC(skb->data, ac); 4245 RTW89_SET_EDCA_PARAM(skb->data, val); 4246 4247 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4248 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4249 H2C_FUNC_USR_EDCA, 0, 1, 4250 H2C_EDCA_LEN); 4251 4252 ret = rtw89_h2c_tx(rtwdev, skb, false); 4253 if (ret) { 4254 rtw89_err(rtwdev, "failed to send h2c\n"); 4255 goto fail; 4256 } 4257 4258 return 0; 4259 fail: 4260 dev_kfree_skb_any(skb); 4261 4262 return ret; 4263 } 4264 4265 #define H2C_TSF32_TOGL_LEN 4 4266 int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev, 4267 struct rtw89_vif_link *rtwvif_link, 4268 bool en) 4269 { 4270 struct sk_buff *skb; 4271 u16 early_us = en ? 
2000 : 0; 4272 u8 *cmd; 4273 int ret; 4274 4275 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_TSF32_TOGL_LEN); 4276 if (!skb) { 4277 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n"); 4278 return -ENOMEM; 4279 } 4280 skb_put(skb, H2C_TSF32_TOGL_LEN); 4281 cmd = skb->data; 4282 4283 RTW89_SET_FWCMD_TSF32_TOGL_BAND(cmd, rtwvif_link->mac_idx); 4284 RTW89_SET_FWCMD_TSF32_TOGL_EN(cmd, en); 4285 RTW89_SET_FWCMD_TSF32_TOGL_PORT(cmd, rtwvif_link->port); 4286 RTW89_SET_FWCMD_TSF32_TOGL_EARLY(cmd, early_us); 4287 4288 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4289 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4290 H2C_FUNC_TSF32_TOGL, 0, 0, 4291 H2C_TSF32_TOGL_LEN); 4292 4293 ret = rtw89_h2c_tx(rtwdev, skb, false); 4294 if (ret) { 4295 rtw89_err(rtwdev, "failed to send h2c\n"); 4296 goto fail; 4297 } 4298 4299 return 0; 4300 fail: 4301 dev_kfree_skb_any(skb); 4302 4303 return ret; 4304 } 4305 4306 #define H2C_OFLD_CFG_LEN 8 4307 int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev) 4308 { 4309 static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00}; 4310 struct sk_buff *skb; 4311 int ret; 4312 4313 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_OFLD_CFG_LEN); 4314 if (!skb) { 4315 rtw89_err(rtwdev, "failed to alloc skb for h2c ofld\n"); 4316 return -ENOMEM; 4317 } 4318 skb_put_data(skb, cfg, H2C_OFLD_CFG_LEN); 4319 4320 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4321 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4322 H2C_FUNC_OFLD_CFG, 0, 1, 4323 H2C_OFLD_CFG_LEN); 4324 4325 ret = rtw89_h2c_tx(rtwdev, skb, false); 4326 if (ret) { 4327 rtw89_err(rtwdev, "failed to send h2c\n"); 4328 goto fail; 4329 } 4330 4331 return 0; 4332 fail: 4333 dev_kfree_skb_any(skb); 4334 4335 return ret; 4336 } 4337 4338 int rtw89_fw_h2c_tx_duty(struct rtw89_dev *rtwdev, u8 lv) 4339 { 4340 struct rtw89_h2c_tx_duty *h2c; 4341 u32 len = sizeof(*h2c); 4342 struct sk_buff *skb; 4343 u16 pause, active; 4344 int ret; 4345 4346 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4347 if (!skb) { 4348 rtw89_err(rtwdev, "failed to alloc skb for h2c tx duty\n"); 4349 return -ENOMEM; 4350 } 4351 4352 skb_put(skb, len); 4353 h2c = (struct rtw89_h2c_tx_duty *)skb->data; 4354 4355 static_assert(RTW89_THERMAL_PROT_LV_MAX * RTW89_THERMAL_PROT_STEP < 100); 4356 4357 if (lv == 0 || lv > RTW89_THERMAL_PROT_LV_MAX) { 4358 h2c->w1 = le32_encode_bits(1, RTW89_H2C_TX_DUTY_W1_STOP); 4359 } else { 4360 active = 100 - lv * RTW89_THERMAL_PROT_STEP; 4361 pause = 100 - active; 4362 4363 h2c->w0 = le32_encode_bits(pause, RTW89_H2C_TX_DUTY_W0_PAUSE_INTVL_MASK) | 4364 le32_encode_bits(active, RTW89_H2C_TX_DUTY_W0_TX_INTVL_MASK); 4365 } 4366 4367 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4368 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4369 H2C_FUNC_TX_DUTY, 0, 0, len); 4370 4371 ret = rtw89_h2c_tx(rtwdev, skb, false); 4372 if (ret) { 4373 rtw89_err(rtwdev, "failed to send h2c\n"); 4374 goto fail; 4375 } 4376 4377 return 0; 4378 fail: 4379 dev_kfree_skb_any(skb); 4380 4381 return ret; 4382 } 4383 4384 int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev, 4385 struct rtw89_vif_link *rtwvif_link, 4386 bool connect) 4387 { 4388 struct ieee80211_bss_conf *bss_conf; 4389 s32 thold = RTW89_DEFAULT_CQM_THOLD; 4390 u32 hyst = RTW89_DEFAULT_CQM_HYST; 4391 struct rtw89_h2c_bcnfltr *h2c; 4392 u32 len = sizeof(*h2c); 4393 struct sk_buff *skb; 4394 u8 max_cnt, cnt; 4395 int ret; 4396 4397 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw)) 4398 return -EINVAL; 4399 4400 if (!rtwvif_link || rtwvif_link->net_type != RTW89_NET_TYPE_INFRA) 
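	/* Beacon filter / CQM offload is only meaningful on an associated
	 * station (infrastructure) link, so other vif types are rejected
	 * right here.
	 */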
4401 return -EINVAL; 4402 4403 rcu_read_lock(); 4404 4405 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, false); 4406 4407 if (bss_conf->cqm_rssi_hyst) 4408 hyst = bss_conf->cqm_rssi_hyst; 4409 if (bss_conf->cqm_rssi_thold) 4410 thold = bss_conf->cqm_rssi_thold; 4411 4412 rcu_read_unlock(); 4413 4414 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4415 if (!skb) { 4416 rtw89_err(rtwdev, "failed to alloc skb for h2c bcn filter\n"); 4417 return -ENOMEM; 4418 } 4419 4420 skb_put(skb, len); 4421 h2c = (struct rtw89_h2c_bcnfltr *)skb->data; 4422 4423 if (RTW89_CHK_FW_FEATURE(BEACON_LOSS_COUNT_V1, &rtwdev->fw)) 4424 max_cnt = BIT(7) - 1; 4425 else 4426 max_cnt = BIT(4) - 1; 4427 4428 cnt = min(RTW89_BCN_LOSS_CNT, max_cnt); 4429 4430 h2c->w0 = le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_RSSI) | 4431 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_BCN) | 4432 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_EN) | 4433 le32_encode_bits(RTW89_BCN_FLTR_OFFLOAD_MODE_DEFAULT, 4434 RTW89_H2C_BCNFLTR_W0_MODE) | 4435 le32_encode_bits(cnt >> 4, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT_H3) | 4436 le32_encode_bits(cnt & 0xf, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT_L4) | 4437 le32_encode_bits(hyst, RTW89_H2C_BCNFLTR_W0_RSSI_HYST) | 4438 le32_encode_bits(thold + MAX_RSSI, 4439 RTW89_H2C_BCNFLTR_W0_RSSI_THRESHOLD) | 4440 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCNFLTR_W0_MAC_ID); 4441 4442 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4443 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4444 H2C_FUNC_CFG_BCNFLTR, 0, 1, len); 4445 4446 ret = rtw89_h2c_tx(rtwdev, skb, false); 4447 if (ret) { 4448 rtw89_err(rtwdev, "failed to send h2c\n"); 4449 goto fail; 4450 } 4451 4452 return 0; 4453 fail: 4454 dev_kfree_skb_any(skb); 4455 4456 return ret; 4457 } 4458 4459 int rtw89_fw_h2c_rssi_offload(struct rtw89_dev *rtwdev, 4460 struct rtw89_rx_phy_ppdu *phy_ppdu) 4461 { 4462 struct rtw89_h2c_ofld_rssi *h2c; 4463 u32 len = sizeof(*h2c); 4464 struct sk_buff *skb; 4465 s8 rssi; 4466 int ret; 4467 4468 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw)) 4469 return -EINVAL; 4470 4471 if (!phy_ppdu) 4472 return -EINVAL; 4473 4474 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4475 if (!skb) { 4476 rtw89_err(rtwdev, "failed to alloc skb for h2c rssi\n"); 4477 return -ENOMEM; 4478 } 4479 4480 rssi = phy_ppdu->rssi_avg >> RSSI_FACTOR; 4481 skb_put(skb, len); 4482 h2c = (struct rtw89_h2c_ofld_rssi *)skb->data; 4483 4484 h2c->w0 = le32_encode_bits(phy_ppdu->mac_id, RTW89_H2C_OFLD_RSSI_W0_MACID) | 4485 le32_encode_bits(1, RTW89_H2C_OFLD_RSSI_W0_NUM); 4486 h2c->w1 = le32_encode_bits(rssi, RTW89_H2C_OFLD_RSSI_W1_VAL); 4487 4488 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4489 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4490 H2C_FUNC_OFLD_RSSI, 0, 1, len); 4491 4492 ret = rtw89_h2c_tx(rtwdev, skb, false); 4493 if (ret) { 4494 rtw89_err(rtwdev, "failed to send h2c\n"); 4495 goto fail; 4496 } 4497 4498 return 0; 4499 fail: 4500 dev_kfree_skb_any(skb); 4501 4502 return ret; 4503 } 4504 4505 int rtw89_fw_h2c_tp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link) 4506 { 4507 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 4508 struct rtw89_traffic_stats *stats = &rtwvif->stats; 4509 struct rtw89_h2c_ofld *h2c; 4510 u32 len = sizeof(*h2c); 4511 struct sk_buff *skb; 4512 int ret; 4513 4514 if (rtwvif_link->net_type != RTW89_NET_TYPE_INFRA) 4515 return -EINVAL; 4516 4517 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4518 if (!skb) { 4519 rtw89_err(rtwdev, "failed to alloc skb for h2c tp\n"); 4520 return 
-ENOMEM; 4521 } 4522 4523 skb_put(skb, len); 4524 h2c = (struct rtw89_h2c_ofld *)skb->data; 4525 4526 h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_OFLD_W0_MAC_ID) | 4527 le32_encode_bits(stats->tx_throughput, RTW89_H2C_OFLD_W0_TX_TP) | 4528 le32_encode_bits(stats->rx_throughput, RTW89_H2C_OFLD_W0_RX_TP); 4529 4530 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4531 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4532 H2C_FUNC_OFLD_TP, 0, 1, len); 4533 4534 ret = rtw89_h2c_tx(rtwdev, skb, false); 4535 if (ret) { 4536 rtw89_err(rtwdev, "failed to send h2c\n"); 4537 goto fail; 4538 } 4539 4540 return 0; 4541 fail: 4542 dev_kfree_skb_any(skb); 4543 4544 return ret; 4545 } 4546 4547 int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi) 4548 { 4549 const struct rtw89_chip_info *chip = rtwdev->chip; 4550 struct rtw89_h2c_ra_v1 *h2c_v1; 4551 struct rtw89_h2c_ra *h2c; 4552 u32 len = sizeof(*h2c); 4553 bool format_v1 = false; 4554 struct sk_buff *skb; 4555 int ret; 4556 4557 if (chip->chip_gen == RTW89_CHIP_BE) { 4558 len = sizeof(*h2c_v1); 4559 format_v1 = true; 4560 } 4561 4562 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4563 if (!skb) { 4564 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 4565 return -ENOMEM; 4566 } 4567 skb_put(skb, len); 4568 h2c = (struct rtw89_h2c_ra *)skb->data; 4569 rtw89_debug(rtwdev, RTW89_DBG_RA, 4570 "ra cmd msk: %llx ", ra->ra_mask); 4571 4572 h2c->w0 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_W0_MODE) | 4573 le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_W0_BW_CAP) | 4574 le32_encode_bits(ra->macid, RTW89_H2C_RA_W0_MACID) | 4575 le32_encode_bits(ra->dcm_cap, RTW89_H2C_RA_W0_DCM) | 4576 le32_encode_bits(ra->er_cap, RTW89_H2C_RA_W0_ER) | 4577 le32_encode_bits(ra->init_rate_lv, RTW89_H2C_RA_W0_INIT_RATE_LV) | 4578 le32_encode_bits(ra->upd_all, RTW89_H2C_RA_W0_UPD_ALL) | 4579 le32_encode_bits(ra->en_sgi, RTW89_H2C_RA_W0_SGI) | 4580 le32_encode_bits(ra->ldpc_cap, RTW89_H2C_RA_W0_LDPC) | 4581 le32_encode_bits(ra->stbc_cap, RTW89_H2C_RA_W0_STBC) | 4582 le32_encode_bits(ra->ss_num, RTW89_H2C_RA_W0_SS_NUM) | 4583 le32_encode_bits(ra->giltf, RTW89_H2C_RA_W0_GILTF) | 4584 le32_encode_bits(ra->upd_bw_nss_mask, RTW89_H2C_RA_W0_UPD_BW_NSS_MASK) | 4585 le32_encode_bits(ra->upd_mask, RTW89_H2C_RA_W0_UPD_MASK); 4586 h2c->w1 = le32_encode_bits(ra->ra_mask, RTW89_H2C_RA_W1_RAMASK_LO32); 4587 h2c->w2 = le32_encode_bits(ra->ra_mask >> 32, RTW89_H2C_RA_W2_RAMASK_HI32); 4588 h2c->w3 = le32_encode_bits(ra->fix_giltf_en, RTW89_H2C_RA_W3_FIX_GILTF_EN) | 4589 le32_encode_bits(ra->fix_giltf, RTW89_H2C_RA_W3_FIX_GILTF); 4590 4591 if (!format_v1) 4592 goto csi; 4593 4594 h2c_v1 = (struct rtw89_h2c_ra_v1 *)h2c; 4595 h2c_v1->w4 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_V1_W4_MODE_EHT) | 4596 le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_V1_W4_BW_EHT); 4597 4598 csi: 4599 if (!csi) 4600 goto done; 4601 4602 h2c->w2 |= le32_encode_bits(1, RTW89_H2C_RA_W2_BFEE_CSI_CTL); 4603 h2c->w3 |= le32_encode_bits(ra->band_num, RTW89_H2C_RA_W3_BAND_NUM) | 4604 le32_encode_bits(ra->cr_tbl_sel, RTW89_H2C_RA_W3_CR_TBL_SEL) | 4605 le32_encode_bits(ra->fixed_csi_rate_en, RTW89_H2C_RA_W3_FIXED_CSI_RATE_EN) | 4606 le32_encode_bits(ra->ra_csi_rate_en, RTW89_H2C_RA_W3_RA_CSI_RATE_EN) | 4607 le32_encode_bits(ra->csi_mcs_ss_idx, RTW89_H2C_RA_W3_FIXED_CSI_MCS_SS_IDX) | 4608 le32_encode_bits(ra->csi_mode, RTW89_H2C_RA_W3_FIXED_CSI_MODE) | 4609 le32_encode_bits(ra->csi_gi_ltf, RTW89_H2C_RA_W3_FIXED_CSI_GI_LTF) | 4610 le32_encode_bits(ra->csi_bw, RTW89_H2C_RA_W3_FIXED_CSI_BW); 4611 
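	/* The BFee CSI fixed-rate fields above are filled only when the
	 * caller requested CSI control (csi == true); both paths then fall
	 * through to the common header setup and transmit below.
	 */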
4612 done: 4613 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4614 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RA, 4615 H2C_FUNC_OUTSRC_RA_MACIDCFG, 0, 0, 4616 len); 4617 4618 ret = rtw89_h2c_tx(rtwdev, skb, false); 4619 if (ret) { 4620 rtw89_err(rtwdev, "failed to send h2c\n"); 4621 goto fail; 4622 } 4623 4624 return 0; 4625 fail: 4626 dev_kfree_skb_any(skb); 4627 4628 return ret; 4629 } 4630 4631 int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev, u8 type) 4632 { 4633 struct rtw89_btc *btc = &rtwdev->btc; 4634 struct rtw89_btc_dm *dm = &btc->dm; 4635 struct rtw89_btc_init_info *init_info = &dm->init_info.init; 4636 struct rtw89_btc_module *module = &init_info->module; 4637 struct rtw89_btc_ant_info *ant = &module->ant; 4638 struct rtw89_h2c_cxinit *h2c; 4639 u32 len = sizeof(*h2c); 4640 struct sk_buff *skb; 4641 int ret; 4642 4643 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4644 if (!skb) { 4645 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init\n"); 4646 return -ENOMEM; 4647 } 4648 skb_put(skb, len); 4649 h2c = (struct rtw89_h2c_cxinit *)skb->data; 4650 4651 h2c->hdr.type = type; 4652 h2c->hdr.len = len - H2C_LEN_CXDRVHDR; 4653 4654 h2c->ant_type = ant->type; 4655 h2c->ant_num = ant->num; 4656 h2c->ant_iso = ant->isolation; 4657 h2c->ant_info = 4658 u8_encode_bits(ant->single_pos, RTW89_H2C_CXINIT_ANT_INFO_POS) | 4659 u8_encode_bits(ant->diversity, RTW89_H2C_CXINIT_ANT_INFO_DIVERSITY) | 4660 u8_encode_bits(ant->btg_pos, RTW89_H2C_CXINIT_ANT_INFO_BTG_POS) | 4661 u8_encode_bits(ant->stream_cnt, RTW89_H2C_CXINIT_ANT_INFO_STREAM_CNT); 4662 4663 h2c->mod_rfe = module->rfe_type; 4664 h2c->mod_cv = module->cv; 4665 h2c->mod_info = 4666 u8_encode_bits(module->bt_solo, RTW89_H2C_CXINIT_MOD_INFO_BT_SOLO) | 4667 u8_encode_bits(module->bt_pos, RTW89_H2C_CXINIT_MOD_INFO_BT_POS) | 4668 u8_encode_bits(module->switch_type, RTW89_H2C_CXINIT_MOD_INFO_SW_TYPE) | 4669 u8_encode_bits(module->wa_type, RTW89_H2C_CXINIT_MOD_INFO_WA_TYPE); 4670 h2c->mod_adie_kt = module->kt_ver_adie; 4671 h2c->wl_gch = init_info->wl_guard_ch; 4672 4673 h2c->info = 4674 u8_encode_bits(init_info->wl_only, RTW89_H2C_CXINIT_INFO_WL_ONLY) | 4675 u8_encode_bits(init_info->wl_init_ok, RTW89_H2C_CXINIT_INFO_WL_INITOK) | 4676 u8_encode_bits(init_info->dbcc_en, RTW89_H2C_CXINIT_INFO_DBCC_EN) | 4677 u8_encode_bits(init_info->cx_other, RTW89_H2C_CXINIT_INFO_CX_OTHER) | 4678 u8_encode_bits(init_info->bt_only, RTW89_H2C_CXINIT_INFO_BT_ONLY); 4679 4680 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4681 H2C_CAT_OUTSRC, BTFC_SET, 4682 SET_DRV_INFO, 0, 0, 4683 len); 4684 4685 ret = rtw89_h2c_tx(rtwdev, skb, false); 4686 if (ret) { 4687 rtw89_err(rtwdev, "failed to send h2c\n"); 4688 goto fail; 4689 } 4690 4691 return 0; 4692 fail: 4693 dev_kfree_skb_any(skb); 4694 4695 return ret; 4696 } 4697 4698 int rtw89_fw_h2c_cxdrv_init_v7(struct rtw89_dev *rtwdev, u8 type) 4699 { 4700 struct rtw89_btc *btc = &rtwdev->btc; 4701 struct rtw89_btc_dm *dm = &btc->dm; 4702 struct rtw89_btc_init_info_v7 *init_info = &dm->init_info.init_v7; 4703 struct rtw89_h2c_cxinit_v7 *h2c; 4704 u32 len = sizeof(*h2c); 4705 struct sk_buff *skb; 4706 int ret; 4707 4708 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4709 if (!skb) { 4710 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init_v7\n"); 4711 return -ENOMEM; 4712 } 4713 skb_put(skb, len); 4714 h2c = (struct rtw89_h2c_cxinit_v7 *)skb->data; 4715 4716 h2c->hdr.type = type; 4717 h2c->hdr.ver = btc->ver->fcxinit; 4718 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7; 4719 h2c->init = *init_info; 4720 4721 
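	/* The v7 init info is copied into the command verbatim after the
	 * cxdrv header; hdr.len deliberately excludes the header itself
	 * (H2C_LEN_CXDRVHDR_V7), matching the other v7 cxdrv blobs.
	 */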
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4722 H2C_CAT_OUTSRC, BTFC_SET, 4723 SET_DRV_INFO, 0, 0, 4724 len); 4725 4726 ret = rtw89_h2c_tx(rtwdev, skb, false); 4727 if (ret) { 4728 rtw89_err(rtwdev, "failed to send h2c\n"); 4729 goto fail; 4730 } 4731 4732 return 0; 4733 fail: 4734 dev_kfree_skb_any(skb); 4735 4736 return ret; 4737 } 4738 4739 #define PORT_DATA_OFFSET 4 4740 #define H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN 12 4741 #define H2C_LEN_CXDRVINFO_ROLE_SIZE(max_role_num) \ 4742 (4 + 12 * (max_role_num) + H2C_LEN_CXDRVHDR) 4743 4744 int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev, u8 type) 4745 { 4746 struct rtw89_btc *btc = &rtwdev->btc; 4747 const struct rtw89_btc_ver *ver = btc->ver; 4748 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 4749 struct rtw89_btc_wl_role_info *role_info = &wl->role_info; 4750 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 4751 struct rtw89_btc_wl_active_role *active = role_info->active_role; 4752 struct sk_buff *skb; 4753 u32 len; 4754 u8 offset = 0; 4755 u8 *cmd; 4756 int ret; 4757 int i; 4758 4759 len = H2C_LEN_CXDRVINFO_ROLE_SIZE(ver->max_role_num); 4760 4761 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4762 if (!skb) { 4763 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 4764 return -ENOMEM; 4765 } 4766 skb_put(skb, len); 4767 cmd = skb->data; 4768 4769 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4770 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 4771 4772 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 4773 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 4774 4775 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 4776 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 4777 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 4778 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 4779 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 4780 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 4781 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 4782 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 4783 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 4784 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 4785 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 4786 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 4787 4788 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 4789 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset); 4790 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset); 4791 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset); 4792 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset); 4793 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset); 4794 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset); 4795 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset); 4796 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset); 4797 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset); 4798 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset); 4799 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset); 4800 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset); 4801 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset); 4802 } 4803 4804 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4805 H2C_CAT_OUTSRC, BTFC_SET, 4806 SET_DRV_INFO, 0, 0, 4807 len); 4808 4809 ret = rtw89_h2c_tx(rtwdev, skb, false); 4810 if (ret) { 4811 rtw89_err(rtwdev, "failed to send h2c\n"); 4812 goto fail; 4813 
} 4814 4815 return 0; 4816 fail: 4817 dev_kfree_skb_any(skb); 4818 4819 return ret; 4820 } 4821 4822 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(max_role_num) \ 4823 (4 + 16 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR) 4824 4825 int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev, u8 type) 4826 { 4827 struct rtw89_btc *btc = &rtwdev->btc; 4828 const struct rtw89_btc_ver *ver = btc->ver; 4829 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 4830 struct rtw89_btc_wl_role_info_v1 *role_info = &wl->role_info_v1; 4831 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 4832 struct rtw89_btc_wl_active_role_v1 *active = role_info->active_role_v1; 4833 struct sk_buff *skb; 4834 u32 len; 4835 u8 *cmd, offset; 4836 int ret; 4837 int i; 4838 4839 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(ver->max_role_num); 4840 4841 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4842 if (!skb) { 4843 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 4844 return -ENOMEM; 4845 } 4846 skb_put(skb, len); 4847 cmd = skb->data; 4848 4849 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4850 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 4851 4852 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 4853 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 4854 4855 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 4856 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 4857 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 4858 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 4859 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 4860 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 4861 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 4862 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 4863 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 4864 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 4865 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 4866 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 4867 4868 offset = PORT_DATA_OFFSET; 4869 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 4870 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset); 4871 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset); 4872 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset); 4873 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset); 4874 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset); 4875 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset); 4876 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset); 4877 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset); 4878 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset); 4879 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset); 4880 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset); 4881 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset); 4882 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset); 4883 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR(cmd, active->noa_duration, i, offset); 4884 } 4885 4886 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN; 4887 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset); 4888 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset); 4889 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset); 4890 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset); 4891 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset); 4892 
RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset); 4893 4894 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4895 H2C_CAT_OUTSRC, BTFC_SET, 4896 SET_DRV_INFO, 0, 0, 4897 len); 4898 4899 ret = rtw89_h2c_tx(rtwdev, skb, false); 4900 if (ret) { 4901 rtw89_err(rtwdev, "failed to send h2c\n"); 4902 goto fail; 4903 } 4904 4905 return 0; 4906 fail: 4907 dev_kfree_skb_any(skb); 4908 4909 return ret; 4910 } 4911 4912 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(max_role_num) \ 4913 (4 + 8 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR) 4914 4915 int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev, u8 type) 4916 { 4917 struct rtw89_btc *btc = &rtwdev->btc; 4918 const struct rtw89_btc_ver *ver = btc->ver; 4919 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 4920 struct rtw89_btc_wl_role_info_v2 *role_info = &wl->role_info_v2; 4921 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 4922 struct rtw89_btc_wl_active_role_v2 *active = role_info->active_role_v2; 4923 struct sk_buff *skb; 4924 u32 len; 4925 u8 *cmd, offset; 4926 int ret; 4927 int i; 4928 4929 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(ver->max_role_num); 4930 4931 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4932 if (!skb) { 4933 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 4934 return -ENOMEM; 4935 } 4936 skb_put(skb, len); 4937 cmd = skb->data; 4938 4939 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4940 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 4941 4942 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 4943 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 4944 4945 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 4946 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 4947 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 4948 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 4949 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 4950 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 4951 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 4952 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 4953 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 4954 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 4955 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 4956 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 4957 4958 offset = PORT_DATA_OFFSET; 4959 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 4960 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED_V2(cmd, active->connected, i, offset); 4961 RTW89_SET_FWCMD_CXROLE_ACT_PID_V2(cmd, active->pid, i, offset); 4962 RTW89_SET_FWCMD_CXROLE_ACT_PHY_V2(cmd, active->phy, i, offset); 4963 RTW89_SET_FWCMD_CXROLE_ACT_NOA_V2(cmd, active->noa, i, offset); 4964 RTW89_SET_FWCMD_CXROLE_ACT_BAND_V2(cmd, active->band, i, offset); 4965 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS_V2(cmd, active->client_ps, i, offset); 4966 RTW89_SET_FWCMD_CXROLE_ACT_BW_V2(cmd, active->bw, i, offset); 4967 RTW89_SET_FWCMD_CXROLE_ACT_ROLE_V2(cmd, active->role, i, offset); 4968 RTW89_SET_FWCMD_CXROLE_ACT_CH_V2(cmd, active->ch, i, offset); 4969 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR_V2(cmd, active->noa_duration, i, offset); 4970 } 4971 4972 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN; 4973 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset); 4974 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset); 4975 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset); 4976 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset); 4977 
RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset); 4978 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset); 4979 4980 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4981 H2C_CAT_OUTSRC, BTFC_SET, 4982 SET_DRV_INFO, 0, 0, 4983 len); 4984 4985 ret = rtw89_h2c_tx(rtwdev, skb, false); 4986 if (ret) { 4987 rtw89_err(rtwdev, "failed to send h2c\n"); 4988 goto fail; 4989 } 4990 4991 return 0; 4992 fail: 4993 dev_kfree_skb_any(skb); 4994 4995 return ret; 4996 } 4997 4998 int rtw89_fw_h2c_cxdrv_role_v7(struct rtw89_dev *rtwdev, u8 type) 4999 { 5000 struct rtw89_btc *btc = &rtwdev->btc; 5001 struct rtw89_btc_wl_role_info_v7 *role = &btc->cx.wl.role_info_v7; 5002 struct rtw89_h2c_cxrole_v7 *h2c; 5003 u32 len = sizeof(*h2c); 5004 struct sk_buff *skb; 5005 int ret; 5006 5007 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5008 if (!skb) { 5009 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 5010 return -ENOMEM; 5011 } 5012 skb_put(skb, len); 5013 h2c = (struct rtw89_h2c_cxrole_v7 *)skb->data; 5014 5015 h2c->hdr.type = type; 5016 h2c->hdr.ver = btc->ver->fwlrole; 5017 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7; 5018 memcpy(&h2c->_u8, role, sizeof(h2c->_u8)); 5019 h2c->_u32.role_map = cpu_to_le32(role->role_map); 5020 h2c->_u32.mrole_type = cpu_to_le32(role->mrole_type); 5021 h2c->_u32.mrole_noa_duration = cpu_to_le32(role->mrole_noa_duration); 5022 h2c->_u32.dbcc_en = cpu_to_le32(role->dbcc_en); 5023 h2c->_u32.dbcc_chg = cpu_to_le32(role->dbcc_chg); 5024 h2c->_u32.dbcc_2g_phy = cpu_to_le32(role->dbcc_2g_phy); 5025 5026 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5027 H2C_CAT_OUTSRC, BTFC_SET, 5028 SET_DRV_INFO, 0, 0, 5029 len); 5030 5031 ret = rtw89_h2c_tx(rtwdev, skb, false); 5032 if (ret) { 5033 rtw89_err(rtwdev, "failed to send h2c\n"); 5034 goto fail; 5035 } 5036 5037 return 0; 5038 fail: 5039 dev_kfree_skb_any(skb); 5040 5041 return ret; 5042 } 5043 5044 int rtw89_fw_h2c_cxdrv_role_v8(struct rtw89_dev *rtwdev, u8 type) 5045 { 5046 struct rtw89_btc *btc = &rtwdev->btc; 5047 struct rtw89_btc_wl_role_info_v8 *role = &btc->cx.wl.role_info_v8; 5048 struct rtw89_h2c_cxrole_v8 *h2c; 5049 u32 len = sizeof(*h2c); 5050 struct sk_buff *skb; 5051 int ret; 5052 5053 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5054 if (!skb) { 5055 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 5056 return -ENOMEM; 5057 } 5058 skb_put(skb, len); 5059 h2c = (struct rtw89_h2c_cxrole_v8 *)skb->data; 5060 5061 h2c->hdr.type = type; 5062 h2c->hdr.ver = btc->ver->fwlrole; 5063 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7; 5064 memcpy(&h2c->_u8, role, sizeof(h2c->_u8)); 5065 h2c->_u32.role_map = cpu_to_le32(role->role_map); 5066 h2c->_u32.mrole_type = cpu_to_le32(role->mrole_type); 5067 h2c->_u32.mrole_noa_duration = cpu_to_le32(role->mrole_noa_duration); 5068 5069 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5070 H2C_CAT_OUTSRC, BTFC_SET, 5071 SET_DRV_INFO, 0, 0, 5072 len); 5073 5074 ret = rtw89_h2c_tx(rtwdev, skb, false); 5075 if (ret) { 5076 rtw89_err(rtwdev, "failed to send h2c\n"); 5077 goto fail; 5078 } 5079 5080 return 0; 5081 fail: 5082 dev_kfree_skb_any(skb); 5083 5084 return ret; 5085 } 5086 5087 int rtw89_fw_h2c_cxdrv_osi_info(struct rtw89_dev *rtwdev, u8 type) 5088 { 5089 struct rtw89_btc *btc = &rtwdev->btc; 5090 struct rtw89_btc_fbtc_outsrc_set_info *osi = &btc->dm.ost_info; 5091 struct rtw89_h2c_cxosi *h2c; 5092 u32 len = sizeof(*h2c); 5093 struct sk_buff *skb; 5094 int ret; 5095 5096 skb = 
rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5097 if (!skb) { 5098 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_osi\n"); 5099 return -ENOMEM; 5100 } 5101 skb_put(skb, len); 5102 h2c = (struct rtw89_h2c_cxosi *)skb->data; 5103 5104 h2c->hdr.type = type; 5105 h2c->hdr.ver = btc->ver->fcxosi; 5106 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7; 5107 h2c->osi = *osi; 5108 5109 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5110 H2C_CAT_OUTSRC, BTFC_SET, 5111 SET_DRV_INFO, 0, 0, 5112 len); 5113 5114 ret = rtw89_h2c_tx(rtwdev, skb, false); 5115 if (ret) { 5116 rtw89_err(rtwdev, "failed to send h2c\n"); 5117 goto fail; 5118 } 5119 5120 return 0; 5121 fail: 5122 dev_kfree_skb_any(skb); 5123 5124 return ret; 5125 } 5126 5127 #define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR) 5128 int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev, u8 type) 5129 { 5130 struct rtw89_btc *btc = &rtwdev->btc; 5131 const struct rtw89_btc_ver *ver = btc->ver; 5132 struct rtw89_btc_ctrl *ctrl = &btc->ctrl.ctrl; 5133 struct sk_buff *skb; 5134 u8 *cmd; 5135 int ret; 5136 5137 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_CTRL); 5138 if (!skb) { 5139 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 5140 return -ENOMEM; 5141 } 5142 skb_put(skb, H2C_LEN_CXDRVINFO_CTRL); 5143 cmd = skb->data; 5144 5145 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 5146 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_CTRL - H2C_LEN_CXDRVHDR); 5147 5148 RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual); 5149 RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, ctrl->igno_bt); 5150 RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, ctrl->always_freerun); 5151 if (ver->fcxctrl == 0) 5152 RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step); 5153 5154 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5155 H2C_CAT_OUTSRC, BTFC_SET, 5156 SET_DRV_INFO, 0, 0, 5157 H2C_LEN_CXDRVINFO_CTRL); 5158 5159 ret = rtw89_h2c_tx(rtwdev, skb, false); 5160 if (ret) { 5161 rtw89_err(rtwdev, "failed to send h2c\n"); 5162 goto fail; 5163 } 5164 5165 return 0; 5166 fail: 5167 dev_kfree_skb_any(skb); 5168 5169 return ret; 5170 } 5171 5172 int rtw89_fw_h2c_cxdrv_ctrl_v7(struct rtw89_dev *rtwdev, u8 type) 5173 { 5174 struct rtw89_btc *btc = &rtwdev->btc; 5175 struct rtw89_btc_ctrl_v7 *ctrl = &btc->ctrl.ctrl_v7; 5176 struct rtw89_h2c_cxctrl_v7 *h2c; 5177 u32 len = sizeof(*h2c); 5178 struct sk_buff *skb; 5179 int ret; 5180 5181 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5182 if (!skb) { 5183 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl_v7\n"); 5184 return -ENOMEM; 5185 } 5186 skb_put(skb, len); 5187 h2c = (struct rtw89_h2c_cxctrl_v7 *)skb->data; 5188 5189 h2c->hdr.type = type; 5190 h2c->hdr.ver = btc->ver->fcxctrl; 5191 h2c->hdr.len = sizeof(*h2c) - H2C_LEN_CXDRVHDR_V7; 5192 h2c->ctrl = *ctrl; 5193 5194 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5195 H2C_CAT_OUTSRC, BTFC_SET, 5196 SET_DRV_INFO, 0, 0, len); 5197 5198 ret = rtw89_h2c_tx(rtwdev, skb, false); 5199 if (ret) { 5200 rtw89_err(rtwdev, "failed to send h2c\n"); 5201 goto fail; 5202 } 5203 5204 return 0; 5205 fail: 5206 dev_kfree_skb_any(skb); 5207 5208 return ret; 5209 } 5210 5211 #define H2C_LEN_CXDRVINFO_TRX (28 + H2C_LEN_CXDRVHDR) 5212 int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev, u8 type) 5213 { 5214 struct rtw89_btc *btc = &rtwdev->btc; 5215 struct rtw89_btc_trx_info *trx = &btc->dm.trx_info; 5216 struct sk_buff *skb; 5217 u8 *cmd; 5218 int ret; 5219 5220 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_TRX); 5221 if (!skb) { 5222 
rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_trx\n"); 5223 return -ENOMEM; 5224 } 5225 skb_put(skb, H2C_LEN_CXDRVINFO_TRX); 5226 cmd = skb->data; 5227 5228 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 5229 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_TRX - H2C_LEN_CXDRVHDR); 5230 5231 RTW89_SET_FWCMD_CXTRX_TXLV(cmd, trx->tx_lvl); 5232 RTW89_SET_FWCMD_CXTRX_RXLV(cmd, trx->rx_lvl); 5233 RTW89_SET_FWCMD_CXTRX_WLRSSI(cmd, trx->wl_rssi); 5234 RTW89_SET_FWCMD_CXTRX_BTRSSI(cmd, trx->bt_rssi); 5235 RTW89_SET_FWCMD_CXTRX_TXPWR(cmd, trx->tx_power); 5236 RTW89_SET_FWCMD_CXTRX_RXGAIN(cmd, trx->rx_gain); 5237 RTW89_SET_FWCMD_CXTRX_BTTXPWR(cmd, trx->bt_tx_power); 5238 RTW89_SET_FWCMD_CXTRX_BTRXGAIN(cmd, trx->bt_rx_gain); 5239 RTW89_SET_FWCMD_CXTRX_CN(cmd, trx->cn); 5240 RTW89_SET_FWCMD_CXTRX_NHM(cmd, trx->nhm); 5241 RTW89_SET_FWCMD_CXTRX_BTPROFILE(cmd, trx->bt_profile); 5242 RTW89_SET_FWCMD_CXTRX_RSVD2(cmd, trx->rsvd2); 5243 RTW89_SET_FWCMD_CXTRX_TXRATE(cmd, trx->tx_rate); 5244 RTW89_SET_FWCMD_CXTRX_RXRATE(cmd, trx->rx_rate); 5245 RTW89_SET_FWCMD_CXTRX_TXTP(cmd, trx->tx_tp); 5246 RTW89_SET_FWCMD_CXTRX_RXTP(cmd, trx->rx_tp); 5247 RTW89_SET_FWCMD_CXTRX_RXERRRA(cmd, trx->rx_err_ratio); 5248 5249 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5250 H2C_CAT_OUTSRC, BTFC_SET, 5251 SET_DRV_INFO, 0, 0, 5252 H2C_LEN_CXDRVINFO_TRX); 5253 5254 ret = rtw89_h2c_tx(rtwdev, skb, false); 5255 if (ret) { 5256 rtw89_err(rtwdev, "failed to send h2c\n"); 5257 goto fail; 5258 } 5259 5260 return 0; 5261 fail: 5262 dev_kfree_skb_any(skb); 5263 5264 return ret; 5265 } 5266 5267 #define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR) 5268 int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev, u8 type) 5269 { 5270 struct rtw89_btc *btc = &rtwdev->btc; 5271 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 5272 struct rtw89_btc_wl_rfk_info *rfk_info = &wl->rfk_info; 5273 struct sk_buff *skb; 5274 u8 *cmd; 5275 int ret; 5276 5277 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_RFK); 5278 if (!skb) { 5279 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 5280 return -ENOMEM; 5281 } 5282 skb_put(skb, H2C_LEN_CXDRVINFO_RFK); 5283 cmd = skb->data; 5284 5285 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 5286 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_RFK - H2C_LEN_CXDRVHDR); 5287 5288 RTW89_SET_FWCMD_CXRFK_STATE(cmd, rfk_info->state); 5289 RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, rfk_info->path_map); 5290 RTW89_SET_FWCMD_CXRFK_PHY_MAP(cmd, rfk_info->phy_map); 5291 RTW89_SET_FWCMD_CXRFK_BAND(cmd, rfk_info->band); 5292 RTW89_SET_FWCMD_CXRFK_TYPE(cmd, rfk_info->type); 5293 5294 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5295 H2C_CAT_OUTSRC, BTFC_SET, 5296 SET_DRV_INFO, 0, 0, 5297 H2C_LEN_CXDRVINFO_RFK); 5298 5299 ret = rtw89_h2c_tx(rtwdev, skb, false); 5300 if (ret) { 5301 rtw89_err(rtwdev, "failed to send h2c\n"); 5302 goto fail; 5303 } 5304 5305 return 0; 5306 fail: 5307 dev_kfree_skb_any(skb); 5308 5309 return ret; 5310 } 5311 5312 #define H2C_LEN_PKT_OFLD 4 5313 int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id) 5314 { 5315 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 5316 struct sk_buff *skb; 5317 unsigned int cond; 5318 u8 *cmd; 5319 int ret; 5320 5321 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD); 5322 if (!skb) { 5323 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 5324 return -ENOMEM; 5325 } 5326 skb_put(skb, H2C_LEN_PKT_OFLD); 5327 cmd = skb->data; 5328 5329 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, id); 5330 
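	/* OP_DEL asks the firmware to release this offloaded packet slot;
	 * the completion is matched on an (id, op) wait condition below
	 * before the driver frees the id from its local bitmap.
	 */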
RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_DEL); 5331 5332 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5333 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 5334 H2C_FUNC_PACKET_OFLD, 1, 1, 5335 H2C_LEN_PKT_OFLD); 5336 5337 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(id, RTW89_PKT_OFLD_OP_DEL); 5338 5339 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 5340 if (ret < 0) { 5341 rtw89_debug(rtwdev, RTW89_DBG_FW, 5342 "failed to del pkt ofld: id %d, ret %d\n", 5343 id, ret); 5344 return ret; 5345 } 5346 5347 rtw89_core_release_bit_map(rtwdev->pkt_offload, id); 5348 return 0; 5349 } 5350 5351 int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id, 5352 struct sk_buff *skb_ofld) 5353 { 5354 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 5355 struct sk_buff *skb; 5356 unsigned int cond; 5357 u8 *cmd; 5358 u8 alloc_id; 5359 int ret; 5360 5361 alloc_id = rtw89_core_acquire_bit_map(rtwdev->pkt_offload, 5362 RTW89_MAX_PKT_OFLD_NUM); 5363 if (alloc_id == RTW89_MAX_PKT_OFLD_NUM) 5364 return -ENOSPC; 5365 5366 *id = alloc_id; 5367 5368 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD + skb_ofld->len); 5369 if (!skb) { 5370 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 5371 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id); 5372 return -ENOMEM; 5373 } 5374 skb_put(skb, H2C_LEN_PKT_OFLD); 5375 cmd = skb->data; 5376 5377 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, alloc_id); 5378 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_ADD); 5379 RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(cmd, skb_ofld->len); 5380 skb_put_data(skb, skb_ofld->data, skb_ofld->len); 5381 5382 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5383 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 5384 H2C_FUNC_PACKET_OFLD, 1, 1, 5385 H2C_LEN_PKT_OFLD + skb_ofld->len); 5386 5387 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(alloc_id, RTW89_PKT_OFLD_OP_ADD); 5388 5389 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 5390 if (ret < 0) { 5391 rtw89_debug(rtwdev, RTW89_DBG_FW, 5392 "failed to add pkt ofld: id %d, ret %d\n", 5393 alloc_id, ret); 5394 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id); 5395 return ret; 5396 } 5397 5398 return 0; 5399 } 5400 5401 static 5402 int rtw89_fw_h2c_scan_list_offload_ax(struct rtw89_dev *rtwdev, int ch_num, 5403 struct list_head *chan_list) 5404 { 5405 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 5406 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 5407 struct rtw89_h2c_chinfo_elem *elem; 5408 struct rtw89_mac_chinfo_ax *ch_info; 5409 struct rtw89_h2c_chinfo *h2c; 5410 struct sk_buff *skb; 5411 unsigned int cond; 5412 int skb_len; 5413 int ret; 5414 5415 static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE); 5416 5417 skb_len = struct_size(h2c, elem, ch_num); 5418 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len); 5419 if (!skb) { 5420 rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n"); 5421 return -ENOMEM; 5422 } 5423 skb_put(skb, sizeof(*h2c)); 5424 h2c = (struct rtw89_h2c_chinfo *)skb->data; 5425 5426 h2c->ch_num = ch_num; 5427 h2c->elem_size = sizeof(*elem) / 4; /* in unit of 4 bytes */ 5428 5429 list_for_each_entry(ch_info, chan_list, list) { 5430 elem = (struct rtw89_h2c_chinfo_elem *)skb_put(skb, sizeof(*elem)); 5431 5432 elem->w0 = le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_W0_PERIOD) | 5433 le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_W0_DWELL) | 5434 le32_encode_bits(ch_info->central_ch, RTW89_H2C_CHINFO_W0_CENTER_CH) | 5435 le32_encode_bits(ch_info->pri_ch, 
RTW89_H2C_CHINFO_W0_PRI_CH); 5436 5437 elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_W1_BW) | 5438 le32_encode_bits(ch_info->notify_action, RTW89_H2C_CHINFO_W1_ACTION) | 5439 le32_encode_bits(ch_info->num_pkt, RTW89_H2C_CHINFO_W1_NUM_PKT) | 5440 le32_encode_bits(ch_info->tx_pkt, RTW89_H2C_CHINFO_W1_TX) | 5441 le32_encode_bits(ch_info->pause_data, RTW89_H2C_CHINFO_W1_PAUSE_DATA) | 5442 le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_W1_BAND) | 5443 le32_encode_bits(ch_info->probe_id, RTW89_H2C_CHINFO_W1_PKT_ID) | 5444 le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_W1_DFS) | 5445 le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_W1_TX_NULL) | 5446 le32_encode_bits(ch_info->rand_seq_num, RTW89_H2C_CHINFO_W1_RANDOM); 5447 5448 if (scan_info->extra_op.set) 5449 elem->w1 |= le32_encode_bits(ch_info->macid_tx, 5450 RTW89_H2C_CHINFO_W1_MACID_TX); 5451 5452 elem->w2 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_W2_PKT0) | 5453 le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_W2_PKT1) | 5454 le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_W2_PKT2) | 5455 le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_W2_PKT3); 5456 5457 elem->w3 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_W3_PKT4) | 5458 le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_W3_PKT5) | 5459 le32_encode_bits(ch_info->pkt_id[6], RTW89_H2C_CHINFO_W3_PKT6) | 5460 le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_W3_PKT7); 5461 } 5462 5463 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5464 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 5465 H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len); 5466 5467 cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH; 5468 5469 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 5470 if (ret) { 5471 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n"); 5472 return ret; 5473 } 5474 5475 return 0; 5476 } 5477 5478 static 5479 int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num, 5480 struct list_head *chan_list, 5481 struct rtw89_vif_link *rtwvif_link) 5482 { 5483 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 5484 struct rtw89_h2c_chinfo_elem_be *elem; 5485 struct rtw89_mac_chinfo_be *ch_info; 5486 struct rtw89_h2c_chinfo_be *h2c; 5487 struct sk_buff *skb; 5488 unsigned int cond; 5489 u8 ver = U8_MAX; 5490 int skb_len; 5491 int ret; 5492 5493 static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE_BE); 5494 5495 skb_len = struct_size(h2c, elem, ch_num); 5496 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len); 5497 if (!skb) { 5498 rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n"); 5499 return -ENOMEM; 5500 } 5501 5502 if (RTW89_CHK_FW_FEATURE(CH_INFO_BE_V0, &rtwdev->fw)) 5503 ver = 0; 5504 5505 skb_put(skb, sizeof(*h2c)); 5506 h2c = (struct rtw89_h2c_chinfo_be *)skb->data; 5507 5508 h2c->ch_num = ch_num; 5509 h2c->elem_size = sizeof(*elem) / 4; /* in unit of 4 bytes */ 5510 h2c->arg = u8_encode_bits(rtwvif_link->mac_idx, 5511 RTW89_H2C_CHINFO_ARG_MAC_IDX_MASK); 5512 5513 list_for_each_entry(ch_info, chan_list, list) { 5514 elem = (struct rtw89_h2c_chinfo_elem_be *)skb_put(skb, sizeof(*elem)); 5515 5516 elem->w0 = le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_BE_W0_DWELL) | 5517 le32_encode_bits(ch_info->central_ch, 5518 RTW89_H2C_CHINFO_BE_W0_CENTER_CH) | 5519 le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_BE_W0_PRI_CH); 5520 5521 elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_BE_W1_BW) | 5522 le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_BE_W1_CH_BAND) | 5523 
le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_BE_W1_DFS) | 5524 le32_encode_bits(ch_info->pause_data, 5525 RTW89_H2C_CHINFO_BE_W1_PAUSE_DATA) | 5526 le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_BE_W1_TX_NULL) | 5527 le32_encode_bits(ch_info->rand_seq_num, 5528 RTW89_H2C_CHINFO_BE_W1_RANDOM) | 5529 le32_encode_bits(ch_info->notify_action, 5530 RTW89_H2C_CHINFO_BE_W1_NOTIFY) | 5531 le32_encode_bits(ch_info->probe_id != 0xff ? 1 : 0, 5532 RTW89_H2C_CHINFO_BE_W1_PROBE) | 5533 le32_encode_bits(ch_info->leave_crit, 5534 RTW89_H2C_CHINFO_BE_W1_EARLY_LEAVE_CRIT) | 5535 le32_encode_bits(ch_info->chkpt_timer, 5536 RTW89_H2C_CHINFO_BE_W1_CHKPT_TIMER); 5537 5538 elem->w2 = le32_encode_bits(ch_info->leave_time, 5539 RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TIME) | 5540 le32_encode_bits(ch_info->leave_th, 5541 RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TH) | 5542 le32_encode_bits(ch_info->tx_pkt_ctrl, 5543 RTW89_H2C_CHINFO_BE_W2_TX_PKT_CTRL); 5544 5545 elem->w3 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_BE_W3_PKT0) | 5546 le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_BE_W3_PKT1) | 5547 le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_BE_W3_PKT2) | 5548 le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_BE_W3_PKT3); 5549 5550 elem->w4 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_BE_W4_PKT4) | 5551 le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_BE_W4_PKT5) | 5552 le32_encode_bits(ch_info->pkt_id[6], RTW89_H2C_CHINFO_BE_W4_PKT6) | 5553 le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_BE_W4_PKT7); 5554 5555 elem->w5 = le32_encode_bits(ch_info->sw_def, RTW89_H2C_CHINFO_BE_W5_SW_DEF) | 5556 le32_encode_bits(ch_info->fw_probe0_ssids, 5557 RTW89_H2C_CHINFO_BE_W5_FW_PROBE0_SSIDS); 5558 5559 elem->w6 = le32_encode_bits(ch_info->fw_probe0_shortssids, 5560 RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_SHORTSSIDS) | 5561 le32_encode_bits(ch_info->fw_probe0_bssids, 5562 RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_BSSIDS); 5563 if (ver == 0) 5564 elem->w0 |= 5565 le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_BE_W0_PERIOD); 5566 else 5567 elem->w7 = le32_encode_bits(ch_info->period, 5568 RTW89_H2C_CHINFO_BE_W7_PERIOD_V1); 5569 } 5570 5571 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5572 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 5573 H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len); 5574 5575 cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH; 5576 5577 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 5578 if (ret) { 5579 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n"); 5580 return ret; 5581 } 5582 5583 return 0; 5584 } 5585 5586 #define RTW89_SCAN_DELAY_TSF_UNIT 1000000 5587 int rtw89_fw_h2c_scan_offload_ax(struct rtw89_dev *rtwdev, 5588 struct rtw89_scan_option *option, 5589 struct rtw89_vif_link *rtwvif_link, 5590 bool wowlan) 5591 { 5592 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 5593 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 5594 struct rtw89_chan *op = &rtwdev->scan_info.op_chan; 5595 enum rtw89_scan_mode scan_mode = RTW89_SCAN_IMMEDIATE; 5596 struct rtw89_h2c_scanofld *h2c; 5597 u32 len = sizeof(*h2c); 5598 struct sk_buff *skb; 5599 unsigned int cond; 5600 u64 tsf = 0; 5601 int ret; 5602 5603 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5604 if (!skb) { 5605 rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n"); 5606 return -ENOMEM; 5607 } 5608 skb_put(skb, len); 5609 h2c = (struct rtw89_h2c_scanofld *)skb->data; 5610 5611 if (option->delay) { 5612 ret = rtw89_mac_port_get_tsf(rtwdev, rtwvif_link, &tsf); 5613 if (ret) { 5614 
rtw89_warn(rtwdev, "NLO failed to get port tsf: %d\n", ret); 5615 scan_mode = RTW89_SCAN_IMMEDIATE; 5616 } else { 5617 scan_mode = RTW89_SCAN_DELAY; 5618 tsf += (u64)option->delay * RTW89_SCAN_DELAY_TSF_UNIT; 5619 } 5620 } 5621 5622 h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_SCANOFLD_W0_MACID) | 5623 le32_encode_bits(rtwvif_link->port, RTW89_H2C_SCANOFLD_W0_PORT_ID) | 5624 le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_SCANOFLD_W0_BAND) | 5625 le32_encode_bits(option->enable, RTW89_H2C_SCANOFLD_W0_OPERATION); 5626 5627 h2c->w1 = le32_encode_bits(true, RTW89_H2C_SCANOFLD_W1_NOTIFY_END) | 5628 le32_encode_bits(option->target_ch_mode, 5629 RTW89_H2C_SCANOFLD_W1_TARGET_CH_MODE) | 5630 le32_encode_bits(scan_mode, RTW89_H2C_SCANOFLD_W1_START_MODE) | 5631 le32_encode_bits(option->repeat, RTW89_H2C_SCANOFLD_W1_SCAN_TYPE); 5632 5633 h2c->w2 = le32_encode_bits(option->norm_pd, RTW89_H2C_SCANOFLD_W2_NORM_PD) | 5634 le32_encode_bits(option->slow_pd, RTW89_H2C_SCANOFLD_W2_SLOW_PD); 5635 5636 if (option->target_ch_mode) { 5637 h2c->w1 |= le32_encode_bits(op->band_width, 5638 RTW89_H2C_SCANOFLD_W1_TARGET_CH_BW) | 5639 le32_encode_bits(op->primary_channel, 5640 RTW89_H2C_SCANOFLD_W1_TARGET_PRI_CH) | 5641 le32_encode_bits(op->channel, 5642 RTW89_H2C_SCANOFLD_W1_TARGET_CENTRAL_CH); 5643 h2c->w0 |= le32_encode_bits(op->band_type, 5644 RTW89_H2C_SCANOFLD_W0_TARGET_CH_BAND); 5645 } 5646 5647 h2c->tsf_high = le32_encode_bits(upper_32_bits(tsf), 5648 RTW89_H2C_SCANOFLD_W3_TSF_HIGH); 5649 h2c->tsf_low = le32_encode_bits(lower_32_bits(tsf), 5650 RTW89_H2C_SCANOFLD_W4_TSF_LOW); 5651 5652 if (scan_info->extra_op.set) 5653 h2c->w6 = le32_encode_bits(scan_info->extra_op.macid, 5654 RTW89_H2C_SCANOFLD_W6_SECOND_MACID); 5655 5656 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5657 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 5658 H2C_FUNC_SCANOFLD, 1, 1, 5659 len); 5660 5661 if (option->enable) 5662 cond = RTW89_SCANOFLD_WAIT_COND_START; 5663 else 5664 cond = RTW89_SCANOFLD_WAIT_COND_STOP; 5665 5666 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 5667 if (ret) { 5668 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan ofld\n"); 5669 return ret; 5670 } 5671 5672 return 0; 5673 } 5674 5675 static void rtw89_scan_get_6g_disabled_chan(struct rtw89_dev *rtwdev, 5676 struct rtw89_scan_option *option) 5677 { 5678 struct ieee80211_supported_band *sband; 5679 struct ieee80211_channel *chan; 5680 u8 i, idx; 5681 5682 sband = rtwdev->hw->wiphy->bands[NL80211_BAND_6GHZ]; 5683 if (!sband) { 5684 option->prohib_chan = U64_MAX; 5685 return; 5686 } 5687 5688 for (i = 0; i < sband->n_channels; i++) { 5689 chan = &sband->channels[i]; 5690 if (chan->flags & IEEE80211_CHAN_DISABLED) { 5691 idx = (chan->hw_value - 1) / 4; 5692 option->prohib_chan |= BIT(idx); 5693 } 5694 } 5695 } 5696 5697 int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev, 5698 struct rtw89_scan_option *option, 5699 struct rtw89_vif_link *rtwvif_link, 5700 bool wowlan) 5701 { 5702 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 5703 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 5704 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 5705 struct cfg80211_scan_request *req = rtwvif->scan_req; 5706 struct rtw89_h2c_scanofld_be_macc_role *macc_role; 5707 struct rtw89_chan *op = &scan_info->op_chan; 5708 struct rtw89_h2c_scanofld_be_opch *opch; 5709 struct rtw89_pktofld_info *pkt_info; 5710 struct rtw89_h2c_scanofld_be *h2c; 5711 struct sk_buff *skb; 5712 u8 macc_role_size = sizeof(*macc_role) * option->num_macc_role; 5713 u8 opch_size = 
sizeof(*opch) * option->num_opch; 5714 u8 probe_id[NUM_NL80211_BANDS]; 5715 u8 scan_offload_ver = U8_MAX; 5716 u8 cfg_len = sizeof(*h2c); 5717 unsigned int cond; 5718 u8 ver = U8_MAX; 5719 void *ptr; 5720 int ret; 5721 u32 len; 5722 u8 i; 5723 5724 rtw89_scan_get_6g_disabled_chan(rtwdev, option); 5725 5726 if (RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD_BE_V0, &rtwdev->fw)) { 5727 cfg_len = offsetofend(typeof(*h2c), w8); 5728 scan_offload_ver = 0; 5729 } 5730 5731 len = cfg_len + macc_role_size + opch_size; 5732 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5733 if (!skb) { 5734 rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n"); 5735 return -ENOMEM; 5736 } 5737 5738 skb_put(skb, len); 5739 h2c = (struct rtw89_h2c_scanofld_be *)skb->data; 5740 ptr = skb->data; 5741 5742 memset(probe_id, RTW89_SCANOFLD_PKT_NONE, sizeof(probe_id)); 5743 5744 if (RTW89_CHK_FW_FEATURE(CH_INFO_BE_V0, &rtwdev->fw)) 5745 ver = 0; 5746 5747 if (!wowlan) { 5748 list_for_each_entry(pkt_info, &scan_info->pkt_list[NL80211_BAND_6GHZ], list) { 5749 if (pkt_info->wildcard_6ghz) { 5750 /* Provide wildcard as template */ 5751 probe_id[NL80211_BAND_6GHZ] = pkt_info->id; 5752 break; 5753 } 5754 } 5755 } 5756 5757 h2c->w0 = le32_encode_bits(option->operation, RTW89_H2C_SCANOFLD_BE_W0_OP) | 5758 le32_encode_bits(option->scan_mode, 5759 RTW89_H2C_SCANOFLD_BE_W0_SCAN_MODE) | 5760 le32_encode_bits(option->repeat, RTW89_H2C_SCANOFLD_BE_W0_REPEAT) | 5761 le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_NOTIFY_END) | 5762 le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_LEARN_CH) | 5763 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_SCANOFLD_BE_W0_MACID) | 5764 le32_encode_bits(rtwvif_link->port, RTW89_H2C_SCANOFLD_BE_W0_PORT) | 5765 le32_encode_bits(option->band, RTW89_H2C_SCANOFLD_BE_W0_BAND); 5766 5767 h2c->w1 = le32_encode_bits(option->num_macc_role, RTW89_H2C_SCANOFLD_BE_W1_NUM_MACC_ROLE) | 5768 le32_encode_bits(option->num_opch, RTW89_H2C_SCANOFLD_BE_W1_NUM_OP) | 5769 le32_encode_bits(option->norm_pd, RTW89_H2C_SCANOFLD_BE_W1_NORM_PD); 5770 5771 h2c->w2 = le32_encode_bits(option->slow_pd, RTW89_H2C_SCANOFLD_BE_W2_SLOW_PD) | 5772 le32_encode_bits(option->norm_cy, RTW89_H2C_SCANOFLD_BE_W2_NORM_CY) | 5773 le32_encode_bits(option->opch_end, RTW89_H2C_SCANOFLD_BE_W2_OPCH_END); 5774 5775 h2c->w3 = le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_SSID) | 5776 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_SHORT_SSID) | 5777 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_BSSID) | 5778 le32_encode_bits(probe_id[NL80211_BAND_2GHZ], RTW89_H2C_SCANOFLD_BE_W3_PROBEID); 5779 5780 h2c->w4 = le32_encode_bits(probe_id[NL80211_BAND_5GHZ], 5781 RTW89_H2C_SCANOFLD_BE_W4_PROBE_5G) | 5782 le32_encode_bits(probe_id[NL80211_BAND_6GHZ], 5783 RTW89_H2C_SCANOFLD_BE_W4_PROBE_6G) | 5784 le32_encode_bits(option->delay, RTW89_H2C_SCANOFLD_BE_W4_DELAY_START); 5785 5786 h2c->w5 = le32_encode_bits(option->mlo_mode, RTW89_H2C_SCANOFLD_BE_W5_MLO_MODE); 5787 5788 h2c->w6 = le32_encode_bits(option->prohib_chan, 5789 RTW89_H2C_SCANOFLD_BE_W6_CHAN_PROHIB_LOW); 5790 h2c->w7 = le32_encode_bits(option->prohib_chan >> 32, 5791 RTW89_H2C_SCANOFLD_BE_W7_CHAN_PROHIB_HIGH); 5792 if (!wowlan && req->no_cck) { 5793 h2c->w0 |= le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_PROBE_WITH_RATE); 5794 h2c->w8 = le32_encode_bits(RTW89_HW_RATE_OFDM6, 5795 RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_2GHZ) | 5796 le32_encode_bits(RTW89_HW_RATE_OFDM6, 5797 RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_5GHZ) | 5798 le32_encode_bits(RTW89_HW_RATE_OFDM6, 5799 
RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_6GHZ); 5800 } 5801 5802 if (scan_offload_ver == 0) 5803 goto flex_member; 5804 5805 h2c->w9 = le32_encode_bits(sizeof(*h2c) / sizeof(h2c->w0), 5806 RTW89_H2C_SCANOFLD_BE_W9_SIZE_CFG) | 5807 le32_encode_bits(sizeof(*macc_role) / sizeof(macc_role->w0), 5808 RTW89_H2C_SCANOFLD_BE_W9_SIZE_MACC) | 5809 le32_encode_bits(sizeof(*opch) / sizeof(opch->w0), 5810 RTW89_H2C_SCANOFLD_BE_W9_SIZE_OP); 5811 5812 flex_member: 5813 ptr += cfg_len; 5814 5815 for (i = 0; i < option->num_macc_role; i++) { 5816 macc_role = ptr; 5817 macc_role->w0 = 5818 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_BAND) | 5819 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_PORT) | 5820 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_MACID) | 5821 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_OPCH_END); 5822 ptr += sizeof(*macc_role); 5823 } 5824 5825 for (i = 0; i < option->num_opch; i++) { 5826 opch = ptr; 5827 opch->w0 = le32_encode_bits(rtwvif_link->mac_id, 5828 RTW89_H2C_SCANOFLD_BE_OPCH_W0_MACID) | 5829 le32_encode_bits(option->band, 5830 RTW89_H2C_SCANOFLD_BE_OPCH_W0_BAND) | 5831 le32_encode_bits(rtwvif_link->port, 5832 RTW89_H2C_SCANOFLD_BE_OPCH_W0_PORT) | 5833 le32_encode_bits(RTW89_SCAN_OPMODE_INTV, 5834 RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY) | 5835 le32_encode_bits(true, 5836 RTW89_H2C_SCANOFLD_BE_OPCH_W0_TXNULL) | 5837 le32_encode_bits(RTW89_OFF_CHAN_TIME / 10, 5838 RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY_VAL); 5839 5840 opch->w1 = le32_encode_bits(op->band_type, 5841 RTW89_H2C_SCANOFLD_BE_OPCH_W1_CH_BAND) | 5842 le32_encode_bits(op->band_width, 5843 RTW89_H2C_SCANOFLD_BE_OPCH_W1_BW) | 5844 le32_encode_bits(0x3, 5845 RTW89_H2C_SCANOFLD_BE_OPCH_W1_NOTIFY) | 5846 le32_encode_bits(op->primary_channel, 5847 RTW89_H2C_SCANOFLD_BE_OPCH_W1_PRI_CH) | 5848 le32_encode_bits(op->channel, 5849 RTW89_H2C_SCANOFLD_BE_OPCH_W1_CENTRAL_CH); 5850 5851 opch->w2 = le32_encode_bits(0, 5852 RTW89_H2C_SCANOFLD_BE_OPCH_W2_PKTS_CTRL) | 5853 le32_encode_bits(0, 5854 RTW89_H2C_SCANOFLD_BE_OPCH_W2_SW_DEF) | 5855 le32_encode_bits(rtw89_is_mlo_1_1(rtwdev) ? 1 : 2, 5856 RTW89_H2C_SCANOFLD_BE_OPCH_W2_SS); 5857 5858 opch->w3 = le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 5859 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT0) | 5860 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 5861 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT1) | 5862 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 5863 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT2) | 5864 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 5865 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT3); 5866 5867 if (ver == 0) 5868 opch->w1 |= le32_encode_bits(RTW89_CHANNEL_TIME, 5869 RTW89_H2C_SCANOFLD_BE_OPCH_W1_DURATION); 5870 else 5871 opch->w4 = le32_encode_bits(RTW89_CHANNEL_TIME, 5872 RTW89_H2C_SCANOFLD_BE_OPCH_W4_DURATION_V1); 5873 ptr += sizeof(*opch); 5874 } 5875 5876 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5877 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 5878 H2C_FUNC_SCANOFLD_BE, 1, 1, 5879 len); 5880 5881 if (option->enable) 5882 cond = RTW89_SCANOFLD_BE_WAIT_COND_START; 5883 else 5884 cond = RTW89_SCANOFLD_BE_WAIT_COND_STOP; 5885 5886 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 5887 if (ret) { 5888 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan be ofld\n"); 5889 return ret; 5890 } 5891 5892 return 0; 5893 } 5894 5895 int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev, 5896 struct rtw89_fw_h2c_rf_reg_info *info, 5897 u16 len, u8 page) 5898 { 5899 struct sk_buff *skb; 5900 u8 class = info->rf_path == RF_PATH_A ? 
5901 H2C_CL_OUTSRC_RF_REG_A : H2C_CL_OUTSRC_RF_REG_B; 5902 int ret; 5903 5904 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5905 if (!skb) { 5906 rtw89_err(rtwdev, "failed to alloc skb for h2c rf reg\n"); 5907 return -ENOMEM; 5908 } 5909 skb_put_data(skb, info->rtw89_phy_config_rf_h2c[page], len); 5910 5911 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5912 H2C_CAT_OUTSRC, class, page, 0, 0, 5913 len); 5914 5915 ret = rtw89_h2c_tx(rtwdev, skb, false); 5916 if (ret) { 5917 rtw89_err(rtwdev, "failed to send h2c\n"); 5918 goto fail; 5919 } 5920 5921 return 0; 5922 fail: 5923 dev_kfree_skb_any(skb); 5924 5925 return ret; 5926 } 5927 5928 int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev) 5929 { 5930 struct rtw89_rfk_mcc_info_data *rfk_mcc = rtwdev->rfk_mcc.data; 5931 struct rtw89_fw_h2c_rf_get_mccch_v0 *mccch_v0; 5932 struct rtw89_fw_h2c_rf_get_mccch *mccch; 5933 u32 len = sizeof(*mccch); 5934 struct sk_buff *skb; 5935 u8 ver = U8_MAX; 5936 int ret; 5937 u8 idx; 5938 5939 if (RTW89_CHK_FW_FEATURE(RFK_NTFY_MCC_V0, &rtwdev->fw)) { 5940 len = sizeof(*mccch_v0); 5941 ver = 0; 5942 } 5943 5944 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5945 if (!skb) { 5946 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 5947 return -ENOMEM; 5948 } 5949 skb_put(skb, len); 5950 5951 idx = rfk_mcc->table_idx; 5952 if (ver == 0) { 5953 mccch_v0 = (struct rtw89_fw_h2c_rf_get_mccch_v0 *)skb->data; 5954 mccch_v0->ch_0 = cpu_to_le32(rfk_mcc->ch[0]); 5955 mccch_v0->ch_1 = cpu_to_le32(rfk_mcc->ch[1]); 5956 mccch_v0->band_0 = cpu_to_le32(rfk_mcc->band[0]); 5957 mccch_v0->band_1 = cpu_to_le32(rfk_mcc->band[1]); 5958 mccch_v0->current_band_type = cpu_to_le32(rfk_mcc->band[idx]); 5959 mccch_v0->current_channel = cpu_to_le32(rfk_mcc->ch[idx]); 5960 } else { 5961 mccch = (struct rtw89_fw_h2c_rf_get_mccch *)skb->data; 5962 mccch->ch_0_0 = cpu_to_le32(rfk_mcc->ch[0]); 5963 mccch->ch_0_1 = cpu_to_le32(rfk_mcc->ch[0]); 5964 mccch->ch_1_0 = cpu_to_le32(rfk_mcc->ch[1]); 5965 mccch->ch_1_1 = cpu_to_le32(rfk_mcc->ch[1]); 5966 mccch->current_channel = cpu_to_le32(rfk_mcc->ch[idx]); 5967 } 5968 5969 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5970 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY, 5971 H2C_FUNC_OUTSRC_RF_GET_MCCCH, 0, 0, 5972 len); 5973 5974 ret = rtw89_h2c_tx(rtwdev, skb, false); 5975 if (ret) { 5976 rtw89_err(rtwdev, "failed to send h2c\n"); 5977 goto fail; 5978 } 5979 5980 return 0; 5981 fail: 5982 dev_kfree_skb_any(skb); 5983 5984 return ret; 5985 } 5986 EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc); 5987 5988 int rtw89_fw_h2c_rf_ps_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 5989 { 5990 const struct rtw89_chip_info *chip = rtwdev->chip; 5991 struct rtw89_vif_link *rtwvif_link; 5992 struct rtw89_h2c_rf_ps_info *h2c; 5993 const struct rtw89_chan *chan; 5994 u32 len = sizeof(*h2c); 5995 unsigned int link_id; 5996 struct sk_buff *skb; 5997 int ret; 5998 u8 path; 5999 u32 val; 6000 6001 if (chip->chip_gen != RTW89_CHIP_BE) 6002 return 0; 6003 6004 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6005 if (!skb) { 6006 rtw89_err(rtwdev, "failed to alloc skb for h2c rf ps info\n"); 6007 return -ENOMEM; 6008 } 6009 skb_put(skb, len); 6010 h2c = (struct rtw89_h2c_rf_ps_info *)skb->data; 6011 h2c->mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode); 6012 6013 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) { 6014 chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx); 6015 path = rtw89_phy_get_syn_sel(rtwdev, rtwvif_link->phy_idx); 6016 val = rtw89_chip_chan_to_rf18_val(rtwdev, 
chan); 6017 6018 if (path >= chip->rf_path_num) { 6019 rtw89_err(rtwdev, "unsupported rf path (%d)\n", path); 6020 ret = -ENOENT; 6021 goto fail; 6022 } 6023 6024 h2c->rf18[path] = cpu_to_le32(val); 6025 h2c->pri_ch[path] = chan->primary_channel; 6026 } 6027 6028 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6029 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY, 6030 H2C_FUNC_OUTSRC_RF_PS_INFO, 0, 0, 6031 sizeof(*h2c)); 6032 6033 ret = rtw89_h2c_tx(rtwdev, skb, false); 6034 if (ret) { 6035 rtw89_err(rtwdev, "failed to send h2c\n"); 6036 goto fail; 6037 } 6038 6039 return 0; 6040 fail: 6041 dev_kfree_skb_any(skb); 6042 6043 return ret; 6044 } 6045 EXPORT_SYMBOL(rtw89_fw_h2c_rf_ps_info); 6046 6047 int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev, 6048 enum rtw89_phy_idx phy_idx) 6049 { 6050 struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc; 6051 struct rtw89_fw_h2c_rfk_pre_info_common *common; 6052 struct rtw89_fw_h2c_rfk_pre_info_v0 *h2c_v0; 6053 struct rtw89_fw_h2c_rfk_pre_info_v1 *h2c_v1; 6054 struct rtw89_fw_h2c_rfk_pre_info *h2c; 6055 u8 tbl_sel[NUM_OF_RTW89_FW_RFK_PATH]; 6056 u32 len = sizeof(*h2c); 6057 struct sk_buff *skb; 6058 u8 ver = U8_MAX; 6059 u8 tbl, path; 6060 u32 val32; 6061 int ret; 6062 6063 if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V1, &rtwdev->fw)) { 6064 len = sizeof(*h2c_v1); 6065 ver = 1; 6066 } else if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V0, &rtwdev->fw)) { 6067 len = sizeof(*h2c_v0); 6068 ver = 0; 6069 } 6070 6071 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6072 if (!skb) { 6073 rtw89_err(rtwdev, "failed to alloc skb for h2c rfk_pre_ntfy\n"); 6074 return -ENOMEM; 6075 } 6076 skb_put(skb, len); 6077 h2c = (struct rtw89_fw_h2c_rfk_pre_info *)skb->data; 6078 common = &h2c->base_v1.common; 6079 6080 common->mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode); 6081 6082 BUILD_BUG_ON(NUM_OF_RTW89_FW_RFK_TBL > RTW89_RFK_CHS_NR); 6083 BUILD_BUG_ON(ARRAY_SIZE(rfk_mcc->data) < NUM_OF_RTW89_FW_RFK_PATH); 6084 6085 for (tbl = 0; tbl < NUM_OF_RTW89_FW_RFK_TBL; tbl++) { 6086 for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) { 6087 common->dbcc.ch[path][tbl] = 6088 cpu_to_le32(rfk_mcc->data[path].ch[tbl]); 6089 common->dbcc.band[path][tbl] = 6090 cpu_to_le32(rfk_mcc->data[path].band[tbl]); 6091 } 6092 } 6093 6094 for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) { 6095 tbl_sel[path] = rfk_mcc->data[path].table_idx; 6096 6097 common->tbl.cur_ch[path] = 6098 cpu_to_le32(rfk_mcc->data[path].ch[tbl_sel[path]]); 6099 common->tbl.cur_band[path] = 6100 cpu_to_le32(rfk_mcc->data[path].band[tbl_sel[path]]); 6101 6102 if (ver <= 1) 6103 continue; 6104 6105 h2c->cur_bandwidth[path] = 6106 cpu_to_le32(rfk_mcc->data[path].bw[tbl_sel[path]]); 6107 } 6108 6109 common->phy_idx = cpu_to_le32(phy_idx); 6110 6111 if (ver == 0) { /* RFK_PRE_NOTIFY_V0 */ 6112 h2c_v0 = (struct rtw89_fw_h2c_rfk_pre_info_v0 *)skb->data; 6113 6114 h2c_v0->cur_band = cpu_to_le32(rfk_mcc->data[0].band[tbl_sel[0]]); 6115 h2c_v0->cur_bw = cpu_to_le32(rfk_mcc->data[0].bw[tbl_sel[0]]); 6116 h2c_v0->cur_center_ch = cpu_to_le32(rfk_mcc->data[0].ch[tbl_sel[0]]); 6117 6118 val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL, B_COEF_SEL_IQC_V1); 6119 h2c_v0->ktbl_sel0 = cpu_to_le32(val32); 6120 val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL_C1, B_COEF_SEL_IQC_V1); 6121 h2c_v0->ktbl_sel1 = cpu_to_le32(val32); 6122 val32 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK); 6123 h2c_v0->rfmod0 = cpu_to_le32(val32); 6124 val32 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK); 6125 h2c_v0->rfmod1 = 
cpu_to_le32(val32); 6126 6127 if (rtw89_is_mlo_1_1(rtwdev)) 6128 h2c_v0->mlo_1_1 = cpu_to_le32(1); 6129 6130 h2c_v0->rfe_type = cpu_to_le32(rtwdev->efuse.rfe_type); 6131 6132 goto done; 6133 } 6134 6135 if (rtw89_is_mlo_1_1(rtwdev)) { 6136 h2c_v1 = &h2c->base_v1; 6137 h2c_v1->mlo_1_1 = cpu_to_le32(1); 6138 } 6139 done: 6140 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6141 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 6142 H2C_FUNC_RFK_PRE_NOTIFY, 0, 0, 6143 len); 6144 6145 ret = rtw89_h2c_tx(rtwdev, skb, false); 6146 if (ret) { 6147 rtw89_err(rtwdev, "failed to send h2c\n"); 6148 goto fail; 6149 } 6150 6151 return 0; 6152 fail: 6153 dev_kfree_skb_any(skb); 6154 6155 return ret; 6156 } 6157 6158 int rtw89_fw_h2c_rf_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 6159 const struct rtw89_chan *chan, enum rtw89_tssi_mode tssi_mode) 6160 { 6161 struct rtw89_efuse *efuse = &rtwdev->efuse; 6162 struct rtw89_hal *hal = &rtwdev->hal; 6163 struct rtw89_h2c_rf_tssi *h2c; 6164 u32 len = sizeof(*h2c); 6165 struct sk_buff *skb; 6166 int ret; 6167 6168 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6169 if (!skb) { 6170 rtw89_err(rtwdev, "failed to alloc skb for h2c RF TSSI\n"); 6171 return -ENOMEM; 6172 } 6173 skb_put(skb, len); 6174 h2c = (struct rtw89_h2c_rf_tssi *)skb->data; 6175 6176 h2c->len = cpu_to_le16(len); 6177 h2c->phy = phy_idx; 6178 h2c->ch = chan->channel; 6179 h2c->bw = chan->band_width; 6180 h2c->band = chan->band_type; 6181 h2c->hwtx_en = true; 6182 h2c->cv = hal->cv; 6183 h2c->tssi_mode = tssi_mode; 6184 h2c->rfe_type = efuse->rfe_type; 6185 6186 rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(rtwdev, phy_idx, chan, h2c); 6187 rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(rtwdev, phy_idx, chan, h2c); 6188 6189 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6190 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 6191 H2C_FUNC_RFK_TSSI_OFFLOAD, 0, 0, len); 6192 6193 ret = rtw89_h2c_tx(rtwdev, skb, false); 6194 if (ret) { 6195 rtw89_err(rtwdev, "failed to send h2c\n"); 6196 goto fail; 6197 } 6198 6199 return 0; 6200 fail: 6201 dev_kfree_skb_any(skb); 6202 6203 return ret; 6204 } 6205 6206 int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 6207 const struct rtw89_chan *chan) 6208 { 6209 struct rtw89_hal *hal = &rtwdev->hal; 6210 struct rtw89_h2c_rf_iqk_v0 *h2c_v0; 6211 struct rtw89_h2c_rf_iqk *h2c; 6212 u32 len = sizeof(*h2c); 6213 struct sk_buff *skb; 6214 u8 ver = U8_MAX; 6215 int ret; 6216 6217 if (RTW89_CHK_FW_FEATURE(RFK_IQK_V0, &rtwdev->fw)) { 6218 len = sizeof(*h2c_v0); 6219 ver = 0; 6220 } 6221 6222 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6223 if (!skb) { 6224 rtw89_err(rtwdev, "failed to alloc skb for h2c RF IQK\n"); 6225 return -ENOMEM; 6226 } 6227 skb_put(skb, len); 6228 6229 if (ver == 0) { 6230 h2c_v0 = (struct rtw89_h2c_rf_iqk_v0 *)skb->data; 6231 6232 h2c_v0->phy_idx = cpu_to_le32(phy_idx); 6233 h2c_v0->dbcc = cpu_to_le32(rtwdev->dbcc_en); 6234 6235 goto done; 6236 } 6237 6238 h2c = (struct rtw89_h2c_rf_iqk *)skb->data; 6239 6240 h2c->len = sizeof(*h2c); 6241 h2c->ktype = 0; 6242 h2c->phy = phy_idx; 6243 h2c->kpath = rtw89_phy_get_kpath(rtwdev, phy_idx); 6244 h2c->band = chan->band_type; 6245 h2c->bw = chan->band_width; 6246 h2c->ch = chan->channel; 6247 h2c->cv = hal->cv; 6248 6249 done: 6250 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6251 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 6252 H2C_FUNC_RFK_IQK_OFFLOAD, 0, 0, len); 6253 6254 ret = rtw89_h2c_tx(rtwdev, skb, false); 6255 if (ret) { 6256 rtw89_err(rtwdev, "failed to send 
h2c\n"); 6257 goto fail; 6258 } 6259 6260 return 0; 6261 fail: 6262 dev_kfree_skb_any(skb); 6263 6264 return ret; 6265 } 6266 6267 int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 6268 const struct rtw89_chan *chan) 6269 { 6270 struct rtw89_h2c_rf_dpk *h2c; 6271 u32 len = sizeof(*h2c); 6272 struct sk_buff *skb; 6273 int ret; 6274 6275 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6276 if (!skb) { 6277 rtw89_err(rtwdev, "failed to alloc skb for h2c RF DPK\n"); 6278 return -ENOMEM; 6279 } 6280 skb_put(skb, len); 6281 h2c = (struct rtw89_h2c_rf_dpk *)skb->data; 6282 6283 h2c->len = len; 6284 h2c->phy = phy_idx; 6285 h2c->dpk_enable = true; 6286 h2c->kpath = RF_AB; 6287 h2c->cur_band = chan->band_type; 6288 h2c->cur_bw = chan->band_width; 6289 h2c->cur_ch = chan->channel; 6290 h2c->dpk_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK); 6291 6292 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6293 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 6294 H2C_FUNC_RFK_DPK_OFFLOAD, 0, 0, len); 6295 6296 ret = rtw89_h2c_tx(rtwdev, skb, false); 6297 if (ret) { 6298 rtw89_err(rtwdev, "failed to send h2c\n"); 6299 goto fail; 6300 } 6301 6302 return 0; 6303 fail: 6304 dev_kfree_skb_any(skb); 6305 6306 return ret; 6307 } 6308 6309 int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 6310 const struct rtw89_chan *chan) 6311 { 6312 struct rtw89_hal *hal = &rtwdev->hal; 6313 struct rtw89_h2c_rf_txgapk *h2c; 6314 u32 len = sizeof(*h2c); 6315 struct sk_buff *skb; 6316 int ret; 6317 6318 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6319 if (!skb) { 6320 rtw89_err(rtwdev, "failed to alloc skb for h2c RF TXGAPK\n"); 6321 return -ENOMEM; 6322 } 6323 skb_put(skb, len); 6324 h2c = (struct rtw89_h2c_rf_txgapk *)skb->data; 6325 6326 h2c->len = len; 6327 h2c->ktype = 2; 6328 h2c->phy = phy_idx; 6329 h2c->kpath = RF_AB; 6330 h2c->band = chan->band_type; 6331 h2c->bw = chan->band_width; 6332 h2c->ch = chan->channel; 6333 h2c->cv = hal->cv; 6334 6335 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6336 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 6337 H2C_FUNC_RFK_TXGAPK_OFFLOAD, 0, 0, len); 6338 6339 ret = rtw89_h2c_tx(rtwdev, skb, false); 6340 if (ret) { 6341 rtw89_err(rtwdev, "failed to send h2c\n"); 6342 goto fail; 6343 } 6344 6345 return 0; 6346 fail: 6347 dev_kfree_skb_any(skb); 6348 6349 return ret; 6350 } 6351 6352 int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 6353 const struct rtw89_chan *chan) 6354 { 6355 struct rtw89_h2c_rf_dack *h2c; 6356 u32 len = sizeof(*h2c); 6357 struct sk_buff *skb; 6358 int ret; 6359 6360 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6361 if (!skb) { 6362 rtw89_err(rtwdev, "failed to alloc skb for h2c RF DACK\n"); 6363 return -ENOMEM; 6364 } 6365 skb_put(skb, len); 6366 h2c = (struct rtw89_h2c_rf_dack *)skb->data; 6367 6368 h2c->len = cpu_to_le32(len); 6369 h2c->phy = cpu_to_le32(phy_idx); 6370 h2c->type = cpu_to_le32(0); 6371 6372 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6373 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 6374 H2C_FUNC_RFK_DACK_OFFLOAD, 0, 0, len); 6375 6376 ret = rtw89_h2c_tx(rtwdev, skb, false); 6377 if (ret) { 6378 rtw89_err(rtwdev, "failed to send h2c\n"); 6379 goto fail; 6380 } 6381 6382 return 0; 6383 fail: 6384 dev_kfree_skb_any(skb); 6385 6386 return ret; 6387 } 6388 6389 int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 6390 const struct rtw89_chan *chan, bool is_chl_k) 6391 { 6392 struct rtw89_h2c_rf_rxdck_v0 *v0; 6393 
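	/*
	 * Note: the shared fields below are always filled through the v0 view
	 * of the payload; this relies on struct rtw89_h2c_rf_rxdck_v0 being a
	 * leading subset of struct rtw89_h2c_rf_rxdck, with only is_chl_k
	 * appended in the newer layout.
	 */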
struct rtw89_h2c_rf_rxdck *h2c; 6394 u32 len = sizeof(*h2c); 6395 struct sk_buff *skb; 6396 int ver = -1; 6397 int ret; 6398 6399 if (RTW89_CHK_FW_FEATURE(RFK_RXDCK_V0, &rtwdev->fw)) { 6400 len = sizeof(*v0); 6401 ver = 0; 6402 } 6403 6404 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6405 if (!skb) { 6406 rtw89_err(rtwdev, "failed to alloc skb for h2c RF RXDCK\n"); 6407 return -ENOMEM; 6408 } 6409 skb_put(skb, len); 6410 v0 = (struct rtw89_h2c_rf_rxdck_v0 *)skb->data; 6411 6412 v0->len = len; 6413 v0->phy = phy_idx; 6414 v0->is_afe = false; 6415 v0->kpath = RF_AB; 6416 v0->cur_band = chan->band_type; 6417 v0->cur_bw = chan->band_width; 6418 v0->cur_ch = chan->channel; 6419 v0->rxdck_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK); 6420 6421 if (ver == 0) 6422 goto hdr; 6423 6424 h2c = (struct rtw89_h2c_rf_rxdck *)skb->data; 6425 h2c->is_chl_k = is_chl_k; 6426 6427 hdr: 6428 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6429 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 6430 H2C_FUNC_RFK_RXDCK_OFFLOAD, 0, 0, len); 6431 6432 ret = rtw89_h2c_tx(rtwdev, skb, false); 6433 if (ret) { 6434 rtw89_err(rtwdev, "failed to send h2c\n"); 6435 goto fail; 6436 } 6437 6438 return 0; 6439 fail: 6440 dev_kfree_skb_any(skb); 6441 6442 return ret; 6443 } 6444 6445 int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev, 6446 u8 h2c_class, u8 h2c_func, u8 *buf, u16 len, 6447 bool rack, bool dack) 6448 { 6449 struct sk_buff *skb; 6450 int ret; 6451 6452 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6453 if (!skb) { 6454 rtw89_err(rtwdev, "failed to alloc skb for raw with hdr\n"); 6455 return -ENOMEM; 6456 } 6457 skb_put_data(skb, buf, len); 6458 6459 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6460 H2C_CAT_OUTSRC, h2c_class, h2c_func, rack, dack, 6461 len); 6462 6463 ret = rtw89_h2c_tx(rtwdev, skb, false); 6464 if (ret) { 6465 rtw89_err(rtwdev, "failed to send h2c\n"); 6466 goto fail; 6467 } 6468 6469 return 0; 6470 fail: 6471 dev_kfree_skb_any(skb); 6472 6473 return ret; 6474 } 6475 6476 int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len) 6477 { 6478 struct sk_buff *skb; 6479 int ret; 6480 6481 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, len); 6482 if (!skb) { 6483 rtw89_err(rtwdev, "failed to alloc skb for h2c raw\n"); 6484 return -ENOMEM; 6485 } 6486 skb_put_data(skb, buf, len); 6487 6488 ret = rtw89_h2c_tx(rtwdev, skb, false); 6489 if (ret) { 6490 rtw89_err(rtwdev, "failed to send h2c\n"); 6491 goto fail; 6492 } 6493 6494 return 0; 6495 fail: 6496 dev_kfree_skb_any(skb); 6497 6498 return ret; 6499 } 6500 6501 void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev) 6502 { 6503 struct rtw89_early_h2c *early_h2c; 6504 6505 lockdep_assert_wiphy(rtwdev->hw->wiphy); 6506 6507 list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) { 6508 rtw89_fw_h2c_raw(rtwdev, early_h2c->h2c, early_h2c->h2c_len); 6509 } 6510 } 6511 6512 void __rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev) 6513 { 6514 struct rtw89_early_h2c *early_h2c, *tmp; 6515 6516 list_for_each_entry_safe(early_h2c, tmp, &rtwdev->early_h2c_list, list) { 6517 list_del(&early_h2c->list); 6518 kfree(early_h2c->h2c); 6519 kfree(early_h2c); 6520 } 6521 } 6522 6523 void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev) 6524 { 6525 lockdep_assert_wiphy(rtwdev->hw->wiphy); 6526 6527 __rtw89_fw_free_all_early_h2c(rtwdev); 6528 } 6529 6530 static void rtw89_fw_c2h_parse_attr(struct sk_buff *c2h) 6531 { 6532 const struct rtw89_c2h_hdr *hdr = (const struct rtw89_c2h_hdr *)c2h->data; 6533 struct 
rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h); 6534 6535 attr->category = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CATEGORY); 6536 attr->class = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CLASS); 6537 attr->func = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_FUNC); 6538 attr->len = le32_get_bits(hdr->w1, RTW89_C2H_HDR_W1_LEN); 6539 } 6540 6541 static bool rtw89_fw_c2h_chk_atomic(struct rtw89_dev *rtwdev, 6542 struct sk_buff *c2h) 6543 { 6544 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h); 6545 u8 category = attr->category; 6546 u8 class = attr->class; 6547 u8 func = attr->func; 6548 6549 switch (category) { 6550 default: 6551 return false; 6552 case RTW89_C2H_CAT_MAC: 6553 return rtw89_mac_c2h_chk_atomic(rtwdev, c2h, class, func); 6554 case RTW89_C2H_CAT_OUTSRC: 6555 return rtw89_phy_c2h_chk_atomic(rtwdev, class, func); 6556 } 6557 } 6558 6559 void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h) 6560 { 6561 rtw89_fw_c2h_parse_attr(c2h); 6562 if (!rtw89_fw_c2h_chk_atomic(rtwdev, c2h)) 6563 goto enqueue; 6564 6565 rtw89_fw_c2h_cmd_handle(rtwdev, c2h); 6566 dev_kfree_skb_any(c2h); 6567 return; 6568 6569 enqueue: 6570 skb_queue_tail(&rtwdev->c2h_queue, c2h); 6571 wiphy_work_queue(rtwdev->hw->wiphy, &rtwdev->c2h_work); 6572 } 6573 6574 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev, 6575 struct sk_buff *skb) 6576 { 6577 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(skb); 6578 u8 category = attr->category; 6579 u8 class = attr->class; 6580 u8 func = attr->func; 6581 u16 len = attr->len; 6582 bool dump = true; 6583 6584 if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags)) 6585 return; 6586 6587 switch (category) { 6588 case RTW89_C2H_CAT_TEST: 6589 break; 6590 case RTW89_C2H_CAT_MAC: 6591 rtw89_mac_c2h_handle(rtwdev, skb, len, class, func); 6592 if (class == RTW89_MAC_C2H_CLASS_INFO && 6593 func == RTW89_MAC_C2H_FUNC_C2H_LOG) 6594 dump = false; 6595 break; 6596 case RTW89_C2H_CAT_OUTSRC: 6597 if (class >= RTW89_PHY_C2H_CLASS_BTC_MIN && 6598 class <= RTW89_PHY_C2H_CLASS_BTC_MAX) 6599 rtw89_btc_c2h_handle(rtwdev, skb, len, class, func); 6600 else 6601 rtw89_phy_c2h_handle(rtwdev, skb, len, class, func); 6602 break; 6603 } 6604 6605 if (dump) 6606 rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "C2H: ", skb->data, skb->len); 6607 } 6608 6609 void rtw89_fw_c2h_work(struct wiphy *wiphy, struct wiphy_work *work) 6610 { 6611 struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, 6612 c2h_work); 6613 struct sk_buff *skb, *tmp; 6614 6615 lockdep_assert_wiphy(rtwdev->hw->wiphy); 6616 6617 skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) { 6618 skb_unlink(skb, &rtwdev->c2h_queue); 6619 rtw89_fw_c2h_cmd_handle(rtwdev, skb); 6620 dev_kfree_skb_any(skb); 6621 } 6622 } 6623 6624 static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev, 6625 struct rtw89_mac_h2c_info *info) 6626 { 6627 const struct rtw89_chip_info *chip = rtwdev->chip; 6628 struct rtw89_fw_info *fw_info = &rtwdev->fw; 6629 const u32 *h2c_reg = chip->h2c_regs; 6630 u8 i, val, len; 6631 int ret; 6632 6633 ret = read_poll_timeout(rtw89_read8, val, val == 0, 1000, 5000, false, 6634 rtwdev, chip->h2c_ctrl_reg); 6635 if (ret) { 6636 rtw89_warn(rtwdev, "FW does not process h2c registers\n"); 6637 return ret; 6638 } 6639 6640 len = DIV_ROUND_UP(info->content_len + RTW89_H2CREG_HDR_LEN, 6641 sizeof(info->u.h2creg[0])); 6642 6643 u32p_replace_bits(&info->u.hdr.w0, info->id, RTW89_H2CREG_HDR_FUNC_MASK); 6644 u32p_replace_bits(&info->u.hdr.w0, len, RTW89_H2CREG_HDR_LEN_MASK); 6645 6646 for (i = 0; i < RTW89_H2CREG_MAX; 
i++) 6647 rtw89_write32(rtwdev, h2c_reg[i], info->u.h2creg[i]); 6648 6649 fw_info->h2c_counter++; 6650 rtw89_write8_mask(rtwdev, chip->h2c_counter_reg.addr, 6651 chip->h2c_counter_reg.mask, fw_info->h2c_counter); 6652 rtw89_write8(rtwdev, chip->h2c_ctrl_reg, B_AX_H2CREG_TRIGGER); 6653 6654 return 0; 6655 } 6656 6657 static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev, 6658 struct rtw89_mac_c2h_info *info) 6659 { 6660 const struct rtw89_chip_info *chip = rtwdev->chip; 6661 struct rtw89_fw_info *fw_info = &rtwdev->fw; 6662 const u32 *c2h_reg = chip->c2h_regs; 6663 u32 ret; 6664 u8 i, val; 6665 6666 info->id = RTW89_FWCMD_C2HREG_FUNC_NULL; 6667 6668 ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1, 6669 RTW89_C2H_TIMEOUT, false, rtwdev, 6670 chip->c2h_ctrl_reg); 6671 if (ret) { 6672 rtw89_warn(rtwdev, "c2h reg timeout\n"); 6673 return ret; 6674 } 6675 6676 for (i = 0; i < RTW89_C2HREG_MAX; i++) 6677 info->u.c2hreg[i] = rtw89_read32(rtwdev, c2h_reg[i]); 6678 6679 rtw89_write8(rtwdev, chip->c2h_ctrl_reg, 0); 6680 6681 info->id = u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_FUNC_MASK); 6682 info->content_len = 6683 (u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_LEN_MASK) << 2) - 6684 RTW89_C2HREG_HDR_LEN; 6685 6686 fw_info->c2h_counter++; 6687 rtw89_write8_mask(rtwdev, chip->c2h_counter_reg.addr, 6688 chip->c2h_counter_reg.mask, fw_info->c2h_counter); 6689 6690 return 0; 6691 } 6692 6693 int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev, 6694 struct rtw89_mac_h2c_info *h2c_info, 6695 struct rtw89_mac_c2h_info *c2h_info) 6696 { 6697 u32 ret; 6698 6699 if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE) 6700 lockdep_assert_wiphy(rtwdev->hw->wiphy); 6701 6702 if (!h2c_info && !c2h_info) 6703 return -EINVAL; 6704 6705 if (!h2c_info) 6706 goto recv_c2h; 6707 6708 ret = rtw89_fw_write_h2c_reg(rtwdev, h2c_info); 6709 if (ret) 6710 return ret; 6711 6712 recv_c2h: 6713 if (!c2h_info) 6714 return 0; 6715 6716 ret = rtw89_fw_read_c2h_reg(rtwdev, c2h_info); 6717 if (ret) 6718 return ret; 6719 6720 return 0; 6721 } 6722 6723 void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev) 6724 { 6725 if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) { 6726 rtw89_err(rtwdev, "[ERR]pwr is off\n"); 6727 return; 6728 } 6729 6730 rtw89_info(rtwdev, "FW status = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM0)); 6731 rtw89_info(rtwdev, "FW BADADDR = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM1)); 6732 rtw89_info(rtwdev, "FW EPC/RA = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM2)); 6733 rtw89_info(rtwdev, "FW MISC = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM3)); 6734 rtw89_info(rtwdev, "R_AX_HALT_C2H = 0x%x\n", 6735 rtw89_read32(rtwdev, R_AX_HALT_C2H)); 6736 rtw89_info(rtwdev, "R_AX_SER_DBG_INFO = 0x%x\n", 6737 rtw89_read32(rtwdev, R_AX_SER_DBG_INFO)); 6738 6739 rtw89_fw_prog_cnt_dump(rtwdev); 6740 } 6741 6742 static void rtw89_hw_scan_release_pkt_list(struct rtw89_dev *rtwdev) 6743 { 6744 struct list_head *pkt_list = rtwdev->scan_info.pkt_list; 6745 struct rtw89_pktofld_info *info, *tmp; 6746 u8 idx; 6747 6748 for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) { 6749 if (!(rtwdev->chip->support_bands & BIT(idx))) 6750 continue; 6751 6752 list_for_each_entry_safe(info, tmp, &pkt_list[idx], list) { 6753 if (test_bit(info->id, rtwdev->pkt_offload)) 6754 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id); 6755 list_del(&info->list); 6756 kfree(info); 6757 } 6758 } 6759 } 6760 6761 static void rtw89_hw_scan_cleanup(struct rtw89_dev *rtwdev, 6762 struct rtw89_vif_link *rtwvif_link) 6763 { 6764 const struct 
rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 6765 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 6766 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 6767 6768 mac->free_chan_list(rtwdev); 6769 rtw89_hw_scan_release_pkt_list(rtwdev); 6770 6771 rtwvif->scan_req = NULL; 6772 rtwvif->scan_ies = NULL; 6773 scan_info->scanning_vif = NULL; 6774 scan_info->abort = false; 6775 scan_info->connected = false; 6776 } 6777 6778 static bool rtw89_is_6ghz_wildcard_probe_req(struct rtw89_dev *rtwdev, 6779 struct cfg80211_scan_request *req, 6780 struct rtw89_pktofld_info *info, 6781 enum nl80211_band band, u8 ssid_idx) 6782 { 6783 if (band != NL80211_BAND_6GHZ) 6784 return false; 6785 6786 if (req->ssids[ssid_idx].ssid_len) { 6787 memcpy(info->ssid, req->ssids[ssid_idx].ssid, 6788 req->ssids[ssid_idx].ssid_len); 6789 info->ssid_len = req->ssids[ssid_idx].ssid_len; 6790 return false; 6791 } else { 6792 info->wildcard_6ghz = true; 6793 return true; 6794 } 6795 } 6796 6797 static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev, 6798 struct rtw89_vif_link *rtwvif_link, 6799 struct sk_buff *skb, u8 ssid_idx) 6800 { 6801 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 6802 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 6803 struct ieee80211_scan_ies *ies = rtwvif->scan_ies; 6804 struct cfg80211_scan_request *req = rtwvif->scan_req; 6805 struct rtw89_pktofld_info *info; 6806 struct sk_buff *new; 6807 int ret = 0; 6808 u8 band; 6809 6810 for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { 6811 if (!(rtwdev->chip->support_bands & BIT(band))) 6812 continue; 6813 6814 new = skb_copy(skb, GFP_KERNEL); 6815 if (!new) { 6816 ret = -ENOMEM; 6817 goto out; 6818 } 6819 skb_put_data(new, ies->ies[band], ies->len[band]); 6820 skb_put_data(new, ies->common_ies, ies->common_ie_len); 6821 6822 info = kzalloc(sizeof(*info), GFP_KERNEL); 6823 if (!info) { 6824 ret = -ENOMEM; 6825 kfree_skb(new); 6826 goto out; 6827 } 6828 6829 rtw89_is_6ghz_wildcard_probe_req(rtwdev, req, info, band, ssid_idx); 6830 6831 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new); 6832 if (ret) { 6833 kfree_skb(new); 6834 kfree(info); 6835 goto out; 6836 } 6837 6838 list_add_tail(&info->list, &scan_info->pkt_list[band]); 6839 kfree_skb(new); 6840 } 6841 out: 6842 return ret; 6843 } 6844 6845 static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev, 6846 struct rtw89_vif_link *rtwvif_link, 6847 const u8 *mac_addr) 6848 { 6849 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 6850 struct cfg80211_scan_request *req = rtwvif->scan_req; 6851 struct sk_buff *skb; 6852 u8 num = req->n_ssids, i; 6853 int ret; 6854 6855 for (i = 0; i < num; i++) { 6856 skb = ieee80211_probereq_get(rtwdev->hw, mac_addr, 6857 req->ssids[i].ssid, 6858 req->ssids[i].ssid_len, 6859 req->ie_len); 6860 if (!skb) 6861 return -ENOMEM; 6862 6863 ret = rtw89_append_probe_req_ie(rtwdev, rtwvif_link, skb, i); 6864 kfree_skb(skb); 6865 6866 if (ret) 6867 return ret; 6868 } 6869 6870 return 0; 6871 } 6872 6873 static int rtw89_update_6ghz_rnr_chan_ax(struct rtw89_dev *rtwdev, 6874 struct ieee80211_scan_ies *ies, 6875 struct cfg80211_scan_request *req, 6876 struct rtw89_mac_chinfo_ax *ch_info) 6877 { 6878 struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif; 6879 struct list_head *pkt_list = rtwdev->scan_info.pkt_list; 6880 struct cfg80211_scan_6ghz_params *params; 6881 struct rtw89_pktofld_info *info, *tmp; 6882 struct ieee80211_hdr *hdr; 6883 struct sk_buff *skb; 6884 bool found; 6885 int ret = 0; 6886 u8 i; 6887 
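	/*
	 * For each 6 GHz RNR entry in the scan request that matches this
	 * channel's primary channel, build a directed probe request
	 * (addr3 = reported BSSID), register it as a packet offload, and mark
	 * the channel for TX with a longer period (RTW89_CHANNEL_TIME_6G +
	 * RTW89_DWELL_TIME_6G). Entries whose BSSID already has an offloaded
	 * probe on the 6 GHz packet list are skipped.
	 */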
6888 if (!req->n_6ghz_params) 6889 return 0; 6890 6891 for (i = 0; i < req->n_6ghz_params; i++) { 6892 params = &req->scan_6ghz_params[i]; 6893 6894 if (req->channels[params->channel_idx]->hw_value != 6895 ch_info->pri_ch) 6896 continue; 6897 6898 found = false; 6899 list_for_each_entry(tmp, &pkt_list[NL80211_BAND_6GHZ], list) { 6900 if (ether_addr_equal(tmp->bssid, params->bssid)) { 6901 found = true; 6902 break; 6903 } 6904 } 6905 if (found) 6906 continue; 6907 6908 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif_link->mac_addr, 6909 NULL, 0, req->ie_len); 6910 if (!skb) 6911 return -ENOMEM; 6912 6913 skb_put_data(skb, ies->ies[NL80211_BAND_6GHZ], ies->len[NL80211_BAND_6GHZ]); 6914 skb_put_data(skb, ies->common_ies, ies->common_ie_len); 6915 hdr = (struct ieee80211_hdr *)skb->data; 6916 ether_addr_copy(hdr->addr3, params->bssid); 6917 6918 info = kzalloc(sizeof(*info), GFP_KERNEL); 6919 if (!info) { 6920 ret = -ENOMEM; 6921 kfree_skb(skb); 6922 goto out; 6923 } 6924 6925 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb); 6926 if (ret) { 6927 kfree_skb(skb); 6928 kfree(info); 6929 goto out; 6930 } 6931 6932 ether_addr_copy(info->bssid, params->bssid); 6933 info->channel_6ghz = req->channels[params->channel_idx]->hw_value; 6934 list_add_tail(&info->list, &rtwdev->scan_info.pkt_list[NL80211_BAND_6GHZ]); 6935 6936 ch_info->tx_pkt = true; 6937 ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G; 6938 6939 kfree_skb(skb); 6940 } 6941 6942 out: 6943 return ret; 6944 } 6945 6946 static void rtw89_pno_scan_add_chan_ax(struct rtw89_dev *rtwdev, 6947 int chan_type, int ssid_num, 6948 struct rtw89_mac_chinfo_ax *ch_info) 6949 { 6950 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 6951 struct rtw89_pktofld_info *info; 6952 u8 probe_count = 0; 6953 6954 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 6955 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 6956 ch_info->bw = RTW89_SCAN_WIDTH; 6957 ch_info->tx_pkt = true; 6958 ch_info->cfg_tx_pwr = false; 6959 ch_info->tx_pwr_idx = 0; 6960 ch_info->tx_null = false; 6961 ch_info->pause_data = false; 6962 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 6963 6964 if (ssid_num) { 6965 list_for_each_entry(info, &rtw_wow->pno_pkt_list, list) { 6966 if (info->channel_6ghz && 6967 ch_info->pri_ch != info->channel_6ghz) 6968 continue; 6969 else if (info->channel_6ghz && probe_count != 0) 6970 ch_info->period += RTW89_CHANNEL_TIME_6G; 6971 6972 if (info->wildcard_6ghz) 6973 continue; 6974 6975 ch_info->pkt_id[probe_count++] = info->id; 6976 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 6977 break; 6978 } 6979 ch_info->num_pkt = probe_count; 6980 } 6981 6982 switch (chan_type) { 6983 case RTW89_CHAN_DFS: 6984 if (ch_info->ch_band != RTW89_BAND_6G) 6985 ch_info->period = max_t(u8, ch_info->period, 6986 RTW89_DFS_CHAN_TIME); 6987 ch_info->dwell_time = RTW89_DWELL_TIME; 6988 break; 6989 case RTW89_CHAN_ACTIVE: 6990 break; 6991 default: 6992 rtw89_err(rtwdev, "Channel type out of bound\n"); 6993 } 6994 } 6995 6996 static void rtw89_hw_scan_add_chan_ax(struct rtw89_dev *rtwdev, int chan_type, 6997 int ssid_num, 6998 struct rtw89_mac_chinfo_ax *ch_info) 6999 { 7000 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 7001 struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif; 7002 const struct rtw89_hw_scan_extra_op *ext = &scan_info->extra_op; 7003 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 7004 struct ieee80211_scan_ies *ies = rtwvif->scan_ies; 7005 struct cfg80211_scan_request *req = rtwvif->scan_req; 7006 struct rtw89_chan *op 
= &rtwdev->scan_info.op_chan; 7007 struct rtw89_pktofld_info *info; 7008 u8 band, probe_count = 0; 7009 int ret; 7010 7011 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 7012 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 7013 ch_info->bw = RTW89_SCAN_WIDTH; 7014 ch_info->tx_pkt = true; 7015 ch_info->cfg_tx_pwr = false; 7016 ch_info->tx_pwr_idx = 0; 7017 ch_info->tx_null = false; 7018 ch_info->pause_data = false; 7019 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 7020 7021 if (ch_info->ch_band == RTW89_BAND_6G) { 7022 if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) || 7023 !ch_info->is_psc) { 7024 ch_info->tx_pkt = false; 7025 if (!req->duration_mandatory) 7026 ch_info->period -= RTW89_DWELL_TIME_6G; 7027 } 7028 } 7029 7030 ret = rtw89_update_6ghz_rnr_chan_ax(rtwdev, ies, req, ch_info); 7031 if (ret) 7032 rtw89_warn(rtwdev, "RNR fails: %d\n", ret); 7033 7034 if (ssid_num) { 7035 band = rtw89_hw_to_nl80211_band(ch_info->ch_band); 7036 7037 list_for_each_entry(info, &scan_info->pkt_list[band], list) { 7038 if (info->channel_6ghz && 7039 ch_info->pri_ch != info->channel_6ghz) 7040 continue; 7041 else if (info->channel_6ghz && probe_count != 0) 7042 ch_info->period += RTW89_CHANNEL_TIME_6G; 7043 7044 if (info->wildcard_6ghz) 7045 continue; 7046 7047 ch_info->pkt_id[probe_count++] = info->id; 7048 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 7049 break; 7050 } 7051 ch_info->num_pkt = probe_count; 7052 } 7053 7054 switch (chan_type) { 7055 case RTW89_CHAN_OPERATE: 7056 ch_info->central_ch = op->channel; 7057 ch_info->pri_ch = op->primary_channel; 7058 ch_info->ch_band = op->band_type; 7059 ch_info->bw = op->band_width; 7060 ch_info->tx_null = true; 7061 ch_info->num_pkt = 0; 7062 break; 7063 case RTW89_CHAN_DFS: 7064 if (ch_info->ch_band != RTW89_BAND_6G) 7065 ch_info->period = max_t(u8, ch_info->period, 7066 RTW89_DFS_CHAN_TIME); 7067 ch_info->dwell_time = RTW89_DWELL_TIME; 7068 ch_info->pause_data = true; 7069 break; 7070 case RTW89_CHAN_ACTIVE: 7071 ch_info->pause_data = true; 7072 break; 7073 case RTW89_CHAN_EXTRA_OP: 7074 ch_info->central_ch = ext->chan.channel; 7075 ch_info->pri_ch = ext->chan.primary_channel; 7076 ch_info->ch_band = ext->chan.band_type; 7077 ch_info->bw = ext->chan.band_width; 7078 ch_info->tx_null = true; 7079 ch_info->num_pkt = 0; 7080 ch_info->macid_tx = true; 7081 break; 7082 default: 7083 rtw89_err(rtwdev, "Channel type out of bound\n"); 7084 } 7085 } 7086 7087 static void rtw89_pno_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type, 7088 int ssid_num, 7089 struct rtw89_mac_chinfo_be *ch_info) 7090 { 7091 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 7092 struct rtw89_pktofld_info *info; 7093 u8 probe_count = 0, i; 7094 7095 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 7096 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 7097 ch_info->bw = RTW89_SCAN_WIDTH; 7098 ch_info->tx_null = false; 7099 ch_info->pause_data = false; 7100 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 7101 7102 if (ssid_num) { 7103 list_for_each_entry(info, &rtw_wow->pno_pkt_list, list) { 7104 ch_info->pkt_id[probe_count++] = info->id; 7105 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 7106 break; 7107 } 7108 } 7109 7110 for (i = probe_count; i < RTW89_SCANOFLD_MAX_SSID; i++) 7111 ch_info->pkt_id[i] = RTW89_SCANOFLD_PKT_NONE; 7112 7113 switch (chan_type) { 7114 case RTW89_CHAN_DFS: 7115 ch_info->period = max_t(u8, ch_info->period, RTW89_DFS_CHAN_TIME); 7116 ch_info->dwell_time = RTW89_DWELL_TIME; 7117 break; 7118 case RTW89_CHAN_ACTIVE: 7119 break; 7120 default: 7121 
rtw89_warn(rtwdev, "Channel type out of bound\n"); 7122 break; 7123 } 7124 } 7125 7126 static void rtw89_hw_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type, 7127 int ssid_num, 7128 struct rtw89_mac_chinfo_be *ch_info) 7129 { 7130 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 7131 struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif; 7132 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 7133 struct cfg80211_scan_request *req = rtwvif->scan_req; 7134 struct rtw89_pktofld_info *info; 7135 u8 band, probe_count = 0, i; 7136 7137 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 7138 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 7139 ch_info->bw = RTW89_SCAN_WIDTH; 7140 ch_info->tx_null = false; 7141 ch_info->pause_data = false; 7142 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 7143 7144 if (ssid_num) { 7145 band = rtw89_hw_to_nl80211_band(ch_info->ch_band); 7146 7147 list_for_each_entry(info, &scan_info->pkt_list[band], list) { 7148 if (info->channel_6ghz && 7149 ch_info->pri_ch != info->channel_6ghz) 7150 continue; 7151 7152 if (info->wildcard_6ghz) 7153 continue; 7154 7155 ch_info->pkt_id[probe_count++] = info->id; 7156 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 7157 break; 7158 } 7159 } 7160 7161 if (ch_info->ch_band == RTW89_BAND_6G) { 7162 if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) || 7163 !ch_info->is_psc) { 7164 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 7165 if (!req->duration_mandatory) 7166 ch_info->period -= RTW89_DWELL_TIME_6G; 7167 } 7168 } 7169 7170 for (i = probe_count; i < RTW89_SCANOFLD_MAX_SSID; i++) 7171 ch_info->pkt_id[i] = RTW89_SCANOFLD_PKT_NONE; 7172 7173 switch (chan_type) { 7174 case RTW89_CHAN_DFS: 7175 if (ch_info->ch_band != RTW89_BAND_6G) 7176 ch_info->period = 7177 max_t(u8, ch_info->period, RTW89_DFS_CHAN_TIME); 7178 ch_info->dwell_time = RTW89_DWELL_TIME; 7179 ch_info->pause_data = true; 7180 break; 7181 case RTW89_CHAN_ACTIVE: 7182 ch_info->pause_data = true; 7183 break; 7184 default: 7185 rtw89_warn(rtwdev, "Channel type out of bound\n"); 7186 break; 7187 } 7188 } 7189 7190 int rtw89_pno_scan_add_chan_list_ax(struct rtw89_dev *rtwdev, 7191 struct rtw89_vif_link *rtwvif_link) 7192 { 7193 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 7194 struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config; 7195 struct rtw89_mac_chinfo_ax *ch_info, *tmp; 7196 struct ieee80211_channel *channel; 7197 struct list_head chan_list; 7198 int list_len; 7199 enum rtw89_chan_type type; 7200 int ret = 0; 7201 u32 idx; 7202 7203 INIT_LIST_HEAD(&chan_list); 7204 for (idx = 0, list_len = 0; 7205 idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_AX; 7206 idx++, list_len++) { 7207 channel = nd_config->channels[idx]; 7208 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 7209 if (!ch_info) { 7210 ret = -ENOMEM; 7211 goto out; 7212 } 7213 7214 ch_info->period = RTW89_CHANNEL_TIME; 7215 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 7216 ch_info->central_ch = channel->hw_value; 7217 ch_info->pri_ch = channel->hw_value; 7218 ch_info->is_psc = cfg80211_channel_is_psc(channel); 7219 7220 if (channel->flags & 7221 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 7222 type = RTW89_CHAN_DFS; 7223 else 7224 type = RTW89_CHAN_ACTIVE; 7225 7226 rtw89_pno_scan_add_chan_ax(rtwdev, type, nd_config->n_match_sets, ch_info); 7227 list_add_tail(&ch_info->list, &chan_list); 7228 } 7229 ret = rtw89_fw_h2c_scan_list_offload_ax(rtwdev, list_len, &chan_list); 7230 7231 out: 7232 list_for_each_entry_safe(ch_info, tmp, &chan_list, 
list) { 7233 list_del(&ch_info->list); 7234 kfree(ch_info); 7235 } 7236 7237 return ret; 7238 } 7239 7240 static int rtw89_hw_scan_add_op_types_ax(struct rtw89_dev *rtwdev, 7241 enum rtw89_chan_type type, 7242 struct list_head *chan_list, 7243 struct cfg80211_scan_request *req, 7244 int *off_chan_time) 7245 { 7246 struct rtw89_mac_chinfo_ax *tmp; 7247 7248 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); 7249 if (!tmp) 7250 return -ENOMEM; 7251 7252 switch (type) { 7253 case RTW89_CHAN_OPERATE: 7254 tmp->period = req->duration_mandatory ? 7255 req->duration : RTW89_CHANNEL_TIME; 7256 *off_chan_time = 0; 7257 break; 7258 case RTW89_CHAN_EXTRA_OP: 7259 tmp->period = RTW89_CHANNEL_TIME_EXTRA_OP; 7260 /* still calc @off_chan_time for scan op */ 7261 *off_chan_time += tmp->period; 7262 break; 7263 default: 7264 kfree(tmp); 7265 return -EINVAL; 7266 } 7267 7268 rtw89_hw_scan_add_chan_ax(rtwdev, type, 0, tmp); 7269 list_add_tail(&tmp->list, chan_list); 7270 7271 return 0; 7272 } 7273 7274 int rtw89_hw_scan_prep_chan_list_ax(struct rtw89_dev *rtwdev, 7275 struct rtw89_vif_link *rtwvif_link) 7276 { 7277 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 7278 const struct rtw89_hw_scan_extra_op *ext = &scan_info->extra_op; 7279 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 7280 struct cfg80211_scan_request *req = rtwvif->scan_req; 7281 struct rtw89_mac_chinfo_ax *ch_info, *tmp; 7282 struct ieee80211_channel *channel; 7283 struct list_head chan_list; 7284 bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN; 7285 enum rtw89_chan_type type; 7286 int off_chan_time = 0; 7287 int ret; 7288 u32 idx; 7289 7290 INIT_LIST_HEAD(&chan_list); 7291 7292 for (idx = 0; idx < req->n_channels; idx++) { 7293 channel = req->channels[idx]; 7294 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 7295 if (!ch_info) { 7296 ret = -ENOMEM; 7297 goto out; 7298 } 7299 7300 if (req->duration) 7301 ch_info->period = req->duration; 7302 else if (channel->band == NL80211_BAND_6GHZ) 7303 ch_info->period = RTW89_CHANNEL_TIME_6G + 7304 RTW89_DWELL_TIME_6G; 7305 else if (rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT) 7306 ch_info->period = RTW89_P2P_CHAN_TIME; 7307 else 7308 ch_info->period = RTW89_CHANNEL_TIME; 7309 7310 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 7311 ch_info->central_ch = channel->hw_value; 7312 ch_info->pri_ch = channel->hw_value; 7313 ch_info->rand_seq_num = random_seq; 7314 ch_info->is_psc = cfg80211_channel_is_psc(channel); 7315 7316 if (channel->flags & 7317 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 7318 type = RTW89_CHAN_DFS; 7319 else 7320 type = RTW89_CHAN_ACTIVE; 7321 rtw89_hw_scan_add_chan_ax(rtwdev, type, req->n_ssids, ch_info); 7322 7323 if (!(scan_info->connected && 7324 off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME)) 7325 goto next; 7326 7327 ret = rtw89_hw_scan_add_op_types_ax(rtwdev, RTW89_CHAN_OPERATE, 7328 &chan_list, req, &off_chan_time); 7329 if (ret) { 7330 kfree(ch_info); 7331 goto out; 7332 } 7333 7334 if (!ext->set) 7335 goto next; 7336 7337 ret = rtw89_hw_scan_add_op_types_ax(rtwdev, RTW89_CHAN_EXTRA_OP, 7338 &chan_list, req, &off_chan_time); 7339 if (ret) { 7340 kfree(ch_info); 7341 goto out; 7342 } 7343 7344 next: 7345 list_add_tail(&ch_info->list, &chan_list); 7346 off_chan_time += ch_info->period; 7347 } 7348 7349 list_splice_tail(&chan_list, &scan_info->chan_list); 7350 return 0; 7351 7352 out: 7353 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 7354 list_del(&ch_info->list); 7355 kfree(ch_info); 7356 } 7357 7358 return ret; 7359 } 
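/*
 * Channel-list handling for AX chips is done in three steps:
 * rtw89_hw_scan_prep_chan_list_ax() builds the full per-scan list into
 * scan_info->chan_list, rtw89_hw_scan_add_chan_list_ax() then moves at most
 * RTW89_SCAN_LIST_LIMIT_AX entries into a temporary list per call and
 * downloads them with rtw89_fw_h2c_scan_list_offload_ax(), and
 * rtw89_hw_scan_free_chan_list_ax() releases whatever is still queued when
 * the scan is cleaned up.
 */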
7360 7361 void rtw89_hw_scan_free_chan_list_ax(struct rtw89_dev *rtwdev) 7362 { 7363 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 7364 struct rtw89_mac_chinfo_ax *ch_info, *tmp; 7365 7366 list_for_each_entry_safe(ch_info, tmp, &scan_info->chan_list, list) { 7367 list_del(&ch_info->list); 7368 kfree(ch_info); 7369 } 7370 } 7371 7372 int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev, 7373 struct rtw89_vif_link *rtwvif_link) 7374 { 7375 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 7376 struct rtw89_mac_chinfo_ax *ch_info, *tmp; 7377 unsigned int list_len = 0; 7378 struct list_head list; 7379 int ret; 7380 7381 INIT_LIST_HEAD(&list); 7382 7383 list_for_each_entry_safe(ch_info, tmp, &scan_info->chan_list, list) { 7384 list_move_tail(&ch_info->list, &list); 7385 7386 list_len++; 7387 if (list_len == RTW89_SCAN_LIST_LIMIT_AX) 7388 break; 7389 } 7390 7391 ret = rtw89_fw_h2c_scan_list_offload_ax(rtwdev, list_len, &list); 7392 7393 list_for_each_entry_safe(ch_info, tmp, &list, list) { 7394 list_del(&ch_info->list); 7395 kfree(ch_info); 7396 } 7397 7398 return ret; 7399 } 7400 7401 int rtw89_pno_scan_add_chan_list_be(struct rtw89_dev *rtwdev, 7402 struct rtw89_vif_link *rtwvif_link) 7403 { 7404 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 7405 struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config; 7406 struct rtw89_mac_chinfo_be *ch_info, *tmp; 7407 struct ieee80211_channel *channel; 7408 struct list_head chan_list; 7409 enum rtw89_chan_type type; 7410 int list_len, ret; 7411 u32 idx; 7412 7413 INIT_LIST_HEAD(&chan_list); 7414 7415 for (idx = 0, list_len = 0; 7416 idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_BE; 7417 idx++, list_len++) { 7418 channel = nd_config->channels[idx]; 7419 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 7420 if (!ch_info) { 7421 ret = -ENOMEM; 7422 goto out; 7423 } 7424 7425 ch_info->period = RTW89_CHANNEL_TIME; 7426 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 7427 ch_info->central_ch = channel->hw_value; 7428 ch_info->pri_ch = channel->hw_value; 7429 ch_info->is_psc = cfg80211_channel_is_psc(channel); 7430 7431 if (channel->flags & 7432 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 7433 type = RTW89_CHAN_DFS; 7434 else 7435 type = RTW89_CHAN_ACTIVE; 7436 7437 rtw89_pno_scan_add_chan_be(rtwdev, type, 7438 nd_config->n_match_sets, ch_info); 7439 list_add_tail(&ch_info->list, &chan_list); 7440 } 7441 7442 ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &chan_list, 7443 rtwvif_link); 7444 7445 out: 7446 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 7447 list_del(&ch_info->list); 7448 kfree(ch_info); 7449 } 7450 7451 return ret; 7452 } 7453 7454 int rtw89_hw_scan_prep_chan_list_be(struct rtw89_dev *rtwdev, 7455 struct rtw89_vif_link *rtwvif_link) 7456 { 7457 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 7458 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 7459 struct cfg80211_scan_request *req = rtwvif->scan_req; 7460 struct rtw89_mac_chinfo_be *ch_info, *tmp; 7461 struct ieee80211_channel *channel; 7462 struct list_head chan_list; 7463 enum rtw89_chan_type type; 7464 bool random_seq; 7465 int ret; 7466 u32 idx; 7467 7468 random_seq = !!(req->flags & NL80211_SCAN_FLAG_RANDOM_SN); 7469 INIT_LIST_HEAD(&chan_list); 7470 7471 for (idx = 0; idx < req->n_channels; idx++) { 7472 channel = req->channels[idx]; 7473 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 7474 if (!ch_info) { 7475 ret = -ENOMEM; 7476 goto out; 7477 } 7478 7479 if (req->duration) 7480 
ch_info->period = req->duration; 7481 else if (channel->band == NL80211_BAND_6GHZ) 7482 ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G; 7483 else if (rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT) 7484 ch_info->period = RTW89_P2P_CHAN_TIME; 7485 else 7486 ch_info->period = RTW89_CHANNEL_TIME; 7487 7488 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 7489 ch_info->central_ch = channel->hw_value; 7490 ch_info->pri_ch = channel->hw_value; 7491 ch_info->rand_seq_num = random_seq; 7492 ch_info->is_psc = cfg80211_channel_is_psc(channel); 7493 7494 if (channel->flags & (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 7495 type = RTW89_CHAN_DFS; 7496 else 7497 type = RTW89_CHAN_ACTIVE; 7498 rtw89_hw_scan_add_chan_be(rtwdev, type, req->n_ssids, ch_info); 7499 7500 list_add_tail(&ch_info->list, &chan_list); 7501 } 7502 7503 list_splice_tail(&chan_list, &scan_info->chan_list); 7504 return 0; 7505 7506 out: 7507 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 7508 list_del(&ch_info->list); 7509 kfree(ch_info); 7510 } 7511 7512 return ret; 7513 } 7514 7515 void rtw89_hw_scan_free_chan_list_be(struct rtw89_dev *rtwdev) 7516 { 7517 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 7518 struct rtw89_mac_chinfo_be *ch_info, *tmp; 7519 7520 list_for_each_entry_safe(ch_info, tmp, &scan_info->chan_list, list) { 7521 list_del(&ch_info->list); 7522 kfree(ch_info); 7523 } 7524 } 7525 7526 int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev, 7527 struct rtw89_vif_link *rtwvif_link) 7528 { 7529 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 7530 struct rtw89_mac_chinfo_be *ch_info, *tmp; 7531 unsigned int list_len = 0; 7532 struct list_head list; 7533 int ret; 7534 7535 INIT_LIST_HEAD(&list); 7536 7537 list_for_each_entry_safe(ch_info, tmp, &scan_info->chan_list, list) { 7538 list_move_tail(&ch_info->list, &list); 7539 7540 list_len++; 7541 if (list_len == RTW89_SCAN_LIST_LIMIT_BE) 7542 break; 7543 } 7544 7545 ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &list, 7546 rtwvif_link); 7547 7548 list_for_each_entry_safe(ch_info, tmp, &list, list) { 7549 list_del(&ch_info->list); 7550 kfree(ch_info); 7551 } 7552 7553 return ret; 7554 } 7555 7556 static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev, 7557 struct rtw89_vif_link *rtwvif_link, 7558 const u8 *mac_addr) 7559 { 7560 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 7561 int ret; 7562 7563 ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif_link, mac_addr); 7564 if (ret) { 7565 rtw89_err(rtwdev, "Update probe request failed\n"); 7566 goto out; 7567 } 7568 ret = mac->prep_chan_list(rtwdev, rtwvif_link); 7569 out: 7570 return ret; 7571 } 7572 7573 static void rtw89_hw_scan_update_link_beacon_noa(struct rtw89_dev *rtwdev, 7574 struct rtw89_vif_link *rtwvif_link, 7575 u16 tu) 7576 { 7577 struct ieee80211_p2p_noa_desc noa_desc = {}; 7578 u64 tsf; 7579 int ret; 7580 7581 ret = rtw89_mac_port_get_tsf(rtwdev, rtwvif_link, &tsf); 7582 if (ret) { 7583 rtw89_warn(rtwdev, "%s: failed to get tsf\n", __func__); 7584 return; 7585 } 7586 7587 noa_desc.start_time = cpu_to_le32(tsf); 7588 noa_desc.interval = cpu_to_le32(ieee80211_tu_to_usec(tu)); 7589 noa_desc.duration = cpu_to_le32(ieee80211_tu_to_usec(tu)); 7590 noa_desc.count = 1; 7591 7592 rtw89_p2p_noa_renew(rtwvif_link); 7593 rtw89_p2p_noa_append(rtwvif_link, &noa_desc); 7594 rtw89_chip_h2c_update_beacon(rtwdev, rtwvif_link); 7595 } 7596 7597 static void rtw89_hw_scan_update_beacon_noa(struct rtw89_dev *rtwdev, 7598 const 
struct cfg80211_scan_request *req) 7599 { 7600 const struct rtw89_entity_mgnt *mgnt = &rtwdev->hal.entity_mgnt; 7601 const struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 7602 const struct rtw89_chip_info *chip = rtwdev->chip; 7603 struct rtw89_mac_chinfo_ax *chinfo_ax; 7604 struct rtw89_mac_chinfo_be *chinfo_be; 7605 struct rtw89_vif_link *rtwvif_link; 7606 struct list_head *pos, *tmp; 7607 struct ieee80211_vif *vif; 7608 struct rtw89_vif *rtwvif; 7609 u16 tu = 0; 7610 7611 lockdep_assert_wiphy(rtwdev->hw->wiphy); 7612 7613 list_for_each_safe(pos, tmp, &scan_info->chan_list) { 7614 switch (chip->chip_gen) { 7615 case RTW89_CHIP_AX: 7616 chinfo_ax = list_entry(pos, typeof(*chinfo_ax), list); 7617 tu += chinfo_ax->period; 7618 break; 7619 case RTW89_CHIP_BE: 7620 chinfo_be = list_entry(pos, typeof(*chinfo_be), list); 7621 tu += chinfo_be->period; 7622 break; 7623 default: 7624 rtw89_warn(rtwdev, "%s: invalid chip gen %d\n", 7625 __func__, chip->chip_gen); 7626 return; 7627 } 7628 } 7629 7630 if (unlikely(tu == 0)) { 7631 rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN, 7632 "%s: cannot estimate needed TU\n", __func__); 7633 return; 7634 } 7635 7636 list_for_each_entry(rtwvif, &mgnt->active_list, mgnt_entry) { 7637 unsigned int link_id; 7638 7639 vif = rtwvif_to_vif(rtwvif); 7640 if (vif->type != NL80211_IFTYPE_AP || !vif->p2p) 7641 continue; 7642 7643 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) 7644 rtw89_hw_scan_update_link_beacon_noa(rtwdev, rtwvif_link, tu); 7645 } 7646 } 7647 7648 static void rtw89_hw_scan_set_extra_op_info(struct rtw89_dev *rtwdev, 7649 struct rtw89_vif *scan_rtwvif, 7650 const struct rtw89_chan *scan_op) 7651 { 7652 struct rtw89_entity_mgnt *mgnt = &rtwdev->hal.entity_mgnt; 7653 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 7654 struct rtw89_hw_scan_extra_op *ext = &scan_info->extra_op; 7655 struct rtw89_vif *tmp; 7656 7657 ext->set = false; 7658 if (!RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD_EXTRA_OP, &rtwdev->fw)) 7659 return; 7660 7661 list_for_each_entry(tmp, &mgnt->active_list, mgnt_entry) { 7662 const struct rtw89_chan *tmp_chan; 7663 struct rtw89_vif_link *tmp_link; 7664 7665 if (tmp == scan_rtwvif) 7666 continue; 7667 7668 tmp_link = rtw89_vif_get_link_inst(tmp, 0); 7669 if (unlikely(!tmp_link)) { 7670 rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN, 7671 "hw scan: no HW-0 link for extra op\n"); 7672 continue; 7673 } 7674 7675 tmp_chan = rtw89_chan_get(rtwdev, tmp_link->chanctx_idx); 7676 *ext = (struct rtw89_hw_scan_extra_op){ 7677 .set = true, 7678 .macid = tmp_link->mac_id, 7679 .chan = *tmp_chan, 7680 }; 7681 7682 rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN, 7683 "hw scan: extra op: center %d primary %d\n", 7684 ext->chan.channel, ext->chan.primary_channel); 7685 break; 7686 } 7687 } 7688 7689 int rtw89_hw_scan_start(struct rtw89_dev *rtwdev, 7690 struct rtw89_vif_link *rtwvif_link, 7691 struct ieee80211_scan_request *scan_req) 7692 { 7693 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 7694 enum rtw89_entity_mode mode = rtw89_get_entity_mode(rtwdev); 7695 struct cfg80211_scan_request *req = &scan_req->req; 7696 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 7697 rtwvif_link->chanctx_idx); 7698 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 7699 struct rtw89_chanctx_pause_parm pause_parm = { 7700 .rsn = RTW89_CHANCTX_PAUSE_REASON_HW_SCAN, 7701 .trigger = rtwvif_link, 7702 }; 7703 u32 rx_fltr = rtwdev->hal.rx_fltr; 7704 u8 mac_addr[ETH_ALEN]; 7705 u32 reg; 7706 int ret; 7707 7708 /* clone op and keep it during scan */ 7709 
	rtwdev->scan_info.op_chan = *chan;

	rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
		    "hw scan: op: center %d primary %d\n",
		    chan->channel, chan->primary_channel);

	rtw89_hw_scan_set_extra_op_info(rtwdev, rtwvif, chan);

	rtwdev->scan_info.connected = rtw89_is_any_vif_connected_or_connecting(rtwdev);
	rtwdev->scan_info.scanning_vif = rtwvif_link;
	rtwdev->scan_info.abort = false;
	rtwvif->scan_ies = &scan_req->ies;
	rtwvif->scan_req = req;

	if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR)
		get_random_mask_addr(mac_addr, req->mac_addr,
				     req->mac_addr_mask);
	else
		ether_addr_copy(mac_addr, rtwvif_link->mac_addr);

	ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif_link, mac_addr);
	if (ret) {
		rtw89_hw_scan_cleanup(rtwdev, rtwvif_link);
		return ret;
	}

	ieee80211_stop_queues(rtwdev->hw);
	rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif_link, false);

	rtw89_core_scan_start(rtwdev, rtwvif_link, mac_addr, true);

	rx_fltr &= ~B_AX_A_BCN_CHK_EN;
	rx_fltr &= ~B_AX_A_BC;
	rx_fltr &= ~B_AX_A_A1_MATCH;

	reg = rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, rtwvif_link->mac_idx);
	rtw89_write32_mask(rtwdev, reg, B_AX_RX_FLTR_CFG_MASK, rx_fltr);

	rtw89_chanctx_pause(rtwdev, &pause_parm);

	if (mode == RTW89_ENTITY_MODE_MCC)
		rtw89_hw_scan_update_beacon_noa(rtwdev, req);

	return 0;
}

struct rtw89_hw_scan_complete_cb_data {
	struct rtw89_vif_link *rtwvif_link;
	bool aborted;
};

static int rtw89_hw_scan_complete_cb(struct rtw89_dev *rtwdev, void *data)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	struct rtw89_hw_scan_complete_cb_data *cb_data = data;
	struct rtw89_vif_link *rtwvif_link = cb_data->rtwvif_link;
	struct cfg80211_scan_info info = {
		.aborted = cb_data->aborted,
	};
	u32 reg;

	if (!rtwvif_link)
		return -EINVAL;

	reg = rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, rtwvif_link->mac_idx);
	rtw89_write32_mask(rtwdev, reg, B_AX_RX_FLTR_CFG_MASK, rtwdev->hal.rx_fltr);

	rtw89_core_scan_complete(rtwdev, rtwvif_link, true);
	ieee80211_scan_completed(rtwdev->hw, &info);
	ieee80211_wake_queues(rtwdev->hw);
	rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif_link, true);
	rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, true);

	rtw89_hw_scan_cleanup(rtwdev, rtwvif_link);

	return 0;
}

void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev,
			    struct rtw89_vif_link *rtwvif_link,
			    bool aborted)
{
	struct rtw89_hw_scan_complete_cb_data cb_data = {
		.rtwvif_link = rtwvif_link,
		.aborted = aborted,
	};
	const struct rtw89_chanctx_cb_parm cb_parm = {
		.cb = rtw89_hw_scan_complete_cb,
		.data = &cb_data,
		.caller = __func__,
	};

	/* These steps need to run after setting the channel (for coex) and
	 * before proceeding with the entity mode (for MCC). So, pass them as
	 * a callback to keep the right sequence rather than doing them
	 * directly.
	 */
	rtw89_chanctx_proceed(rtwdev, &cb_parm);
}

void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev,
			 struct rtw89_vif_link *rtwvif_link)
{
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	int ret;

	scan_info->abort = true;

	ret = rtw89_hw_scan_offload(rtwdev, rtwvif_link, false);
	if (ret)
		rtw89_warn(rtwdev, "rtw89_hw_scan_offload failed ret %d\n", ret);

	/* Indicate ieee80211_scan_completed() before returning. This is safe
	 * because the scan abort command always waits for completion of
	 * RTW89_SCAN_END_SCAN_NOTIFY, so ieee80211_stop() can flush the scan
	 * work properly.
	 */
	rtw89_hw_scan_complete(rtwdev, rtwvif_link, true);
}

static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev)
{
	struct rtw89_vif_link *rtwvif_link;
	struct rtw89_vif *rtwvif;
	unsigned int link_id;

	rtw89_for_each_rtwvif(rtwdev, rtwvif) {
		rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
			/* A non-zero BSSID implies the link is connected or
			 * attempting to connect.
			 */
			if (!is_zero_ether_addr(rtwvif_link->bssid))
				return true;
		}
	}

	return false;
}

int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev,
			  struct rtw89_vif_link *rtwvif_link,
			  bool enable)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	struct rtw89_scan_option opt = {0};
	bool connected;
	int ret = 0;

	if (!rtwvif_link)
		return -EINVAL;

	connected = rtwdev->scan_info.connected;
	opt.enable = enable;
	opt.target_ch_mode = connected;
	if (enable) {
		ret = mac->add_chan_list(rtwdev, rtwvif_link);
		if (ret)
			goto out;
	}

	if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) {
		opt.operation = enable ? RTW89_SCAN_OP_START : RTW89_SCAN_OP_STOP;
		opt.scan_mode = RTW89_SCAN_MODE_SA;
		opt.band = rtwvif_link->mac_idx;
		opt.num_macc_role = 0;
		opt.mlo_mode = rtwdev->mlo_dbcc_mode;
		opt.num_opch = connected ? 1 : 0;
		opt.opch_end = connected ?
0 : RTW89_CHAN_INVALID; 7874 } 7875 7876 ret = mac->scan_offload(rtwdev, &opt, rtwvif_link, false); 7877 out: 7878 return ret; 7879 } 7880 7881 #define H2C_FW_CPU_EXCEPTION_LEN 4 7882 #define H2C_FW_CPU_EXCEPTION_TYPE_DEF 0x5566 7883 int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev) 7884 { 7885 struct sk_buff *skb; 7886 int ret; 7887 7888 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_FW_CPU_EXCEPTION_LEN); 7889 if (!skb) { 7890 rtw89_err(rtwdev, 7891 "failed to alloc skb for fw cpu exception\n"); 7892 return -ENOMEM; 7893 } 7894 7895 skb_put(skb, H2C_FW_CPU_EXCEPTION_LEN); 7896 RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(skb->data, 7897 H2C_FW_CPU_EXCEPTION_TYPE_DEF); 7898 7899 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7900 H2C_CAT_TEST, 7901 H2C_CL_FW_STATUS_TEST, 7902 H2C_FUNC_CPU_EXCEPTION, 0, 0, 7903 H2C_FW_CPU_EXCEPTION_LEN); 7904 7905 ret = rtw89_h2c_tx(rtwdev, skb, false); 7906 if (ret) { 7907 rtw89_err(rtwdev, "failed to send h2c\n"); 7908 goto fail; 7909 } 7910 7911 return 0; 7912 7913 fail: 7914 dev_kfree_skb_any(skb); 7915 return ret; 7916 } 7917 7918 #define H2C_PKT_DROP_LEN 24 7919 int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev, 7920 const struct rtw89_pkt_drop_params *params) 7921 { 7922 struct sk_buff *skb; 7923 int ret; 7924 7925 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_PKT_DROP_LEN); 7926 if (!skb) { 7927 rtw89_err(rtwdev, 7928 "failed to alloc skb for packet drop\n"); 7929 return -ENOMEM; 7930 } 7931 7932 switch (params->sel) { 7933 case RTW89_PKT_DROP_SEL_MACID_BE_ONCE: 7934 case RTW89_PKT_DROP_SEL_MACID_BK_ONCE: 7935 case RTW89_PKT_DROP_SEL_MACID_VI_ONCE: 7936 case RTW89_PKT_DROP_SEL_MACID_VO_ONCE: 7937 case RTW89_PKT_DROP_SEL_BAND_ONCE: 7938 break; 7939 default: 7940 rtw89_debug(rtwdev, RTW89_DBG_FW, 7941 "H2C of pkt drop might not fully support sel: %d yet\n", 7942 params->sel); 7943 break; 7944 } 7945 7946 skb_put(skb, H2C_PKT_DROP_LEN); 7947 RTW89_SET_FWCMD_PKT_DROP_SEL(skb->data, params->sel); 7948 RTW89_SET_FWCMD_PKT_DROP_MACID(skb->data, params->macid); 7949 RTW89_SET_FWCMD_PKT_DROP_BAND(skb->data, params->mac_band); 7950 RTW89_SET_FWCMD_PKT_DROP_PORT(skb->data, params->port); 7951 RTW89_SET_FWCMD_PKT_DROP_MBSSID(skb->data, params->mbssid); 7952 RTW89_SET_FWCMD_PKT_DROP_ROLE_A_INFO_TF_TRS(skb->data, params->tf_trs); 7953 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_0(skb->data, 7954 params->macid_band_sel[0]); 7955 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_1(skb->data, 7956 params->macid_band_sel[1]); 7957 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_2(skb->data, 7958 params->macid_band_sel[2]); 7959 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_3(skb->data, 7960 params->macid_band_sel[3]); 7961 7962 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7963 H2C_CAT_MAC, 7964 H2C_CL_MAC_FW_OFLD, 7965 H2C_FUNC_PKT_DROP, 0, 0, 7966 H2C_PKT_DROP_LEN); 7967 7968 ret = rtw89_h2c_tx(rtwdev, skb, false); 7969 if (ret) { 7970 rtw89_err(rtwdev, "failed to send h2c\n"); 7971 goto fail; 7972 } 7973 7974 return 0; 7975 7976 fail: 7977 dev_kfree_skb_any(skb); 7978 return ret; 7979 } 7980 7981 #define H2C_KEEP_ALIVE_LEN 4 7982 int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 7983 bool enable) 7984 { 7985 struct sk_buff *skb; 7986 u8 pkt_id = 0; 7987 int ret; 7988 7989 if (enable) { 7990 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 7991 RTW89_PKT_OFLD_TYPE_NULL_DATA, 7992 &pkt_id); 7993 if (ret) 7994 return -EPERM; 7995 } 7996 7997 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_KEEP_ALIVE_LEN); 7998 if (!skb) { 
		rtw89_err(rtwdev, "failed to alloc skb for keep alive\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_KEEP_ALIVE_LEN);

	RTW89_SET_KEEP_ALIVE_ENABLE(skb->data, enable);
	RTW89_SET_KEEP_ALIVE_PKT_NULL_ID(skb->data, pkt_id);
	RTW89_SET_KEEP_ALIVE_PERIOD(skb->data, 5);
	RTW89_SET_KEEP_ALIVE_MACID(skb->data, rtwvif_link->mac_id);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_WOW,
			      H2C_FUNC_KEEP_ALIVE, 0, 1,
			      H2C_KEEP_ALIVE_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;

fail:
	dev_kfree_skb_any(skb);

	return ret;
}

int rtw89_fw_h2c_arp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
			     bool enable)
{
	struct rtw89_h2c_arp_offload *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	u8 pkt_id = 0;
	int ret;

	if (enable) {
		ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
						   RTW89_PKT_OFLD_TYPE_ARP_RSP,
						   &pkt_id);
		if (ret)
			return ret;
	}

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for arp offload\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_arp_offload *)skb->data;

	h2c->w0 = le32_encode_bits(enable, RTW89_H2C_ARP_OFFLOAD_W0_ENABLE) |
		  le32_encode_bits(0, RTW89_H2C_ARP_OFFLOAD_W0_ACTION) |
		  le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_ARP_OFFLOAD_W0_MACID) |
		  le32_encode_bits(pkt_id, RTW89_H2C_ARP_OFFLOAD_W0_PKT_ID);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_WOW,
			      H2C_FUNC_ARP_OFLD, 0, 1,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;

fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_DISCONNECT_DETECT_LEN 8
int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev,
				   struct rtw89_vif_link *rtwvif_link, bool enable)
{
	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
	struct sk_buff *skb;
	u8 macid = rtwvif_link->mac_id;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DISCONNECT_DETECT_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for disconnect detect\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_DISCONNECT_DETECT_LEN);

	if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) {
		RTW89_SET_DISCONNECT_DETECT_ENABLE(skb->data, enable);
		RTW89_SET_DISCONNECT_DETECT_DISCONNECT(skb->data, !enable);
		RTW89_SET_DISCONNECT_DETECT_MAC_ID(skb->data, macid);
		RTW89_SET_DISCONNECT_DETECT_CHECK_PERIOD(skb->data, 100);
		RTW89_SET_DISCONNECT_DETECT_TRY_PKT_COUNT(skb->data, 5);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_WOW,
			      H2C_FUNC_DISCONNECT_DETECT, 0, 1,
			      H2C_DISCONNECT_DETECT_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;

fail:
	dev_kfree_skb_any(skb);

	return ret;
}

int rtw89_fw_h2c_cfg_pno(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
			 bool enable)
{
struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 8130 struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config; 8131 struct rtw89_h2c_cfg_nlo *h2c; 8132 u32 len = sizeof(*h2c); 8133 struct sk_buff *skb; 8134 int ret, i; 8135 8136 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8137 if (!skb) { 8138 rtw89_err(rtwdev, "failed to alloc skb for nlo\n"); 8139 return -ENOMEM; 8140 } 8141 8142 skb_put(skb, len); 8143 h2c = (struct rtw89_h2c_cfg_nlo *)skb->data; 8144 8145 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_NLO_W0_ENABLE) | 8146 le32_encode_bits(enable, RTW89_H2C_NLO_W0_IGNORE_CIPHER) | 8147 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_NLO_W0_MACID); 8148 8149 if (enable) { 8150 h2c->nlo_cnt = nd_config->n_match_sets; 8151 for (i = 0 ; i < nd_config->n_match_sets; i++) { 8152 h2c->ssid_len[i] = nd_config->match_sets[i].ssid.ssid_len; 8153 memcpy(h2c->ssid[i], nd_config->match_sets[i].ssid.ssid, 8154 nd_config->match_sets[i].ssid.ssid_len); 8155 } 8156 } 8157 8158 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8159 H2C_CAT_MAC, 8160 H2C_CL_MAC_WOW, 8161 H2C_FUNC_NLO, 0, 1, 8162 len); 8163 8164 ret = rtw89_h2c_tx(rtwdev, skb, false); 8165 if (ret) { 8166 rtw89_err(rtwdev, "failed to send h2c\n"); 8167 goto fail; 8168 } 8169 8170 return 0; 8171 8172 fail: 8173 dev_kfree_skb_any(skb); 8174 return ret; 8175 } 8176 8177 int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 8178 bool enable) 8179 { 8180 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 8181 struct rtw89_h2c_wow_global *h2c; 8182 u8 macid = rtwvif_link->mac_id; 8183 u32 len = sizeof(*h2c); 8184 struct sk_buff *skb; 8185 int ret; 8186 8187 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8188 if (!skb) { 8189 rtw89_err(rtwdev, "failed to alloc skb for wow global\n"); 8190 return -ENOMEM; 8191 } 8192 8193 skb_put(skb, len); 8194 h2c = (struct rtw89_h2c_wow_global *)skb->data; 8195 8196 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_WOW_GLOBAL_W0_ENABLE) | 8197 le32_encode_bits(macid, RTW89_H2C_WOW_GLOBAL_W0_MAC_ID) | 8198 le32_encode_bits(rtw_wow->ptk_alg, 8199 RTW89_H2C_WOW_GLOBAL_W0_PAIRWISE_SEC_ALGO) | 8200 le32_encode_bits(rtw_wow->gtk_alg, 8201 RTW89_H2C_WOW_GLOBAL_W0_GROUP_SEC_ALGO); 8202 h2c->key_info = rtw_wow->key_info; 8203 8204 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8205 H2C_CAT_MAC, 8206 H2C_CL_MAC_WOW, 8207 H2C_FUNC_WOW_GLOBAL, 0, 1, 8208 len); 8209 8210 ret = rtw89_h2c_tx(rtwdev, skb, false); 8211 if (ret) { 8212 rtw89_err(rtwdev, "failed to send h2c\n"); 8213 goto fail; 8214 } 8215 8216 return 0; 8217 8218 fail: 8219 dev_kfree_skb_any(skb); 8220 8221 return ret; 8222 } 8223 8224 #define H2C_WAKEUP_CTRL_LEN 4 8225 int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev, 8226 struct rtw89_vif_link *rtwvif_link, 8227 bool enable) 8228 { 8229 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 8230 struct sk_buff *skb; 8231 u8 macid = rtwvif_link->mac_id; 8232 int ret; 8233 8234 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WAKEUP_CTRL_LEN); 8235 if (!skb) { 8236 rtw89_err(rtwdev, "failed to alloc skb for wakeup ctrl\n"); 8237 return -ENOMEM; 8238 } 8239 8240 skb_put(skb, H2C_WAKEUP_CTRL_LEN); 8241 8242 if (rtw_wow->pattern_cnt) 8243 RTW89_SET_WOW_WAKEUP_CTRL_PATTERN_MATCH_ENABLE(skb->data, enable); 8244 if (test_bit(RTW89_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags)) 8245 RTW89_SET_WOW_WAKEUP_CTRL_MAGIC_ENABLE(skb->data, enable); 8246 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) 8247 RTW89_SET_WOW_WAKEUP_CTRL_DEAUTH_ENABLE(skb->data, enable); 
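
	/* The wakeup sources above are armed only when the corresponding
	 * WoWLAN trigger (pattern match, magic packet, disconnect) was
	 * requested; the MAC ID below is always filled in.
	 */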
	RTW89_SET_WOW_WAKEUP_CTRL_MAC_ID(skb->data, macid);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_WOW,
			      H2C_FUNC_WAKEUP_CTRL, 0, 1,
			      H2C_WAKEUP_CTRL_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;

fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_WOW_CAM_UPD_LEN 24
int rtw89_fw_wow_cam_update(struct rtw89_dev *rtwdev,
			    struct rtw89_wow_cam_info *cam_info)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_CAM_UPD_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for wow cam update\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_WOW_CAM_UPD_LEN);

	RTW89_SET_WOW_CAM_UPD_R_W(skb->data, cam_info->r_w);
	RTW89_SET_WOW_CAM_UPD_IDX(skb->data, cam_info->idx);
	if (cam_info->valid) {
		RTW89_SET_WOW_CAM_UPD_WKFM1(skb->data, cam_info->mask[0]);
		RTW89_SET_WOW_CAM_UPD_WKFM2(skb->data, cam_info->mask[1]);
		RTW89_SET_WOW_CAM_UPD_WKFM3(skb->data, cam_info->mask[2]);
		RTW89_SET_WOW_CAM_UPD_WKFM4(skb->data, cam_info->mask[3]);
		RTW89_SET_WOW_CAM_UPD_CRC(skb->data, cam_info->crc);
		RTW89_SET_WOW_CAM_UPD_NEGATIVE_PATTERN_MATCH(skb->data,
							     cam_info->negative_pattern_match);
		RTW89_SET_WOW_CAM_UPD_SKIP_MAC_HDR(skb->data,
						   cam_info->skip_mac_hdr);
		RTW89_SET_WOW_CAM_UPD_UC(skb->data, cam_info->uc);
		RTW89_SET_WOW_CAM_UPD_MC(skb->data, cam_info->mc);
		RTW89_SET_WOW_CAM_UPD_BC(skb->data, cam_info->bc);
	}
	RTW89_SET_WOW_CAM_UPD_VALID(skb->data, cam_info->valid);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_WOW,
			      H2C_FUNC_WOW_CAM_UPD, 0, 1,
			      H2C_WOW_CAM_UPD_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

int rtw89_fw_h2c_wow_gtk_ofld(struct rtw89_dev *rtwdev,
			      struct rtw89_vif_link *rtwvif_link,
			      bool enable)
{
	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
	struct rtw89_wow_gtk_info *gtk_info = &rtw_wow->gtk_info;
	struct rtw89_h2c_wow_gtk_ofld *h2c;
	u8 macid = rtwvif_link->mac_id;
	u32 len = sizeof(*h2c);
	u8 pkt_id_sa_query = 0;
	struct sk_buff *skb;
	u8 pkt_id_eapol = 0;
	int ret;

	if (!rtw_wow->gtk_alg)
		return 0;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for gtk ofld\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_wow_gtk_ofld *)skb->data;

	if (!enable)
		goto hdr;

	ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
					   RTW89_PKT_OFLD_TYPE_EAPOL_KEY,
					   &pkt_id_eapol);
	if (ret)
		goto fail;

	if (gtk_info->igtk_keyid) {
		ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
						   RTW89_PKT_OFLD_TYPE_SA_QUERY,
						   &pkt_id_sa_query);
		if (ret)
			goto fail;
	}

	/* not support TKIP yet */
	h2c->w0 = le32_encode_bits(enable, RTW89_H2C_WOW_GTK_OFLD_W0_EN) |
		  le32_encode_bits(0, RTW89_H2C_WOW_GTK_OFLD_W0_TKIP_EN) |
		  le32_encode_bits(gtk_info->igtk_keyid ? 1 : 0,
				   RTW89_H2C_WOW_GTK_OFLD_W0_IEEE80211W_EN) |
		  le32_encode_bits(macid, RTW89_H2C_WOW_GTK_OFLD_W0_MAC_ID) |
		  le32_encode_bits(pkt_id_eapol, RTW89_H2C_WOW_GTK_OFLD_W0_GTK_RSP_ID);
	h2c->w1 = le32_encode_bits(gtk_info->igtk_keyid ? pkt_id_sa_query : 0,
				   RTW89_H2C_WOW_GTK_OFLD_W1_PMF_SA_QUERY_ID) |
		  le32_encode_bits(rtw_wow->akm, RTW89_H2C_WOW_GTK_OFLD_W1_ALGO_AKM_SUIT);
	h2c->gtk_info = rtw_wow->gtk_info;

hdr:
	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_WOW,
			      H2C_FUNC_GTK_OFLD, 0, 1,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}
	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

int rtw89_fw_h2c_fwips(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
		       bool enable)
{
	struct rtw89_wait_info *wait = &rtwdev->mac.ps_wait;
	struct rtw89_h2c_fwips *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw ips\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_fwips *)skb->data;

	h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_FW_IPS_W0_MACID) |
		  le32_encode_bits(enable, RTW89_H2C_FW_IPS_W0_ENABLE);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_PS,
			      H2C_FUNC_IPS_CFG, 0, 1,
			      len);

	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_PS_WAIT_COND_IPS_CFG);
}

int rtw89_fw_h2c_wow_request_aoac(struct rtw89_dev *rtwdev)
{
	struct rtw89_wait_info *wait = &rtwdev->wow.wait;
	struct rtw89_h2c_wow_aoac *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for aoac\n");
		return -ENOMEM;
	}

	skb_put(skb, len);

	/* This H2C only notifies the firmware to generate the AOAC report
	 * C2H; it carries no parameters.
	 */
	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_WOW,
			      H2C_FUNC_AOAC_REPORT_REQ, 1, 0,
			      len);

	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_WOW_WAIT_COND_AOAC);
}

/* Return < 0 if a failure happens while waiting for the condition.
 * Return 0 when waiting for the condition succeeds.
 * Return > 0 if the wait is considered unreachable due to driver/FW design,
 * where 1 means SER is in progress.
8456 */ 8457 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb, 8458 struct rtw89_wait_info *wait, unsigned int cond) 8459 { 8460 int ret; 8461 8462 ret = rtw89_h2c_tx(rtwdev, skb, false); 8463 if (ret) { 8464 rtw89_err(rtwdev, "failed to send h2c\n"); 8465 dev_kfree_skb_any(skb); 8466 return -EBUSY; 8467 } 8468 8469 if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags)) 8470 return 1; 8471 8472 return rtw89_wait_for_cond(wait, cond); 8473 } 8474 8475 #define H2C_ADD_MCC_LEN 16 8476 int rtw89_fw_h2c_add_mcc(struct rtw89_dev *rtwdev, 8477 const struct rtw89_fw_mcc_add_req *p) 8478 { 8479 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 8480 struct sk_buff *skb; 8481 unsigned int cond; 8482 8483 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ADD_MCC_LEN); 8484 if (!skb) { 8485 rtw89_err(rtwdev, 8486 "failed to alloc skb for add mcc\n"); 8487 return -ENOMEM; 8488 } 8489 8490 skb_put(skb, H2C_ADD_MCC_LEN); 8491 RTW89_SET_FWCMD_ADD_MCC_MACID(skb->data, p->macid); 8492 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG0(skb->data, p->central_ch_seg0); 8493 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG1(skb->data, p->central_ch_seg1); 8494 RTW89_SET_FWCMD_ADD_MCC_PRIMARY_CH(skb->data, p->primary_ch); 8495 RTW89_SET_FWCMD_ADD_MCC_BANDWIDTH(skb->data, p->bandwidth); 8496 RTW89_SET_FWCMD_ADD_MCC_GROUP(skb->data, p->group); 8497 RTW89_SET_FWCMD_ADD_MCC_C2H_RPT(skb->data, p->c2h_rpt); 8498 RTW89_SET_FWCMD_ADD_MCC_DIS_TX_NULL(skb->data, p->dis_tx_null); 8499 RTW89_SET_FWCMD_ADD_MCC_DIS_SW_RETRY(skb->data, p->dis_sw_retry); 8500 RTW89_SET_FWCMD_ADD_MCC_IN_CURR_CH(skb->data, p->in_curr_ch); 8501 RTW89_SET_FWCMD_ADD_MCC_SW_RETRY_COUNT(skb->data, p->sw_retry_count); 8502 RTW89_SET_FWCMD_ADD_MCC_TX_NULL_EARLY(skb->data, p->tx_null_early); 8503 RTW89_SET_FWCMD_ADD_MCC_BTC_IN_2G(skb->data, p->btc_in_2g); 8504 RTW89_SET_FWCMD_ADD_MCC_PTA_EN(skb->data, p->pta_en); 8505 RTW89_SET_FWCMD_ADD_MCC_RFK_BY_PASS(skb->data, p->rfk_by_pass); 8506 RTW89_SET_FWCMD_ADD_MCC_CH_BAND_TYPE(skb->data, p->ch_band_type); 8507 RTW89_SET_FWCMD_ADD_MCC_DURATION(skb->data, p->duration); 8508 RTW89_SET_FWCMD_ADD_MCC_COURTESY_EN(skb->data, p->courtesy_en); 8509 RTW89_SET_FWCMD_ADD_MCC_COURTESY_NUM(skb->data, p->courtesy_num); 8510 RTW89_SET_FWCMD_ADD_MCC_COURTESY_TARGET(skb->data, p->courtesy_target); 8511 8512 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8513 H2C_CAT_MAC, 8514 H2C_CL_MCC, 8515 H2C_FUNC_ADD_MCC, 0, 0, 8516 H2C_ADD_MCC_LEN); 8517 8518 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_ADD_MCC); 8519 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 8520 } 8521 8522 #define H2C_START_MCC_LEN 12 8523 int rtw89_fw_h2c_start_mcc(struct rtw89_dev *rtwdev, 8524 const struct rtw89_fw_mcc_start_req *p) 8525 { 8526 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 8527 struct sk_buff *skb; 8528 unsigned int cond; 8529 8530 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_START_MCC_LEN); 8531 if (!skb) { 8532 rtw89_err(rtwdev, 8533 "failed to alloc skb for start mcc\n"); 8534 return -ENOMEM; 8535 } 8536 8537 skb_put(skb, H2C_START_MCC_LEN); 8538 RTW89_SET_FWCMD_START_MCC_GROUP(skb->data, p->group); 8539 RTW89_SET_FWCMD_START_MCC_BTC_IN_GROUP(skb->data, p->btc_in_group); 8540 RTW89_SET_FWCMD_START_MCC_OLD_GROUP_ACTION(skb->data, p->old_group_action); 8541 RTW89_SET_FWCMD_START_MCC_OLD_GROUP(skb->data, p->old_group); 8542 RTW89_SET_FWCMD_START_MCC_NOTIFY_CNT(skb->data, p->notify_cnt); 8543 RTW89_SET_FWCMD_START_MCC_NOTIFY_RXDBG_EN(skb->data, p->notify_rxdbg_en); 8544 RTW89_SET_FWCMD_START_MCC_MACID(skb->data, 
p->macid); 8545 RTW89_SET_FWCMD_START_MCC_TSF_LOW(skb->data, p->tsf_low); 8546 RTW89_SET_FWCMD_START_MCC_TSF_HIGH(skb->data, p->tsf_high); 8547 8548 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8549 H2C_CAT_MAC, 8550 H2C_CL_MCC, 8551 H2C_FUNC_START_MCC, 0, 0, 8552 H2C_START_MCC_LEN); 8553 8554 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_START_MCC); 8555 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 8556 } 8557 8558 #define H2C_STOP_MCC_LEN 4 8559 int rtw89_fw_h2c_stop_mcc(struct rtw89_dev *rtwdev, u8 group, u8 macid, 8560 bool prev_groups) 8561 { 8562 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 8563 struct sk_buff *skb; 8564 unsigned int cond; 8565 8566 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_STOP_MCC_LEN); 8567 if (!skb) { 8568 rtw89_err(rtwdev, 8569 "failed to alloc skb for stop mcc\n"); 8570 return -ENOMEM; 8571 } 8572 8573 skb_put(skb, H2C_STOP_MCC_LEN); 8574 RTW89_SET_FWCMD_STOP_MCC_MACID(skb->data, macid); 8575 RTW89_SET_FWCMD_STOP_MCC_GROUP(skb->data, group); 8576 RTW89_SET_FWCMD_STOP_MCC_PREV_GROUPS(skb->data, prev_groups); 8577 8578 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8579 H2C_CAT_MAC, 8580 H2C_CL_MCC, 8581 H2C_FUNC_STOP_MCC, 0, 0, 8582 H2C_STOP_MCC_LEN); 8583 8584 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_STOP_MCC); 8585 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 8586 } 8587 8588 #define H2C_DEL_MCC_GROUP_LEN 4 8589 int rtw89_fw_h2c_del_mcc_group(struct rtw89_dev *rtwdev, u8 group, 8590 bool prev_groups) 8591 { 8592 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 8593 struct sk_buff *skb; 8594 unsigned int cond; 8595 8596 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DEL_MCC_GROUP_LEN); 8597 if (!skb) { 8598 rtw89_err(rtwdev, 8599 "failed to alloc skb for del mcc group\n"); 8600 return -ENOMEM; 8601 } 8602 8603 skb_put(skb, H2C_DEL_MCC_GROUP_LEN); 8604 RTW89_SET_FWCMD_DEL_MCC_GROUP_GROUP(skb->data, group); 8605 RTW89_SET_FWCMD_DEL_MCC_GROUP_PREV_GROUPS(skb->data, prev_groups); 8606 8607 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8608 H2C_CAT_MAC, 8609 H2C_CL_MCC, 8610 H2C_FUNC_DEL_MCC_GROUP, 0, 0, 8611 H2C_DEL_MCC_GROUP_LEN); 8612 8613 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_DEL_MCC_GROUP); 8614 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 8615 } 8616 8617 #define H2C_RESET_MCC_GROUP_LEN 4 8618 int rtw89_fw_h2c_reset_mcc_group(struct rtw89_dev *rtwdev, u8 group) 8619 { 8620 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 8621 struct sk_buff *skb; 8622 unsigned int cond; 8623 8624 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RESET_MCC_GROUP_LEN); 8625 if (!skb) { 8626 rtw89_err(rtwdev, 8627 "failed to alloc skb for reset mcc group\n"); 8628 return -ENOMEM; 8629 } 8630 8631 skb_put(skb, H2C_RESET_MCC_GROUP_LEN); 8632 RTW89_SET_FWCMD_RESET_MCC_GROUP_GROUP(skb->data, group); 8633 8634 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8635 H2C_CAT_MAC, 8636 H2C_CL_MCC, 8637 H2C_FUNC_RESET_MCC_GROUP, 0, 0, 8638 H2C_RESET_MCC_GROUP_LEN); 8639 8640 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_RESET_MCC_GROUP); 8641 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 8642 } 8643 8644 #define H2C_MCC_REQ_TSF_LEN 4 8645 int rtw89_fw_h2c_mcc_req_tsf(struct rtw89_dev *rtwdev, 8646 const struct rtw89_fw_mcc_tsf_req *req, 8647 struct rtw89_mac_mcc_tsf_rpt *rpt) 8648 { 8649 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 8650 struct rtw89_mac_mcc_tsf_rpt *tmp; 8651 struct sk_buff *skb; 8652 unsigned int cond; 8653 int ret; 8654 8655 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, 
H2C_MCC_REQ_TSF_LEN); 8656 if (!skb) { 8657 rtw89_err(rtwdev, 8658 "failed to alloc skb for mcc req tsf\n"); 8659 return -ENOMEM; 8660 } 8661 8662 skb_put(skb, H2C_MCC_REQ_TSF_LEN); 8663 RTW89_SET_FWCMD_MCC_REQ_TSF_GROUP(skb->data, req->group); 8664 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_X(skb->data, req->macid_x); 8665 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_Y(skb->data, req->macid_y); 8666 8667 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8668 H2C_CAT_MAC, 8669 H2C_CL_MCC, 8670 H2C_FUNC_MCC_REQ_TSF, 0, 0, 8671 H2C_MCC_REQ_TSF_LEN); 8672 8673 cond = RTW89_MCC_WAIT_COND(req->group, H2C_FUNC_MCC_REQ_TSF); 8674 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 8675 if (ret) 8676 return ret; 8677 8678 tmp = (struct rtw89_mac_mcc_tsf_rpt *)wait->data.buf; 8679 *rpt = *tmp; 8680 8681 return 0; 8682 } 8683 8684 #define H2C_MCC_MACID_BITMAP_DSC_LEN 4 8685 int rtw89_fw_h2c_mcc_macid_bitmap(struct rtw89_dev *rtwdev, u8 group, u8 macid, 8686 u8 *bitmap) 8687 { 8688 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 8689 struct sk_buff *skb; 8690 unsigned int cond; 8691 u8 map_len; 8692 u8 h2c_len; 8693 8694 BUILD_BUG_ON(RTW89_MAX_MAC_ID_NUM % 8); 8695 map_len = RTW89_MAX_MAC_ID_NUM / 8; 8696 h2c_len = H2C_MCC_MACID_BITMAP_DSC_LEN + map_len; 8697 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, h2c_len); 8698 if (!skb) { 8699 rtw89_err(rtwdev, 8700 "failed to alloc skb for mcc macid bitmap\n"); 8701 return -ENOMEM; 8702 } 8703 8704 skb_put(skb, h2c_len); 8705 RTW89_SET_FWCMD_MCC_MACID_BITMAP_GROUP(skb->data, group); 8706 RTW89_SET_FWCMD_MCC_MACID_BITMAP_MACID(skb->data, macid); 8707 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP_LENGTH(skb->data, map_len); 8708 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP(skb->data, bitmap, map_len); 8709 8710 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8711 H2C_CAT_MAC, 8712 H2C_CL_MCC, 8713 H2C_FUNC_MCC_MACID_BITMAP, 0, 0, 8714 h2c_len); 8715 8716 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_MACID_BITMAP); 8717 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 8718 } 8719 8720 #define H2C_MCC_SYNC_LEN 4 8721 int rtw89_fw_h2c_mcc_sync(struct rtw89_dev *rtwdev, u8 group, u8 source, 8722 u8 target, u8 offset) 8723 { 8724 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 8725 struct sk_buff *skb; 8726 unsigned int cond; 8727 8728 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SYNC_LEN); 8729 if (!skb) { 8730 rtw89_err(rtwdev, 8731 "failed to alloc skb for mcc sync\n"); 8732 return -ENOMEM; 8733 } 8734 8735 skb_put(skb, H2C_MCC_SYNC_LEN); 8736 RTW89_SET_FWCMD_MCC_SYNC_GROUP(skb->data, group); 8737 RTW89_SET_FWCMD_MCC_SYNC_MACID_SOURCE(skb->data, source); 8738 RTW89_SET_FWCMD_MCC_SYNC_MACID_TARGET(skb->data, target); 8739 RTW89_SET_FWCMD_MCC_SYNC_SYNC_OFFSET(skb->data, offset); 8740 8741 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8742 H2C_CAT_MAC, 8743 H2C_CL_MCC, 8744 H2C_FUNC_MCC_SYNC, 0, 0, 8745 H2C_MCC_SYNC_LEN); 8746 8747 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_SYNC); 8748 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 8749 } 8750 8751 #define H2C_MCC_SET_DURATION_LEN 20 8752 int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev, 8753 const struct rtw89_fw_mcc_duration *p) 8754 { 8755 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 8756 struct sk_buff *skb; 8757 unsigned int cond; 8758 8759 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SET_DURATION_LEN); 8760 if (!skb) { 8761 rtw89_err(rtwdev, 8762 "failed to alloc skb for mcc set duration\n"); 8763 return -ENOMEM; 8764 } 8765 8766 skb_put(skb, H2C_MCC_SET_DURATION_LEN); 
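	/* Fill the duration update: the MCC group and the MAC IDs involved,
	 * the 64-bit start TSF split into low/high words, and the new
	 * duration for each of the two slots.
	 */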
8767 RTW89_SET_FWCMD_MCC_SET_DURATION_GROUP(skb->data, p->group); 8768 RTW89_SET_FWCMD_MCC_SET_DURATION_BTC_IN_GROUP(skb->data, p->btc_in_group); 8769 RTW89_SET_FWCMD_MCC_SET_DURATION_START_MACID(skb->data, p->start_macid); 8770 RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_X(skb->data, p->macid_x); 8771 RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_Y(skb->data, p->macid_y); 8772 RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_LOW(skb->data, 8773 p->start_tsf_low); 8774 RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_HIGH(skb->data, 8775 p->start_tsf_high); 8776 RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_X(skb->data, p->duration_x); 8777 RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_Y(skb->data, p->duration_y); 8778 8779 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8780 H2C_CAT_MAC, 8781 H2C_CL_MCC, 8782 H2C_FUNC_MCC_SET_DURATION, 0, 0, 8783 H2C_MCC_SET_DURATION_LEN); 8784 8785 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_MCC_SET_DURATION); 8786 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 8787 } 8788 8789 static 8790 u32 rtw89_fw_h2c_mrc_add_slot(struct rtw89_dev *rtwdev, 8791 const struct rtw89_fw_mrc_add_slot_arg *slot_arg, 8792 struct rtw89_h2c_mrc_add_slot *slot_h2c) 8793 { 8794 bool fill_h2c = !!slot_h2c; 8795 unsigned int i; 8796 8797 if (!fill_h2c) 8798 goto calc_len; 8799 8800 slot_h2c->w0 = le32_encode_bits(slot_arg->duration, 8801 RTW89_H2C_MRC_ADD_SLOT_W0_DURATION) | 8802 le32_encode_bits(slot_arg->courtesy_en, 8803 RTW89_H2C_MRC_ADD_SLOT_W0_COURTESY_EN) | 8804 le32_encode_bits(slot_arg->role_num, 8805 RTW89_H2C_MRC_ADD_SLOT_W0_ROLE_NUM); 8806 slot_h2c->w1 = le32_encode_bits(slot_arg->courtesy_period, 8807 RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_PERIOD) | 8808 le32_encode_bits(slot_arg->courtesy_target, 8809 RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_TARGET); 8810 8811 for (i = 0; i < slot_arg->role_num; i++) { 8812 slot_h2c->roles[i].w0 = 8813 le32_encode_bits(slot_arg->roles[i].macid, 8814 RTW89_H2C_MRC_ADD_ROLE_W0_MACID) | 8815 le32_encode_bits(slot_arg->roles[i].role_type, 8816 RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_TYPE) | 8817 le32_encode_bits(slot_arg->roles[i].is_master, 8818 RTW89_H2C_MRC_ADD_ROLE_W0_IS_MASTER) | 8819 le32_encode_bits(slot_arg->roles[i].en_tx_null, 8820 RTW89_H2C_MRC_ADD_ROLE_W0_TX_NULL_EN) | 8821 le32_encode_bits(false, 8822 RTW89_H2C_MRC_ADD_ROLE_W0_IS_ALT_ROLE) | 8823 le32_encode_bits(false, 8824 RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_ALT_EN); 8825 slot_h2c->roles[i].w1 = 8826 le32_encode_bits(slot_arg->roles[i].central_ch, 8827 RTW89_H2C_MRC_ADD_ROLE_W1_CENTRAL_CH_SEG) | 8828 le32_encode_bits(slot_arg->roles[i].primary_ch, 8829 RTW89_H2C_MRC_ADD_ROLE_W1_PRI_CH) | 8830 le32_encode_bits(slot_arg->roles[i].bw, 8831 RTW89_H2C_MRC_ADD_ROLE_W1_BW) | 8832 le32_encode_bits(slot_arg->roles[i].band, 8833 RTW89_H2C_MRC_ADD_ROLE_W1_CH_BAND_TYPE) | 8834 le32_encode_bits(slot_arg->roles[i].null_early, 8835 RTW89_H2C_MRC_ADD_ROLE_W1_NULL_EARLY) | 8836 le32_encode_bits(false, 8837 RTW89_H2C_MRC_ADD_ROLE_W1_RFK_BY_PASS) | 8838 le32_encode_bits(true, 8839 RTW89_H2C_MRC_ADD_ROLE_W1_CAN_BTC); 8840 slot_h2c->roles[i].macid_main_bitmap = 8841 cpu_to_le32(slot_arg->roles[i].macid_main_bitmap); 8842 slot_h2c->roles[i].macid_paired_bitmap = 8843 cpu_to_le32(slot_arg->roles[i].macid_paired_bitmap); 8844 } 8845 8846 calc_len: 8847 return struct_size(slot_h2c, roles, slot_arg->role_num); 8848 } 8849 8850 int rtw89_fw_h2c_mrc_add(struct rtw89_dev *rtwdev, 8851 const struct rtw89_fw_mrc_add_arg *arg) 8852 { 8853 struct rtw89_h2c_mrc_add *h2c_head; 8854 struct sk_buff *skb; 8855 unsigned int i; 8856 void *tmp; 8857 u32 
len; 8858 int ret; 8859 8860 len = sizeof(*h2c_head); 8861 for (i = 0; i < arg->slot_num; i++) 8862 len += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], NULL); 8863 8864 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8865 if (!skb) { 8866 rtw89_err(rtwdev, "failed to alloc skb for mrc add\n"); 8867 return -ENOMEM; 8868 } 8869 8870 skb_put(skb, len); 8871 tmp = skb->data; 8872 8873 h2c_head = tmp; 8874 h2c_head->w0 = le32_encode_bits(arg->sch_idx, 8875 RTW89_H2C_MRC_ADD_W0_SCH_IDX) | 8876 le32_encode_bits(arg->sch_type, 8877 RTW89_H2C_MRC_ADD_W0_SCH_TYPE) | 8878 le32_encode_bits(arg->slot_num, 8879 RTW89_H2C_MRC_ADD_W0_SLOT_NUM) | 8880 le32_encode_bits(arg->btc_in_sch, 8881 RTW89_H2C_MRC_ADD_W0_BTC_IN_SCH); 8882 8883 tmp += sizeof(*h2c_head); 8884 for (i = 0; i < arg->slot_num; i++) 8885 tmp += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], tmp); 8886 8887 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8888 H2C_CAT_MAC, 8889 H2C_CL_MRC, 8890 H2C_FUNC_ADD_MRC, 0, 0, 8891 len); 8892 8893 ret = rtw89_h2c_tx(rtwdev, skb, false); 8894 if (ret) { 8895 rtw89_err(rtwdev, "failed to send h2c\n"); 8896 dev_kfree_skb_any(skb); 8897 return -EBUSY; 8898 } 8899 8900 return 0; 8901 } 8902 8903 int rtw89_fw_h2c_mrc_start(struct rtw89_dev *rtwdev, 8904 const struct rtw89_fw_mrc_start_arg *arg) 8905 { 8906 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 8907 struct rtw89_h2c_mrc_start *h2c; 8908 u32 len = sizeof(*h2c); 8909 struct sk_buff *skb; 8910 unsigned int cond; 8911 8912 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8913 if (!skb) { 8914 rtw89_err(rtwdev, "failed to alloc skb for mrc start\n"); 8915 return -ENOMEM; 8916 } 8917 8918 skb_put(skb, len); 8919 h2c = (struct rtw89_h2c_mrc_start *)skb->data; 8920 8921 h2c->w0 = le32_encode_bits(arg->sch_idx, 8922 RTW89_H2C_MRC_START_W0_SCH_IDX) | 8923 le32_encode_bits(arg->old_sch_idx, 8924 RTW89_H2C_MRC_START_W0_OLD_SCH_IDX) | 8925 le32_encode_bits(arg->action, 8926 RTW89_H2C_MRC_START_W0_ACTION); 8927 8928 h2c->start_tsf_high = cpu_to_le32(arg->start_tsf >> 32); 8929 h2c->start_tsf_low = cpu_to_le32(arg->start_tsf); 8930 8931 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8932 H2C_CAT_MAC, 8933 H2C_CL_MRC, 8934 H2C_FUNC_START_MRC, 0, 0, 8935 len); 8936 8937 cond = RTW89_MRC_WAIT_COND(arg->sch_idx, H2C_FUNC_START_MRC); 8938 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 8939 } 8940 8941 int rtw89_fw_h2c_mrc_del(struct rtw89_dev *rtwdev, u8 sch_idx, u8 slot_idx) 8942 { 8943 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 8944 struct rtw89_h2c_mrc_del *h2c; 8945 u32 len = sizeof(*h2c); 8946 struct sk_buff *skb; 8947 unsigned int cond; 8948 8949 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8950 if (!skb) { 8951 rtw89_err(rtwdev, "failed to alloc skb for mrc del\n"); 8952 return -ENOMEM; 8953 } 8954 8955 skb_put(skb, len); 8956 h2c = (struct rtw89_h2c_mrc_del *)skb->data; 8957 8958 h2c->w0 = le32_encode_bits(sch_idx, RTW89_H2C_MRC_DEL_W0_SCH_IDX) | 8959 le32_encode_bits(slot_idx, RTW89_H2C_MRC_DEL_W0_STOP_SLOT_IDX); 8960 8961 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8962 H2C_CAT_MAC, 8963 H2C_CL_MRC, 8964 H2C_FUNC_DEL_MRC, 0, 0, 8965 len); 8966 8967 cond = RTW89_MRC_WAIT_COND(sch_idx, H2C_FUNC_DEL_MRC); 8968 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 8969 } 8970 8971 int rtw89_fw_h2c_mrc_req_tsf(struct rtw89_dev *rtwdev, 8972 const struct rtw89_fw_mrc_req_tsf_arg *arg, 8973 struct rtw89_mac_mrc_tsf_rpt *rpt) 8974 { 8975 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 8976 struct 
rtw89_h2c_mrc_req_tsf *h2c; 8977 struct rtw89_mac_mrc_tsf_rpt *tmp; 8978 struct sk_buff *skb; 8979 unsigned int i; 8980 u32 len; 8981 int ret; 8982 8983 len = struct_size(h2c, infos, arg->num); 8984 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8985 if (!skb) { 8986 rtw89_err(rtwdev, "failed to alloc skb for mrc req tsf\n"); 8987 return -ENOMEM; 8988 } 8989 8990 skb_put(skb, len); 8991 h2c = (struct rtw89_h2c_mrc_req_tsf *)skb->data; 8992 8993 h2c->req_tsf_num = arg->num; 8994 for (i = 0; i < arg->num; i++) 8995 h2c->infos[i] = 8996 u8_encode_bits(arg->infos[i].band, 8997 RTW89_H2C_MRC_REQ_TSF_INFO_BAND) | 8998 u8_encode_bits(arg->infos[i].port, 8999 RTW89_H2C_MRC_REQ_TSF_INFO_PORT); 9000 9001 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 9002 H2C_CAT_MAC, 9003 H2C_CL_MRC, 9004 H2C_FUNC_MRC_REQ_TSF, 0, 0, 9005 len); 9006 9007 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_MRC_WAIT_COND_REQ_TSF); 9008 if (ret) 9009 return ret; 9010 9011 tmp = (struct rtw89_mac_mrc_tsf_rpt *)wait->data.buf; 9012 *rpt = *tmp; 9013 9014 return 0; 9015 } 9016 9017 int rtw89_fw_h2c_mrc_upd_bitmap(struct rtw89_dev *rtwdev, 9018 const struct rtw89_fw_mrc_upd_bitmap_arg *arg) 9019 { 9020 struct rtw89_h2c_mrc_upd_bitmap *h2c; 9021 u32 len = sizeof(*h2c); 9022 struct sk_buff *skb; 9023 int ret; 9024 9025 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 9026 if (!skb) { 9027 rtw89_err(rtwdev, "failed to alloc skb for mrc upd bitmap\n"); 9028 return -ENOMEM; 9029 } 9030 9031 skb_put(skb, len); 9032 h2c = (struct rtw89_h2c_mrc_upd_bitmap *)skb->data; 9033 9034 h2c->w0 = le32_encode_bits(arg->sch_idx, 9035 RTW89_H2C_MRC_UPD_BITMAP_W0_SCH_IDX) | 9036 le32_encode_bits(arg->action, 9037 RTW89_H2C_MRC_UPD_BITMAP_W0_ACTION) | 9038 le32_encode_bits(arg->macid, 9039 RTW89_H2C_MRC_UPD_BITMAP_W0_MACID); 9040 h2c->w1 = le32_encode_bits(arg->client_macid, 9041 RTW89_H2C_MRC_UPD_BITMAP_W1_CLIENT_MACID); 9042 9043 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 9044 H2C_CAT_MAC, 9045 H2C_CL_MRC, 9046 H2C_FUNC_MRC_UPD_BITMAP, 0, 0, 9047 len); 9048 9049 ret = rtw89_h2c_tx(rtwdev, skb, false); 9050 if (ret) { 9051 rtw89_err(rtwdev, "failed to send h2c\n"); 9052 dev_kfree_skb_any(skb); 9053 return -EBUSY; 9054 } 9055 9056 return 0; 9057 } 9058 9059 int rtw89_fw_h2c_mrc_sync(struct rtw89_dev *rtwdev, 9060 const struct rtw89_fw_mrc_sync_arg *arg) 9061 { 9062 struct rtw89_h2c_mrc_sync *h2c; 9063 u32 len = sizeof(*h2c); 9064 struct sk_buff *skb; 9065 int ret; 9066 9067 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 9068 if (!skb) { 9069 rtw89_err(rtwdev, "failed to alloc skb for mrc sync\n"); 9070 return -ENOMEM; 9071 } 9072 9073 skb_put(skb, len); 9074 h2c = (struct rtw89_h2c_mrc_sync *)skb->data; 9075 9076 h2c->w0 = le32_encode_bits(true, RTW89_H2C_MRC_SYNC_W0_SYNC_EN) | 9077 le32_encode_bits(arg->src.port, 9078 RTW89_H2C_MRC_SYNC_W0_SRC_PORT) | 9079 le32_encode_bits(arg->src.band, 9080 RTW89_H2C_MRC_SYNC_W0_SRC_BAND) | 9081 le32_encode_bits(arg->dest.port, 9082 RTW89_H2C_MRC_SYNC_W0_DEST_PORT) | 9083 le32_encode_bits(arg->dest.band, 9084 RTW89_H2C_MRC_SYNC_W0_DEST_BAND); 9085 h2c->w1 = le32_encode_bits(arg->offset, RTW89_H2C_MRC_SYNC_W1_OFFSET); 9086 9087 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 9088 H2C_CAT_MAC, 9089 H2C_CL_MRC, 9090 H2C_FUNC_MRC_SYNC, 0, 0, 9091 len); 9092 9093 ret = rtw89_h2c_tx(rtwdev, skb, false); 9094 if (ret) { 9095 rtw89_err(rtwdev, "failed to send h2c\n"); 9096 dev_kfree_skb_any(skb); 9097 return -EBUSY; 9098 } 9099 9100 return 0; 9101 } 9102 9103 int 
rtw89_fw_h2c_mrc_upd_duration(struct rtw89_dev *rtwdev, 9104 const struct rtw89_fw_mrc_upd_duration_arg *arg) 9105 { 9106 struct rtw89_h2c_mrc_upd_duration *h2c; 9107 struct sk_buff *skb; 9108 unsigned int i; 9109 u32 len; 9110 int ret; 9111 9112 len = struct_size(h2c, slots, arg->slot_num); 9113 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 9114 if (!skb) { 9115 rtw89_err(rtwdev, "failed to alloc skb for mrc upd duration\n"); 9116 return -ENOMEM; 9117 } 9118 9119 skb_put(skb, len); 9120 h2c = (struct rtw89_h2c_mrc_upd_duration *)skb->data; 9121 9122 h2c->w0 = le32_encode_bits(arg->sch_idx, 9123 RTW89_H2C_MRC_UPD_DURATION_W0_SCH_IDX) | 9124 le32_encode_bits(arg->slot_num, 9125 RTW89_H2C_MRC_UPD_DURATION_W0_SLOT_NUM) | 9126 le32_encode_bits(false, 9127 RTW89_H2C_MRC_UPD_DURATION_W0_BTC_IN_SCH); 9128 9129 h2c->start_tsf_high = cpu_to_le32(arg->start_tsf >> 32); 9130 h2c->start_tsf_low = cpu_to_le32(arg->start_tsf); 9131 9132 for (i = 0; i < arg->slot_num; i++) { 9133 h2c->slots[i] = 9134 le32_encode_bits(arg->slots[i].slot_idx, 9135 RTW89_H2C_MRC_UPD_DURATION_SLOT_SLOT_IDX) | 9136 le32_encode_bits(arg->slots[i].duration, 9137 RTW89_H2C_MRC_UPD_DURATION_SLOT_DURATION); 9138 } 9139 9140 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 9141 H2C_CAT_MAC, 9142 H2C_CL_MRC, 9143 H2C_FUNC_MRC_UPD_DURATION, 0, 0, 9144 len); 9145 9146 ret = rtw89_h2c_tx(rtwdev, skb, false); 9147 if (ret) { 9148 rtw89_err(rtwdev, "failed to send h2c\n"); 9149 dev_kfree_skb_any(skb); 9150 return -EBUSY; 9151 } 9152 9153 return 0; 9154 } 9155 9156 static int rtw89_fw_h2c_ap_info(struct rtw89_dev *rtwdev, bool en) 9157 { 9158 struct rtw89_h2c_ap_info *h2c; 9159 u32 len = sizeof(*h2c); 9160 struct sk_buff *skb; 9161 int ret; 9162 9163 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 9164 if (!skb) { 9165 rtw89_err(rtwdev, "failed to alloc skb for ap info\n"); 9166 return -ENOMEM; 9167 } 9168 9169 skb_put(skb, len); 9170 h2c = (struct rtw89_h2c_ap_info *)skb->data; 9171 9172 h2c->w0 = le32_encode_bits(en, RTW89_H2C_AP_INFO_W0_PWR_INT_EN); 9173 9174 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 9175 H2C_CAT_MAC, 9176 H2C_CL_AP, 9177 H2C_FUNC_AP_INFO, 0, 0, 9178 len); 9179 9180 ret = rtw89_h2c_tx(rtwdev, skb, false); 9181 if (ret) { 9182 rtw89_err(rtwdev, "failed to send h2c\n"); 9183 dev_kfree_skb_any(skb); 9184 return -EBUSY; 9185 } 9186 9187 return 0; 9188 } 9189 9190 int rtw89_fw_h2c_ap_info_refcount(struct rtw89_dev *rtwdev, bool en) 9191 { 9192 int ret; 9193 9194 if (en) { 9195 if (refcount_inc_not_zero(&rtwdev->refcount_ap_info)) 9196 return 0; 9197 } else { 9198 if (!refcount_dec_and_test(&rtwdev->refcount_ap_info)) 9199 return 0; 9200 } 9201 9202 ret = rtw89_fw_h2c_ap_info(rtwdev, en); 9203 if (ret) { 9204 if (!test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags)) 9205 return ret; 9206 9207 /* During recovery, neither driver nor stack has full error 9208 * handling, so show a warning, but return 0 with refcount 9209 * increased normally. It can avoid underflow when calling 9210 * with @en == false later. 
9211 */ 9212 rtw89_warn(rtwdev, "h2c ap_info failed during SER\n"); 9213 } 9214 9215 if (en) 9216 refcount_set(&rtwdev->refcount_ap_info, 1); 9217 9218 return 0; 9219 } 9220 9221 int rtw89_fw_h2c_mlo_link_cfg(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 9222 bool enable) 9223 { 9224 struct rtw89_wait_info *wait = &rtwdev->mlo.wait; 9225 struct rtw89_h2c_mlo_link_cfg *h2c; 9226 u8 mac_id = rtwvif_link->mac_id; 9227 u32 len = sizeof(*h2c); 9228 struct sk_buff *skb; 9229 unsigned int cond; 9230 int ret; 9231 9232 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 9233 if (!skb) { 9234 rtw89_err(rtwdev, "failed to alloc skb for mlo link cfg\n"); 9235 return -ENOMEM; 9236 } 9237 9238 skb_put(skb, len); 9239 h2c = (struct rtw89_h2c_mlo_link_cfg *)skb->data; 9240 9241 h2c->w0 = le32_encode_bits(mac_id, RTW89_H2C_MLO_LINK_CFG_W0_MACID) | 9242 le32_encode_bits(enable, RTW89_H2C_MLO_LINK_CFG_W0_OPTION); 9243 9244 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 9245 H2C_CAT_MAC, 9246 H2C_CL_MLO, 9247 H2C_FUNC_MLO_LINK_CFG, 0, 0, 9248 len); 9249 9250 cond = RTW89_MLO_WAIT_COND(mac_id, H2C_FUNC_MLO_LINK_CFG); 9251 9252 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 9253 if (ret) { 9254 rtw89_err(rtwdev, "mlo link cfg (%s link id %u) failed: %d\n", 9255 str_enable_disable(enable), rtwvif_link->link_id, ret); 9256 return ret; 9257 } 9258 9259 return 0; 9260 } 9261 9262 static bool __fw_txpwr_entry_zero_ext(const void *ext_ptr, u8 ext_len) 9263 { 9264 static const u8 zeros[U8_MAX] = {}; 9265 9266 return memcmp(ext_ptr, zeros, ext_len) == 0; 9267 } 9268 9269 #define __fw_txpwr_entry_acceptable(e, cursor, ent_sz) \ 9270 ({ \ 9271 u8 __var_sz = sizeof(*(e)); \ 9272 bool __accept; \ 9273 if (__var_sz >= (ent_sz)) \ 9274 __accept = true; \ 9275 else \ 9276 __accept = __fw_txpwr_entry_zero_ext((cursor) + __var_sz,\ 9277 (ent_sz) - __var_sz);\ 9278 __accept; \ 9279 }) 9280 9281 static bool 9282 fw_txpwr_byrate_entry_valid(const struct rtw89_fw_txpwr_byrate_entry *e, 9283 const void *cursor, 9284 const struct rtw89_txpwr_conf *conf) 9285 { 9286 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 9287 return false; 9288 9289 if (e->band >= RTW89_BAND_NUM || e->bw >= RTW89_BYR_BW_NUM) 9290 return false; 9291 9292 switch (e->rs) { 9293 case RTW89_RS_CCK: 9294 if (e->shf + e->len > RTW89_RATE_CCK_NUM) 9295 return false; 9296 break; 9297 case RTW89_RS_OFDM: 9298 if (e->shf + e->len > RTW89_RATE_OFDM_NUM) 9299 return false; 9300 break; 9301 case RTW89_RS_MCS: 9302 if (e->shf + e->len > __RTW89_RATE_MCS_NUM || 9303 e->nss >= RTW89_NSS_NUM || 9304 e->ofdma >= RTW89_OFDMA_NUM) 9305 return false; 9306 break; 9307 case RTW89_RS_HEDCM: 9308 if (e->shf + e->len > RTW89_RATE_HEDCM_NUM || 9309 e->nss >= RTW89_NSS_HEDCM_NUM || 9310 e->ofdma >= RTW89_OFDMA_NUM) 9311 return false; 9312 break; 9313 case RTW89_RS_OFFSET: 9314 if (e->shf + e->len > __RTW89_RATE_OFFSET_NUM) 9315 return false; 9316 break; 9317 default: 9318 return false; 9319 } 9320 9321 return true; 9322 } 9323 9324 static 9325 void rtw89_fw_load_txpwr_byrate(struct rtw89_dev *rtwdev, 9326 const struct rtw89_txpwr_table *tbl) 9327 { 9328 const struct rtw89_txpwr_conf *conf = tbl->data; 9329 struct rtw89_fw_txpwr_byrate_entry entry = {}; 9330 struct rtw89_txpwr_byrate *byr_head; 9331 struct rtw89_rate_desc desc = {}; 9332 const void *cursor; 9333 u32 data; 9334 s8 *byr; 9335 int i; 9336 9337 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 9338 if (!fw_txpwr_byrate_entry_valid(&entry, cursor, conf)) 9339 continue; 9340 9341 
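		/* Each valid entry packs its per-rate power values into a
		 * 32-bit little-endian word; unpack them one byte per rate
		 * index below.
		 */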
byr_head = &rtwdev->byr[entry.band][entry.bw]; 9342 data = le32_to_cpu(entry.data); 9343 desc.ofdma = entry.ofdma; 9344 desc.nss = entry.nss; 9345 desc.rs = entry.rs; 9346 9347 for (i = 0; i < entry.len; i++, data >>= 8) { 9348 desc.idx = entry.shf + i; 9349 byr = rtw89_phy_raw_byr_seek(rtwdev, byr_head, &desc); 9350 *byr = data & 0xff; 9351 } 9352 } 9353 } 9354 9355 static bool 9356 fw_txpwr_lmt_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_2ghz_entry *e, 9357 const void *cursor, 9358 const struct rtw89_txpwr_conf *conf) 9359 { 9360 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 9361 return false; 9362 9363 if (e->bw >= RTW89_2G_BW_NUM) 9364 return false; 9365 if (e->nt >= RTW89_NTX_NUM) 9366 return false; 9367 if (e->rs >= RTW89_RS_LMT_NUM) 9368 return false; 9369 if (e->bf >= RTW89_BF_NUM) 9370 return false; 9371 if (e->regd >= RTW89_REGD_NUM) 9372 return false; 9373 if (e->ch_idx >= RTW89_2G_CH_NUM) 9374 return false; 9375 9376 return true; 9377 } 9378 9379 static 9380 void rtw89_fw_load_txpwr_lmt_2ghz(struct rtw89_txpwr_lmt_2ghz_data *data) 9381 { 9382 const struct rtw89_txpwr_conf *conf = &data->conf; 9383 struct rtw89_fw_txpwr_lmt_2ghz_entry entry = {}; 9384 const void *cursor; 9385 9386 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 9387 if (!fw_txpwr_lmt_2ghz_entry_valid(&entry, cursor, conf)) 9388 continue; 9389 9390 data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd] 9391 [entry.ch_idx] = entry.v; 9392 } 9393 } 9394 9395 static bool 9396 fw_txpwr_lmt_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_5ghz_entry *e, 9397 const void *cursor, 9398 const struct rtw89_txpwr_conf *conf) 9399 { 9400 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 9401 return false; 9402 9403 if (e->bw >= RTW89_5G_BW_NUM) 9404 return false; 9405 if (e->nt >= RTW89_NTX_NUM) 9406 return false; 9407 if (e->rs >= RTW89_RS_LMT_NUM) 9408 return false; 9409 if (e->bf >= RTW89_BF_NUM) 9410 return false; 9411 if (e->regd >= RTW89_REGD_NUM) 9412 return false; 9413 if (e->ch_idx >= RTW89_5G_CH_NUM) 9414 return false; 9415 9416 return true; 9417 } 9418 9419 static 9420 void rtw89_fw_load_txpwr_lmt_5ghz(struct rtw89_txpwr_lmt_5ghz_data *data) 9421 { 9422 const struct rtw89_txpwr_conf *conf = &data->conf; 9423 struct rtw89_fw_txpwr_lmt_5ghz_entry entry = {}; 9424 const void *cursor; 9425 9426 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 9427 if (!fw_txpwr_lmt_5ghz_entry_valid(&entry, cursor, conf)) 9428 continue; 9429 9430 data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd] 9431 [entry.ch_idx] = entry.v; 9432 } 9433 } 9434 9435 static bool 9436 fw_txpwr_lmt_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_6ghz_entry *e, 9437 const void *cursor, 9438 const struct rtw89_txpwr_conf *conf) 9439 { 9440 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 9441 return false; 9442 9443 if (e->bw >= RTW89_6G_BW_NUM) 9444 return false; 9445 if (e->nt >= RTW89_NTX_NUM) 9446 return false; 9447 if (e->rs >= RTW89_RS_LMT_NUM) 9448 return false; 9449 if (e->bf >= RTW89_BF_NUM) 9450 return false; 9451 if (e->regd >= RTW89_REGD_NUM) 9452 return false; 9453 if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER) 9454 return false; 9455 if (e->ch_idx >= RTW89_6G_CH_NUM) 9456 return false; 9457 9458 return true; 9459 } 9460 9461 static 9462 void rtw89_fw_load_txpwr_lmt_6ghz(struct rtw89_txpwr_lmt_6ghz_data *data) 9463 { 9464 const struct rtw89_txpwr_conf *conf = &data->conf; 9465 struct rtw89_fw_txpwr_lmt_6ghz_entry entry = {}; 9466 const void *cursor; 9467 9468 
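	/* Entries rejected by fw_txpwr_lmt_6ghz_entry_valid() are skipped
	 * below, so a malformed firmware entry cannot index outside the
	 * 6 GHz limit table.
	 */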
	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_lmt_6ghz_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd]
		       [entry.reg_6ghz_power][entry.ch_idx] = entry.v;
	}
}

static bool
fw_txpwr_lmt_ru_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_2ghz_entry *e,
				 const void *cursor,
				 const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->ru >= RTW89_RU_NUM)
		return false;
	if (e->nt >= RTW89_NTX_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;
	if (e->ch_idx >= RTW89_2G_CH_NUM)
		return false;

	return true;
}

static
void rtw89_fw_load_txpwr_lmt_ru_2ghz(struct rtw89_txpwr_lmt_ru_2ghz_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_txpwr_lmt_ru_2ghz_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_lmt_ru_2ghz_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v;
	}
}

static bool
fw_txpwr_lmt_ru_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_5ghz_entry *e,
				 const void *cursor,
				 const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->ru >= RTW89_RU_NUM)
		return false;
	if (e->nt >= RTW89_NTX_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;
	if (e->ch_idx >= RTW89_5G_CH_NUM)
		return false;

	return true;
}

static
void rtw89_fw_load_txpwr_lmt_ru_5ghz(struct rtw89_txpwr_lmt_ru_5ghz_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_txpwr_lmt_ru_5ghz_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_lmt_ru_5ghz_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v;
	}
}

static bool
fw_txpwr_lmt_ru_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_6ghz_entry *e,
				 const void *cursor,
				 const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->ru >= RTW89_RU_NUM)
		return false;
	if (e->nt >= RTW89_NTX_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;
	if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER)
		return false;
	if (e->ch_idx >= RTW89_6G_CH_NUM)
		return false;

	return true;
}

static
void rtw89_fw_load_txpwr_lmt_ru_6ghz(struct rtw89_txpwr_lmt_ru_6ghz_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_txpwr_lmt_ru_6ghz_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_lmt_ru_6ghz_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.ru][entry.nt][entry.regd][entry.reg_6ghz_power]
		       [entry.ch_idx] = entry.v;
	}
}

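/* TX shape limits are indexed by band, TX-shape rate section and regulatory
 * domain only; unlike the power limit tables above, there is no per-channel
 * dimension.
 */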
static bool
fw_tx_shape_lmt_entry_valid(const struct rtw89_fw_tx_shape_lmt_entry *e,
			    const void *cursor,
			    const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->band >= RTW89_BAND_NUM)
		return false;
	if (e->tx_shape_rs >= RTW89_RS_TX_SHAPE_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;

	return true;
}

static
void rtw89_fw_load_tx_shape_lmt(struct rtw89_tx_shape_lmt_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_tx_shape_lmt_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_tx_shape_lmt_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.band][entry.tx_shape_rs][entry.regd] = entry.v;
	}
}

static bool
fw_tx_shape_lmt_ru_entry_valid(const struct rtw89_fw_tx_shape_lmt_ru_entry *e,
			       const void *cursor,
			       const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->band >= RTW89_BAND_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;

	return true;
}

static
void rtw89_fw_load_tx_shape_lmt_ru(struct rtw89_tx_shape_lmt_ru_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_tx_shape_lmt_ru_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_tx_shape_lmt_ru_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.band][entry.regd] = entry.v;
	}
}

static bool rtw89_fw_has_da_txpwr_table(struct rtw89_dev *rtwdev,
					const struct rtw89_rfe_parms *parms)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;

	if (chip->support_bands & BIT(NL80211_BAND_2GHZ) &&
	    !(parms->rule_da_2ghz.lmt && parms->rule_da_2ghz.lmt_ru))
		return false;

	if (chip->support_bands & BIT(NL80211_BAND_5GHZ) &&
	    !(parms->rule_da_5ghz.lmt && parms->rule_da_5ghz.lmt_ru))
		return false;

	if (chip->support_bands & BIT(NL80211_BAND_6GHZ) &&
	    !(parms->rule_da_6ghz.lmt && parms->rule_da_6ghz.lmt_ru))
		return false;

	return true;
}

const struct rtw89_rfe_parms *
rtw89_load_rfe_data_from_fw(struct rtw89_dev *rtwdev,
			    const struct rtw89_rfe_parms *init)
{
	struct rtw89_rfe_data *rfe_data = rtwdev->rfe_data;
	struct rtw89_rfe_parms *parms;

	if (!rfe_data)
		return init;

	parms = &rfe_data->rfe_parms;
	if (init)
		*parms = *init;

	if (rtw89_txpwr_conf_valid(&rfe_data->byrate.conf)) {
		rfe_data->byrate.tbl.data = &rfe_data->byrate.conf;
		rfe_data->byrate.tbl.size = 0; /* don't care here */
		rfe_data->byrate.tbl.load = rtw89_fw_load_txpwr_byrate;
		parms->byr_tbl = &rfe_data->byrate.tbl;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_2ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_2ghz(&rfe_data->lmt_2ghz);
		parms->rule_2ghz.lmt = &rfe_data->lmt_2ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_5ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_5ghz(&rfe_data->lmt_5ghz);
		parms->rule_5ghz.lmt = &rfe_data->lmt_5ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_6ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_6ghz(&rfe_data->lmt_6ghz);
		parms->rule_6ghz.lmt = &rfe_data->lmt_6ghz.v;
	}

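	/* the "da" limit tables below are loaded only when present;
	 * parms->has_da is set at the end only if every supported band
	 * provides both its lmt and lmt_ru "da" tables
	 */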
	if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_2ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_2ghz(&rfe_data->da_lmt_2ghz);
		parms->rule_da_2ghz.lmt = &rfe_data->da_lmt_2ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_5ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_5ghz(&rfe_data->da_lmt_5ghz);
		parms->rule_da_5ghz.lmt = &rfe_data->da_lmt_5ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_6ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_6ghz(&rfe_data->da_lmt_6ghz);
		parms->rule_da_6ghz.lmt = &rfe_data->da_lmt_6ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_2ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_ru_2ghz(&rfe_data->lmt_ru_2ghz);
		parms->rule_2ghz.lmt_ru = &rfe_data->lmt_ru_2ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_5ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_ru_5ghz(&rfe_data->lmt_ru_5ghz);
		parms->rule_5ghz.lmt_ru = &rfe_data->lmt_ru_5ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_6ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_ru_6ghz(&rfe_data->lmt_ru_6ghz);
		parms->rule_6ghz.lmt_ru = &rfe_data->lmt_ru_6ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_ru_2ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_ru_2ghz(&rfe_data->da_lmt_ru_2ghz);
		parms->rule_da_2ghz.lmt_ru = &rfe_data->da_lmt_ru_2ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_ru_5ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_ru_5ghz(&rfe_data->da_lmt_ru_5ghz);
		parms->rule_da_5ghz.lmt_ru = &rfe_data->da_lmt_ru_5ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_ru_6ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_ru_6ghz(&rfe_data->da_lmt_ru_6ghz);
		parms->rule_da_6ghz.lmt_ru = &rfe_data->da_lmt_ru_6ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt.conf)) {
		rtw89_fw_load_tx_shape_lmt(&rfe_data->tx_shape_lmt);
		parms->tx_shape.lmt = &rfe_data->tx_shape_lmt.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt_ru.conf)) {
		rtw89_fw_load_tx_shape_lmt_ru(&rfe_data->tx_shape_lmt_ru);
		parms->tx_shape.lmt_ru = &rfe_data->tx_shape_lmt_ru.v;
	}

	parms->has_da = rtw89_fw_has_da_txpwr_table(rtwdev, parms);

	return parms;
}